提交 87e28b28 authored 作者: Brandon T. Willard's avatar Brandon T. Willard

Rename the deprecated outdim parameter to ndim and remove the outdim alias in theano.tensor.basic.flatten

上级 e2dd22c4
...@@ -4114,7 +4114,7 @@ def test_make_column_matrix_broadcastable(): ...@@ -4114,7 +4114,7 @@ def test_make_column_matrix_broadcastable():
assert (f(np.zeros((3, 1))) + np.ones(2) == np.ones((3, 2))).all() assert (f(np.zeros((3, 1))) + np.ones(2) == np.ones((3, 2))).all()
def test_flatten_outdimNone(): def test_flatten_ndim_default():
a = dmatrix() a = dmatrix()
c = flatten(a) c = flatten(a)
f = inplace_func([a], c) f = inplace_func([a], c)
...@@ -4178,9 +4178,6 @@ def test_flatten_ndim2_of_3(): ...@@ -4178,9 +4178,6 @@ def test_flatten_ndim2_of_3():
flatten_2 = partial(flatten, ndim=2) flatten_2 = partial(flatten, ndim=2)
utt.verify_grad(flatten_2, [a_val]) utt.verify_grad(flatten_2, [a_val])
# test outdim parameter name
flatten_2 = partial(flatten, outdim=2)
utt.verify_grad(flatten_2, [a_val])
def test_flatten_broadcastable(): def test_flatten_broadcastable():
...@@ -4219,7 +4216,7 @@ def test_flatten_ndim_invalid(): ...@@ -4219,7 +4216,7 @@ def test_flatten_ndim_invalid():
def test_is_flat(): def test_is_flat():
# tests is_flat method for constant and symbolic variables, # tests is_flat method for constant and symbolic variables,
# as well as reshaped constant and symbolic variables on the # as well as reshaped constant and symbolic variables on the
# given outdim # given `ndim`
# Constant variable # Constant variable
assert tt.is_flat(tt.as_tensor_variable(np.zeros(10))) assert tt.is_flat(tt.as_tensor_variable(np.zeros(10)))
...@@ -6251,10 +6248,10 @@ class TestInferShape(utt.InferShapeTester): ...@@ -6251,10 +6248,10 @@ class TestInferShape(utt.InferShapeTester):
# Flatten # Flatten
atens3 = tensor3() atens3 = tensor3()
atens3_val = rand(4, 5, 3) atens3_val = rand(4, 5, 3)
for outdim in (3, 2, 1): for ndim in (3, 2, 1):
self._compile_and_check( self._compile_and_check(
[atens3], [atens3],
[flatten(atens3, outdim)], [flatten(atens3, ndim)],
[atens3_val], [atens3_val],
Reshape, Reshape,
excluding=["local_useless_reshape"], excluding=["local_useless_reshape"],
...@@ -6262,10 +6259,10 @@ class TestInferShape(utt.InferShapeTester): ...@@ -6262,10 +6259,10 @@ class TestInferShape(utt.InferShapeTester):
amat = matrix() amat = matrix()
amat_val = rand(4, 5) amat_val = rand(4, 5)
for outdim in (2, 1): for ndim in (2, 1):
self._compile_and_check( self._compile_and_check(
[amat], [amat],
[flatten(amat, outdim)], [flatten(amat, ndim)],
[amat_val], [amat_val],
Reshape, Reshape,
excluding=["local_useless_reshape"], excluding=["local_useless_reshape"],
...@@ -6273,10 +6270,10 @@ class TestInferShape(utt.InferShapeTester): ...@@ -6273,10 +6270,10 @@ class TestInferShape(utt.InferShapeTester):
avec = vector() avec = vector()
avec_val = rand(4) avec_val = rand(4)
outdim = 1 ndim = 1
self._compile_and_check( self._compile_and_check(
[avec], [avec],
[flatten(avec, outdim)], [flatten(avec, ndim)],
[avec_val], [avec_val],
Reshape, Reshape,
excluding=["local_useless_reshape"], excluding=["local_useless_reshape"],
......
...@@ -5244,8 +5244,8 @@ class Flatten(Op): ...@@ -5244,8 +5244,8 @@ class Flatten(Op):
""" """
Flatten a tensor. Flatten a tensor.
Flattens a tensor to `outdim` dimensions by preserving the leading Flattens a tensor to `ndim` dimensions by preserving the leading
outdim - 1 shape components. ndim - 1 shape components.
.. note:: The interface Flatten(Op) is deprecated, you should use flatten. .. note:: The interface Flatten(Op) is deprecated, you should use flatten.
""" """
...@@ -5253,24 +5253,24 @@ class Flatten(Op): ...@@ -5253,24 +5253,24 @@ class Flatten(Op):
view_map = {0: [0]} view_map = {0: [0]}
check_input = False check_input = False
__props__ = ("outdim",) __props__ = ("ndim",)
def __init__(self, outdim=1): def __init__(self, ndim=1):
warnings.warn( warnings.warn(
"Flatten class is deprecated, " "please use flatten method instead.", "Flatten class is deprecated, " "please use flatten method instead.",
DeprecationWarning, DeprecationWarning,
stacklevel=4, stacklevel=4,
) )
self.outdim = int(outdim) self.ndim = int(ndim)
def __str__(self): def __str__(self):
return f"{self.__class__.__name__}{{{self.outdim}}}" return f"{self.__class__.__name__}{{{self.ndim}}}"
def make_node(self, x): def make_node(self, x):
t_x = as_tensor_variable(x) t_x = as_tensor_variable(x)
if self.outdim < 1 or (x.ndim and self.outdim > x.ndim): if self.ndim < 1 or (x.ndim and self.ndim > x.ndim):
raise ValueError( raise ValueError(
f"invalid output ndimensions ({self.outdim}) for tensor of " f"invalid output ndimensions ({self.ndim}) for tensor of "
f"rank {t_x.ndim}" f"rank {t_x.ndim}"
) )
...@@ -5279,8 +5279,8 @@ class Flatten(Op): ...@@ -5279,8 +5279,8 @@ class Flatten(Op):
# For the dimension resulting from the collapse of other dimensions, # For the dimension resulting from the collapse of other dimensions,
# it should be broadcastable iff all the collapsed dimensions were # it should be broadcastable iff all the collapsed dimensions were
# broadcastable. # broadcastable.
bcast_kept_dims = x.broadcastable[: self.outdim - 1] bcast_kept_dims = x.broadcastable[: self.ndim - 1]
bcast_new_dim = builtins.all(x.broadcastable[self.outdim - 1 :]) bcast_new_dim = builtins.all(x.broadcastable[self.ndim - 1 :])
broadcastable = bcast_kept_dims + (bcast_new_dim,) broadcastable = bcast_kept_dims + (bcast_new_dim,)
return gof.Apply(self, [t_x], [tensor(x.type.dtype, broadcastable)]) return gof.Apply(self, [t_x], [tensor(x.type.dtype, broadcastable)])
...@@ -5288,22 +5288,22 @@ class Flatten(Op): ...@@ -5288,22 +5288,22 @@ class Flatten(Op):
def perform(self, node, inp, out_): def perform(self, node, inp, out_):
(x,) = inp (x,) = inp
(out,) = out_ (out,) = out_
outdim = self.outdim ndim = self.ndim
if outdim == 1: if ndim == 1:
try: try:
out[0] = x.reshape(x.size) out[0] = x.reshape(x.size)
except AttributeError: except AttributeError:
out[0] = x.reshape((np.prod(x.shape),)) out[0] = x.reshape((np.prod(x.shape),))
elif outdim == len(x.shape): elif ndim == len(x.shape):
out[0] = x out[0] = x
else: else:
newshape = x.shape[: outdim - 1] + (np.prod(x.shape[outdim - 1 :]),) newshape = x.shape[: ndim - 1] + (np.prod(x.shape[ndim - 1 :]),)
out[0] = x.reshape(newshape) out[0] = x.reshape(newshape)
def infer_shape(self, fgraph, node, in_shapes): def infer_shape(self, fgraph, node, in_shapes):
(in_shp,) = in_shapes (in_shp,) = in_shapes
part1 = in_shp[: self.outdim - 1] part1 = in_shp[: self.ndim - 1]
part2 = in_shp[self.outdim - 1 :] part2 = in_shp[self.ndim - 1 :]
if len(part2) > 1: if len(part2) > 1:
part2 = (prod(part2, dtype="int64"),) part2 = (prod(part2, dtype="int64"),)
...@@ -5311,11 +5311,11 @@ class Flatten(Op): ...@@ -5311,11 +5311,11 @@ class Flatten(Op):
# We do not want to force an upcast of part2 if its length is 1 # We do not want to force an upcast of part2 if its length is 1
pass pass
else: else:
if len(in_shp) == 0 and self.outdim == 1: if len(in_shp) == 0 and self.ndim == 1:
part2 = (1,) part2 = (1,)
else: else:
raise ValueError( raise ValueError(
f"invalid output ndimensions ({self.outdim}) for tensor " f"invalid output ndimensions ({self.ndim}) for tensor "
f"of rank {len(in_shp)}" f"of rank {len(in_shp)}"
) )
...@@ -5338,11 +5338,11 @@ class Flatten(Op): ...@@ -5338,11 +5338,11 @@ class Flatten(Op):
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
(out,) = outputs (out,) = outputs
outdim = self.outdim ndim = self.ndim
fail = sub["fail"] fail = sub["fail"]
return ( return (
""" """
if (%(outdim)s == PyArray_NDIM(%(x)s)) if (%(ndim)s == PyArray_NDIM(%(x)s))
{ {
Py_XDECREF(%(out)s); Py_XDECREF(%(out)s);
Py_XINCREF(%(x)s); Py_XINCREF(%(x)s);
...@@ -5352,7 +5352,7 @@ class Flatten(Op): ...@@ -5352,7 +5352,7 @@ class Flatten(Op):
{ {
Py_XDECREF(%(out)s); Py_XDECREF(%(out)s);
if (%(outdim)s == 1) if (%(ndim)s == 1)
{ {
npy_intp size = PyArray_SIZE(%(x)s); npy_intp size = PyArray_SIZE(%(x)s);
PyArray_Dims newshape; PyArray_Dims newshape;
...@@ -5365,20 +5365,20 @@ class Flatten(Op): ...@@ -5365,20 +5365,20 @@ class Flatten(Op):
else else
{ {
npy_intp *oldshape = PyArray_DIMS(%(x)s); npy_intp *oldshape = PyArray_DIMS(%(x)s);
npy_intp newshape_dims[%(outdim)s]; npy_intp newshape_dims[%(ndim)s];
int i; int i;
for (i = 0; i < %(outdim)s - 1; ++i) for (i = 0; i < %(ndim)s - 1; ++i)
newshape_dims[i] = oldshape[i]; newshape_dims[i] = oldshape[i];
newshape_dims[i] = 1; newshape_dims[i] = 1;
for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j) for (int j = %(ndim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
newshape_dims[i] *= oldshape[j]; newshape_dims[i] *= oldshape[j];
PyArray_Dims newshape; PyArray_Dims newshape;
newshape.ptr = newshape_dims; newshape.ptr = newshape_dims;
newshape.len = %(outdim)s; newshape.len = %(ndim)s;
%(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s, %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
&newshape, &newshape,
NPY_CORDER); NPY_CORDER);
...@@ -5428,39 +5428,29 @@ def is_flat(var, ndim=None, outdim=None): ...@@ -5428,39 +5428,29 @@ def is_flat(var, ndim=None, outdim=None):
return var.ndim == ndim return var.ndim == ndim
def flatten(x, ndim=None, outdim=None): def flatten(x, ndim=1):
""" """Return a copy of the array collapsed into one dimension.
Reshapes the variable x by keeping
the first outdim-1 dimension size(s) of x the same, Reshapes the variable `x` by keeping the first ndim-1 dimension size(s)
and making the last dimension size of x equal to of `x` the same, and making the last dimension size of `x` equal to the
the multiplication of its remaining dimension size(s). multiplication of its remaining dimension size(s).
Parameters Parameters
---------- ----------
x : theano.tensor.var.TensorVariable x : theano.tensor.var.TensorVariable
the variable that should be reshaped. The variable to be reshaped.
ndim : int ndim : int
the number of dimensions of the returned variable The number of dimensions of the returned variable
Default 1. The default value is ``1``.
outdim : int
DEPRECATED synonym for ndim
Returns Returns
------- -------
theano.tensor.var.TensorVariable theano.tensor.var.TensorVariable
the flattend variable with dimensionality of outdim The flattened variable with dimensionality `ndim`.
""" """
if outdim is None and ndim is None: if ndim is None:
ndim = 1 ndim = 1
elif outdim is not None and ndim is not None:
raise ValueError("You should only specify ndim")
elif outdim is not None:
warnings.warn(
"flatten outdim parameter is deprecated, use ndim instead.",
category=DeprecationWarning,
)
ndim = outdim
# Any input variable can be flattened to have ndim of 1, # Any input variable can be flattened to have ndim of 1,
# even if it's a scalar. Otherwise, ndim must be positive # even if it's a scalar. Otherwise, ndim must be positive
# and smaller than x.ndim. # and smaller than x.ndim.
...@@ -5476,32 +5466,11 @@ def flatten(x, ndim=None, outdim=None): ...@@ -5476,32 +5466,11 @@ def flatten(x, ndim=None, outdim=None):
bcast_new_dim = builtins.all(x.broadcastable[ndim - 1 :]) bcast_new_dim = builtins.all(x.broadcastable[ndim - 1 :])
broadcastable = bcast_kept_dims + (bcast_new_dim,) broadcastable = bcast_kept_dims + (bcast_new_dim,)
x_reshaped = theano.tensor.addbroadcast( x_reshaped = theano.tensor.addbroadcast(
x_reshaped, *filter(lambda i: broadcastable[i], range(ndim)) x_reshaped, *[i for i in range(ndim) if broadcastable[i]]
) )
return x_reshaped return x_reshaped
# class TileGrad(Op):
# """
# Calculates the gradient of the Tile Op.
# """
# # this is so weird, I can't think of how to make this a general thing.
# def make_node(self, x, reps, g_out):
# return gof.Apply(self, [x, reps, g_out], [x.type()])
#
# def perform(self, node, inp, out):
# x, reps, g_out = inp
# gx, = out
# xsh = x.shape
# if len(reps) == 2 and reps[1] == 1 and len(x.shape) == 1:
# gx[0] = numpy.sum(g_out, axis=0)
# else:
# raise NotImplementedError('x.shape, reps combination not '
# 'supported', (x.shape, reps))
#
# tilegrad = TileGrad()
class Tile(Op): class Tile(Op):
""" """
Construct an array by repeating the input x according to reps pattern. Construct an array by repeating the input x according to reps pattern.
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论