Commit e2e23668 authored by Hector Munoz, committed by Ricardo Vieira

Remove Tile Op

Parent: efd2d19a
...@@ -2920,81 +2920,6 @@ def flatten(x, ndim=1): ...@@ -2920,81 +2920,6 @@ def flatten(x, ndim=1):
return x_reshaped return x_reshaped
class Tile(Op):
    """
    Construct an array by repeating the input x according to reps pattern.

    .. note:: Deprecated
        Use tile() instead.

    Tiles its input according to reps. The length of reps is the number of
    dimension of x and contains the number of times to tile x in each
    dimension.

    See Also
    --------
    numpy.tile : http://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html

    """

    # Equality/hash of two Tile instances depends only on the output rank.
    __props__ = ("ndim",)

    def __init__(self, ndim):
        # ndim: rank of the output; assumed to equal both x.ndim and
        # len(reps) (see the note in infer_shape).
        self.ndim = ndim

    def __str__(self):
        # Renders e.g. "Tile{ndim=2}".
        return f"{self.__class__.__name__ }{{ndim={self.ndim}}}"

    def make_node(self, x, reps):
        # Emit a deprecation warning at graph-construction time; the
        # free function tile() should be used instead of this Op.
        warnings.warn(
            ("Tile op is deprecated, use tile function instead."), stacklevel=3
        )
        x = as_tensor_variable(x)
        reps = as_tensor_variable(reps)
        # Output keeps x's dtype; all self.ndim output dimensions are marked
        # non-broadcastable ([False] * self.ndim).
        return Apply(self, [x, reps], [tensor(x.type.dtype, [False] * self.ndim)])

    def perform(self, node, inp, out_):
        # Delegate the actual tiling to NumPy.
        x, reps = inp
        (out,) = out_
        res = np.tile(x, reps)
        if res.ndim != self.ndim:
            raise ValueError("Tile.perform produced incorrect number of dimensions")

        if (np.asarray(reps) == 1).all():
            # In that case, some NumPy version return a view! As this
            # op isn't declared as inplace, we need to check that and
            # copy the data.
            if np.may_share_memory(res, x):
                res = res.copy()
        out[0] = res

    def infer_shape(self, fgraph, node, in_shapes):
        # Note: in contrast with numpy, it is assumed that x.shape and reps
        # have equal length; see also tile function below

        # Note: if reps were to be allowed not to be a constant and x.shape
        # and reps to be unequal, the following block of code could be used:
        # prepend 1 to x.shape if needed
        # if self.ndim > x.ndim:
        #     shp = concatenate(ones(self.ndim - x.ndim), shp)
        # prepend 1 to reps if needed
        # reps = concatenate(ones(self.ndim - reps.shape[0]), reps)

        x, reps = node.inputs
        shp = in_shapes[0]
        # NOTE(review): presumably shp * reps yields the per-dimension
        # symbolic products (output dim i = shp[i] * reps[i]) — confirm
        # against the symbolic shape type returned by infer_shape callers.
        tiled_shp = shp * reps
        out_shape = []
        for i in range(self.ndim):
            out_shape.append(tiled_shp[i])
        return [out_shape]

    def grad(self, inp, grads):
        x, reps = inp
        (g_out,) = grads
        # The gradient was never implemented for this (deprecated) Op.
        # return [tilegrad(x, reps, g_out), None]
        raise NotImplementedError()
def tile(x, reps, ndim=None): def tile(x, reps, ndim=None):
""" """
Tile input array `x` according to `reps`. Tile input array `x` according to `reps`.
......
...@@ -51,7 +51,6 @@ from aesara.tensor.basic import ( ...@@ -51,7 +51,6 @@ from aesara.tensor.basic import (
ScalarFromTensor, ScalarFromTensor,
Split, Split,
TensorFromScalar, TensorFromScalar,
Tile,
alloc, alloc,
as_tensor_variable, as_tensor_variable,
cast, cast,
...@@ -59,7 +58,6 @@ from aesara.tensor.basic import ( ...@@ -59,7 +58,6 @@ from aesara.tensor.basic import (
extract_constant, extract_constant,
fill, fill,
get_scalar_constant_value, get_scalar_constant_value,
get_vector_length,
join, join,
ones_like, ones_like,
patternbroadcast, patternbroadcast,
...@@ -2598,49 +2596,6 @@ def local_merge_switch_same_cond(fgraph, node): ...@@ -2598,49 +2596,6 @@ def local_merge_switch_same_cond(fgraph, node):
] ]
@register_useless
@register_canonicalize
@register_stabilize
@local_optimizer([Tile])
def local_useless_tile(fgraph, node):
    """Tile(x, (1,) * N) -> x

    A ``Tile`` whose ``reps`` is a constant vector of ones replicates
    nothing, so the input variable can be returned directly.

    Returns ``[x]`` when the rewrite applies, otherwise ``None`` (i.e. no
    replacement).
    """
    if not isinstance(node.op, Tile):
        return

    x, reps = node.inputs
    try:
        # Only applies when every entry of `reps` is the constant 1.
        rep_value = get_scalar_constant_value(reps, only_process_constants=True)
    except NotScalarConstantError:
        return
    if rep_value != 1:
        return

    try:
        reps_len = get_vector_length(reps)
    except ValueError:
        # Length of `reps` is not statically known.
        return

    if reps_len == node.inputs[0].ndim:
        # No need to copy over any stacktrace as the previous
        # input variable already has a stacktrace.
        return [x]

    # The Op doesn't support len(reps) != x.ndim, so the rewrite can't be
    # implemented (or tested) for those cases.  The previous implementation
    # carried unreachable fallback code here (after a `return`) that would
    # have raised a TypeError on Python 3 (`list + range`); it was removed.
    return
@register_useless @register_useless
@register_canonicalize @register_canonicalize
@register_specialize @register_specialize
......
...@@ -66,7 +66,6 @@ Partial list of ops without support for R-op: ...@@ -66,7 +66,6 @@ Partial list of ops without support for R-op:
* All sparse ops * All sparse ops
* All linear algebra ops. * All linear algebra ops.
* PermuteRowElements * PermuteRowElements
* Tile
* AdvancedSubtensor * AdvancedSubtensor
* TensorDot * TensorDot
* Outer * Outer
......
...@@ -38,7 +38,6 @@ from aesara.tensor.basic import ( ...@@ -38,7 +38,6 @@ from aesara.tensor.basic import (
ScalarFromTensor, ScalarFromTensor,
Split, Split,
TensorFromScalar, TensorFromScalar,
Tile,
Tri, Tri,
addbroadcast, addbroadcast,
alloc, alloc,
...@@ -4104,33 +4103,6 @@ class TestInferShape(utt.InferShapeTester): ...@@ -4104,33 +4103,6 @@ class TestInferShape(utt.InferShapeTester):
ARange, ARange,
) )
def test_Tile(self):
# Tile op is deprecated so the tile function doesn't use it
# anymore, we'll test here the op directly
advec = dvector()
advec_val = random(5)
aivec_val = [3]
ndim = 1
self._compile_and_check(
[advec], [Tile(ndim)(advec, aivec_val)], [advec_val], Tile
)
admat = dmatrix()
admat_val = random(2, 4)
aivec_val = [2, 3]
ndim = 2
self._compile_and_check(
[admat], [Tile(ndim)(admat, aivec_val)], [admat_val], Tile
)
adtens4 = dtensor4()
adtens4_val = random(2, 4, 3, 5)
aivec_val = [2, 3, 1, 4]
ndim = 4
self._compile_and_check(
[adtens4], [Tile(ndim)(adtens4, aivec_val)], [adtens4_val], Tile
)
class TestTensorInstanceMethods: class TestTensorInstanceMethods:
def setup_method(self): def setup_method(self):
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment