Commit 8e3aaba6 authored by Larry Dong, committed by Brandon T. Willard

Removed DiffOp class and tests

Parent: 9e547465
...@@ -39,7 +39,6 @@ from aesara.tensor.extra_ops import ( ...@@ -39,7 +39,6 @@ from aesara.tensor.extra_ops import (
Bartlett, Bartlett,
BroadcastTo, BroadcastTo,
CumOp, CumOp,
DiffOp,
FillDiagonal, FillDiagonal,
FillDiagonalOffset, FillDiagonalOffset,
RavelMultiIndex, RavelMultiIndex,
...@@ -938,17 +937,6 @@ def jax_funcify_CumOp(op, **kwargs): ...@@ -938,17 +937,6 @@ def jax_funcify_CumOp(op, **kwargs):
return cumop return cumop
@jax_funcify.register(DiffOp)
def jax_funcify_DiffOp(op, **kwargs):
    """Build the JAX implementation of a `DiffOp` node.

    The op's static ``n`` (difference order) and ``axis`` are captured as
    keyword defaults so the returned callable only needs the input array.
    """
    order, along = op.n, op.axis

    def diffop(x, n=order, axis=along):
        # Delegate directly to JAX's n-th discrete difference.
        return jnp.diff(x, n=n, axis=axis)

    return diffop
@jax_funcify.register(Repeat) @jax_funcify.register(Repeat)
def jax_funcify_Repeat(op, **kwargs): def jax_funcify_Repeat(op, **kwargs):
axis = op.axis axis = op.axis
......
...@@ -3,7 +3,6 @@ import warnings ...@@ -3,7 +3,6 @@ import warnings
import numba import numba
import numpy as np import numpy as np
from numba.misc.special import literal_unroll from numba.misc.special import literal_unroll
from numpy.core.multiarray import normalize_axis_index
from aesara import config from aesara import config
from aesara.link.numba.dispatch import basic as numba_basic from aesara.link.numba.dispatch import basic as numba_basic
...@@ -12,7 +11,6 @@ from aesara.tensor.extra_ops import ( ...@@ -12,7 +11,6 @@ from aesara.tensor.extra_ops import (
Bartlett, Bartlett,
BroadcastTo, BroadcastTo,
CumOp, CumOp,
DiffOp,
FillDiagonal, FillDiagonal,
FillDiagonalOffset, FillDiagonalOffset,
RavelMultiIndex, RavelMultiIndex,
...@@ -67,36 +65,6 @@ def numba_funcify_CumOp(op, node, **kwargs): ...@@ -67,36 +65,6 @@ def numba_funcify_CumOp(op, node, **kwargs):
return cumop return cumop
@numba_funcify.register(DiffOp)
def numba_funcify_DiffOp(op, node, **kwargs):
    """Generate a Numba implementation of `DiffOp` (n-th discrete difference).

    Parameters
    ----------
    op
        The `DiffOp` instance being converted; its ``n`` and ``axis``
        properties are baked into the jitted function as constants.
    node
        The `Apply` node, used to read the input's ``ndim`` and the
        output's ``dtype``.

    Returns
    -------
    callable
        A jitted function ``diffop(x)`` computing ``np.diff(x, n, axis)``.
    """
    n = op.n
    axis = op.axis
    ndim = node.inputs[0].ndim
    dtype = node.outputs[0].dtype

    axis = normalize_axis_index(axis, ndim)

    # Precompute the index tuples selecting x[1:] and x[:-1] along `axis`;
    # they are closed over by the jitted function as compile-time constants.
    slice1 = [slice(None)] * ndim
    slice2 = [slice(None)] * ndim
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)

    # `np.diff` applies `not_equal` instead of `subtract` on boolean input.
    # Renamed from `op` to avoid shadowing the `op` parameter above.
    step_op = np.not_equal if dtype == "bool" else np.subtract

    @numba_basic.numba_njit(boundscheck=False, fastmath=config.numba__fastmath)
    def diffop(x):
        res = x.copy()
        for _ in range(n):
            res = step_op(res[slice1], res[slice2])
        return res

    return diffop
@numba_funcify.register(FillDiagonal) @numba_funcify.register(FillDiagonal)
def numba_funcify_FillDiagonal(op, **kwargs): def numba_funcify_FillDiagonal(op, **kwargs):
@numba_basic.numba_njit @numba_basic.numba_njit
......
...@@ -469,66 +469,6 @@ class CumprodOp(Op): ...@@ -469,66 +469,6 @@ class CumprodOp(Op):
return obj return obj
class DiffOp(Op):
    """Compute the ``n``-th order discrete difference along ``axis``.

    See the `diff` helper function for the user-facing docstring.
    """

    __props__ = ("n", "axis")

    def __init__(self, n=1, axis=-1):
        self.n = n
        self.axis = axis
        # NumPy returns a view of the input when n == 0, so declare the
        # output as a view to keep aliasing information correct.
        # TODO: add a rewrite that removes this op entirely in that case.
        if n == 0:
            self.view_map = {0: [0]}

    def make_node(self, x):
        x = at.as_tensor_variable(x)
        axis = normalize_axis_index(self.axis, x.ndim)

        # Output shape equals the input shape except along `axis`, where
        # each application of diff shrinks the dimension by one (never
        # below zero, matching np.diff).
        shape = [None] * x.type.ndim
        for i, shape_i in enumerate(x.type.shape):
            if shape_i is None:
                continue

            if i == axis:
                shape[i] = max(0, shape_i - self.n)
            else:
                shape[i] = shape_i

        out_type = TensorType(dtype=x.type.dtype, shape=shape)
        return Apply(self, [x], [out_type()])

    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        z = output_storage[0]
        z[0] = np.diff(x, n=self.n, axis=self.axis)

    def grad(self, inputs, outputs_gradients):
        # Renamed from `inputs = inputs[0]` to avoid shadowing the parameter.
        x = inputs[0]
        if x.ndim != 1:
            raise NotImplementedError(
                "Grad is not implemented for inputs with "
                "number of dimension other than 1."
            )

        z = outputs_gradients[0]

        def _grad_helper(z):
            # Adjoint of a single first-order diff: g[j-1] - g[j] with
            # zero-padding at both boundaries.
            pre = at.concatenate([[0.0], z])
            app = at.concatenate([z, [0.0]])
            return pre - app

        # FIXME: This fails when n is larger than the input size
        for k in range(self.n):
            z = _grad_helper(z)
        return [z]

    def infer_shape(self, fgraph, node, ins_shapes):
        i0_shapes = ins_shapes[0]
        out_shape = list(i0_shapes)
        # Mirror np.diff: the differenced axis shrinks by n, floored at 0.
        out_shape[self.axis] = at_max((0, out_shape[self.axis] - self.n))
        return [out_shape]
def diff(x, n=1, axis=-1): def diff(x, n=1, axis=-1):
"""Calculate the `n`-th order discrete difference along the given `axis`. """Calculate the `n`-th order discrete difference along the given `axis`.
......
...@@ -18,7 +18,6 @@ from aesara.tensor.extra_ops import ( ...@@ -18,7 +18,6 @@ from aesara.tensor.extra_ops import (
BroadcastTo, BroadcastTo,
CpuContiguous, CpuContiguous,
CumOp, CumOp,
DiffOp,
FillDiagonal, FillDiagonal,
FillDiagonalOffset, FillDiagonalOffset,
RavelMultiIndex, RavelMultiIndex,
...@@ -334,33 +333,6 @@ class TestDiffOp(utt.InferShapeTester): ...@@ -334,33 +333,6 @@ class TestDiffOp(utt.InferShapeTester):
else: else:
assert out.type.shape[i] == out_test.shape[i] assert out.type.shape[i] == out_test.shape[i]
def test_infer_shape(self):
    """Check shape inference of `DiffOp` for defaults and many (n, axis) pairs."""
    x = matrix("x")
    a = np.random.random((30, 50)).astype(config.floatX)

    # Default parameters (n=1, axis=-1)
    self._compile_and_check([x], [DiffOp()(x)], [a], DiffOp)

    orders = (0, 1, 2, a.shape[0], a.shape[0] + 1)
    for ax in (-2, -1, 0, 1):
        for order in orders:
            self._compile_and_check([x], [diff(x, n=order, axis=ax)], [a], DiffOp)
def test_grad(self):
    """Numerically verify the gradient of `DiffOp` for several orders `n`."""
    a = np.random.random(50).astype(config.floatX)

    # Default n and axis
    utt.verify_grad(DiffOp(), [a])

    for order in (0, 1, 2, a.shape[0]):
        utt.verify_grad(DiffOp(n=order), [a], eps=7e-3)
@pytest.mark.xfail(reason="gradient is wrong when n is larger than input size")
def test_grad_n_larger_than_input(self):
    # The gradient computed by DiffOp.grad is incorrect whenever `n`
    # exceeds the input length; until that is fixed, this xfail test
    # documents the known limitation.
    a = np.random.random(10).astype(config.floatX)
    utt.verify_grad(DiffOp(n=11), [a], eps=7e-3)
def test_grad_not_implemented(self): def test_grad_not_implemented(self):
x = at.matrix("x") x = at.matrix("x")
with pytest.raises(NotImplementedError): with pytest.raises(NotImplementedError):
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment