Commit 195bee05 authored by Marc-Alexandre Cote

Implemented a cumprod function similar to NumPy's.

Parent commit: 100d336e
......@@ -62,4 +62,4 @@ from theano.gradient import Rop, Lop, grad, numeric_grad, verify_grad, \
from theano.tensor.sort import sort, argsort
from theano.tensor.extra_ops import (DiffOp, bincount, squeeze,
repeat, bartlett, fill_diagonal, cumsum)
repeat, bartlett, fill_diagonal, cumsum, cumprod)
......@@ -92,7 +92,7 @@ class CumsumOp(theano.Op):
return code
def c_code_cache_version(self):
return (2,)
return (3,)
def __str__(self):
return "%s{%s}" % (self.__class__.__name__, self.axis)
......@@ -112,6 +112,114 @@ def cumsum(x, axis=None):
"""
return CumsumOp(axis=axis)(x)
class CumprodOp(theano.Op):
    # See function cumprod for docstring.
    #
    # Computes the cumulative product of the elements of a tensor along
    # ``axis`` (or over the flattened tensor when ``axis`` is None),
    # delegating to ``numpy.cumprod`` in Python mode and to the NumPy
    # C-API ``PyArray_CumProd`` in the generated C code.

    def __init__(self, axis=None):
        # axis: int or None.  None means the product is accumulated over
        # the flattened input, so the output is a vector.
        self.axis = axis

    def __eq__(self, other):
        # Two instances are interchangeable iff they are the same Op
        # class and accumulate along the same axis.
        return (type(self) == type(other) and
                self.axis == other.axis)

    def __hash__(self):
        # Must stay consistent with __eq__ above.
        return hash(type(self)) ^ hash(self.axis)

    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        out_type = x.type()
        if self.axis is None:
            # axis=None flattens the input, so the output is a vector
            # regardless of the rank of x (dtype is preserved).
            out_type = theano.tensor.vector(dtype=x.dtype)  # Flatten
        return theano.Apply(self, [x], [out_type])

    def perform(self, node, inputs, output_storage):
        # Pure-Python fallback: delegate directly to numpy.
        x = inputs[0]
        z = output_storage[0]
        z[0] = np.cumprod(x, axis=self.axis)

    def grad(self, inputs, output_gradients):
        # Gradient of y = cumprod(x):
        #   d cost / d x_i = (reversed cumsum of y * gy, taken along
        #   the accumulation axis) / x_i
        # NOTE(review): the division by x makes the gradient NaN/inf
        # wherever x contains zeros — confirm callers accept that.
        x, = inputs
        gi, = output_gradients
        fx = cumprod(x, axis=self.axis)

        if self.axis is None:
            # Flattened case: fx and gi are vectors; reshape the result
            # back to the shape of the original input before dividing.
            return [cumsum((fx * gi)[::-1])[::-1].reshape(inputs[0].shape) / x]

        # We need to reverse the gradients along ``self.axis``,
        # compute cumsum, then reverse again.
        reverse_slicing = [slice(None,None,None)] * gi.ndim
        reverse_slicing[self.axis] = slice(None,None,-1)
        reverse_slicing = tuple(reverse_slicing)
        return [cumsum((fx * gi)[reverse_slicing], self.axis)[reverse_slicing] / x]

    def infer_shape(self, node, shapes):
        if self.axis is None:
            # Flattened output: its length is the product of the input
            # dimensions.
            return [(tensor.prod(shapes[0]),)]  # Flatten

        # Accumulating along an axis preserves the input shape.
        return shapes

    def c_code(self, node, name, inames, onames, sub):
        x, = inames
        z, = onames
        axis = self.axis
        fail = sub['fail']

        # Two code paths: the flattened case (axis=None, or axis=0 on a
        # 1-d input, which is equivalent) passes NPY_MAXDIMS to
        # PyArray_CumProd to request accumulation over the flattened
        # array; the general case passes the concrete axis.  Both paths
        # re-allocate the output only when its shape no longer matches.
        if self.axis is None or (self.axis == 0 and node.inputs[0].ndim == 1):
            code = """
                npy_intp shape[1] = { PyArray_SIZE(%(x)s) };
                if(!(%(z)s && PyArray_DIMS(%(z)s)[0] == shape[0]))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, type_num_%(x)s);
                }

                if (!%(z)s)
                    %(fail)s;
                {
                    PyArray_CumProd(%(x)s, NPY_MAXDIMS, type_num_%(x)s, %(z)s);
                }
                """ % locals()
        else:
            code = """
                if(!(%(z)s && PyArray_CompareLists(PyArray_DIMS(%(z)s), PyArray_DIMS(%(x)s), PyArray_NDIM(%(x)s)) ))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM(%(x)s), PyArray_DIMS(%(x)s), type_num_%(x)s);
                }

                if (!%(z)s)
                    %(fail)s;
                {
                    PyArray_CumProd(%(x)s, %(axis)s, type_num_%(x)s, %(z)s);
                }
                """ % locals()

        return code

    def c_code_cache_version(self):
        # Bump this whenever the generated C code above changes, so
        # cached compiled modules are invalidated.
        return (2,)

    def __str__(self):
        # e.g. "CumprodOp{None}" or "CumprodOp{1}".
        return "%s{%s}" % (self.__class__.__name__, self.axis)
def cumprod(x, axis=None):
    """Return the cumulative product of the elements along a given axis.

    Wraps ``numpy.cumprod``.

    :param x: Input tensor variable.

    :param axis: The axis along which the cumulative product is computed.
        The default (None) is to compute the cumprod over the flattened array.

    .. versionadded:: 0.6.1

    """
    op = CumprodOp(axis=axis)
    return op(x)
class DiffOp(theano.Op):
# See function diff for docstring
def __init__(self, n=1, axis=-1):
......
......@@ -3,7 +3,9 @@ import numpy
import theano
from theano.tests import unittest_tools as utt
from theano.tensor.extra_ops import (CumsumOp, cumsum, BinCountOp, bincount, DiffOp, diff,
from theano.tensor.extra_ops import (CumsumOp, cumsum, CumprodOp, cumprod,
BinCountOp, bincount, DiffOp, diff,
squeeze, RepeatOp, repeat, Bartlett, bartlett,
FillDiagonal, fill_diagonal)
from theano import tensor as T
......@@ -44,7 +46,7 @@ class TestCumsumOp(utt.InferShapeTester):
def test_infer_shape(self):
x = T.tensor3('x')
a = np.random.random((30, 50, 20)).astype(config.floatX)
a = np.random.random((3, 5, 2)).astype(config.floatX)
# Test axis=None
self._compile_and_check([x],
......@@ -67,6 +69,60 @@ class TestCumsumOp(utt.InferShapeTester):
utt.verify_grad(self.op_class(axis=axis), [a])
class TestCumprodOp(utt.InferShapeTester):
    """Tests for CumprodOp: forward values, shape inference and gradient."""

    def setUp(self):
        super(TestCumprodOp, self).setUp()
        self.op_class = CumprodOp
        self.op = CumprodOp()

    def test_CumprodOp(self):
        x = T.tensor3('x')
        small = np.random.random((3, 5, 2)).astype(config.floatX)
        large = np.random.random((30, 5, 2)).astype(config.floatX)
        nogc_mode = theano.compile.Mode(linker="cvm_nogc",
                                        optimizer="fast_run")

        # Flattened (axis=None) cumprod, validated under DebugMode.
        fn = theano.function([x], cumprod(x), mode="DebugMode")
        assert np.allclose(np.cumprod(small), fn(small))

        # Same computation without the garbage collector: output
        # storage persists between calls, so the second, differently
        # shaped input would fail if the op did not re-allocate.
        fn = theano.function([x], cumprod(x).sum(), mode=nogc_mode)
        assert np.allclose(np.cumprod(small).sum(), fn(small))
        assert np.allclose(np.cumprod(large).sum(), fn(large))

        # Repeat both checks for every concrete axis.
        for ax in range(small.ndim):
            fn = theano.function([x], cumprod(x, axis=ax), mode="DebugMode")
            assert np.allclose(np.cumprod(small, axis=ax), fn(small))

            fn = theano.function([x], cumprod(x, axis=ax).sum(),
                                 mode=nogc_mode)
            assert np.allclose(np.cumprod(small, axis=ax).sum(), fn(small))
            assert np.allclose(np.cumprod(large, axis=ax).sum(), fn(large))

    def test_infer_shape(self):
        x = T.tensor3('x')
        data = np.random.random((3, 5, 2)).astype(config.floatX)

        # axis=None: output is the flattened vector.
        self._compile_and_check([x], [self.op(x)], [data], self.op_class)

        # Concrete axes: output shape equals input shape.
        for ax in range(data.ndim):
            self._compile_and_check([x], [cumprod(x, axis=ax)],
                                    [data], self.op_class)

    def test_grad(self):
        data = np.random.random((3, 5, 2)).astype(config.floatX)
        utt.verify_grad(self.op, [data])  # axis=None
        for ax in range(data.ndim):
            utt.verify_grad(self.op_class(axis=ax), [data])
class TestBinCountOp(utt.InferShapeTester):
def setUp(self):
super(TestBinCountOp, self).setUp()
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment