提交 ef079e7a authored 作者: Pascal Lamblin's avatar Pascal Lamblin

Merge pull request #5 from nouiz/lamblin-fix_set_subtensor1_grad

Tests for the fix, and apply the same fix to AdvancedSubtensor.
...@@ -2034,9 +2034,13 @@ class AdvancedIncSubtensor(Op): ...@@ -2034,9 +2034,13 @@ class AdvancedIncSubtensor(Op):
'later, or to the latest development version. ' 'later, or to the latest development version. '
'You may need to clear the cache (theano-cache clear) ' 'You may need to clear the cache (theano-cache clear) '
'afterwards.') 'afterwards.')
new_inputs = []
for inp in inputs:
if isinstance(inp, (list, tuple)):
inp = theano.tensor.as_tensor_variable(inp)
new_inputs.append(inp)
return gof.Apply(op, return gof.Apply(op,
(x, y) + inputs, (x, y) + tuple(new_inputs),
[theano.tensor.tensor( [theano.tensor.tensor(
dtype=x.type.dtype, dtype=x.type.dtype,
broadcastable=x.type.broadcastable)]) broadcastable=x.type.broadcastable)])
...@@ -2091,9 +2095,25 @@ class AdvancedIncSubtensor(Op): ...@@ -2091,9 +2095,25 @@ class AdvancedIncSubtensor(Op):
x, y = inpt[:2] x, y = inpt[:2]
idxs = inpt[2:] idxs = inpt[2:]
outgrad, = output_gradients outgrad, = output_gradients
d_x_wrt_C = outgrad if x.dtype in theano.tensor.discrete_dtypes:
d_y_wrt_C = AdvancedSubtensor()(outgrad, *idxs) # The output dtype is the same as x
return [d_x_wrt_C, d_y_wrt_C] + \ gx = x.zeros_like(dtype=theano.config.floatX)
if y.dtype in theano.tensor.discrete_dtypes:
gy = y.zeros_like(dtype=theano.config.floatX)
else:
gy = y.zeros_like()
elif x.dtype in theano.tensor.complex_dtypes:
raise NotImplementedError("No support for complex grad yet")
else:
if self.set_instead_of_inc:
gx = advanced_set_subtensor(
outgrad,
y.zeros_like(),
*idxs)
else:
gx = outgrad
gy = advanced_subtensor(outgrad, *idxs)
return [gx, gy] + \
[DisconnectedType()() for _ in idxs] [DisconnectedType()() for _ in idxs]
def R_op(self, inputs, eval_points): def R_op(self, inputs, eval_points):
...@@ -2102,6 +2122,7 @@ class AdvancedIncSubtensor(Op): ...@@ -2102,6 +2122,7 @@ class AdvancedIncSubtensor(Op):
return self.make_node(eval_points[0], eval_points[1], return self.make_node(eval_points[0], eval_points[1],
*inputs[2:]).outputs *inputs[2:]).outputs
advanced_inc_subtensor = AdvancedIncSubtensor() advanced_inc_subtensor = AdvancedIncSubtensor()
advanced_set_subtensor = AdvancedIncSubtensor(set_instead_of_inc=True)
def take(a, indices, axis=None, mode='raise'): def take(a, indices, axis=None, mode='raise'):
......
...@@ -18,6 +18,10 @@ import theano.scalar as scal ...@@ -18,6 +18,10 @@ import theano.scalar as scal
import theano.tensor as tensor import theano.tensor as tensor
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
from theano.tensor.subtensor import (inc_subtensor, set_subtensor, from theano.tensor.subtensor import (inc_subtensor, set_subtensor,
advanced_inc_subtensor1,
advanced_set_subtensor1,
advanced_inc_subtensor,
advanced_set_subtensor,
Subtensor, IncSubtensor, Subtensor, IncSubtensor,
AdvancedSubtensor1, AdvancedSubtensor, AdvancedSubtensor1, AdvancedSubtensor,
advanced_subtensor1, inplace_increment, advanced_subtensor1, inplace_increment,
...@@ -519,6 +523,19 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin): ...@@ -519,6 +523,19 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
self.assertTrue(g_00.shape == (1, 3)) self.assertTrue(g_00.shape == (1, 3))
self.assertTrue(numpy.allclose(g_00, 2)) self.assertTrue(numpy.allclose(g_00, 2))
utt.verify_grad(lambda m: m[[1, 3]],
[numpy.random.rand(5, 5).astype(self.dtype)])
def fun(x, y):
return advanced_inc_subtensor1(x, y, [1, 3])
utt.verify_grad(fun, [numpy.random.rand(5, 5).astype(self.dtype),
numpy.random.rand(2, 5).astype(self.dtype)])
def fun(x, y):
return advanced_set_subtensor1(x, y, [1, 3])
utt.verify_grad(fun, [numpy.random.rand(5, 5).astype(self.dtype),
numpy.random.rand(2, 5).astype(self.dtype)])
def test_adv_sub1_idx_broadcast(self): def test_adv_sub1_idx_broadcast(self):
# The idx can be a broadcastable vector. # The idx can be a broadcastable vector.
ones = numpy.ones((4, 3), dtype=self.dtype) ones = numpy.ones((4, 3), dtype=self.dtype)
...@@ -1291,6 +1308,27 @@ class TestAdvancedSubtensor(unittest.TestCase): ...@@ -1291,6 +1308,27 @@ class TestAdvancedSubtensor(unittest.TestCase):
cmd = f2(0, 1, 2) == aa[[0, 1, 2], :, 0:2] cmd = f2(0, 1, 2) == aa[[0, 1, 2], :, 0:2]
self.assertTrue(cmd.all()) self.assertTrue(cmd.all())
def test_grad(self):
ones = numpy.ones((1, 3), dtype=self.dtype)
n = self.shared(ones * 5, broadcastable=(True, False))
idx = tensor.lvector()
idx2 = tensor.lvector()
t = n[idx, idx2]
self.assertTrue(isinstance(t.owner.op, tensor.AdvancedSubtensor))
utt.verify_grad(lambda m: m[[1, 3], [2, 4]],
[numpy.random.rand(5, 5).astype(self.dtype)])
def fun(x, y):
return advanced_inc_subtensor(x, y, [1, 3], [2, 4])
utt.verify_grad(fun, [numpy.random.rand(5, 5).astype(self.dtype),
numpy.random.rand(2).astype(self.dtype)])
def fun(x, y):
return advanced_set_subtensor(x, y, [1, 3], [2, 4])
utt.verify_grad(fun, [numpy.random.rand(5, 5).astype(self.dtype),
numpy.random.rand(2).astype(self.dtype)])
class TestInferShape(utt.InferShapeTester): class TestInferShape(utt.InferShapeTester):
def test_infer_shape(self): def test_infer_shape(self):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论