Commit 8a3961b5, authored by Frédéric Bastien

Merge pull request #2565 from lamblin/fix_grad_none

Correctly handle gradient of identity on int
@@ -1856,7 +1856,7 @@ class Identity(UnaryScalarOp):
         if x.type in continuous_types:
             return gz,
         else:
-            return None,
+            return x.zeros_like(dtype=theano.config.floatX),
 identity = Identity(same_out, name='identity')
......
@@ -438,6 +438,16 @@ def test_grad_switch():
     theano.gradient.grad(l, x)
def test_grad_identity():
# Check that the grad method of Identity correctly handles int dytpes
x = theano.tensor.imatrix('x')
# tensor_copy is Elemwise{Identity}
y = theano.tensor.tensor_copy(x)
l = y.sum(dtype=theano.config.floatX)
theano.gradient.grad(l, x)
 # Testing of Composite is done in tensor/tests/test_opt.py
 # in test_fusion, TestCompositeCodegen
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论