提交 1b5d4fc4，作者：Frédéric Bastien，提交者：GitHub

Merge pull request #5070 from JesseLivezey/softmax_dnn_grad_opt_fix

fix softmaxgrad dnn opt error fixes #5056
......@@ -3110,8 +3110,9 @@ if True:
return
ins = []
for n in node.inputs:
if isinstance(n.owner.op, HostFromGpu):
n = n.owner.inputs[0]
if n.owner is not None:
if isinstance(n.owner.op, HostFromGpu):
n = n.owner.inputs[0]
if n.ndim != 2:
return
ins.append(n.dimshuffle(0, 'x', 1, 'x'))
......
......@@ -582,6 +582,19 @@ class test_DnnSoftMax(test_nnet.test_SoftMax):
if cuda.dnn.version() != (3000, 3000):
utt.verify_grad(softmax_op, [x_val2], mode=mode_with_gpu)
def test_local_softmax_dnn_grad(self):
"""
Check for optimization error when grad of summed
softmax is taken over tensor with fixed shape.
"""
x = T.fvector('x')
xp = x.reshape((5, 5))
y = T.nnet.softmax(xp.flatten()).sum()
g = T.grad(y, x)
f = theano.function(inputs=[x], outputs=g, mode=self.mode)
assert(any(n for n in f.maker.fgraph.toposort() if
isinstance(n.op, dnn.GpuDnnSoftmaxGrad)))
def test_cudnn_softmax_grad_opt(self):
# Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad optimization is
# applied when cudnn is required
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论