Commit 9c15d602 authored by Jesse Livezey

Added a regression test and a fix for the optimizer crashing on node inputs that have no owner

Parent 421ca4bd
@@ -3110,8 +3110,9 @@ if True:
             return
         ins = []
         for n in node.inputs:
-            if isinstance(n.owner.op, HostFromGpu):
-                n = n.owner.inputs[0]
+            if n.owner is not None:
+                if isinstance(n.owner.op, HostFromGpu):
+                    n = n.owner.inputs[0]
             if n.ndim != 2:
                 return
             ins.append(n.dimshuffle(0, 'x', 1, 'x'))
...
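Why the new guard is needed: in Theano, only variables produced by an Op have an `owner` Apply node; a free graph input (e.g. one created by `T.fvector`) has `owner is None`, so the old `n.owner.op` access raised an `AttributeError` inside the optimizer. A minimal sketch (not part of the commit) illustrating the invariant:

```python
# Sketch (not part of the commit): why `n.owner` can be None.
import theano.tensor as T

x = T.fvector('x')            # free graph input
y = x.reshape((5, 5))         # output of the Reshape op
assert x.owner is None        # no Apply node produced x
assert y.owner is not None    # y.owner.op is the Reshape instance
```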
@@ -582,6 +582,17 @@ class test_DnnSoftMax(test_nnet.test_SoftMax):
         if cuda.dnn.version() != (3000, 3000):
             utt.verify_grad(softmax_op, [x_val2], mode=mode_with_gpu)
 
+    def test_local_softmax_dnn_grad(self):
+        """
+        Check for optimization error when grad of summed
+        softmax is taken over tensor with fixed shape.
+        """
+        x = T.fvector('x')
+        xp = x.reshape((5, 5))
+        y = T.nnet.softmax(xp.flatten()).sum()
+        g = T.grad(y, x)
+        f = theano.function(inputs=[x], outputs=g, mode=self.mode)
+
     def test_cudnn_softmax_grad_opt(self):
         # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad optimization is
         # applied when cudnn is required
...
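Note that the new test never calls the compiled function: compilation alone runs the graph optimizations, which is where the crash occurred. A hedged standalone reproduction (not part of the commit), assuming an old-backend Theano build with CUDA/cuDNN available; the `mode_with_gpu` construction below mirrors the usual test-suite pattern but is an assumption here:

```python
# Hedged standalone reproduction; assumes CUDA/cuDNN is available.
import theano
import theano.tensor as T

# Common test-suite pattern for a GPU-enabled compilation mode (assumption).
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')

x = T.fvector('x')
y = T.nnet.softmax(x.reshape((5, 5)).flatten()).sum()
g = T.grad(y, x)
# Compiling is the regression check: before the fix, the
# SoftmaxGrad -> GpuDnnSoftmaxGrad rewrite raised AttributeError
# because the graph input `x` has no owner.
f = theano.function(inputs=[x], outputs=g, mode=mode_with_gpu)
```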