提交 391f859b authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron 提交者: Shawn Tan

Clean up the Alloc test since we have AdvancedIncSubtensor on the GPU now.

上级 e4c4c346
@@ -2479,16 +2479,12 @@ class TestAlloc(unittest.TestCase):
         grad_derp = theano.grad(derp, some_vector)
         fgrad = theano.function([some_vector], grad_derp,
                                 mode=self.mode)
         topo_obj = fobj.maker.fgraph.toposort()
-        # <= is needed as the GPU currently don't implement
-        # AdvancedIncSubtensor. When this is the case it can be
-        # replaced with ==.
         assert np.sum([isinstance(node.op, type(alloc_))
-                       for node in topo_obj]) <= 1
+                       for node in topo_obj]) == 0
+
         topo_grad = fgrad.maker.fgraph.toposort()
-        # print subtensor
-        # theano.printing.debugprint(fgrad)
         assert np.sum([isinstance(node.op, type(alloc_))
                        for node in topo_grad]) == n_alloc, (
             alloc_, subtensor, n_alloc, topo_grad)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论