提交 39ee11ff authored 作者: Frederic's avatar Frederic

[CRASH] Remove the assert in the optimization and silently skip it, as it is normal that the optimization cannot always be applied.

上级 8da054b7
...@@ -1626,18 +1626,13 @@ def local_upcast_elemwise_constant_inputs(node): ...@@ -1626,18 +1626,13 @@ def local_upcast_elemwise_constant_inputs(node):
if new_inputs != node.inputs: if new_inputs != node.inputs:
rval = [node.op(*new_inputs)] rval = [node.op(*new_inputs)]
if rval[0].type != node.outputs[0].type: if rval[0].type != node.outputs[0].type:
print >> sys.stderr, "NODE:", node # This can happen for example when floatX=float32
print >> sys.stderr, "NODE INPUT TYPES:", [i.type for i              # and we do the true division between an int64
in node.inputs] # and a constant that will get typed as int8.
print >> sys.stderr, "NODE OUTPUT TYPES:", [
o.type for o in node.outputs]              # As this is just to allow merging more cases, if
print >> sys.stderr, "RVAL:", rval              # the upcast doesn't work, we can just skip it.
print >> sys.stderr, "NEW INPUT TYPES:", [i.type for i return
in new_inputs]
print >> sys.stderr, "RVAL INPUT TYPES:", [
i.type for i in rval[0].owner.inputs]
print >> sys.stderr, "RVAL TYPES:", [o.type for o in rval]
assert rval[0].type == node.outputs[0].type, (node, rval[0])
return rval return rval
################## ##################
......
...@@ -4204,6 +4204,15 @@ def test_local_upcast_elemwise_constant_inputs(): ...@@ -4204,6 +4204,15 @@ def test_local_upcast_elemwise_constant_inputs():
f = function([s], [tensor.grad(x, s)]) f = function([s], [tensor.grad(x, s)])
f([-42, -2.1, -1, -0.5, 0, 0.2, 1, 2, 12]) f([-42, -2.1, -1, -0.5, 0, 0.2, 1, 2, 12])
# This tests a corner case where the optimization should not be applied.              # This tests a corner case where the optimization should not be applied.
old = theano.config.floatX
theano.config.floatX = 'float32'
try:
v = lvector() / 2
function([v], theano.tensor.basic.true_div(v, 2))
finally:
theano.config.floatX = old
class TestShape_i(utt.InferShapeTester): class TestShape_i(utt.InferShapeTester):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论