提交 39ee11ff authored 作者: Frederic's avatar Frederic

[CRASH] Remove the assert in the optimization and silence the failure, as it is normal that the optimization cannot always be applied.

上级 8da054b7
......@@ -1626,18 +1626,13 @@ def local_upcast_elemwise_constant_inputs(node):
if new_inputs != node.inputs:
rval = [node.op(*new_inputs)]
if rval[0].type != node.outputs[0].type:
print >> sys.stderr, "NODE:", node
print >> sys.stderr, "NODE INPUT TYPES:", [i.type for i
in node.inputs]
print >> sys.stderr, "NODE OUTPUT TYPES:", [
o.type for o in node.outputs]
print >> sys.stderr, "RVAL:", rval
print >> sys.stderr, "NEW INPUT TYPES:", [i.type for i
in new_inputs]
print >> sys.stderr, "RVAL INPUT TYPES:", [
i.type for i in rval[0].owner.inputs]
print >> sys.stderr, "RVAL TYPES:", [o.type for o in rval]
assert rval[0].type == node.outputs[0].type, (node, rval[0])
# This can happen, for example, when floatX=float32
# and we do the true division between an int64
# and a constant that will get typed as int8.
# As this is just to allow merging more cases, if
# the upcast doesn't work, we can just skip it.
return
return rval
##################
......
......@@ -4204,6 +4204,15 @@ def test_local_upcast_elemwise_constant_inputs():
f = function([s], [tensor.grad(x, s)])
f([-42, -2.1, -1, -0.5, 0, 0.2, 1, 2, 12])
# This test a corner where the optimization should not be applied.
old = theano.config.floatX
theano.config.floatX = 'float32'
try:
v = lvector() / 2
function([v], theano.tensor.basic.true_div(v, 2))
finally:
theano.config.floatX = old
class TestShape_i(utt.InferShapeTester):
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论