提交 ea3dcffc authored 作者: Frederic's avatar Frederic

remove local_mul_to_neg as it does a subset of local_mul_specialize

上级 1cf4a13c
......@@ -3385,33 +3385,6 @@ def local_sum_alloc(node):
pass
@gof.local_optimizer([T.mul])
def local_mul_to_neg(node):
    """
    mul(-1, x) -> neg(x)

    This is not done if we would add more nodes in the graph, like with:
    mul(-1, x, y) -/-> neg(mul(x, y))

    Returns a one-element list with the replacement variable on success,
    False when the rewrite is refused, and None (implicitly) when the
    node does not match or the rewrite would change the output type.
    """
    if (node.op == T.mul and
            local_mul_canonizer.get_constant(node.inputs[0]) == -1.0):
        num = node.inputs[1:]
        if len(num) != 1:
            # mul(-1, x, y) -> neg(mul(x, y)) would add an extra node to
            # the graph, so we refuse to rewrite in that case.
            # NOTE: the original code had an unreachable
            # `other_prod = local_mul_canonizer.main(*num)` after this
            # return; that dead statement has been removed.
            return False
        other_prod = num[0]
        if other_prod.type == node.outputs[0].type:
            return [-other_prod]
        # else the multiplication is also acting as a cast, so we
        # might as well leave it alone. I don't think it's better to
        # turn this into a negation in the wrong type, followed by an
        # explicit cast.
register_specialize(local_mul_to_neg)
@register_specialize
@gof.local_optimizer([T.neg])
def local_neg_neg(node):
......
......@@ -4000,27 +4000,6 @@ def test_local_join_1():
assert f.maker.fgraph.outputs[0].dtype == config.floatX
def test_local_mul_to_neg():
    """
    Test that a multiplication by -1 or -1.0 yields the appropriate data type
    """
    a = T.imatrix()
    f1 = theano.function([a], -1 * a)
    f2 = theano.function([a], -1.0 * a)
    aval = numpy.random.randint(0, 10, (2, 2)).astype('int32')
    policy = config.cast_policy
    # Determine the dtypes expected for the int (-1) and float (-1.0)
    # multiplications under the active casting policy, then assert once.
    if policy == 'custom':
        expect_int, expect_float = a.dtype, 'float64'
    elif policy == 'numpy':
        expect_int, expect_float = str(numpy.array(0).dtype), 'float64'
    elif policy == 'numpy+floatX':
        expect_int, expect_float = str(numpy.array(0).dtype), config.floatX
    else:
        raise NotImplementedError(policy)
    assert f1(aval).dtype == expect_int
    assert f2(aval).dtype == expect_float
def test_local_add_specialize():
# test of non-zero dimension
a = tensor.vector()
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论