提交 1d954660 authored 作者: Frederic's avatar Frederic

Remove now useless try..except

上级 eace612d
......@@ -452,195 +452,193 @@ class test_canonize(unittest.TestCase):
# We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode = compile.mode.get_default_mode()
try:
opt = gof.Query(["canonicalize"])
opt = opt.including('ShapeOpt')
opt = opt.excluding(
'local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
# test x / x -> 1
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx, [fx], [fxv], 'float32'),
(dx/dx, [dx], [dxv], 'float64'),
(fv/fv, [fv], [fvv], 'float32'),
(dv/dv, [dv], [dvv], 'float64'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert (out == numpy.ones(shp, dtype=out_dtype)).all()
topo = f.maker.fgraph.toposort()
if sym_inputs[0].broadcastable[0]:
assert len(topo) == 2
assert isinstance(topo[0].op, Shape_i)
assert isinstance(topo[1].op, tensor.Alloc)
else:
assert len(topo) == 3
assert isinstance(topo[0].op, Shape_i)
assert isinstance(topo[1].op, Shape_i)
assert isinstance(topo[2].op, tensor.Alloc)
assert(out_dtype == out.dtype)
# test (x * y) / x -> y
for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([
((dx*dy)/dx, [dx, dy], [dxv, dyv], 0, 'float64'),
((fx*fy)/fx, [fx, fy], [fxv, fyv], 0, 'float32'),
((dv*dy)/dv, [dv, dy], [dvv, dyv], 0, 'float64'),
((fv*fy)/fv, [fv, fy], [fvv, fyv], 0, 'float32'),
# must broadcast as their is a dimshuffle in the computation
((dx*dv)/dx, [dx, dv], [dxv, dvv], 1, 'float64'),
# topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]
((fx*fv)/fx, [fx, fv], [fxv, fvv], 1, 'float32')
# topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert(out_dtype == out.dtype)
assert numpy.allclose(out, val_inputs[1])
topo = f.maker.fgraph.toposort()
if topo and not(len(topo) == 1 and topo[0].op == deep_copy_op):
for node in topo[:-1]:
assert isinstance(node.op, Shape_i)
assert isinstance(topo[-1].op, tensor.Alloc)
# test x / y / x -> 1 / y
for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([
((dx/dy)/dx, [dx, dy], [dxv, dyv], 1, 'float64'),
((fx/fy)/fx, [fx, fy], [fxv, fyv], 1, 'float32'),
((dv/dy)/dv, [dv, dy], [dvv, dyv], 1, 'float64'),
((fv/fy)/fv, [fv, fy], [fvv, fyv], 1, 'float32'),
# must broadcast as their is a dimshuffle in the computation
((dx/dv)/dx, [dx, dv], [dxv, dvv], 1, 'float64'),
# topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]
((fx/fv)/fx, [fx, fv], [fxv, fvv], 1, 'float32'),
# topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (1 / val_inputs[1]))
topo = f.maker.fgraph.toposort()
elem = [t for t in topo if isinstance(t.op, T.Elemwise)]
assert len(elem) == nb_elemwise
assert isinstance(elem[0].op, (T.Elemwise, ))
assert isinstance(elem[0].op.scalar_op, (
theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))
assert(out_dtype == out.dtype)
# test (a / b) * (b / c) * (c / d) -> a / d
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((dx / dy) * (dy / dz) * (dz / dw), [dx, dy, dz, dw], [dxv, dyv, dzv, dwv], 'float64'),
((fx / fy) * (fy / fz) * (fz / fw), [fx, fy, fz, fw], [fxv, fyv, fzv, fwv], 'float32'),
((dv / dy) * (dy / dz) * (dz / dw), [dv, dy, dz, dw], [dvv, dyv, dzv, dwv], 'float64'),
((fv / fy) * (fy / fz) * (fz / fw), [fv, fy, fz, fw], [fvv, fyv, fzv, fwv], 'float32'),
((dx / dv) * (dv / dz) * (dz / dw), [dx, dv, dz, dw], [dxv, dvv, dzv, dwv], 'float64'),
((fx / fv) * (fv / fz) * (fz / fw), [fx, fv, fz, fw], [fxv, fvv, fzv, fwv], 'float32'),
((dx / dy) * (dy / dv) * (dv / dw), [dx, dy, dv, dw], [dxv, dyv, dvv, dwv], 'float64'),
((fx / fy) * (fy / fv) * (fv / fw), [fx, fy, fv, fw], [fxv, fyv, fvv, fwv], 'float32'),
((dx / dy) * (dy / dz) * (dz / dv), [dx, dy, dz, dv], [dxv, dyv, dzv, dvv], 'float64'),
((fx / fy) * (fy / fz) * (fz / fv), [fx, fy, fz, fv], [fxv, fyv, fzv, fvv], 'float32'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.TrueDiv)
assert len(topo[0].inputs) == 2
assert(out_dtype == out.dtype)
# test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
(((2.0*dx)/(4.0*dy)), [dx, dy], [dxv, dyv], 'float64'),
(((2.0*fx)/(4.0*fy)), [fx, fy], [fxv, fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(((2.0*dv)/(4.0*dy)), [dv, dy], [dvv, dyv], 'float64'),
(((2.0*fv)/(4.0*fy)), [fv, fy], [fvv, fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(((2.0*dx)/(4.0*dv)), [dx, dv], [dxv, dvv], 'float64'),
(((2.0*fx)/(4.0*fv)), [fx, fv], [fxv, fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (0.5 *
val_inputs[0] / val_inputs[1]))
topo = f.maker.fgraph.toposort()
opt = gof.Query(["canonicalize"])
opt = opt.including('ShapeOpt')
opt = opt.excluding(
'local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
# test x / x -> 1
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx, [fx], [fxv], 'float32'),
(dx/dx, [dx], [dxv], 'float64'),
(fv/fv, [fv], [fvv], 'float32'),
(dv/dv, [dv], [dvv], 'float64'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert (out == numpy.ones(shp, dtype=out_dtype)).all()
topo = f.maker.fgraph.toposort()
if sym_inputs[0].broadcastable[0]:
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Mul)
assert len(topo[0].inputs) == 2
assert isinstance(topo[1].op, (T.Elemwise, ))
assert isinstance(topo[1].op.scalar_op,
theano.scalar.basic.TrueDiv)
assert len(topo[1].inputs) == 2
assert(out_dtype == out.dtype)
# test 2 * x / 2 -> x
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((2*dx)/2, [dx], [dxv], 'float64'),
((2*fx)/2, [fx], [fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2*dv)/2, [dv], [dvv], 'float64'),
((2*fv)/2, [fv], [fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0])
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
topo[0].op == deep_copy_op
assert(out_dtype == out.dtype)
# test x / abs(x) -> sign(x)
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
(dx/abs(dx), [dx], [0.5-dxv], 'float64'),
(fx/abs(fx), [fx], [0.5-fxv], 'float32'),
(dx/abs(dx), [dx], [0.1*dxv], 'float64'),
(fx/abs(fx), [fx], [0.1*fxv], 'float32'),
(dv/abs(dv), [dv], [0.5-dvv], 'float64'),
(fv/abs(fv), [fv], [0.5-fvv], 'float32'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.all(numpy.isfinite(out))
assert numpy.allclose(out, numpy.sign(val_inputs[0]))
assert(out_dtype == out.dtype)
assert len(f.maker.fgraph.toposort()) == 1
# test (2*x) / (3*abs(x)) -> sign(x)
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),
((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),
((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),
((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
topo = f.maker.fgraph.toposort()
out = f(*val_inputs)
assert numpy.all(numpy.isfinite(out))
assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)
assert(out_dtype == out.dtype)
finally:
pass
assert isinstance(topo[0].op, Shape_i)
assert isinstance(topo[1].op, tensor.Alloc)
else:
assert len(topo) == 3
assert isinstance(topo[0].op, Shape_i)
assert isinstance(topo[1].op, Shape_i)
assert isinstance(topo[2].op, tensor.Alloc)
assert(out_dtype == out.dtype)
# test (x * y) / x -> y
for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([
((dx*dy)/dx, [dx, dy], [dxv, dyv], 0, 'float64'),
((fx*fy)/fx, [fx, fy], [fxv, fyv], 0, 'float32'),
((dv*dy)/dv, [dv, dy], [dvv, dyv], 0, 'float64'),
((fv*fy)/fv, [fv, fy], [fvv, fyv], 0, 'float32'),
# must broadcast as their is a dimshuffle in the computation
((dx*dv)/dx, [dx, dv], [dxv, dvv], 1, 'float64'),
# topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]
((fx*fv)/fx, [fx, fv], [fxv, fvv], 1, 'float32')
# topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert(out_dtype == out.dtype)
assert numpy.allclose(out, val_inputs[1])
topo = f.maker.fgraph.toposort()
if topo and not(len(topo) == 1 and topo[0].op == deep_copy_op):
for node in topo[:-1]:
assert isinstance(node.op, Shape_i)
assert isinstance(topo[-1].op, tensor.Alloc)
# test x / y / x -> 1 / y
for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([
((dx/dy)/dx, [dx, dy], [dxv, dyv], 1, 'float64'),
((fx/fy)/fx, [fx, fy], [fxv, fyv], 1, 'float32'),
((dv/dy)/dv, [dv, dy], [dvv, dyv], 1, 'float64'),
((fv/fy)/fv, [fv, fy], [fvv, fyv], 1, 'float32'),
# must broadcast as their is a dimshuffle in the computation
((dx/dv)/dx, [dx, dv], [dxv, dvv], 1, 'float64'),
# topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]
((fx/fv)/fx, [fx, fv], [fxv, fvv], 1, 'float32'),
# topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (1 / val_inputs[1]))
topo = f.maker.fgraph.toposort()
elem = [t for t in topo if isinstance(t.op, T.Elemwise)]
assert len(elem) == nb_elemwise
assert isinstance(elem[0].op, (T.Elemwise, ))
assert isinstance(elem[0].op.scalar_op, (
theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))
assert(out_dtype == out.dtype)
# test (a / b) * (b / c) * (c / d) -> a / d
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((dx / dy) * (dy / dz) * (dz / dw), [dx, dy, dz, dw], [dxv, dyv, dzv, dwv], 'float64'),
((fx / fy) * (fy / fz) * (fz / fw), [fx, fy, fz, fw], [fxv, fyv, fzv, fwv], 'float32'),
((dv / dy) * (dy / dz) * (dz / dw), [dv, dy, dz, dw], [dvv, dyv, dzv, dwv], 'float64'),
((fv / fy) * (fy / fz) * (fz / fw), [fv, fy, fz, fw], [fvv, fyv, fzv, fwv], 'float32'),
((dx / dv) * (dv / dz) * (dz / dw), [dx, dv, dz, dw], [dxv, dvv, dzv, dwv], 'float64'),
((fx / fv) * (fv / fz) * (fz / fw), [fx, fv, fz, fw], [fxv, fvv, fzv, fwv], 'float32'),
((dx / dy) * (dy / dv) * (dv / dw), [dx, dy, dv, dw], [dxv, dyv, dvv, dwv], 'float64'),
((fx / fy) * (fy / fv) * (fv / fw), [fx, fy, fv, fw], [fxv, fyv, fvv, fwv], 'float32'),
((dx / dy) * (dy / dz) * (dz / dv), [dx, dy, dz, dv], [dxv, dyv, dzv, dvv], 'float64'),
((fx / fy) * (fy / fz) * (fz / fv), [fx, fy, fz, fv], [fxv, fyv, fzv, fvv], 'float32'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.TrueDiv)
assert len(topo[0].inputs) == 2
assert(out_dtype == out.dtype)
# test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
(((2.0*dx)/(4.0*dy)), [dx, dy], [dxv, dyv], 'float64'),
(((2.0*fx)/(4.0*fy)), [fx, fy], [fxv, fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(((2.0*dv)/(4.0*dy)), [dv, dy], [dvv, dyv], 'float64'),
(((2.0*fv)/(4.0*fy)), [fv, fy], [fvv, fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(((2.0*dx)/(4.0*dv)), [dx, dv], [dxv, dvv], 'float64'),
(((2.0*fx)/(4.0*fv)), [fx, fv], [fxv, fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (0.5 *
val_inputs[0] / val_inputs[1]))
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Mul)
assert len(topo[0].inputs) == 2
assert isinstance(topo[1].op, (T.Elemwise, ))
assert isinstance(topo[1].op.scalar_op,
theano.scalar.basic.TrueDiv)
assert len(topo[1].inputs) == 2
assert(out_dtype == out.dtype)
# test 2 * x / 2 -> x
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((2*dx)/2, [dx], [dxv], 'float64'),
((2*fx)/2, [fx], [fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2*dv)/2, [dv], [dvv], 'float64'),
((2*fv)/2, [fv], [fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0])
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
topo[0].op == deep_copy_op
assert(out_dtype == out.dtype)
# test x / abs(x) -> sign(x)
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
(dx/abs(dx), [dx], [0.5-dxv], 'float64'),
(fx/abs(fx), [fx], [0.5-fxv], 'float32'),
(dx/abs(dx), [dx], [0.1*dxv], 'float64'),
(fx/abs(fx), [fx], [0.1*fxv], 'float32'),
(dv/abs(dv), [dv], [0.5-dvv], 'float64'),
(fv/abs(fv), [fv], [0.5-fvv], 'float32'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.all(numpy.isfinite(out))
assert numpy.allclose(out, numpy.sign(val_inputs[0]))
assert(out_dtype == out.dtype)
assert len(f.maker.fgraph.toposort()) == 1
# test (2*x) / (3*abs(x)) -> sign(x)
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),
((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),
((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),
((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
topo = f.maker.fgraph.toposort()
out = f(*val_inputs)
assert numpy.all(numpy.isfinite(out))
assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)
assert(out_dtype == out.dtype)
def test_abs_mul_div(self):
"""
......@@ -701,50 +699,47 @@ class test_canonize(unittest.TestCase):
# We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode = compile.mode.get_default_mode()
try:
opt = gof.Query(["canonicalize"])
opt = opt.excluding(
'local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
# test fail!
# test x / y / z -> x / (y * z)
for (g, sym_inputs, val_inputs, out_dtype) in [
((dx/dy)/dz, [dx, dy, dz], [dxv, dyv, dzv], 'float64'),
((fx/fy)/fz, [fx, fy, fz], [fxv, fyv, fzv], 'float32')
]:
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0] /
val_inputs[1] / val_inputs[2])
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Inv)
assert len(topo[0].inputs) == 1
assert(out_dtype == out.dtype)
# test x / (y / z) -> (x * z) / y
for (g, sym_inputs, val_inputs, out_dtype) in [
(dx/(dy/dz), [dx, dy, dz], [dxv, dyv, dzv], 'float64'),
(fx/(fy/fz), [fx, fy, fz], [fxv, fyv, fzv], 'float32')
]:
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0] / (
val_inputs[1] / val_inputs[2]))
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Inv)
assert len(topo[0].inputs) == 1
assert(out_dtype == out.dtype)
finally:
pass
opt = gof.Query(["canonicalize"])
opt = opt.excluding(
'local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
# test fail!
# test x / y / z -> x / (y * z)
for (g, sym_inputs, val_inputs, out_dtype) in [
((dx/dy)/dz, [dx, dy, dz], [dxv, dyv, dzv], 'float64'),
((fx/fy)/fz, [fx, fy, fz], [fxv, fyv, fzv], 'float32')
]:
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0] /
val_inputs[1] / val_inputs[2])
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Inv)
assert len(topo[0].inputs) == 1
assert(out_dtype == out.dtype)
# test x / (y / z) -> (x * z) / y
for (g, sym_inputs, val_inputs, out_dtype) in [
(dx/(dy/dz), [dx, dy, dz], [dxv, dyv, dzv], 'float64'),
(fx/(fy/fz), [fx, fy, fz], [fxv, fyv, fzv], 'float32')
]:
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0] / (
val_inputs[1] / val_inputs[2]))
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Inv)
assert len(topo[0].inputs) == 1
assert(out_dtype == out.dtype)
def test_dont_merge_if_multiple_client(self):
""" test those case take from the comment in Canonizer
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论