Commit b75cf2e1 authored by Pascal Lamblin

Merge pull request #2823 from nouiz/faster_test

finish gh-2768
@@ -717,7 +717,7 @@ def clone_get_equiv(inputs, outputs,
def general_toposort(r_out, deps, debug_print=False,
_deps=None, deps_cache=None):
compute_deps_cache=None, deps_cache=None):
"""WRITEME
:note:
@@ -729,15 +729,24 @@ def general_toposort(r_out, deps, debug_print=False,
:note:
The order of the return value list is determined by the order of nodes returned by the deps() function.
:param deps: a Python function that takes a node as input and
returns its dependencies.
:param compute_deps_cache: Optional.
If provided, deps_cache should also be provided. This is a
function like deps, but it also caches its results in the dict
passed as deps_cache.
:param deps_cache: a dict. Must be used with compute_deps_cache.
:note: deps should be provided, or it can be None if the caller
provides _deps and deps_cache. The second option removes a
Python function call, so it is faster.
provides compute_deps_cache and deps_cache. The second option
removes a Python function call and allows for more specialized
code, so it can be faster.
"""
if _deps is None:
if compute_deps_cache is None:
deps_cache = {}
def _deps(io):
def compute_deps_cache(io):
if io not in deps_cache:
d = deps(io)
if d:
@@ -751,10 +760,12 @@ def general_toposort(r_out, deps, debug_print=False,
return d
else:
return deps_cache[io]
assert deps_cache is not None
assert isinstance(r_out, (tuple, list, deque))
reachable, clients = stack_search(deque(r_out), _deps, 'dfs', True)
reachable, clients = stack_search(deque(r_out), compute_deps_cache,
'dfs', True)
sources = deque([r for r in reachable if not deps_cache.get(r, None)])
rset = set()
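
For reference, below is a minimal, self-contained sketch of the caching pattern this patch introduces: compute_deps_cache does the same work as deps but memoizes the result in deps_cache, so the later scan for source nodes can reuse it without another Python call. The toy graph and every name other than compute_deps_cache/deps_cache are made up for illustration.

# Illustrative sketch only -- not part of the patch.
from collections import deque

def toposort_sketch(r_out, deps):
    deps_cache = {}

    def compute_deps_cache(io):
        # Same work as deps(io), but memoized so later lookups
        # (e.g. finding nodes with no dependencies) are dict reads.
        if io not in deps_cache:
            deps_cache[io] = deps(io) or []
        return deps_cache[io]

    order, done, stack = [], set(), deque(r_out)
    while stack:                       # iterative DFS; assumes no cycles
        node = stack.pop()
        if node in done:
            continue
        d = compute_deps_cache(node)
        if all(p in done for p in d):
            done.add(node)
            order.append(node)         # all dependencies already emitted
        else:
            stack.append(node)
            stack.extend(p for p in d if p not in done)
    return order

graph = {'c': ['a', 'b'], 'b': ['a'], 'a': []}
print(toposort_sketch(['c'], graph.get))   # -> ['a', 'b', 'c']
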
@@ -800,12 +811,12 @@ def io_toposort(inputs, outputs, orderings=None):
# We build 2 functions as a speed-up
deps_cache = {}
deps = None
_deps = None
compute_deps = None
compute_deps_cache = None
if not orderings: # can be None or empty dict
# Specialized function that is faster when there is no ordering.
# Also include the cache in the function itself as a speed-up.
def _deps(obj):
def compute_deps_cache(obj):
if obj in deps_cache:
return deps_cache[obj]
rval = []
@@ -827,7 +838,7 @@ def io_toposort(inputs, outputs, orderings=None):
deps_cache[obj] = rval
return rval
else:
def deps(obj):
def compute_deps(obj):
rval = []
if obj not in iset:
if isinstance(obj, Variable):
@@ -840,7 +851,8 @@ def io_toposort(inputs, outputs, orderings=None):
assert not orderings.get(obj, [])
return rval
topo = general_toposort(outputs, deps=deps, _deps=_deps,
topo = general_toposort(outputs, deps=compute_deps,
compute_deps_cache=compute_deps_cache,
deps_cache=deps_cache)
return [o for o in topo if isinstance(o, Apply)]
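
A hedged usage sketch of the public entry point touched here, assuming a Theano checkout from this era (import path and signature as in this file):

# Illustrative sketch only -- not part of the patch.
import theano.tensor as T
from theano.gof.graph import io_toposort

x = T.vector('x')
y = (x + 1) * 2
# Apply nodes come back dependency-first: the add that computes
# x + 1 is scheduled before the mul that consumes it.
for node in io_toposort([x], [y]):
    print(node)
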
......
@@ -452,195 +452,193 @@ class test_canonize(unittest.TestCase):
# We must be sure that the Canonizer is working, but that we don't have other
# optimisations that could hide bugs in the Canonizer, such as local_elemwise_fusion
mode = compile.mode.get_default_mode()
try:
opt = gof.Query(["canonicalize"])
opt = opt.including('ShapeOpt')
opt = opt.excluding(
'local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
# test x / x -> 1
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx, [fx], [fxv], 'float32'),
(dx/dx, [dx], [dxv], 'float64'),
(fv/fv, [fv], [fvv], 'float32'),
(dv/dv, [dv], [dvv], 'float64'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert (out == numpy.ones(shp, dtype=out_dtype)).all()
topo = f.maker.fgraph.toposort()
if sym_inputs[0].broadcastable[0]:
assert len(topo) == 2
assert isinstance(topo[0].op, Shape_i)
assert isinstance(topo[1].op, tensor.Alloc)
else:
assert len(topo) == 3
assert isinstance(topo[0].op, Shape_i)
assert isinstance(topo[1].op, Shape_i)
assert isinstance(topo[2].op, tensor.Alloc)
assert(out_dtype == out.dtype)
# test (x * y) / x -> y
for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([
((dx*dy)/dx, [dx, dy], [dxv, dyv], 0, 'float64'),
((fx*fy)/fx, [fx, fy], [fxv, fyv], 0, 'float32'),
((dv*dy)/dv, [dv, dy], [dvv, dyv], 0, 'float64'),
((fv*fy)/fv, [fv, fy], [fvv, fyv], 0, 'float32'),
# must broadcast as there is a dimshuffle in the computation
((dx*dv)/dx, [dx, dv], [dxv, dvv], 1, 'float64'),
# topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]
((fx*fv)/fx, [fx, fv], [fxv, fvv], 1, 'float32')
# topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert(out_dtype == out.dtype)
assert numpy.allclose(out, val_inputs[1])
topo = f.maker.fgraph.toposort()
if topo and not(len(topo) == 1 and topo[0].op == deep_copy_op):
for node in topo[:-1]:
assert isinstance(node.op, Shape_i)
assert isinstance(topo[-1].op, tensor.Alloc)
# test x / y / x -> 1 / y
for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([
((dx/dy)/dx, [dx, dy], [dxv, dyv], 1, 'float64'),
((fx/fy)/fx, [fx, fy], [fxv, fyv], 1, 'float32'),
((dv/dy)/dv, [dv, dy], [dvv, dyv], 1, 'float64'),
((fv/fy)/fv, [fv, fy], [fvv, fyv], 1, 'float32'),
# must broadcast as there is a dimshuffle in the computation
((dx/dv)/dx, [dx, dv], [dxv, dvv], 1, 'float64'),
# topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]
((fx/fv)/fx, [fx, fv], [fxv, fvv], 1, 'float32'),
# topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (1 / val_inputs[1]))
topo = f.maker.fgraph.toposort()
elem = [t for t in topo if isinstance(t.op, T.Elemwise)]
assert len(elem) == nb_elemwise
assert isinstance(elem[0].op, (T.Elemwise, ))
assert isinstance(elem[0].op.scalar_op, (
theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))
assert(out_dtype == out.dtype)
# test (a / b) * (b / c) * (c / d) -> a / d
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((dx / dy) * (dy / dz) * (dz / dw), [dx, dy, dz, dw], [dxv, dyv, dzv, dwv], 'float64'),
((fx / fy) * (fy / fz) * (fz / fw), [fx, fy, fz, fw], [fxv, fyv, fzv, fwv], 'float32'),
((dv / dy) * (dy / dz) * (dz / dw), [dv, dy, dz, dw], [dvv, dyv, dzv, dwv], 'float64'),
((fv / fy) * (fy / fz) * (fz / fw), [fv, fy, fz, fw], [fvv, fyv, fzv, fwv], 'float32'),
((dx / dv) * (dv / dz) * (dz / dw), [dx, dv, dz, dw], [dxv, dvv, dzv, dwv], 'float64'),
((fx / fv) * (fv / fz) * (fz / fw), [fx, fv, fz, fw], [fxv, fvv, fzv, fwv], 'float32'),
((dx / dy) * (dy / dv) * (dv / dw), [dx, dy, dv, dw], [dxv, dyv, dvv, dwv], 'float64'),
((fx / fy) * (fy / fv) * (fv / fw), [fx, fy, fv, fw], [fxv, fyv, fvv, fwv], 'float32'),
((dx / dy) * (dy / dz) * (dz / dv), [dx, dy, dz, dv], [dxv, dyv, dzv, dvv], 'float64'),
((fx / fy) * (fy / fz) * (fz / fv), [fx, fy, fz, fv], [fxv, fyv, fzv, fvv], 'float32'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.TrueDiv)
assert len(topo[0].inputs) == 2
assert(out_dtype == out.dtype)
# test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
(((2.0*dx)/(4.0*dy)), [dx, dy], [dxv, dyv], 'float64'),
(((2.0*fx)/(4.0*fy)), [fx, fy], [fxv, fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(((2.0*dv)/(4.0*dy)), [dv, dy], [dvv, dyv], 'float64'),
(((2.0*fv)/(4.0*fy)), [fv, fy], [fvv, fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(((2.0*dx)/(4.0*dv)), [dx, dv], [dxv, dvv], 'float64'),
(((2.0*fx)/(4.0*fv)), [fx, fv], [fxv, fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (0.5 *
val_inputs[0] / val_inputs[1]))
topo = f.maker.fgraph.toposort()
opt = gof.Query(["canonicalize"])
opt = opt.including('ShapeOpt')
opt = opt.excluding(
'local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
# test x / x -> 1
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx, [fx], [fxv], 'float32'),
(dx/dx, [dx], [dxv], 'float64'),
(fv/fv, [fv], [fvv], 'float32'),
(dv/dv, [dv], [dvv], 'float64'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert (out == numpy.ones(shp, dtype=out_dtype)).all()
topo = f.maker.fgraph.toposort()
if sym_inputs[0].broadcastable[0]:
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Mul)
assert len(topo[0].inputs) == 2
assert isinstance(topo[1].op, (T.Elemwise, ))
assert isinstance(topo[1].op.scalar_op,
theano.scalar.basic.TrueDiv)
assert len(topo[1].inputs) == 2
assert(out_dtype == out.dtype)
# test 2 * x / 2 -> x
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((2*dx)/2, [dx], [dxv], 'float64'),
((2*fx)/2, [fx], [fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2*dv)/2, [dv], [dvv], 'float64'),
((2*fv)/2, [fv], [fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0])
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert topo[0].op == deep_copy_op
assert(out_dtype == out.dtype)
# test x / abs(x) -> sign(x)
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
(dx/abs(dx), [dx], [0.5-dxv], 'float64'),
(fx/abs(fx), [fx], [0.5-fxv], 'float32'),
(dx/abs(dx), [dx], [0.1*dxv], 'float64'),
(fx/abs(fx), [fx], [0.1*fxv], 'float32'),
(dv/abs(dv), [dv], [0.5-dvv], 'float64'),
(fv/abs(fv), [fv], [0.5-fvv], 'float32'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.all(numpy.isfinite(out))
assert numpy.allclose(out, numpy.sign(val_inputs[0]))
assert(out_dtype == out.dtype)
assert len(f.maker.fgraph.toposort()) == 1
# test (2*x) / (3*abs(x)) -> sign(x)
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),
((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),
((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),
((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
topo = f.maker.fgraph.toposort()
out = f(*val_inputs)
assert numpy.all(numpy.isfinite(out))
assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)
assert(out_dtype == out.dtype)
finally:
pass
assert isinstance(topo[0].op, Shape_i)
assert isinstance(topo[1].op, tensor.Alloc)
else:
assert len(topo) == 3
assert isinstance(topo[0].op, Shape_i)
assert isinstance(topo[1].op, Shape_i)
assert isinstance(topo[2].op, tensor.Alloc)
assert(out_dtype == out.dtype)
# test (x * y) / x -> y
for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([
((dx*dy)/dx, [dx, dy], [dxv, dyv], 0, 'float64'),
((fx*fy)/fx, [fx, fy], [fxv, fyv], 0, 'float32'),
((dv*dy)/dv, [dv, dy], [dvv, dyv], 0, 'float64'),
((fv*fy)/fv, [fv, fy], [fvv, fyv], 0, 'float32'),
# must broadcast as there is a dimshuffle in the computation
((dx*dv)/dx, [dx, dv], [dxv, dvv], 1, 'float64'),
# topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]
((fx*fv)/fx, [fx, fv], [fxv, fvv], 1, 'float32')
# topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert(out_dtype == out.dtype)
assert numpy.allclose(out, val_inputs[1])
topo = f.maker.fgraph.toposort()
if topo and not(len(topo) == 1 and topo[0].op == deep_copy_op):
for node in topo[:-1]:
assert isinstance(node.op, Shape_i)
assert isinstance(topo[-1].op, tensor.Alloc)
# test x / y / x -> 1 / y
for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([
((dx/dy)/dx, [dx, dy], [dxv, dyv], 1, 'float64'),
((fx/fy)/fx, [fx, fy], [fxv, fyv], 1, 'float32'),
((dv/dy)/dv, [dv, dy], [dvv, dyv], 1, 'float64'),
((fv/fy)/fv, [fv, fy], [fvv, fyv], 1, 'float32'),
# must broadcast as there is a dimshuffle in the computation
((dx/dv)/dx, [dx, dv], [dxv, dvv], 1, 'float64'),
# topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]
((fx/fv)/fx, [fx, fv], [fxv, fvv], 1, 'float32'),
# topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (1 / val_inputs[1]))
topo = f.maker.fgraph.toposort()
elem = [t for t in topo if isinstance(t.op, T.Elemwise)]
assert len(elem) == nb_elemwise
assert isinstance(elem[0].op, (T.Elemwise, ))
assert isinstance(elem[0].op.scalar_op, (
theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))
assert(out_dtype == out.dtype)
# test (a / b) * (b / c) * (c / d) -> a / d
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((dx / dy) * (dy / dz) * (dz / dw), [dx, dy, dz, dw], [dxv, dyv, dzv, dwv], 'float64'),
((fx / fy) * (fy / fz) * (fz / fw), [fx, fy, fz, fw], [fxv, fyv, fzv, fwv], 'float32'),
((dv / dy) * (dy / dz) * (dz / dw), [dv, dy, dz, dw], [dvv, dyv, dzv, dwv], 'float64'),
((fv / fy) * (fy / fz) * (fz / fw), [fv, fy, fz, fw], [fvv, fyv, fzv, fwv], 'float32'),
((dx / dv) * (dv / dz) * (dz / dw), [dx, dv, dz, dw], [dxv, dvv, dzv, dwv], 'float64'),
((fx / fv) * (fv / fz) * (fz / fw), [fx, fv, fz, fw], [fxv, fvv, fzv, fwv], 'float32'),
((dx / dy) * (dy / dv) * (dv / dw), [dx, dy, dv, dw], [dxv, dyv, dvv, dwv], 'float64'),
((fx / fy) * (fy / fv) * (fv / fw), [fx, fy, fv, fw], [fxv, fyv, fvv, fwv], 'float32'),
((dx / dy) * (dy / dz) * (dz / dv), [dx, dy, dz, dv], [dxv, dyv, dzv, dvv], 'float64'),
((fx / fy) * (fy / fz) * (fz / fv), [fx, fy, fz, fv], [fxv, fyv, fzv, fvv], 'float32'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.TrueDiv)
assert len(topo[0].inputs) == 2
assert(out_dtype == out.dtype)
# test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
(((2.0*dx)/(4.0*dy)), [dx, dy], [dxv, dyv], 'float64'),
(((2.0*fx)/(4.0*fy)), [fx, fy], [fxv, fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(((2.0*dv)/(4.0*dy)), [dv, dy], [dvv, dyv], 'float64'),
(((2.0*fv)/(4.0*fy)), [fv, fy], [fvv, fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(((2.0*dx)/(4.0*dv)), [dx, dv], [dxv, dvv], 'float64'),
(((2.0*fx)/(4.0*fv)), [fx, fv], [fxv, fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, (0.5 *
val_inputs[0] / val_inputs[1]))
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Mul)
assert len(topo[0].inputs) == 2
assert isinstance(topo[1].op, (T.Elemwise, ))
assert isinstance(topo[1].op.scalar_op,
theano.scalar.basic.TrueDiv)
assert len(topo[1].inputs) == 2
assert(out_dtype == out.dtype)
# test 2 * x / 2 -> x
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((2*dx)/2, [dx], [dxv], 'float64'),
((2*fx)/2, [fx], [fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2*dv)/2, [dv], [dvv], 'float64'),
((2*fv)/2, [fv], [fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0])
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert topo[0].op == deep_copy_op
assert(out_dtype == out.dtype)
# test x / abs(x) -> sign(x)
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
(dx/abs(dx), [dx], [0.5-dxv], 'float64'),
(fx/abs(fx), [fx], [0.5-fxv], 'float32'),
(dx/abs(dx), [dx], [0.1*dxv], 'float64'),
(fx/abs(fx), [fx], [0.1*fxv], 'float32'),
(dv/abs(dv), [dv], [0.5-dvv], 'float64'),
(fv/abs(fv), [fv], [0.5-fvv], 'float32'),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.all(numpy.isfinite(out))
assert numpy.allclose(out, numpy.sign(val_inputs[0]))
assert(out_dtype == out.dtype)
assert len(f.maker.fgraph.toposort()) == 1
# test (2*x) / (3*abs(x)) -> sign(x)
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([
((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),
((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),
((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),
((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],
{'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
topo = f.maker.fgraph.toposort()
out = f(*val_inputs)
assert numpy.all(numpy.isfinite(out))
assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)
assert(out_dtype == out.dtype)
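
As a standalone sketch of what the assertions above verify (assumes a standard Theano install; exact node counts can differ between versions): after canonicalization, x/x compiles down to shape computation plus an Alloc of ones, with no division left in the graph.

# Illustrative sketch only -- not part of the patch.
import numpy
import theano
import theano.tensor as T

x = T.dvector('x')
f = theano.function([x], x / x)
print(f.maker.fgraph.toposort())    # Shape_i/Alloc nodes, no true_div
print(f(numpy.array([3.0, 4.0])))   # [ 1.  1.]
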
def test_abs_mul_div(self):
"""
@@ -701,50 +699,47 @@ class test_canonize(unittest.TestCase):
# We must be sure that the Canonizer is working, but that we don't have other
# optimisations that could hide bugs in the Canonizer, such as local_elemwise_fusion
mode = compile.mode.get_default_mode()
try:
opt = gof.Query(["canonicalize"])
opt = opt.excluding(
'local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
# test fail!
# test x / y / z -> x / (y * z)
for (g, sym_inputs, val_inputs, out_dtype) in [
((dx/dy)/dz, [dx, dy, dz], [dxv, dyv, dzv], 'float64'),
((fx/fy)/fz, [fx, fy, fz], [fxv, fyv, fzv], 'float32')
]:
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0] /
val_inputs[1] / val_inputs[2])
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Inv)
assert len(topo[0].inputs) == 1
assert(out_dtype == out.dtype)
# test x / (y / z) -> (x * z) / y
for (g, sym_inputs, val_inputs, out_dtype) in [
(dx/(dy/dz), [dx, dy, dz], [dxv, dyv, dzv], 'float64'),
(fx/(fy/fz), [fx, fy, fz], [fxv, fyv, fzv], 'float32')
]:
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0] / (
val_inputs[1] / val_inputs[2]))
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Inv)
assert len(topo[0].inputs) == 1
assert(out_dtype == out.dtype)
finally:
pass
opt = gof.Query(["canonicalize"])
opt = opt.excluding(
'local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
# test fail!
# test x / y / z -> x / (y * z)
for (g, sym_inputs, val_inputs, out_dtype) in [
((dx/dy)/dz, [dx, dy, dz], [dxv, dyv, dzv], 'float64'),
((fx/fy)/fz, [fx, fy, fz], [fxv, fyv, fzv], 'float32')
]:
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0] /
val_inputs[1] / val_inputs[2])
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Inv)
assert len(topo[0].inputs) == 1
assert(out_dtype == out.dtype)
# test x / (y / z) -> (x * z) / y
for (g, sym_inputs, val_inputs, out_dtype) in [
(dx/(dy/dz), [dx, dy, dz], [dxv, dyv, dzv], 'float64'),
(fx/(fy/fz), [fx, fy, fz], [fxv, fyv, fzv], 'float32')
]:
f = compile.function(list(sym_inputs), g,
mode=mode)
out = f(*val_inputs)
assert numpy.allclose(out, val_inputs[0] / (
val_inputs[1] / val_inputs[2]))
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, (T.Elemwise, ))
assert isinstance(topo[0].op.scalar_op,
theano.scalar.basic.Inv)
assert len(topo[0].inputs) == 1
assert(out_dtype == out.dtype)
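
The same check, sketched outside the test harness (hedged: assumes a standard Theano install where canonicalize runs in the default mode; the exact node layout can vary):

# Illustrative sketch only -- not part of the patch.
import theano
import theano.tensor as T

x, y, z = T.dvectors('x', 'y', 'z')
f = theano.function([x, y, z], (x / y) / z)
# Expect the two divisions to be merged, so only one reciprocal
# (Inv) or true_div survives in the compiled graph.
for node in f.maker.fgraph.toposort():
    print(node)
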
def test_dont_merge_if_multiple_client(self):
""" test those case take from the comment in Canonizer
......