提交 63da6caa authored 作者: Pascal Lamblin's avatar Pascal Lamblin

Merge pull request #4440 from vmichals/keep_stack_trace_fix_tensor/tests/test_opt

keep stack trace: fix optimizations or tests (tensor/tests/test_opt.py)
...@@ -3281,20 +3281,21 @@ def local_adv_sub1_adv_inc_sub1(node): ...@@ -3281,20 +3281,21 @@ def local_adv_sub1_adv_inc_sub1(node):
cond = [T.all(T.and_(T.lt(idx, x.shape[0]), T.ge(idx, -x.shape[0])))] cond = [T.all(T.and_(T.lt(idx, x.shape[0]), T.ge(idx, -x.shape[0])))]
if not node.fgraph.shape_feature.same_shape(idx, y, 0, 0): if not node.fgraph.shape_feature.same_shape(idx, y, 0, 0):
cond.append(T.eq(idx.shape[0], y.shape[0])) cond.append(T.eq(idx.shape[0], y.shape[0]))
y = Assert("Bad indexing or shapes in a AdvancedIncSubtensor1 " r = Assert("Bad indexing or shapes in a AdvancedIncSubtensor1 "
"that was optimized away")(y, *cond) "that was optimized away")(y, *cond)
copy_stack_trace(y, r)
if y.dtype == node.outputs[0].dtype: if r.dtype == node.outputs[0].dtype:
return [y] return [r]
# It is possible that y is upcast or downcast to x.dtype. # It is possible that y is upcast or downcast to x.dtype.
# In all case, as we set or add with 0, we can just cast y. # In all case, as we set or add with 0, we can just cast y.
r = T.cast(y, node.outputs[0].dtype) r2 = T.cast(r, node.outputs[0].dtype)
# Copy over stacktrace from before casting, since # Copy over stacktrace from before casting, since
# we don't expect problems in the casting operation, # we don't expect problems in the casting operation,
# and any problems in the indexing would have been spotted above. # and any problems in the indexing would have been spotted above.
copy_stack_trace(y, r) copy_stack_trace(r, r2)
return [r] return [r2]
@register_specialize @register_specialize
...@@ -4021,11 +4022,13 @@ def local_useless_split(node): ...@@ -4021,11 +4022,13 @@ def local_useless_split(node):
if node.op.len_splits == 1: if node.op.len_splits == 1:
x, axis, splits = node.inputs x, axis, splits = node.inputs
out = assert_op(x, T.eq(splits.shape[0], 1)) out = assert_op(x, T.eq(splits.shape[0], 1))
out = assert_op(out, T.eq(x.shape[axis], splits[0]))
# Copy over stacktrace from previous output node. # Copy over stacktrace from previous output node.
copy_stack_trace(node.outputs, out) copy_stack_trace(node.outputs, out)
return [out] out2 = assert_op(out, T.eq(x.shape[axis], splits[0]))
# Copy over stacktrace from previous output node.
copy_stack_trace(out, out2)
return [out2]
################ ################
......
...@@ -1946,23 +1946,20 @@ class test_local_subtensor_make_vector(unittest.TestCase): ...@@ -1946,23 +1946,20 @@ class test_local_subtensor_make_vector(unittest.TestCase):
r = f(0, 1, 2) r = f(0, 1, 2)
assert r[0] == 0 and r[1] == 2 assert r[0] == 0 and r[1] == 2
def test_stacktrace(self): def test_stack_trace(self):
x, y, z = tensor.lscalars('xyz') x, y, z = tensor.lscalars('xyz')
v = make_vector(x, y, z) v = make_vector(x, y, z)
# Compile function using only the 'local_subtensor_make_vector' optimization, mode = theano.compile.mode.get_default_mode().including(
# which requires us to add the 'canonicalize' phase. "local_subtensor_make_vector")
mode = theano.compile.mode.Mode(optimizer=None).including('canonicalize_db').including("local_subtensor_make_vector")
f = function([x, y, z], v[0], mode=mode)
# Compile function using all optimizations in fast_compile mode, # list of subtensor cases, where local_subtensor_make_vector
# including the 'local_subtensor_make_vector' optimization # inserts a new MakeVector node
mode = theano.compile.mode.get_mode('FAST_COMPILE').including("local_subtensor_make_vector") v_subtensors = [v[:2], v[::2], v[[0, 2]]]
f = function([x, y, z], v[0], mode=mode)
# The two cases in this test do not check the case where for v_subtensor in v_subtensors:
# local_subtensor_make_vector inserts a Subtensor node (See issue #4421) f = function([x, y, z], v_subtensor, mode=mode)
# self.assertTrue(check_stack_trace(f, ops_to_check='all')) self.assertTrue(check_stack_trace(f, ops_to_check='all'))
class test_local_subtensor_lift(unittest.TestCase): class test_local_subtensor_lift(unittest.TestCase):
...@@ -2787,32 +2784,23 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase): ...@@ -2787,32 +2784,23 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
self.assertRaises((AssertionError, ValueError), self.assertRaises((AssertionError, ValueError),
f, dx, dy, [1]) f, dx, dy, [1])
def test_stacktrace(self): def test_stack_trace(self):
x = tensor.matrix("x") x = tensor.matrix("x")
y = tensor.matrix("y") # test cases with y.dtype
# - equal to x.dtype
# - different from x.dtype (to trigger the cast in
# local_adv_sub1_adv_inc_sub1)
ys = [tensor.matrix("y"), tensor.dmatrix("y")]
idx = tensor.ivector() idx = tensor.ivector()
dx = numpy.random.rand(4, 5).astype(config.floatX) # set_subtensor and then subtensor with both ys
dy = numpy.random.rand(2, 5).astype(config.floatX) incs = [tensor.set_subtensor(x[idx], y) for y in ys]
didx = numpy.asarray([1, 3], "int32") outs = [inc[idx] for inc in incs]
# set_subtensor for y, out in zip(ys, outs):
inc = tensor.set_subtensor(x[idx], y) f = theano.function([x, y, idx], out, self.mode)
o = inc[idx] self.assertTrue(check_stack_trace(
# Compile function using only the 'local_subtensor_make_vector' optimization, f, ops_to_check=(Assert, scal.Cast)))
# which requires us to add the 'canonicalize' phase.
mode = theano.compile.mode.Mode(optimizer=None).including('canonicalize').including("local_adv_sub1_adv_inc_sub1")
f = theano.function([x, y, idx], o, self.mode)
# The opt only removes nodes in this case, no check_stack_trace needed
# Compile function using all optimizations in fast_compile mode,
# including the 'local_subtensor_make_vector' optimization
mode = theano.compile.mode.get_mode('FAST_COMPILE').including("local_adv_sub1_adv_inc_sub1")
f = theano.function([x, y, idx], o, self.mode)
# The opt only removes nodes in this case, no check_stack_trace needed
# See if there are use cases which add nodes and need check_stack_trace
# See issue #4421
class Test_alloc_zero(unittest.TestCase): class Test_alloc_zero(unittest.TestCase):
...@@ -4162,7 +4150,6 @@ class T_Tile(unittest.TestCase): ...@@ -4162,7 +4150,6 @@ class T_Tile(unittest.TestCase):
f(data) f(data)
# In this case the opt only removes nodes, # In this case the opt only removes nodes,
# no need to check_stack_trace # no need to check_stack_trace
# See issue #4421
def speed_local_pow_specialize_range(): def speed_local_pow_specialize_range():
...@@ -6055,9 +6042,7 @@ def test_local_useless_split(): ...@@ -6055,9 +6042,7 @@ def test_local_useless_split():
assert len(graph_nonopt)==1 assert len(graph_nonopt)==1
assert isinstance(graph_nonopt[0].op, tensor.Split) assert isinstance(graph_nonopt[0].op, tensor.Split)
# Check if there are use cases that are not covered here assert check_stack_trace(f_opt, ops_to_check=[Assert])
# and if the line below is necessary and correct (See issue #4421)
# assert check_stack_trace(f_opt, ops_to_check=[Assert])
assert check_stack_trace(f_nonopt, ops_to_check='all') assert check_stack_trace(f_nonopt, ops_to_check='all')
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论