Commit 63da6caa, authored by Pascal Lamblin

Merge pull request #4440 from vmichals/keep_stack_trace_fix_tensor/tests/test_opt

keep stack trace: fix optimizations or tests (tensor/tests/test_opt.py)
......@@ -3281,20 +3281,21 @@ def local_adv_sub1_adv_inc_sub1(node):
cond = [T.all(T.and_(T.lt(idx, x.shape[0]), T.ge(idx, -x.shape[0])))]
if not node.fgraph.shape_feature.same_shape(idx, y, 0, 0):
cond.append(T.eq(idx.shape[0], y.shape[0]))
y = Assert("Bad indexing or shapes in a AdvancedIncSubtensor1 "
r = Assert("Bad indexing or shapes in a AdvancedIncSubtensor1 "
"that was optimized away")(y, *cond)
copy_stack_trace(y, r)
if y.dtype == node.outputs[0].dtype:
return [y]
if r.dtype == node.outputs[0].dtype:
return [r]
# It is possible that y is upcast or downcast to x.dtype.
# In all case, as we set or add with 0, we can just cast y.
r = T.cast(y, node.outputs[0].dtype)
r2 = T.cast(r, node.outputs[0].dtype)
# Copy over stacktrace from before casting, since
# we don't expect problems in the casting operation,
# and any problems in the indexing would have been spotted above.
copy_stack_trace(y, r)
return [r]
copy_stack_trace(r, r2)
return [r2]
@register_specialize
......@@ -4021,11 +4022,13 @@ def local_useless_split(node):
if node.op.len_splits == 1:
x, axis, splits = node.inputs
out = assert_op(x, T.eq(splits.shape[0], 1))
out = assert_op(out, T.eq(x.shape[axis], splits[0]))
# Copy over stacktrace from previous output node.
copy_stack_trace(node.outputs, out)
return [out]
out2 = assert_op(out, T.eq(x.shape[axis], splits[0]))
# Copy over stacktrace from previous output node.
copy_stack_trace(out, out2)
return [out2]
################
......
......@@ -1946,24 +1946,21 @@ class test_local_subtensor_make_vector(unittest.TestCase):
r = f(0, 1, 2)
assert r[0] == 0 and r[1] == 2
def test_stacktrace(self):
def test_stack_trace(self):
x, y, z = tensor.lscalars('xyz')
v = make_vector(x, y, z)
# Compile function using only the 'local_subtensor_make_vector' optimization,
# which requires us to add the 'canonicalize' phase.
mode = theano.compile.mode.Mode(optimizer=None).including('canonicalize_db').including("local_subtensor_make_vector")
f = function([x, y, z], v[0], mode=mode)
mode = theano.compile.mode.get_default_mode().including(
"local_subtensor_make_vector")
# Compile function using all optimizations in fast_compile mode,
# including the 'local_subtensor_make_vector' optimization
mode = theano.compile.mode.get_mode('FAST_COMPILE').including("local_subtensor_make_vector")
f = function([x, y, z], v[0], mode=mode)
# list of subtensor cases, where local_subtensor_make_vector
# inserts a new MakeVector node
v_subtensors = [v[:2], v[::2], v[[0, 2]]]
for v_subtensor in v_subtensors:
f = function([x, y, z], v_subtensor, mode=mode)
self.assertTrue(check_stack_trace(f, ops_to_check='all'))
# The two cases in this test do not check the case where
# local_subtensor_make_vector inserts a Subtensor node (See issue #4421)
# self.assertTrue(check_stack_trace(f, ops_to_check='all'))
class test_local_subtensor_lift(unittest.TestCase):
def test0(self):
......@@ -2787,32 +2784,23 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
self.assertRaises((AssertionError, ValueError),
f, dx, dy, [1])
def test_stacktrace(self):
def test_stack_trace(self):
x = tensor.matrix("x")
y = tensor.matrix("y")
# test cases with y.dtype
# - equal to x.dtype
# - different from x.dtype (to trigger the cast in
# local_adv_sub1_adv_inc_sub1)
ys = [tensor.matrix("y"), tensor.dmatrix("y")]
idx = tensor.ivector()
dx = numpy.random.rand(4, 5).astype(config.floatX)
dy = numpy.random.rand(2, 5).astype(config.floatX)
didx = numpy.asarray([1, 3], "int32")
# set_subtensor
inc = tensor.set_subtensor(x[idx], y)
o = inc[idx]
# Compile function using only the 'local_subtensor_make_vector' optimization,
# which requires us to add the 'canonicalize' phase.
mode = theano.compile.mode.Mode(optimizer=None).including('canonicalize').including("local_adv_sub1_adv_inc_sub1")
f = theano.function([x, y, idx], o, self.mode)
# The opt only removes nodes in this case, no check_stack_trace needed
# set_subtensor and then subtensor with both ys
incs = [tensor.set_subtensor(x[idx], y) for y in ys]
outs = [inc[idx] for inc in incs]
# Compile function using all optimizations in fast_compile mode,
# including the 'local_subtensor_make_vector' optimization
mode = theano.compile.mode.get_mode('FAST_COMPILE').including("local_adv_sub1_adv_inc_sub1")
f = theano.function([x, y, idx], o, self.mode)
# The opt only removes nodes in this case, no check_stack_trace needed
# See if there are use cases which add nodes and need check_stack_trace
# See issue #4421
for y, out in zip(ys, outs):
f = theano.function([x, y, idx], out, self.mode)
self.assertTrue(check_stack_trace(
f, ops_to_check=(Assert, scal.Cast)))
class Test_alloc_zero(unittest.TestCase):
......@@ -3020,7 +3008,7 @@ def test_local_IncSubtensor_serialize():
assert check_stack_trace(f, ops_to_check=[
tensor.IncSubtensor, tensor.AdvancedIncSubtensor,
tensor.AdvancedIncSubtensor1])
def test_local_set_to_inc_subtensor():
v = theano.tensor.fmatrix()
s = v[[2, 1]]
......@@ -3053,8 +3041,8 @@ def test_local_set_to_inc_subtensor():
# before and after optimization.
assert check_stack_trace(f1, ops_to_check=tensor.AdvancedIncSubtensor1)
assert check_stack_trace(f2, ops_to_check='all')
def test_local_subtensor_of_dot():
m1 = theano.tensor.matrix()
m2 = theano.tensor.matrix()
......@@ -3724,11 +3712,11 @@ class Test_local_useless_inc_subtensor_alloc(unittest.TestCase):
r2 = f2(x_value, i_value, y_value)
utt.assert_allclose(r1, r2)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f1, ops_to_check=tensor.AdvancedIncSubtensor))
self.assertTrue(check_stack_trace(f2, ops_to_check=tensor.AdvancedIncSubtensor))
def test_advanced_inc_subtensor1(self):
if tensor.inplace_increment is None:
......@@ -3758,7 +3746,7 @@ class Test_local_useless_inc_subtensor_alloc(unittest.TestCase):
r2 = f2(x_value, i_value, y_value)
utt.assert_allclose(r1, r2)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(
f1, ops_to_check=tensor.AdvancedIncSubtensor1))
......@@ -3789,7 +3777,7 @@ class Test_local_useless_inc_subtensor_alloc(unittest.TestCase):
r2 = f2(x_value, i_value, y_value)
utt.assert_allclose(r1, r2)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f1, ops_to_check='last'))
self.assertTrue(check_stack_trace(f2, ops_to_check='last'))
......@@ -4162,7 +4150,6 @@ class T_Tile(unittest.TestCase):
f(data)
# In this case the opt only removes nodes,
# no need to check_stack_trace
# See issue #4421
def speed_local_pow_specialize_range():
......@@ -6055,9 +6042,7 @@ def test_local_useless_split():
assert len(graph_nonopt)==1
assert isinstance(graph_nonopt[0].op, tensor.Split)
# Check if there are use cases that are not covered here
# and if the line below is necessary and correct (See issue #4421)
# assert check_stack_trace(f_opt, ops_to_check=[Assert])
assert check_stack_trace(f_opt, ops_to_check=[Assert])
assert check_stack_trace(f_nonopt, ops_to_check='all')
......
Markdown format supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment