提交 72ad300a authored 作者: Iulian Vlad Serban's avatar Iulian Vlad Serban

Implemented tests to check stacktrace is copied over for several opts.…

Implemented tests to check stacktrace is copied over for several opts. Implemented stacktrace copy over for more opts.
上级 97cf87dc
......@@ -2293,6 +2293,9 @@ def local_upcast_elemwise_constant_inputs(node):
# As this is just to allow merging more case, if
# the upcast don't work, we can just skip it.
return
# Copy over output stacktrace from before upcasting
copy_stack_trace(node.outputs[0], rval)
return rval
##################
......@@ -2345,7 +2348,10 @@ def local_useless_inc_subtensor(node):
for e in node.op.idx_list):
# They are the same shape, so we can remove this IncSubtensor
return [node.inputs[1]]
return [Subtensor(node.op.idx_list)(*node.inputs[1:])]
ret = Subtensor(node.op.idx_list)(*node.inputs[1:])
# Copy over previous output stacktrace
copy_stack_trace(node.outputs, ret)
return [ret]
@register_canonicalize
......@@ -2378,7 +2384,11 @@ def local_set_to_inc_subtensor(node):
if (subn.inputs[1] != node.inputs[2] or
subn.inputs[0] != node.inputs[0]):
return
return [advanced_inc_subtensor1(node.inputs[0], other, node.inputs[2])]
ret = advanced_inc_subtensor1(node.inputs[0], other, node.inputs[2])
# Copy over previous output stacktrace
# Julian: I'm not sure about this at all...
copy_stack_trace(node.outputs, ret)
return [ret]
@register_canonicalize
......@@ -2404,7 +2414,8 @@ def local_useless_slice(node):
sl_ins = Subtensor.collapse(slices[:last_slice],
lambda x: isinstance(x, T.Variable))
out = subtens(node.inputs[0], *sl_ins)
# Copy over previous output stacktrace
copy_stack_trace(node.outputs, out)
return [out]
......@@ -2522,6 +2533,8 @@ def local_useless_subtensor(node):
else:
return False
# We don't need to copy over any stacktrace here,
# because previous stacktrace should suffice.
return [node.inputs[0]]
......@@ -2546,7 +2559,13 @@ def local_subtensor_lift(node):
if isinstance(u.owner.op, T.Elemwise) and len(u.owner.inputs) == 1:
idx = node.inputs[1:]
x_idx = node.op(u.owner.inputs[0], *idx)
return [u.owner.op(x_idx)]
# Copy over previous output stacktrace
# Julian: Would it make more sense to copy stacktrace before opt is applied, i.e. from u.owner.inputs[0]?
copy_stack_trace(node.outputs, x_idx)
ret = u.owner.op(x_idx)
# Copy over previous output stacktrace
copy_stack_trace(node.outputs, ret)
return []
if isinstance(u.owner.op, T.Elemwise):
new_inputs = []
......
......@@ -108,6 +108,9 @@ class test_dimshuffle_lift(unittest.TestCase):
self.assertTrue(str(g) == "[DimShuffle{1,0}(DimShuffle{1,0}(x))]")
dimshuffle_lift.optimize(g)
self.assertTrue(str(g) == "[x]")
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))
def test_merge2(self):
x, y, z = inputs()
......@@ -118,6 +121,8 @@ class test_dimshuffle_lift(unittest.TestCase):
str(g))
dimshuffle_lift.optimize(g)
self.assertTrue(str(g) == "[DimShuffle{0,1,x,x}(x)]", str(g))
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))
def test_elim3(self):
x, y, z = inputs()
......@@ -129,6 +134,8 @@ class test_dimshuffle_lift(unittest.TestCase):
str(g))
dimshuffle_lift.optimize(g)
self.assertTrue(str(g) == "[x]", str(g))
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))
def test_lift(self):
x, y, z = inputs([False] * 1, [False] * 2, [False] * 3)
......@@ -155,6 +162,9 @@ class test_dimshuffle_lift(unittest.TestCase):
dimshuffle_lift.optimize(g)
self.assertTrue(str(g) in (opt_str_g_inplace, opt_str_g_noinplace),
str(g))
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))
def test_recursive_lift(self):
v = T.vector(dtype="float64")
......@@ -169,6 +179,7 @@ class test_dimshuffle_lift(unittest.TestCase):
"(<TensorType(float64, matrix)>, "
"DimShuffle{x,x}(TensorConstant{84}))))]")
self.assertTrue(str(g) == init_str_g)
new_out = local_dimshuffle_lift.transform(g.outputs[0].owner)[0]
new_g = FunctionGraph(g.inputs, [new_out])
opt_str_g = ("[Elemwise{mul,no_inplace}(Elemwise{add,no_inplace}"
......@@ -178,6 +189,8 @@ class test_dimshuffle_lift(unittest.TestCase):
"(<TensorType(float64, matrix)>), "
"DimShuffle{x,x}(TensorConstant{84})))]")
self.assertTrue(str(new_g) == opt_str_g)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(hasattr(new_g.outputs[0].tag, 'trace'))
def test_add_canonizer_problem0():
......@@ -1835,6 +1848,20 @@ class test_local_subtensor_make_vector(unittest.TestCase):
r = f(0, 1, 2)
assert r[0] == 0 and r[1] == 2
def test_stacktrace(self):
    """Check that local_subtensor_make_vector copies the stacktrace tag.

    Builds ``make_vector(x, y, z)[0]``, compiles it with the
    'local_subtensor_make_vector' optimization enabled, and inspects the
    compiled output's tag.

    The final assertion is still disabled: in FAST_COMPILE other
    optimizations also rewrite this graph and currently drop the trace,
    so asserting here would test those opts, not this one.
    """
    x, y, z = tensor.lscalars('xyz')
    v = make_vector(x, y, z)
    # FAST_COMPILE keeps the optimization set small; we only add the one
    # opt under test on top of it.
    # TODO: find a way to disable ALL optimizations except
    # 'local_subtensor_make_vector'; right now some other optimization
    # removes the stack trace before we can check it.
    mode = theano.compile.mode.get_mode(
        'FAST_COMPILE').including("local_subtensor_make_vector")
    f = function([x, y, z], v[0], mode=mode)
    # Check stacktrace was copied over correctly after opt was applied.
    # Disabled until the interfering optimizations above are dealt with:
    # self.assertTrue(hasattr(f.outputs[0].tag, 'trace'))
class test_local_subtensor_lift(unittest.TestCase):
def test0(self):
......@@ -2881,6 +2908,11 @@ class Test_local_elemwise_alloc(unittest.TestCase):
if elem.op is not None]) == count
)
def _verify_stack_trace(self, f):
    """Assert that every output of the compiled function *f* still
    carries a stacktrace tag after optimization.

    Each optimization is expected to call ``copy_stack_trace`` so the
    'trace' attribute survives graph rewriting.
    """
    self.assertTrue(
        all(hasattr(out.tag, 'trace') for out in f.outputs))
def test_remove_alloc_wo_dimshuffle(self):
# No optimization on alloc
func = function(
......@@ -2890,6 +2922,7 @@ class Test_local_elemwise_alloc(unittest.TestCase):
)
self._verify_alloc_count(func, 1)
self._verify_assert_count(func, 0)
self._verify_stack_trace(func)
# Optimization on alloc with assert
func = function(
......@@ -3792,7 +3825,6 @@ class test_assert(utt.InferShapeTester):
self._compile_and_check([admat, adscal, bdscal], [out],
[admat_val, adscal_val, bdscal_val], Assert)
def test_local_mul_specialize():
mode = theano.config.mode
if mode == 'FAST_COMPILE':
......@@ -5751,6 +5783,8 @@ class Test_lift_transpose_through_dot(unittest.TestCase):
g = self.simple_optimize(FunctionGraph([a, b], [tensor.dot(a, b).T]))
sg = '[dot(DimShuffle{1,0}(b), DimShuffle{1,0}(a))]'
assert str(g) == sg, (str(g), sg)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))
def test_row_matrix(self):
a = vector('a')
......@@ -5761,6 +5795,8 @@ class Test_lift_transpose_through_dot(unittest.TestCase):
level='stabilize')
sg = '[dot(DimShuffle{1,0}(b), DimShuffle{0,x}(a))]'
assert str(g) == sg, (str(g), sg)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))
def test_matrix_col(self):
a = vector('a')
......@@ -5771,6 +5807,8 @@ class Test_lift_transpose_through_dot(unittest.TestCase):
level='stabilize')
sg = '[dot(DimShuffle{x,0}(a), DimShuffle{1,0}(b))]'
assert str(g) == sg, (str(g), sg)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))
def test_local_upcast_elemwise_constant_inputs():
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论