提交 461b7e02 authored 作者: Frederic's avatar Frederic

Make opt local_adv_sub1_adv_inc_sub1 add assert to don't loose error and…

Make opt local_adv_sub1_adv_inc_sub1 add assert to don't loose error and document how to get rid of them
上级 ad4c3a84
......@@ -72,13 +72,33 @@ and use directly the optimized graph from the pickled file.
Faster Theano function
----------------------
You can set the Theano flag `allow_gc` to `False` to get a speed-up by using
You can set the Theano flag ``allow_gc`` to ``False`` to get a speed-up by using
more memory. By default, Theano frees intermediate results when we don't need
them anymore. Doing so prevents us from reusing this memory. So disabling the
garbage collection will keep all intermediate results' memory space to allow to
reuse them during the next call to the same Theano function, if they are of the
correct shape. The shape could change if the shapes of the inputs change.
.. _unsafe_optimization:
Unsafe optimization
===================
Some Theano optimizations make the assumption that the user inputs are
valid. What that means is that if the user provides invalid values (like
incompatible shapes or indexing values that are out of bounds) and
the optimization is applied, the user error will get lost. Most of the
time, the assumption is that the user inputs are valid. So it is good
to have the optimization applied. But losing the error is bad.
The newest optimizations in Theano with such an assumption will add
assertions in the graph to keep the user error message. But computing
those assertions could take some time. If you are sure that all is valid
in your graph and want the fastest possible Theano, you can enable an
optimization that will remove those assertions:
``optimizer_including=local_remove_all_assert``
Faster Small Theano function
----------------------------
......
......@@ -2502,39 +2502,47 @@ def local_setsubtensor_of_constants(node):
return False
@register_canonicalize("rm_idx_err", "rm_shape_err")
@register_stabilize("rm_idx_err", "rm_shape_err")
@register_canonicalize
@register_stabilize
@gof.local_optimizer([AdvancedSubtensor1])
def local_adv_sub1_adv_inc_sub1(node):
""" Optimize the possible AdvSub1(AdvIncSub1(...), ...)
"""Optimize the possible AdvSub1(AdvIncSub1(...), ...)
AdvancedSubtensor1(AdvancedIncSubtensor1(0s, y, idx), idx) -> y
AdvancedSubtensor1(AdvancedSetSubtensor1(x, y, idx), idx) -> y
:note: This opt can remove index errors. We should assert that idx
values are in range and that x and y have compatible shapes.
:note: This opt adds an AssertOp. Otherwise, it would remove shape and
       index errors. If you want to get rid of them, see the
       :ref:`unsafe_optimization` section.
:todo: add AssertOp to do not remove shape error.
"""
if not isinstance(node.op, AdvancedSubtensor1):
return
x = node.inputs[0]
if (not x.owner or
not isinstance(x.owner.op, AdvancedIncSubtensor1)):
inp = node.inputs[0]
if (not inp.owner or
not isinstance(inp.owner.op, AdvancedIncSubtensor1)):
return
idx = node.inputs[1]
idx2 = x.owner.inputs[2]
y = x.owner.inputs[1]
idx2 = inp.owner.inputs[2]
x = inp.owner.inputs[0]
y = inp.owner.inputs[1]
if idx is not idx2:
return
if (not x.owner.op.set_instead_of_inc and
T.extract_constant(x.owner.inputs[0]) != 0):
if (not inp.owner.op.set_instead_of_inc and
T.extract_constant(x) != 0):
return
cond = [T.all(T.and_(T.lt(idx, x.shape[0]),
T.ge(idx, -x.shape[0])))]
if not node.fgraph.shape_feature.same_shape(idx, y, 0, 0):
cond.append(T.eq(idx.shape[0], y.shape[0]))
y = Assert("Bad indexing or shapes in a AdvancedIncSubtensor1 that was optimized away")(y, *cond)
if y.dtype == node.outputs[0].dtype:
return [y]
# It is possible that y is upcast or downcast to x.dtype.
# In all case, as we set or add with 0, we can just cast y.
return [T.cast(y, x.dtype)]
return [T.cast(y, node.outputs[0].dtype)]
####################
# Rebroadcast opts #
......
......@@ -2421,7 +2421,8 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
def setUp(self):
utt.seed_rng()
mode = theano.compile.mode.get_default_mode()
self.mode = mode.including("local_adv_sub1_adv_inc_sub1")
self.mode = mode.including("local_adv_sub1_adv_inc_sub1").excluding("fusion")
self.mode_no_assert = self.mode.including("local_remove_all_assert")
def test0(self):
for dtype1, dtype2 in [("float32", "float32"),
......@@ -2439,7 +2440,7 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
# set_subtensor
inc = tensor.set_subtensor(x[idx], y)
o = inc[idx]
f = theano.function([x, y, idx], o, self.mode)
f = theano.function([x, y, idx], o, self.mode_no_assert)
res = f(dx, dy, didx)
assert numpy.allclose(dy, res)
......@@ -2453,7 +2454,7 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
# inc_subtensor(data[idx], y)
inc = tensor.inc_subtensor(x[idx], y)
o = inc[idx]
f = theano.function([x, y, idx], o, self.mode)
f = theano.function([x, y, idx], o, self.mode_no_assert)
res = f(dx, dy, didx)
assert numpy.allclose((dx[didx] + dy), res)
......@@ -2463,7 +2464,7 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
# inc_subtensor(0[idx], y)
inc = tensor.inc_subtensor(x.zeros_like()[idx], y)
o = inc[idx]
f = theano.function([x, y, idx], o, self.mode)
f = theano.function([x, y, idx], o, self.mode_no_assert)
res = f(dx, dy, didx)
assert numpy.allclose(dy, res)
......@@ -2474,6 +2475,25 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
else:
assert len(topo) > 2
def test_assert(self):
x = tensor.matrix("x")
y = tensor.matrix("y")
idx = tensor.ivector()
dx = numpy.random.rand(4, 5).astype(config.floatX)
dy = numpy.random.rand(2, 5).astype(config.floatX)
didx = numpy.asarray([1, 3], "int32")
# set_subtensor
inc = tensor.set_subtensor(x[idx], y)
o = inc[idx]
f = theano.function([x, y, idx], o, self.mode)
# test wrong index
for i in [dx.shape[0], -dx.shape[0] - 1]:
self.assertRaises(AssertionError, f, dx, dy, [i, i])
# test wrong shape
self.assertRaises(AssertionError, f, dx, dy, [1])
class Test_alloc_zero(unittest.TestCase):
def setUp(self):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论