提交 cda18cf5 authored 作者: Frédéric Bastien's avatar Frédéric Bastien

Merge pull request #2728 from orhanf/register_local_remove_all_assert

Register local_remove_all_assert
......@@ -79,7 +79,7 @@ garbage collection will keep all intermediate results' memory space to allow to
reuse them during the next call to the same Theano function, if they are of the
correct shape. The shape could change if the shapes of the inputs change.
.. unsafe_optimization:
.. _unsafe_optimization:
Unsafe optimization
===================
......
......@@ -54,6 +54,7 @@ Optimization FAST_RUN FAST_COMPILE
:term:`elemwise fusion` x
:term:`GPU transfer` x
:term:`local_log_softmax` x x
:term:`local_remove_all_assert`
========================================================= ========= ============ =============
......@@ -258,3 +259,13 @@ Optimization FAST_RUN FAST_COMPILE
It can happen due to rounding errors that the softmax probability of one value gets to 0.
Taking the log of 0 would generate -inf that will probably generate NaN later.
We return a closer answer.
local_remove_all_assert
This is an unsafe optimization.
For the fastest possible Theano, this optimization can be enabled by
setting ``optimizer_including=local_remove_all_assert`` which will
remove all assertions in the graph for checking user inputs are valid.
Use this optimization if you are sure everything is valid in your graph.
See :ref:`unsafe_optimization`.
......@@ -97,6 +97,11 @@ register_opt(name='gpu_constant_folding')(
tensor.opt.constant_folding)
register_opt()(theano.tensor.opt.local_subtensor_make_vector)
# Register local_remove_all_assert as a global opt.
# It is tagged `unsafe` only, so it stays disabled unless the user opts in
# (e.g. optimizer_including=local_remove_all_assert); when active it removes
# the Assert nodes that validate user inputs.
gpu_optimizer.register('local_remove_all_assert',
                       theano.tensor.opt.local_remove_all_assert,
                       'unsafe')
# This is a partial list of CPU ops that can in some circumstances be
# moved to the GPU. This list is used by an optimization.
......
......@@ -56,6 +56,29 @@ def test_local_assert():
assert isinstance(a_op[0].inputs[0].type, CudaNdarrayType)
def test_local_remove_all_assert():
    """Check that the `unsafe` tag controls removal of Assert nodes."""
    x = theano.tensor.fmatrix()
    a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())

    def n_asserts(fn):
        # Count the Assert nodes left in the compiled graph.
        return len([node for node in fn.maker.fgraph.toposort()
                    if isinstance(node.op, theano.tensor.opt.Assert)])

    # By default `unsafe` is not active, so the assert must survive.
    f = theano.function([x], a, mode=mode_with_gpu)
    assert n_asserts(f) == 1
    # Including `unsafe` enables local_remove_all_assert: assert removed.
    f = theano.function([x], a, mode=mode_with_gpu.including('unsafe'))
    assert n_asserts(f) == 0
    # Excluding `unsafe` again restores the assert.
    f = theano.function([x], a, mode=mode_with_gpu.excluding('unsafe'))
    assert n_asserts(f) == 1
def test_int_pow():
a = CudaNdarrayType([False])()
......@@ -609,3 +632,4 @@ if __name__ == '__main__':
test_gpualloc()
test_opt_gpujoin_onlyajoin()
test_opt_gpujoin_joinvectors_elemwise_then_minusone()
......@@ -60,6 +60,10 @@ def register_opt(*tags, **kwargs):
register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)
# Register local_remove_all_assert as a global opt.
# Tagged `unsafe` only, so it is disabled unless explicitly included; when
# active it removes the Assert nodes that validate user inputs.
gpu_optimizer.register('local_remove_all_assert',
                       theano.tensor.opt.local_remove_all_assert,
                       'unsafe')
def safe_to_gpu(x):
if isinstance(x.type, tensor.TensorType):
......
......@@ -25,6 +25,29 @@ def test_local_assert():
assert isinstance(a_op[0].inputs[0].type, GpuArrayType)
def test_local_remove_all_assert():
    """Check that the `unsafe` tag controls removal of Assert nodes."""
    x = theano.tensor.fmatrix()
    a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())

    def n_asserts(fn):
        # Count the Assert nodes left in the compiled graph.
        return len([node for node in fn.maker.fgraph.toposort()
                    if isinstance(node.op, theano.tensor.opt.Assert)])

    # By default `unsafe` is not active, so the assert must survive.
    f = theano.function([x], a, mode=mode_with_gpu)
    assert n_asserts(f) == 1
    # Including `unsafe` enables local_remove_all_assert: assert removed.
    f = theano.function([x], a, mode=mode_with_gpu.including('unsafe'))
    assert n_asserts(f) == 0
    # Excluding `unsafe` again restores the assert.
    f = theano.function([x], a, mode=mode_with_gpu.excluding('unsafe'))
    assert n_asserts(f) == 1
def test_flatten():
m = theano.tensor.fmatrix()
f = theano.function([m], m.flatten(), mode=mode_with_gpu)
......
......@@ -1670,12 +1670,15 @@ def local_remove_all_assert(node):
# Disabled by default: only the explicit `unsafe` tag (never the db name,
# hence use_db_name_as_tag=False) can enable this optimization.  Register
# it in every optimization phase so the asserts are stripped regardless of
# which phase runs.
for _phase in ('canonicalize', 'stabilize', 'specialize'):
    compile.optdb[_phase].register('local_remove_all_assert',
                                   local_remove_all_assert,
                                   'unsafe',
                                   use_db_name_as_tag=False)
......
......@@ -3553,6 +3553,13 @@ class test_assert(utt.InferShapeTester):
assert len(topo) == 1, topo
assert topo[0].op == deep_copy_op, topo
mode = compile.mode.get_default_mode()
a = theano.tensor.opt.assert_op(x, T.eq(x, 0).any())
f = theano.function([x], a, mode=mode.excluding('unsafe'))
topo = f.maker.fgraph.toposort()
a_op = [n for n in topo if isinstance(n.op, T.opt.Assert)]
assert len(a_op) == 1
def test_infer_shape(self):
adscal = dscalar()
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论