提交 347988d8 authored 作者: abergeron's avatar abergeron

Merge pull request #2477 from nouiz/test_crash

Fix tests, crash and clean up
......@@ -343,9 +343,16 @@ can be achieved as follows:
import theano
# This is the current suggested detect_nan implementation to
# show you how it works. That way, you can modify it for your
# need. If you want exactly this method, you can use
# ``theano.compile.monitormode.detect_nan`` that will always
# contain the current suggested version.
def detect_nan(i, node, fn):
for output in fn.outputs:
if numpy.isnan(output[0]).any():
if (not isinstance(output[0], numpy.random.RandomState) and
numpy.isnan(output[0]).any()):
print '*** NaN detected ***'
theano.printing.debugprint(node)
print 'Inputs : %s' % [input[0] for input in fn.inputs]
......
......@@ -2226,17 +2226,14 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions
# optimize the fgraph
compute_test_value_orig = theano.config.compute_test_value
add_stack_trace_on_call = gof.Op.add_stack_trace_on_call
try:
theano.config.compute_test_value = theano.config.compute_test_value_opt
gof.Op.add_stack_trace_on_call = False # Should it be 0 == i?
optimizer(fgraph)
theano.compile.function_module.insert_deepcopy(fgraph, inputs,
outputs + additional_outputs)
finally:
theano.config.compute_test_value = compute_test_value_orig
gof.Op.add_stack_trace_on_call = add_stack_trace_on_call
if i:
li = fgraph.equivalence_tracker.event_list
......
......@@ -1235,13 +1235,11 @@ class FunctionMaker(object):
optimizer, linker = mode.optimizer, copy.copy(mode.linker)
if need_opt:
compute_test_value_orig = theano.config.compute_test_value
add_stack_trace_on_call_orig = gof.Op.add_stack_trace_on_call
limit_orig = theano.config.traceback.limit
# Why do we add the stack trace on the node when it is already done in the output variable?
try:
# optimize the fgraph
theano.config.compute_test_value = theano.config.compute_test_value_opt
gof.Op.add_stack_trace_on_call = False
theano.config.traceback.limit = 0
start_optimizer = time.time()
......@@ -1264,7 +1262,6 @@ class FunctionMaker(object):
insert_deepcopy(fgraph, inputs, outputs + additional_outputs)
finally:
theano.config.compute_test_value = compute_test_value_orig
gof.Op.add_stack_trace_on_call = add_stack_trace_on_call_orig
theano.config.traceback.limit = limit_orig
# initialize the linker
......@@ -1411,15 +1408,12 @@ class FunctionMaker(object):
# Get a function instance
start_linker = time.time()
start_import_time = theano.gof.cmodule.import_time
add_stack_trace_on_call_orig = gof.Op.add_stack_trace_on_call
limit_orig = theano.config.traceback.limit
try:
gof.Op.add_stack_trace_on_call = False
theano.config.traceback.limit = 0
_fn, _i, _o = self.linker.make_thunk(
input_storage=input_storage_lists)
finally:
gof.Op.add_stack_trace_on_call = add_stack_trace_on_call_orig
theano.config.traceback.limit = limit_orig
end_linker = time.time()
......
......@@ -80,3 +80,14 @@ class MonitorMode(Mode):
ret.pre_func = self.pre_func
ret.post_func = self.post_func
return ret
def detect_nan(i, node, fn):
    """MonitorMode hook that reports NaNs appearing in a node's outputs.

    Intended for use as a ``post_func`` of ``MonitorMode``: after each
    Apply node's thunk runs, scan its output storage cells and, when a
    NaN is found, print the offending node together with its input and
    output values, then stop scanning this node.

    Parameters
    ----------
    i : int
        Position of ``node`` in the execution order (unused here).
    node : Apply
        The Apply node whose thunk just executed.
    fn : thunk
        The compiled thunk; ``fn.inputs`` / ``fn.outputs`` are lists of
        single-element storage cells (``cell[0]`` holds the value).
    """
    for output in fn.outputs:
        # RandomState outputs are not numeric arrays, so they cannot be
        # NaN-checked; skip them before calling numpy.isnan.
        # NOTE: the original called isinstance() with its arguments
        # swapped (classinfo first), which raises TypeError at runtime
        # instead of filtering.
        if (not isinstance(output[0], numpy.random.RandomState) and
                numpy.isnan(output[0]).any()):
            print('*** NaN detected ***')
            theano.printing.debugprint(node)
            print('Inputs : %s' % [input[0] for input in fn.inputs])
            print('Outputs: %s' % [output[0] for output in fn.outputs])
            break
......@@ -101,7 +101,7 @@ def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
# print a simple traceback from KeyboardInterrupt
raise exc_type, exc_value, exc_trace
try:
trace = node.tag.trace
trace = node.outputs[0].tag.trace
except AttributeError:
try:
trace = node.op.tag.trace
......@@ -114,11 +114,6 @@ def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
else:
exc_value.__applynode_index__ = None
# nose and unittest catch the exception and do not run the thunk_hook,
# so it can be useful to just blurt out errors right here
if raise_with_op.print_thunk_trace:
log_thunk_trace(exc_value)
hints = []
detailed_err_msg = "\nApply node that caused the error: " + str(node)
......@@ -151,7 +146,7 @@ def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
" have the inputs shapes and strides printed.")
# Print node backtrace
tr = getattr(node.tag, 'trace', None)
tr = getattr(node.outputs[0].tag, 'trace', None)
if tr:
sio = StringIO.StringIO()
traceback.print_list(tr, sio)
......@@ -210,8 +205,6 @@ def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
'\n' + '\n'.join(hints))
raise exc_type, exc_value, exc_trace
raise_with_op.print_thunk_trace = False
class Linker(object):
"""WRITEME"""
......
......@@ -413,13 +413,6 @@ class PureOp(object):
"""
add_stack_trace_on_call = True
"""This class variable governs whether __call__ adds a stack trace to the node it creates.
The tag trace is meant to connect a node to the line a user typed. It is nice for
debugging. It does not make as much sense during optimizations to store this information.
"""
#############
# make_node #
#############
......@@ -486,8 +479,6 @@ class PureOp(object):
"""
return_list = kwargs.pop('return_list', False)
node = self.make_node(*inputs, **kwargs)
if self.add_stack_trace_on_call:
self.add_tag_trace(node)
if config.compute_test_value != 'off':
run_perform = True
......
......@@ -4852,13 +4852,15 @@ def test_tile_grad():
rng = numpy.random.RandomState(utt.fetch_seed())
# test vector
grad_tile(vector('x'), [3], rng.randn(5))
grad_tile(vector('x'), [3], rng.randn(5).astype(config.floatX))
# test matrix
grad_tile(matrix('x'), [3, 4], rng.randn(2, 3))
grad_tile(matrix('x'), [3, 4], rng.randn(2, 3).astype(config.floatX))
# test tensor3
grad_tile(tensor3('x'), [3, 4, 5], rng.randn(2, 4, 3))
grad_tile(tensor3('x'), [3, 4, 5],
rng.randn(2, 4, 3).astype(config.floatX))
# test tensor4
grad_tile(tensor4('x'), [3, 4, 5, 6], rng.randn(2, 4, 3, 5))
grad_tile(tensor4('x'), [3, 4, 5, 6],
rng.randn(2, 4, 3, 5).astype(config.floatX))
class TestARange(unittest.TestCase):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论