提交 795be453 authored 作者: Pascal Lamblin's avatar Pascal Lamblin

merge

...@@ -39,9 +39,26 @@ Some kinds of errors can only be detected for certain input value combinations. ...@@ -39,9 +39,26 @@ Some kinds of errors can only be detected for certain input value combinations.
In the example above, there is no way to guarantee that a future call to say, In the example above, there is no way to guarantee that a future call to say,
``f(-1)`` won't cause a problem. DebugMode is not a silver bullet. ``f(-1)`` won't cause a problem. DebugMode is not a silver bullet.
If you instantiate DebugMode using the constructor ``compile.DebugMode``
rather than the keyword ``DEBUG_MODE`` you can configure its behaviour via
constructor arguments. See :api:`DebugMode` for details.
The keyword version of DebugMode (which you get by using ``mode='DEBUG_MODE'``)
is quite strict, and can raise several different Exception types.
The following are DebugMode exceptions you might encounter:
DebugModeError
--------------
This is a generic error. All the other exceptions inherit from this one.
This error is typically not raised directly.
However, you can use ``except DebugModeError: ...`` to catch any of the more
specific types of Exception.
For detailed documentation see :api:`DebugModeError`.
BadCLinkerOutput BadCLinkerOutput
---------------- ----------------
...@@ -105,18 +122,6 @@ whereby we debug in DEBUG_MODE and then run the full-size jobs in FAST_RUN. ...@@ -105,18 +122,6 @@ whereby we debug in DEBUG_MODE and then run the full-size jobs in FAST_RUN.
For detailed documentation see :api:`StochasticOrder`. For detailed documentation see :api:`StochasticOrder`.
FloatError
----------
This happens when invalid floating-point values such as NaN and Inf are
introduced into the computations. It indicates which Op created the first
NaN.
Currently this exception is never raised because the check is not being
performed, but the plan is that it will be. (see ticket #320)
For detailed documentation see :api:`FloatError`.
InvalidValueError InvalidValueError
----------------- -----------------
...@@ -126,14 +131,11 @@ an output that is invalid with respect to the type of the corresponding output ...@@ -126,14 +131,11 @@ an output that is invalid with respect to the type of the corresponding output
variable. Like if it returned a complex-valued ndarray for a ``dscalar`` variable. Like if it returned a complex-valued ndarray for a ``dscalar``
Type. Type.
For detailed documentation see :api:`InvalidValueError`. This can also be triggered when floating-point values such as NaN and Inf are
introduced into the computations. It indicates which Op created the first
NaN. These floating-point values can be allowed by passing the
DebugModeError ``check_isfinite=False`` argument to DebugMode.
--------------
This is a generic error, pretty unhelpful. You'll generally have to look at the For detailed documentation see :api:`InvalidValueError`.
stack trace and then in the code to figure out why DebugMode is complaining.
For detailed documentation see :api:`DebugModeError`.
...@@ -170,13 +170,6 @@ class StochasticOrder(DebugModeError): ...@@ -170,13 +170,6 @@ class StochasticOrder(DebugModeError):
""" """
pass pass
class FloatError(DebugModeError):
    """Raised when an Inf or NaN value creeps into a calculation.

    :note: see ticket #320 for what this exception is for
    """
class InvalidValueError(DebugModeError): class InvalidValueError(DebugModeError):
"""Exception: some Op an output value that is inconsistent with the Type of that output""" """Exception: some Op an output value that is inconsistent with the Type of that output"""
def __init__(self, r, v): def __init__(self, r, v):
...@@ -785,6 +778,11 @@ class _Linker(gof.link.LocalLinker): ...@@ -785,6 +778,11 @@ class _Linker(gof.link.LocalLinker):
for x in no_recycling: for x in no_recycling:
x[0] = None x[0] = None
# nest all this in try-finally to put storage *back* into storage_map when an
# exception is raised
original_storage_map_keys = [r for r in storage_map if r.owner is None]
try:
equiv_vals = {} equiv_vals = {}
problematic = set() problematic = set()
# r_vals are the true values associated with each variable in the graph # r_vals are the true values associated with each variable in the graph
...@@ -830,18 +828,19 @@ class _Linker(gof.link.LocalLinker): ...@@ -830,18 +828,19 @@ class _Linker(gof.link.LocalLinker):
if thunk_py: if thunk_py:
thunk_py() thunk_py()
# check output values for type-correctness
for r in node.outputs:
if not r.type.is_valid_value(storage_map[r][0]):
raise InvalidValueError(r, storage_map[r][0])
#if r in r_vals:
_check_inputs(node, storage_map, r_vals, dr_vals, active_order_set, _check_inputs(node, storage_map, r_vals, dr_vals, active_order_set,
clobber_dr_vals=True) clobber_dr_vals=True)
_check_viewmap(node, storage_map) _check_viewmap(node, storage_map)
# check output values for type-correctness
#retrieve each output from the storage_map #retrieve each output from the storage_map
for r in node.outputs: for r in node.outputs:
if not r.type.is_valid_value(storage_map[r][0]):
raise InvalidValueError(r, storage_map[r][0])
#if r in r_vals:
#print >> sys.stderr, 'OUTPUT', r, 'ALREADY HAS_VALUE!', r_vals[r], 'WHAT ABOUT', storage_map[r][0]
assert r not in r_vals assert r not in r_vals
r_vals[r] = storage_map[r][0] r_vals[r] = storage_map[r][0]
storage_map[r][0] = None #clear the storage_map of outputs for the thunk_c storage_map[r][0] = None #clear the storage_map of outputs for the thunk_c
...@@ -854,16 +853,17 @@ class _Linker(gof.link.LocalLinker): ...@@ -854,16 +853,17 @@ class _Linker(gof.link.LocalLinker):
thunk_c() thunk_c()
for r in node.outputs:
# check output values for type-correctness
if not r.type.is_valid_value(storage_map[r][0]):
raise InvalidValueError(r, storage_map[r][0])
_check_inputs(node, storage_map, r_vals, dr_vals, active_order_set, _check_inputs(node, storage_map, r_vals, dr_vals, active_order_set,
clobber_dr_vals=False) clobber_dr_vals=False)
_check_viewmap(node, storage_map) _check_viewmap(node, storage_map)
for r in node.outputs: for r in node.outputs:
# check output values for type-correctness
if not r.type.is_valid_value(storage_map[r][0]):
raise InvalidValueError(r, storage_map[r][0])
if r in r_vals: if r in r_vals:
# compares the version from thunk_py (in r_vals) # compares the version from thunk_py (in r_vals)
# to the version produced by thunk_c (in storage_map) # to the version produced by thunk_c (in storage_map)
...@@ -920,6 +920,11 @@ class _Linker(gof.link.LocalLinker): ...@@ -920,6 +920,11 @@ class _Linker(gof.link.LocalLinker):
storage_map[r][0].itemset(dr_vals[r][0]) storage_map[r][0].itemset(dr_vals[r][0])
else: else:
storage_map[r][0] = dr_vals[r][0] storage_map[r][0] = dr_vals[r][0]
except:
for r in original_storage_map_keys:
if storage_map[r][0] is None:
storage_map[r][0] = r_vals[r]
raise
#print "" #print ""
#print output_storage #print output_storage
#print dr_vals #print dr_vals
...@@ -961,8 +966,16 @@ class _Maker(FunctionMaker): #inheritance buys a few helper functions ...@@ -961,8 +966,16 @@ class _Maker(FunctionMaker): #inheritance buys a few helper functions
:param accept_inplace: True iff it is acceptable to have inplace operations :param accept_inplace: True iff it is acceptable to have inplace operations
in the graph from the inputs to the outputs in the graph from the inputs to the outputs
:note: this function sets TensorType.filter_checks_isfinite when `mode.check_isfinite` is True
""" """
# WARNING: this is a global mechanism... so it will screw up if we are trying to use
# multiple modes at once.
from ..tensor import TensorType #to set filter_check_isfinite
TensorType.filter_checks_isfinite = mode.check_isfinite
# Handle the case where inputs and/or outputs is a single Variable (not in a list) # Handle the case where inputs and/or outputs is a single Variable (not in a list)
unpack_single = False unpack_single = False
return_none = False return_none = False
...@@ -1182,6 +1195,12 @@ class DebugMode(Mode): ...@@ -1182,6 +1195,12 @@ class DebugMode(Mode):
Should we evaluate (and check) the `perform` implementations? Should we evaluate (and check) the `perform` implementations?
""" """
check_isfinite = True
"""
Should we check for (and complain about) NaN/Inf ndarray elements?
"""
# This function will be used to create a FunctionMaker in # This function will be used to create a FunctionMaker in
# function_module.function # function_module.function
def function_maker(self, i,o,m, *args, **kwargs): def function_maker(self, i,o,m, *args, **kwargs):
...@@ -1191,18 +1210,32 @@ class DebugMode(Mode): ...@@ -1191,18 +1210,32 @@ class DebugMode(Mode):
def __init__(self, def __init__(self,
optimizer='fast_run', optimizer='fast_run',
stability_patience=10, stability_patience=None,
check_c_code=True, check_c_code=None,
check_py_code=True): check_py_code=None,
"""Initialize member variables check_isfinite=None):
"""Initialize member variables.
If any of these arguments (except optimizer) is not None, it overrides the class default.
""" """
if not (check_c_code or check_py_code):
raise ValueError('DebugMode has to check at least one of c and py code')
super(DebugMode, self).__init__( super(DebugMode, self).__init__(
optimizer=optimizer, optimizer=optimizer,
linker=_Linker) linker=_Linker)
if stability_patience is not None:
self.stability_patience = stability_patience self.stability_patience = stability_patience
if check_c_code is not None:
self.check_c_code = check_c_code self.check_c_code = check_c_code
if check_py_code is not None:
self.check_py_code = check_py_code self.check_py_code = check_py_code
if check_isfinite is not None:
self.check_isfinite = check_isfinite
if not (self.check_c_code or self.check_py_code):
raise ValueError('DebugMode has to check at least one of c and py code')
register_mode('DEBUG_MODE',DebugMode(optimizer='fast_run')) register_mode('DEBUG_MODE',DebugMode(optimizer='fast_run'))
...@@ -531,3 +531,63 @@ class Test_ViewMap(unittest.TestCase): ...@@ -531,3 +531,63 @@ class Test_ViewMap(unittest.TestCase):
# input, but guarantees correctness. # input, but guarantees correctness.
#custom_op.view_map = {0:[0], 1:[1]} #custom_op.view_map = {0:[0], 1:[1]}
#f([1,2,3,4],[5,6,7,8]) #f([1,2,3,4],[5,6,7,8])
class Test_check_isfinite(unittest.TestCase):
    """Tests for DebugMode's ``check_isfinite`` option and the global
    ``TensorType.filter_checks_isfinite`` flag it drives.
    """

    def setUp(self):
        # filter_checks_isfinite is class-level (global) state on TensorType,
        # so save it here and restore it in tearDown to keep tests isolated.
        self.old_val = theano.tensor.TensorType.filter_checks_isfinite

    def tearDown(self):
        theano.tensor.TensorType.filter_checks_isfinite = self.old_val

    def _assert_invalid(self, f, value):
        # Helper: calling f(value) must raise debugmode.InvalidValueError.
        try:
            f(value)
        except debugmode.InvalidValueError:
            return
        self.fail('expected InvalidValueError')

    def test_check_isfinite(self):
        x = theano.tensor.dvector()
        f = theano.function([x], (x + 2) * 5, mode='DEBUG_MODE')

        # All-finite input should go through.
        f(numpy.log([3, 4, 5]))

        # A NaN should be rejected.
        self._assert_invalid(f, numpy.log([3, -4, 5]))
        # A NaN together with an Inf should be rejected.
        self._assert_invalid(f, numpy.asarray([0, 1.0, 0]) / 0)
        # Several Inf values should be rejected.
        self._assert_invalid(f, numpy.asarray([1.0, 1.0, 1.0]) / 0)

        # Clearing the global flag disables the check: Inf now goes through.
        theano.tensor.TensorType.filter_checks_isfinite = False
        f(numpy.asarray([1.0, 1.0, 1.0]) / 0)

    def test_check_isfinite_disabled(self):
        x = theano.tensor.dvector()
        f = theano.function([x], (x + 2) * 5,
                mode=debugmode.DebugMode(check_isfinite=False))

        # Even with the isfinite check off, a NaN input still trips the
        # DestroyMap checker, because NaN != NaN makes values look clobbered.
        try:
            f(numpy.log([3, -4, 5]))
            self.fail('expected BadDestroyMap')
        except debugmode.BadDestroyMap:
            pass

        # Inf (without NaN) should go through.
        f(numpy.asarray([1.0, 1.0, 1.0]) / 0)
...@@ -435,6 +435,9 @@ class T_module(unittest.TestCase): ...@@ -435,6 +435,9 @@ class T_module(unittest.TestCase):
"""Test that we can manipulate the mutable, strict, etc. flags (see SymbolicInput) of """Test that we can manipulate the mutable, strict, etc. flags (see SymbolicInput) of
Method inputs""" Method inputs"""
if default_mode == 'FAST_COMPILE':
return
M = Module() M = Module()
M.x = T.dvector() M.x = T.dvector()
M.y = T.dvector() M.y = T.dvector()
...@@ -598,7 +601,7 @@ def test_method_updates(): ...@@ -598,7 +601,7 @@ def test_method_updates():
m = M.make() m = M.make()
m.f([9,9]) m.f([9,9])
assert m.x is None assert m.x is None
assert numpy.all(xval == [0, 1]) assert numpy.all(m.f[M.x] == [0, 1])
# when a variable is listed explicitly and in an update, then there's a problem. # when a variable is listed explicitly and in an update, then there's a problem.
......
...@@ -644,6 +644,8 @@ class Abs(UnaryScalarOp): ...@@ -644,6 +644,8 @@ class Abs(UnaryScalarOp):
return "%(z)s = abs(%(x)s);" % locals() return "%(z)s = abs(%(x)s);" % locals()
if type in float_types: if type in float_types:
return "%(z)s = fabs(%(x)s);" % locals() return "%(z)s = fabs(%(x)s);" % locals()
if type in complex_types:
return "%(z)s = sqrt(%(x)s.real*%(x)s.real + %(x)s.imag*%(x)s.imag);" % locals()
#complex, other? #complex, other?
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
abs_ = Abs(same_out) abs_ = Abs(same_out)
......
...@@ -164,6 +164,11 @@ def value(x, name=None, ndim=None): ...@@ -164,6 +164,11 @@ def value(x, name=None, ndim=None):
class TensorType(Type): class TensorType(Type):
"""Symbolic `Type` representing a numpy.ndarray value.""" """Symbolic `Type` representing a numpy.ndarray value."""
filter_checks_isfinite = False
"""
When this is True, strict filtering rejects data containing NaN or Inf entries. (Used in `DebugMode`)
"""
def __init__(self, dtype, broadcastable, name = None): def __init__(self, dtype, broadcastable, name = None):
"""Initialize self.dtype and self.broadcastable. """Initialize self.dtype and self.broadcastable.
...@@ -199,6 +204,8 @@ class TensorType(Type): ...@@ -199,6 +204,8 @@ class TensorType(Type):
raise TypeError("%s expected a ndarray object with dtype = %s (got %s)." % (self, self.dtype, data.dtype)) raise TypeError("%s expected a ndarray object with dtype = %s (got %s)." % (self, self.dtype, data.dtype))
if not data.ndim == self.ndim: if not data.ndim == self.ndim:
raise TypeError("%s expected a ndarray object with %s dimensions (got %s)." % (self, self.ndim, data.ndim)) raise TypeError("%s expected a ndarray object with %s dimensions (got %s)." % (self, self.ndim, data.ndim))
if self.filter_checks_isfinite and (not numpy.all(numpy.isfinite(data))):
raise TypeError("non-finite elements not allowed")
return data return data
else: else:
data = numpy.asarray(data, dtype = self.dtype) data = numpy.asarray(data, dtype = self.dtype)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论