Commit 7215c905 authored by Pascal Lamblin

Hijack values_eq_approx to make tests pass in DebugMode

DebugMode is too strict here. It happens rarely, so I decided to have a workaround for these specific tests. Moving the definition of values_eq_approx outside of TensorType made it possible.
Parent fe1083e8
...@@ -67,6 +67,7 @@ from theano.tensor import ( ...@@ -67,6 +67,7 @@ from theano.tensor import (
tile tile
) )
from theano.tensor.elemwise import DimShuffle
from theano.tensor.type import values_eq_approx_remove_nan
from theano.tests import unittest_tools as utt
from theano.compile.mode import optdb
from theano.compile import Mode
...@@ -4678,6 +4679,28 @@ class T_local_switch_sink(unittest.TestCase): ...@@ -4678,6 +4679,28 @@ class T_local_switch_sink(unittest.TestCase):
self.mode = copy.copy(self.mode) self.mode = copy.copy(self.mode)
self.mode.check_isfinite = False self.mode.check_isfinite = False
def function_remove_nan(self, *args, **kwargs):
    """Wrapper around theano.function for this test.

    It disables checking for NaN removed by optimizations in
    DebugMode (it has false positives in that case).

    All arguments are forwarded to theano.function; the returned
    callable behaves like the compiled function, except that
    TensorType.values_eq_approx is temporarily replaced while it runs.
    """
    f = theano.function(*args, **kwargs)

    def wrapped_f(*args, **kwargs):
        # This is a bit ugly since it changes the global value of
        # TensorType.values_eq_approx: every TensorType comparison
        # performed during this call will ignore NaNs in the reference
        # value.  The original comparator is restored in the finally
        # clause even if the call raises.
        old_values_eq_approx = TensorType.values_eq_approx
        TensorType.values_eq_approx = staticmethod(
            values_eq_approx_remove_nan)
        try:
            out = f(*args, **kwargs)
        finally:
            TensorType.values_eq_approx = old_values_eq_approx
        return out
    return wrapped_f
def test_local_mul_switch_sink(self): def test_local_mul_switch_sink(self):
c = T.dscalar() c = T.dscalar()
idx = 0 idx = 0
...@@ -4689,7 +4712,7 @@ class T_local_switch_sink(unittest.TestCase): ...@@ -4689,7 +4712,7 @@ class T_local_switch_sink(unittest.TestCase):
y = T.mul(T.switch(condition[0] > 0, 1. * x[0], 0. * x[0]), y = T.mul(T.switch(condition[0] > 0, 1. * x[0], 0. * x[0]),
T.switch(condition[0] > 0, T.switch(condition[0] > 0,
1. * x[0], T.log(c) * x[0])) 1. * x[0], T.log(c) * x[0]))
f = theano.function([condition[0], x[0], c], f = self.function_remove_nan([condition[0], x[0], c],
[y], mode=self.mode) [y], mode=self.mode)
if type(condition[1]) is list: if type(condition[1]) is list:
for i in xrange(len(condition[1])): for i in xrange(len(condition[1])):
...@@ -4705,7 +4728,7 @@ class T_local_switch_sink(unittest.TestCase): ...@@ -4705,7 +4728,7 @@ class T_local_switch_sink(unittest.TestCase):
# This case caused a missed optimization in the past. # This case caused a missed optimization in the past.
x = T.dscalar('x') x = T.dscalar('x')
y = T.switch(x < 7, x, T.sqrt(x - 7)) y = T.switch(x < 7, x, T.sqrt(x - 7))
f = theano.function([x], T.grad(y, x), self.mode) f = self.function_remove_nan([x], T.grad(y, x), self.mode)
assert f(5) == 1, f(5) assert f(5) == 1, f(5)
@attr('slow') @attr('slow')
...@@ -4716,8 +4739,8 @@ class T_local_switch_sink(unittest.TestCase): ...@@ -4716,8 +4739,8 @@ class T_local_switch_sink(unittest.TestCase):
for x in [(T.dmatrix('x'), self.xm), (T.dvector('x'), self.xv), (T.dscalar('x'), self.xs)]: for x in [(T.dmatrix('x'), self.xm), (T.dvector('x'), self.xv), (T.dscalar('x'), self.xs)]:
y = T.true_div(T.switch(condition[0] > 0, 1. * y = T.true_div(T.switch(condition[0] > 0, 1. *
x[0], 0.*x[0]), T.switch(condition[0] > 0, 1.*x[0], T.log(c)*x[0])) x[0], 0.*x[0]), T.switch(condition[0] > 0, 1.*x[0], T.log(c)*x[0]))
f = theano.function([condition[0], x[0], c] f = self.function_remove_nan([condition[0], x[0], c],
, [y], mode=self.mode) [y], mode=self.mode)
if type(condition[1]) is list: if type(condition[1]) is list:
for i in xrange(len(condition[1])): for i in xrange(len(condition[1])):
res = f(condition[1][i], x[1], -1) res = f(condition[1][i], x[1], -1)
......
...@@ -321,83 +321,8 @@ class TensorType(Type): ...@@ -321,83 +321,8 @@ class TensorType(Type):
@staticmethod
def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False,
                     rtol=None, atol=None):
    """Approximate equality check; kept on TensorType for compatibility.

    Delegates to the module-level `values_eq_approx` function, which
    holds the actual implementation.  See that function for the
    meaning of the parameters.
    """
    return values_eq_approx(a, b, allow_remove_inf, allow_remove_nan,
                            rtol, atol)
def __hash__(self): def __hash__(self):
"""Hash equal for same kinds of TensorType""" """Hash equal for same kinds of TensorType"""
...@@ -681,16 +606,97 @@ class TensorType(Type): ...@@ -681,16 +606,97 @@ class TensorType(Type):
theano.compile.ops.expandable_types += (TensorType,) theano.compile.ops.expandable_types += (TensorType,)
def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False,
                     rtol=None, atol=None):
    """Return True iff ndarrays `a` and `b` are approximately equal.

    Non-ndarray inputs and shape/dtype mismatches compare unequal.
    Discrete dtypes are compared exactly; continuous dtypes are compared
    with `_allclose`, with extra handling for NaN and inf values.

    Parameters
    ----------
    allow_remove_inf
        If True, when there is an inf in a, we allow any value in b in
        that position. Even -inf.
    allow_remove_nan
        If True, when there is a nan in a, we allow any value in b in
        that position. Even +-inf.
    rtol
        Relative tolerance, passed to _allclose.
    atol
        Absolute tolerance, passed to _allclose.
    """
    if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray):
        if a.shape != b.shape:
            return False
        if a.dtype != b.dtype:
            return False
        if str(a.dtype) not in theano.tensor.continuous_dtypes:
            # Discrete dtypes must match exactly.
            return numpy.all(a == b)
        else:
            # `close` renamed from `cmp` to avoid shadowing the builtin.
            close = theano.tensor.basic._allclose(a, b, rtol=rtol, atol=atol)
            if close:
                # Numpy claims they are close, this is good enough for us.
                return True
            # Numpy is unhappy, but it does not necessarily mean that a and
            # b are different. Indeed, Numpy does not like missing values
            # and will return False whenever some are found in a or b.
            # The proper way would be to use the MaskArray stuff available
            # in Numpy. However, it looks like it has been added to Numpy's
            # core recently, so it may not be available to everyone. Thus,
            # for now we use a home-made recipe, that should probably be
            # revisited in the future.
            a_missing = numpy.isnan(a)
            a_inf = numpy.isinf(a)

            if not (a_missing.any() or (allow_remove_inf and a_inf.any())):
                # There are no missing values in a, thus this is not the
                # reason why numpy.allclose(a, b) returned False.
                _logger.info(
                    'numpy allclose failed for abs_err %f and rel_err %f',
                    numpy.max(abs(a - b)),
                    numpy.max(abs(a - b) / (abs(a) + abs(b))))
                return False
            # The following line is what numpy.allclose bases its decision
            # upon, according to its documentation.
            rtol = 1.0000000000000001e-05
            atol = 1e-8
            cmp_elemwise = (numpy.absolute(a - b) <=
                            (atol + rtol * numpy.absolute(b)))
            # Find places where both a and b have missing values.
            both_missing = a_missing * numpy.isnan(b)
            # Find places where both a and b have inf of the same sign.
            both_inf = a_inf * numpy.isinf(b)
            # cmp_elemwise is weird when we have inf and -inf.
            # set it to False
            cmp_elemwise = numpy.where(
                both_inf & cmp_elemwise,
                a == b,
                cmp_elemwise)
            # check the sign of the inf
            both_inf = numpy.where(both_inf, (a == b), both_inf)
            if allow_remove_inf:
                both_inf += a_inf
            if allow_remove_nan:
                both_missing += a_missing
            # Combine all information: a position is acceptable if it is
            # elementwise close, or excused by matching NaN/inf.
            return (cmp_elemwise + both_missing + both_inf).all()
    return False
def values_eq_approx_remove_inf(a, b):
    # Like values_eq_approx, but an inf in `a` excuses any value in `b`
    # at that position.
    return values_eq_approx(a, b, True)
def values_eq_approx_remove_nan(a, b):
    # Like values_eq_approx, but a NaN in `a` excuses any value in `b`
    # at that position.
    return values_eq_approx(a, b, False, True)
def values_eq_approx_remove_inf_nan(a, b):
    # Like values_eq_approx, but both inf and NaN in `a` excuse any
    # value in `b` at those positions.
    return values_eq_approx(a, b, True, True)
def values_eq_approx_always_true(a, b): def values_eq_approx_always_true(a, b):
......
Markdown format
0%
You added 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment