提交 7543b552 authored 作者: carriepl's avatar carriepl

Merge pull request #2789 from thomasmesnard/knownfailuretest

knownfailuretest
......@@ -11,13 +11,13 @@ from theano.compile import function
from theano.compile import UnusedInputError
from theano.gof import MissingInputError
from theano.compat import exc_message
from theano.tests.unittest_tools import SkipTest
from theano import tensor
from theano import tensor as T
import theano
import numpy as N
from numpy.testing.noseclasses import KnownFailureTest
PatternOptimizer = lambda p1, p2, ign=True: gof.OpKeyOptimizer(gof.PatternSub(p1, p2), ignore_newtrees=ign)
......@@ -41,7 +41,8 @@ class T_function(unittest.TestCase):
fn = function([], None) # ok
rval = fn()
if rval == []:
raise KnownFailureTest('See #254: Using None as function output leads to [] return value')
raise SkipTest("See #254: Using None as function output leads "
"to [] return value")
else:
assert rval is None
......
......@@ -14,7 +14,6 @@ from theano.tensor.basic import alloc
from theano.tensor.tests import test_basic
from theano.tensor.tests.test_basic import rand, safe_make_node
from theano.tests.unittest_tools import SkipTest
from numpy.testing.noseclasses import KnownFailureTest
import theano.sandbox.gpuarray
......@@ -73,7 +72,7 @@ def may_fail(msg, EClass):
f()
except Exception as e:
if isinstance(e, EClass):
raise KnownFailureTest(msg, e)
raise SkipTest(msg, e)
raise
wrapper.__name__ = f.__name__
return wrapper
......
......@@ -16,7 +16,7 @@ import theano.sandbox.rng_mrg
from theano import tensor
from theano.compile.pfunc import rebuild_collect_shared
from theano.tests import unittest_tools as utt
from numpy.testing.noseclasses import KnownFailureTest
from theano.tests.unittest_tools import SkipTest
from .test_utils import *
import theano.sandbox.scan_module as scan_module
......@@ -471,8 +471,9 @@ class TestScan(unittest.TestCase):
# place (even when told not to by DebugMode). As this op will change
# soon, and it is in the sandbox and not for user consumption, the
# error is marked as KnownFailure
raise KnownFailureTest('Work-in-progress sandbox ScanOp is not fully '
'functional yet')
raise SkipTest("Work-in-progress sandbox ScanOp is "
"not fully functional yet")
def f_pow2(x_tm1):
return 2 * x_tm1
......@@ -508,9 +509,8 @@ class TestScan(unittest.TestCase):
# place (even when told not to by DebugMode). As this op will change
# soon, and it is in the sandbox and not for user consumption, the
# error is marked as KnownFailure
raise KnownFailureTest('Work-in-progress sandbox ScanOp is not fully '
'functional yet')
raise SkipTest("Work-in-progress sandbox "
"ScanOp is not fully functional yet")
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
......
......@@ -25,8 +25,6 @@ import theano.scalar.sharedvar
from theano.scan_module.scan_op import Scan
from theano.compat import PY3, OrderedDict
from numpy.testing.noseclasses import KnownFailureTest
'''
Questions and notes about scan that should be answered :
......@@ -218,7 +216,7 @@ class T_Scan(unittest.TestCase):
# generator network, only one output , type scalar ; no sequence or
# non sequence arguments
@dec.knownfailureif(
@dec.skipif(
isinstance(theano.compile.mode.get_default_mode(),
theano.compile.debugmode.DebugMode),
("This test fails in DebugMode, because it is not yet picklable."))
......@@ -3055,9 +3053,8 @@ class T_Scan(unittest.TestCase):
if isinstance(x.op, theano.tensor.Elemwise)]) == 0
def test_alloc_inputs2(self):
raise KnownFailureTest((
"This tests depends on an optimization for scan "
"that has not been implemented yet."))
raise SkipTest("This test depends on an optimization for "
"scan that has not been implemented yet.")
W1 = tensor.matrix()
W2 = tensor.matrix()
h0 = tensor.vector()
......@@ -3135,7 +3132,7 @@ class T_Scan(unittest.TestCase):
# One scan node gets optimnized out
assert len(lssc) == 1
@dec.knownfailureif(True,
@dec.skipif(True,
("This test fails because not typed outputs_info "
"are always gived the smallest dtype. There is "
"no upcast of outputs_info in scan for now."))
......@@ -3284,8 +3281,9 @@ class T_Scan(unittest.TestCase):
assert out == 24
def test_infershape_seq_shorter_nsteps(self):
raise KnownFailureTest('This is a generic problem with infershape'
' that has to be discussed and figured out')
raise SkipTest("This is a generic problem with "
"infershape that has to be discussed "
"and figured out")
x = tensor.vector('x')
[o1, o2], _ = theano.scan(lambda x, y: (x + 1, y + x),
sequences=x,
......
......@@ -18,7 +18,6 @@ from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
import numpy
from numpy.testing import dec, assert_array_equal, assert_allclose
from numpy.testing.noseclasses import KnownFailureTest
from distutils.version import LooseVersion
import theano
......@@ -5911,7 +5910,7 @@ def test_sum_overflow():
assert f([1] * 300) == 300
@dec.knownfailureif(
@dec.skipif(
isinstance(get_default_mode(), theano.compile.debugmode.DebugMode),
("This test fails in DEBUG_MODE, but the generated code is OK. "
"It is actually a problem of DEBUG_MODE, see #626."))
......@@ -5924,7 +5923,7 @@ def test_default():
assert f(1, None) == 1
@dec.knownfailureif(
@dec.skipif(
isinstance(get_default_mode(), theano.compile.debugmode.DebugMode),
("This test fails in DEBUG_MODE, but the generated code is OK. "
"It is actually a problem of DEBUG_MODE, see #626."))
......@@ -6209,17 +6208,12 @@ class test_arithmetic_cast(unittest.TestCase):
bool(numpy_version >= [1, 6]) and
theano_dtype == 'complex128' and
numpy_dtype == 'complex64'):
# In numpy 1.6.x adding a
# complex128 with a float32 or
# float16 may result in a
# complex64. This may be a bug
# (investigation is currently in
# progress), so in the meantime we
# just mark this test as a known
# failure.
raise KnownFailureTest('Known issue with '
'numpy >= 1.6.x see #761')
# In numpy 1.6.x adding a complex128 with
# a float32 may result in a complex64. As
# of 1.9.2 this is still the case, so it is
# probably by design.
raise SkipTest("Known issue with "
"numpy >= 1.6.x see #761")
# In any other situation: something wrong is
# going on!
assert False
......
......@@ -49,7 +49,7 @@ class TestRealImag(unittest.TestCase):
assert numpy.all(rval == mval[0]), (rval, mval[0])
assert numpy.all(ival == mval[1]), (ival, mval[1])
@dec.knownfailureif(True, "Complex grads not enabled, see #178")
@dec.skipif(True, "Complex grads not enabled, see #178")
def test_complex_grads(self):
def f(m):
c = complex(m[0], m[1])
......@@ -59,7 +59,7 @@ class TestRealImag(unittest.TestCase):
mval = numpy.asarray(rng.randn(2, 5))
utt.verify_grad(f, [mval])
@dec.knownfailureif(True, "Complex grads not enabled, see #178")
@dec.skipif(True, "Complex grads not enabled, see #178")
def test_mul_mixed0(self):
def f(a):
......@@ -75,7 +75,7 @@ class TestRealImag(unittest.TestCase):
print(e.analytic_grad)
raise
@dec.knownfailureif(True, "Complex grads not enabled, see #178")
@dec.skipif(True, "Complex grads not enabled, see #178")
def test_mul_mixed1(self):
def f(a):
......@@ -91,7 +91,7 @@ class TestRealImag(unittest.TestCase):
print(e.analytic_grad)
raise
@dec.knownfailureif(True, "Complex grads not enabled, see #178")
@dec.skipif(True, "Complex grads not enabled, see #178")
def test_mul_mixed(self):
def f(a, b):
......@@ -108,7 +108,7 @@ class TestRealImag(unittest.TestCase):
print(e.analytic_grad)
raise
@dec.knownfailureif(True, "Complex grads not enabled, see #178")
@dec.skipif(True, "Complex grads not enabled, see #178")
def test_polar_grads(self):
def f(m):
c = complex_from_polar(abs(m[0]), m[1])
......@@ -118,7 +118,7 @@ class TestRealImag(unittest.TestCase):
mval = numpy.asarray(rng.randn(2, 5))
utt.verify_grad(f, [mval])
@dec.knownfailureif(True, "Complex grads not enabled, see #178")
@dec.skipif(True, "Complex grads not enabled, see #178")
def test_abs_grad(self):
def f(m):
c = complex(m[0], m[1])
......
......@@ -39,7 +39,7 @@ class TestFourier(utt.InferShapeTester):
[numpy.random.rand(12, 4), 0],
self.op_class)
@dec.knownfailureif(True, "Complex grads not enabled, see #178")
@dec.skipif(True, "Complex grads not enabled, see #178")
def test_gradient(self):
def fft_test1(a):
return self.op(a, None, None)
......
......@@ -1581,8 +1581,8 @@ def test_log_add():
f([10000], [10000]) # causes overflow if handled incorrectly
assert numpy.allclose(f([10000], [10000]), 20000)
except AssertionError:
raise KnownFailureTest(('log(add(exp)) is not stabilized when adding '
'more than 2 elements, see #623'))
raise SkipTest("log(add(exp)) is not stabilized when adding "
"more than 2 elements, see #623")
# TODO: test that the optimization works in the presence of broadcasting.
......@@ -4048,9 +4048,9 @@ def test_constant_get_stabilized():
assert f() == 800, f()
except (AssertionError, theano.compile.debugmode.InvalidValueError):
raise KnownFailureTest((
"Theano optimizes constant before stabilization. "
"This breaks stabilization optimization in some cases. See #504."))
raise SkipTest('Theano optimizes constant before stabilization. '
'This breaks stabilization optimization in some '
'cases. See #504.')
class T_local_switch_sink(unittest.TestCase):
......@@ -4307,8 +4307,11 @@ class T_local_erfc(unittest.TestCase):
0].op.scalar_op.fgraph.apply_nodes) == 22, len(f.maker.fgraph.toposort()[0].fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes)
# TODO: fix this problem
if theano.config.floatX == "float32" and theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
raise KnownFailureTest(
"the python code upcast somewhere internally some value of float32 to python float for part of its computation. That make that the c and python code don't generate the same value. You can ignore this error.")
raise SkipTest('The python code upcasts somewhere internally '
'some value of float32 to python float for '
'part of its computation. That makes the '
'c and python code not generate the same '
'value. You can ignore this error.')
assert all(numpy.isfinite(f(val)))
def test_local_grad_log_erfc_neg(self):
......@@ -4376,8 +4379,13 @@ class T_local_erfc(unittest.TestCase):
# TODO: fix this problem
if theano.config.floatX == "float32" and theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
# The python code upcast somewhere internally some value of float32
# to python float for part of its computation. That makes the c
# and python code not generate the same value. You can ignore
# this error. This happens in an intermediate step that does not
# show in the final result.
# Showing this test error is a duplicate of the one in test_local_log_erfc. We hide it.
#raise KnownFailureTest("the python code upcast somewhere internally some value of float32 to python float for part of its computation. That make that the c and python code don't generate the same value. You can ignore this error. This happen in an intermediate step that don't show in the final result.")
pass
else:
assert all(numpy.isfinite(f(val)))
......
......@@ -20,7 +20,7 @@ from theano import tensor
import numpy
from theano.gof import Op, Apply
from theano.gradient import grad_undefined
from numpy.testing.noseclasses import KnownFailureTest
from theano.tests.unittest_tools import SkipTest
from theano.tensor.signal.downsample import DownsampleFactorMax
from theano.tensor.nnet import conv
......@@ -190,9 +190,8 @@ class RopLop_checker(unittest.TestCase):
assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
if known_fail:
raise KnownFailureTest("Rop doesn't handle non-differentiable "
"inputs correctly. Bug exposed by fixing Add.grad"
" method.")
raise SkipTest('Rop does not handle non-differentiable inputs '
'correctly. Bug exposed by fixing Add.grad method.')
class test_RopLop(RopLop_checker):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论