提交 1118c72d authored 作者: Thomas Mesnard's avatar Thomas Mesnard

Fix details for comments

上级 b4b80693
...@@ -17,7 +17,6 @@ from theano import tensor as T ...@@ -17,7 +17,6 @@ from theano import tensor as T
import theano import theano
import numpy as N import numpy as N
from numpy.testing.noseclasses import KnownFailureTest
PatternOptimizer = lambda p1, p2, ign=True: gof.OpKeyOptimizer(gof.PatternSub(p1, p2), ignore_newtrees=ign) PatternOptimizer = lambda p1, p2, ign=True: gof.OpKeyOptimizer(gof.PatternSub(p1, p2), ignore_newtrees=ign)
......
...@@ -12,7 +12,6 @@ from theano.tensor.basic import alloc ...@@ -12,7 +12,6 @@ from theano.tensor.basic import alloc
from theano.tensor.tests import test_basic from theano.tensor.tests import test_basic
from theano.tensor.tests.test_basic import rand, safe_make_node from theano.tensor.tests.test_basic import rand, safe_make_node
from theano.tests.unittest_tools import SkipTest from theano.tests.unittest_tools import SkipTest
from numpy.testing.noseclasses import KnownFailureTest
import theano.sandbox.gpuarray import theano.sandbox.gpuarray
...@@ -70,7 +69,7 @@ def may_fail(msg, EClass): ...@@ -70,7 +69,7 @@ def may_fail(msg, EClass):
f() f()
except Exception as e: except Exception as e:
if isinstance(e, EClass): if isinstance(e, EClass):
raise SkipTest("Not yet implemented") raise SkipTest(msg, e)
raise raise
wrapper.__name__ = f.__name__ wrapper.__name__ = f.__name__
return wrapper return wrapper
......
...@@ -16,7 +16,6 @@ from theano import tensor ...@@ -16,7 +16,6 @@ from theano import tensor
from theano.compile.pfunc import rebuild_collect_shared from theano.compile.pfunc import rebuild_collect_shared
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
from theano.tests.unittest_tools import SkipTest from theano.tests.unittest_tools import SkipTest
from numpy.testing.noseclasses import KnownFailureTest
from test_utils import * from test_utils import *
import theano.sandbox.scan_module as scan_module import theano.sandbox.scan_module as scan_module
...@@ -473,9 +472,7 @@ class TestScan(unittest.TestCase): ...@@ -473,9 +472,7 @@ class TestScan(unittest.TestCase):
# soon, and it is in the sandbox and not for user consumption, the # soon, and it is in the sandbox and not for user consumption, the
# error is marked as KnownFailure # error is marked as KnownFailure
raise SkipTest("Not yet implemented") raise SkipTest("Work-in-progress sandbox ScanOp is not fully functional yet")
# Work-in-progress sandbox ScanOp is not fully
# functional yet
def f_pow2(x_tm1): def f_pow2(x_tm1):
return 2 * x_tm1 return 2 * x_tm1
...@@ -511,9 +508,7 @@ class TestScan(unittest.TestCase): ...@@ -511,9 +508,7 @@ class TestScan(unittest.TestCase):
# place (even when told not to by DebugMode). As this op will change # place (even when told not to by DebugMode). As this op will change
# soon, and it is in the sandbox and not for user consumption, the # soon, and it is in the sandbox and not for user consumption, the
# error is marked as KnownFailure # error is marked as KnownFailure
raise SkipTest("Not yet implemented") raise SkipTest("Work-in-progress sandbox ScanOp is not fully functional yet")
# Work-in-progress sandbox ScanOp is not fully
# functional yet
def f_rnn(u_t, x_tm1, W_in, W): def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W return u_t * W_in + x_tm1 * W
......
...@@ -24,8 +24,6 @@ import theano.scalar.sharedvar ...@@ -24,8 +24,6 @@ import theano.scalar.sharedvar
from theano.scan_module.scan_op import Scan from theano.scan_module.scan_op import Scan
from theano.compat import PY3, OrderedDict from theano.compat import PY3, OrderedDict
from numpy.testing.noseclasses import KnownFailureTest
''' '''
Questions and notes about scan that should be answered : Questions and notes about scan that should be answered :
...@@ -3273,9 +3271,7 @@ class T_Scan(unittest.TestCase): ...@@ -3273,9 +3271,7 @@ class T_Scan(unittest.TestCase):
if isinstance(x.op, theano.tensor.Elemwise)]) == 0 if isinstance(x.op, theano.tensor.Elemwise)]) == 0
def test_alloc_inputs2(self): def test_alloc_inputs2(self):
        raise SkipTest("Not yet implemented")         raise SkipTest("This test depends on an optimization for scan that has not been implemented yet.")
        # This test depends on an optimization for scan
# that has not been implemented yet.
W1 = tensor.matrix() W1 = tensor.matrix()
W2 = tensor.matrix() W2 = tensor.matrix()
h0 = tensor.vector() h0 = tensor.vector()
...@@ -3502,9 +3498,7 @@ class T_Scan(unittest.TestCase): ...@@ -3502,9 +3498,7 @@ class T_Scan(unittest.TestCase):
assert out == 24 assert out == 24
def test_infershape_seq_shorter_nsteps(self): def test_infershape_seq_shorter_nsteps(self):
raise SkipTest("Not yet implemented") raise SkipTest("This is a generic problem with infershape that has to be discussed and figured out")
# This is a generic problem with infershape
# that has to be discussed and figured out
x = tensor.vector('x') x = tensor.vector('x')
[o1, o2], _ = theano.scan(lambda x, y: (x + 1, y + x), [o1, o2], _ = theano.scan(lambda x, y: (x + 1, y + x),
sequences=x, sequences=x,
......
...@@ -16,7 +16,6 @@ from nose.plugins.skip import SkipTest ...@@ -16,7 +16,6 @@ from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr from nose.plugins.attrib import attr
import numpy import numpy
from numpy.testing import dec, assert_array_equal, assert_allclose from numpy.testing import dec, assert_array_equal, assert_allclose
from numpy.testing.noseclasses import KnownFailureTest
from distutils.version import LooseVersion from distutils.version import LooseVersion
import theano import theano
...@@ -6118,12 +6117,10 @@ class test_arithmetic_cast(unittest.TestCase): ...@@ -6118,12 +6117,10 @@ class test_arithmetic_cast(unittest.TestCase):
theano_dtype == 'complex128' and theano_dtype == 'complex128' and
numpy_dtype == 'complex64'): numpy_dtype == 'complex64'):
# In numpy 1.6.x adding a complex128 with # In numpy 1.6.x adding a complex128 with
# a float32 may result in a complex64. This # a float32 may result in a complex64. As
                    # may be a bug (investigation is currently                     # of 1.9.2 this is still the case so it is
# in progress), so in the meantime we just # probably by design
# mark this test as a known failure. raise SkipTest("Known issue with numpy >= 1.6.x see #761")
raise SkipTest("Not yet implemented")
                    # Known issue with numpy >= 1.6.x see #761
# In any other situation: something wrong is # In any other situation: something wrong is
# going on! # going on!
assert False assert False
......
...@@ -1558,9 +1558,7 @@ def test_log_add(): ...@@ -1558,9 +1558,7 @@ def test_log_add():
f([10000], [10000]) # causes overflow if handled incorrectly f([10000], [10000]) # causes overflow if handled incorrectly
assert numpy.allclose(f([10000], [10000]), 20000) assert numpy.allclose(f([10000], [10000]), 20000)
except AssertionError: except AssertionError:
raise SkipTest("Not yet implemented") raise SkipTest("log(add(exp)) is not stabilized when adding more than 2 elements, see #623")
# log(add(exp)) is not stabilized when adding
# more than 2 elements, see #623
# TODO: test that the optimization works in the presence of broadcasting. # TODO: test that the optimization works in the presence of broadcasting.
...@@ -4026,9 +4024,8 @@ def test_constant_get_stabilized(): ...@@ -4026,9 +4024,8 @@ def test_constant_get_stabilized():
assert f() == 800, f() assert f() == 800, f()
except (AssertionError, theano.compile.debugmode.InvalidValueError): except (AssertionError, theano.compile.debugmode.InvalidValueError):
raise SkipTest("Not yet implemented") raise SkipTest('Theano optimizes constant before stabilization.'
# Theano optimizes constant before stabilization. 'This breaks stabilization optimization in some cases. See #504.')
# This breaks stabilization optimization in some cases. See #504.
class T_local_switch_sink(unittest.TestCase): class T_local_switch_sink(unittest.TestCase):
...@@ -4285,8 +4282,8 @@ class T_local_erfc(unittest.TestCase): ...@@ -4285,8 +4282,8 @@ class T_local_erfc(unittest.TestCase):
0].op.scalar_op.fgraph.apply_nodes) == 22, len(f.maker.fgraph.toposort()[0].fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) 0].op.scalar_op.fgraph.apply_nodes) == 22, len(f.maker.fgraph.toposort()[0].fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes)
# TODO: fix this problem # TODO: fix this problem
if theano.config.floatX == "float32" and theano.config.mode in ["DebugMode", "DEBUG_MODE"]: if theano.config.floatX == "float32" and theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
raise SkipTest("Not yet implemented") raise SkipTest('The python code upcast somewhere internally some value of float32 to python float for part of its computation.'
# The python code upcast somewhere internally some value of float32 to python float for part of its computation. That make that the c and python code don't generate the same value. You can ignore this error. 'That make that the c and python code dont generate the same value. You can ignore this error.')
assert all(numpy.isfinite(f(val))) assert all(numpy.isfinite(f(val)))
def test_local_grad_log_erfc_neg(self): def test_local_grad_log_erfc_neg(self):
......
...@@ -20,7 +20,6 @@ from theano import tensor ...@@ -20,7 +20,6 @@ from theano import tensor
import numpy import numpy
from theano.gof import Op, Apply from theano.gof import Op, Apply
from theano.gradient import grad_undefined from theano.gradient import grad_undefined
from numpy.testing.noseclasses import KnownFailureTest
from theano.tests.unittest_tools import SkipTest from theano.tests.unittest_tools import SkipTest
from theano.tensor.signal.downsample import DownsampleFactorMax from theano.tensor.signal.downsample import DownsampleFactorMax
from theano.tensor.nnet import conv from theano.tensor.nnet import conv
...@@ -191,11 +190,8 @@ class RopLop_checker(unittest.TestCase): ...@@ -191,11 +190,8 @@ class RopLop_checker(unittest.TestCase):
assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2)) assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
if known_fail: if known_fail:
            raise SkipTest("Not yet implemented")             raise SkipTest('Rop doesn't handle non-differentiable inputs correctly.'
# Rop doesn't handle non-differentiable 'Bug exposed by fixing Add.grad method.')
# inputs correctly. Bug exposed by fixing Add.grad
# method.
class test_RopLop(RopLop_checker): class test_RopLop(RopLop_checker):
def test_shape(self): def test_shape(self):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论