提交 958c4194 authored 作者: Thomas Mesnard's avatar Thomas Mesnard

pep8 errors

上级 e95f0a2b
......@@ -40,7 +40,8 @@ class T_function(unittest.TestCase):
fn = function([], None) # ok
rval = fn()
if rval == []:
raise SkipTest("See #254: Using None as function output leads to [] return value")
raise SkipTest("See #254: Using None as function output leads"
"to [] return value")
else:
assert rval is None
......
......@@ -471,8 +471,9 @@ class TestScan(unittest.TestCase):
# place (even when told not to by DebugMode). As this op will change
# soon, and it is in the sandbox and not for user consumption, the
# error is marked as KnownFailure
raise SkipTest("Work-in-progress sandbox ScanOp is not fully functional yet")
raise SkipTest("Work-in-progress sandbox ScanOp is"
"not fully functional yet")
def f_pow2(x_tm1):
return 2 * x_tm1
......@@ -508,7 +509,8 @@ class TestScan(unittest.TestCase):
# place (even when told not to by DebugMode). As this op will change
# soon, and it is in the sandbox and not for user consumption, the
# error is marked as KnownFailure
raise SkipTest("Work-in-progress sandbox ScanOp is not fully functional yet")
raise SkipTest("Work-in-progress sandbox"
"ScanOp is not fully functional yet")
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
......
......@@ -3271,7 +3271,8 @@ class T_Scan(unittest.TestCase):
if isinstance(x.op, theano.tensor.Elemwise)]) == 0
def test_alloc_inputs2(self):
raise SkipTest("This tests depends on an optimization for scan that has not been implemented yet.")
raise SkipTest("This tests depends on an optimization for"
"scan that has not been implemented yet.")
W1 = tensor.matrix()
W2 = tensor.matrix()
h0 = tensor.vector()
......@@ -3498,7 +3499,9 @@ class T_Scan(unittest.TestCase):
assert out == 24
def test_infershape_seq_shorter_nsteps(self):
raise SkipTest("This is a generic problem with infershape that has to be discussed and figured out")
raise SkipTest("This is a generic problem with"
"infershape that has to be discussed"
"and figured out")
x = tensor.vector('x')
[o1, o2], _ = theano.scan(lambda x, y: (x + 1, y + x),
sequences=x,
......
......@@ -6120,7 +6120,8 @@ class test_arithmetic_cast(unittest.TestCase):
# a float32 may result in a complex64. As
# of 1.9.2. this is still the case so it is
# probably by design
raise SkipTest("Known issue with numpy >= 1.6.x see #761")
raise SkipTest("Known issue with"
"numpy >= 1.6.x see #761")
# In any other situation: something wrong is
# going on!
assert False
......
......@@ -1558,7 +1558,8 @@ def test_log_add():
f([10000], [10000]) # causes overflow if handled incorrectly
assert numpy.allclose(f([10000], [10000]), 20000)
except AssertionError:
raise SkipTest("log(add(exp)) is not stabilized when adding more than 2 elements, see #623")
raise SkipTest("log(add(exp)) is not stabilized when adding"
"more than 2 elements, see #623")
# TODO: test that the optimization works in the presence of broadcasting.
......@@ -4025,7 +4026,8 @@ def test_constant_get_stabilized():
except (AssertionError, theano.compile.debugmode.InvalidValueError):
raise SkipTest('Theano optimizes constant before stabilization. '
'This breaks stabilization optimization in some cases. See #504.')
'This breaks stabilization optimization in some '
'cases. See #504.')
class T_local_switch_sink(unittest.TestCase):
......@@ -4282,8 +4284,11 @@ class T_local_erfc(unittest.TestCase):
0].op.scalar_op.fgraph.apply_nodes) == 22, len(f.maker.fgraph.toposort()[0].fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes)
# TODO: fix this problem
if theano.config.floatX == "float32" and theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
raise SkipTest('The python code upcast somewhere internally some value of float32 to python float for part of its computation.'
'That make that the c and python code dont generate the same value. You can ignore this error.')
raise SkipTest('The python code upcast somewhere internally '
'some value of float32 to python float for '
'part of its computation. That make that the '
'c and python code dont generate the same value. '
'You can ignore this error.')
assert all(numpy.isfinite(f(val)))
def test_local_grad_log_erfc_neg(self):
......@@ -4352,7 +4357,11 @@ class T_local_erfc(unittest.TestCase):
# TODO: fix this problem
if theano.config.floatX == "float32" and theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
# Showing this test error is a duplicate of the one in test_local_log_erfc. We hide it.
#raise KnownFailureTest("the python code upcast somewhere internally some value of float32 to python float for part of its computation. That make that the c and python code don't generate the same value. You can ignore this error. This happen in an intermediate step that don't show in the final result.")
# raise KnownFailureTest("the python code upcast somewhere internally some value of
# float32 to python float for part of its computation.
# That make that the c and python code do not generate the same value.
# You can ignore this error.
# This happen in an intermediate step that don't show in the final result.")
pass
else:
assert all(numpy.isfinite(f(val)))
......
......@@ -190,8 +190,9 @@ class RopLop_checker(unittest.TestCase):
assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
if known_fail:
raise SkipTest('Rop doesn t handle non-differentiable inputs correctly.'
'Bug exposed by fixing Add.grad method.')
raise SkipTest('Rop does not handle non-differentiable inputs '
'correctly. Bug exposed by fixing Add.grad method.')
class test_RopLop(RopLop_checker):
def test_shape(self):
......
Markdown 格式
0%
您即将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论