提交 23460ad2 authored 作者: Brandon T. Willard's avatar Brandon T. Willard

Convert remaining exception tests to pytest format

上级 6e8c2b92
......@@ -8,7 +8,6 @@ from six import reraise
from theano import config
from theano import gof
import theano
from theano.compat import exc_message
from theano.compile import debugmode
import theano.tensor
from theano.tests import unittest_tools as utt
......@@ -214,14 +213,10 @@ def test_badthunkoutput():
if not theano.config.cxx:
pytest.skip("G++ not available, so we need to skip this test.")
try:
with pytest.raises(debugmode.BadThunkOutput) as einfo:
f_inconsistent([1.0, 2.0, 3.0], [2, 3, 4])
except debugmode.BadThunkOutput as e:
# print repr(e)
assert e.r.owner.op is inconsistent
return # TEST PASS
assert False # an error should have been detected
assert einfo.value.r.owner.op is inconsistent
def test_badoptimization():
......@@ -240,15 +235,11 @@ def test_badoptimization():
f = theano.function([a, b], a + b, mode=debugmode.DebugMode(optimizer=opt))
try:
with pytest.raises(debugmode.BadOptimization) as einfo:
f(
[1.0, 2.0, 3.0], [2, 3, 4],
)
except debugmode.BadOptimization as e:
assert str(e.reason) == "insert_broken_add"
return # TEST PASS
assert False
assert str(einfo.value.reason) == "insert_broken_add"
def test_badoptimization_opt_err():
......@@ -283,17 +274,15 @@ def test_badoptimization_opt_err():
b = theano.tensor.dvector()
f = theano.function([a, b], a + b, mode=debugmode.DebugMode(optimizer=opt))
try:
with pytest.raises(ValueError, match=r"insert_bigger_b_add"):
f(
[1.0, 2.0, 3.0], [2, 3, 4],
)
except ValueError as e:
assert "insert_bigger_b_add" in exc_message(e)
else:
assert False
# Test that opt that do an illegal change still get the error from gof.
try:
with pytest.raises(
theano.gof.toolbox.BadOptimization, match=r"insert_bad_dtype"
) as einfo:
with theano.change_flags(on_opt_error="raise"):
f2 = theano.function(
[a, b],
......@@ -303,20 +292,14 @@ def test_badoptimization_opt_err():
f2(
[1.0, 2.0, 3.0], [2, 3, 4],
)
except theano.gof.toolbox.BadOptimization as e:
assert "insert_bad_dtype" in str(e)
# Test that we can reraise the error with an extended message
try:
new_e = e.__class__("TTT" + str(e))
exc_type, exc_value, exc_trace = sys.exc_info()
exc_value = new_e
reraise(e.__class__, exc_value, exc_trace)
except theano.gof.toolbox.BadOptimization:
pass
else:
assert False
else:
assert False
# Test that we can reraise the error with an extended message
with pytest.raises(theano.gof.toolbox.BadOptimization):
e = einfo.value
new_e = e.__class__("TTT" + str(e))
exc_type, exc_value, exc_trace = sys.exc_info()
exc_value = new_e
reraise(e.__class__, exc_value, exc_trace)
def test_stochasticoptimization():
......@@ -340,7 +323,7 @@ def test_stochasticoptimization():
a = theano.tensor.dvector()
b = theano.tensor.dvector()
try:
with pytest.raises(debugmode.StochasticOrder):
theano.function(
[a, b],
theano.tensor.add(a, b),
......@@ -350,9 +333,6 @@ def test_stochasticoptimization():
stability_patience=max(2, config.DebugMode.patience),
),
)
except debugmode.StochasticOrder:
return # TEST PASS
assert False
def test_just_c_code():
......@@ -379,11 +359,8 @@ def test_baddestroymap():
y = theano.tensor.dvector()
f = theano.function([x, y], BadAdd()(x, y), mode="DEBUG_MODE")
try:
with pytest.raises(debugmode.BadDestroyMap):
f([1, 2], [3, 4])
assert False # failed to raise error
except debugmode.BadDestroyMap:
pass
def test_baddestroymap_c():
......@@ -391,11 +368,8 @@ def test_baddestroymap_c():
pytest.skip("G++ not available, so we need to skip this test.")
x = theano.tensor.dvector()
f = theano.function([x], wb2i(x), mode=debugmode.DebugMode(check_py_code=False))
try:
with pytest.raises(debugmode.BadDestroyMap):
assert np.all(f([1, 2]) == [2, 4])
assert False # failed to raise error
except debugmode.BadDestroyMap:
pass
class TestViewMap:
......@@ -423,21 +397,15 @@ class TestViewMap:
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], self.BadAddRef()(x, y), mode="DEBUG_MODE")
try:
with pytest.raises(debugmode.BadViewMap):
f([1, 2], [3, 4])
assert False # failed to raise error
except debugmode.BadViewMap:
return
def test_badviewmap_slice(self):
    # BadAddSlice returns a slice (a view) of an input without declaring it
    # in its `view_map`; DebugMode must detect this and raise BadViewMap.
    x = theano.tensor.dvector()
    y = theano.tensor.dvector()
    f = theano.function([x, y], self.BadAddSlice()(x, y), mode="DEBUG_MODE")
    with pytest.raises(debugmode.BadViewMap):
        f([1, 2], [3, 4])
def test_goodviewmap(self):
goodop = self.BadAddRef()
......@@ -445,22 +413,16 @@ class TestViewMap:
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], goodop(x, y), mode="DEBUG_MODE")
try:
f([1, 5, 1], [3, 4, 2, 1, 4])
return
except debugmode.BadViewMap:
assert False # failed to raise error
# Shouldn't raise an error
f([1, 5, 1], [3, 4, 2, 1, 4])
def test_badviewmap_c(self):
    # The C implementation (wb1i) returns a view of its input without
    # declaring it in `view_map`; with Python code checks disabled,
    # DebugMode must still catch this from the C path.
    if not theano.config.cxx:
        pytest.skip("C++ not available, so we need to skip this test.")
    x = theano.tensor.dvector()
    f = theano.function([x], wb1i(x), mode=debugmode.DebugMode(check_py_code=False))
    with pytest.raises(debugmode.BadViewMap):
        f([1, 2])
def test_aliased_outputs_ok(self):
# here aliased outputs is ok because they are both aliased to an input
......@@ -563,12 +525,8 @@ class TestViewMap:
out = bad_xy0 * 2 + bad_xy1 * 2
f = theano.function([x, y], out, mode="DEBUG_MODE")
try:
with pytest.raises(debugmode.BadViewMap):
f([1, 2, 3, 4], [5, 6, 7, 8])
assert False # DebugMode should have caught the error
except debugmode.BadViewMap:
# print e
pass
# the situation can be rescued by picking one of the inputs and
# pretending that it is aliased to both the outputs.
......
......@@ -365,14 +365,11 @@ def test_duallinker_mismatch():
# (purposely) wrong
assert PerformLinker().accept(g).make_function()(1.0, 2.0, 3.0) == -10.0
try:
with pytest.raises(MyExc):
# this runs OpWiseCLinker and PerformLinker in parallel and feeds
# variables of matching operations to _my_checker to verify that they
# are the same.
fn(1.0, 2.0, 3.0)
raise Exception("An exception should have been raised here!")
except MyExc:
pass
################################
......@@ -407,12 +404,8 @@ def test_c_fail_error():
e = add_fail(mul(x, y), mul(y, z))
lnk = OpWiseCLinker().accept(Env([y, z], [e]))
fn = lnk.make_function()
try:
with pytest.raises(RuntimeError):
fn(1.5, 3.0)
except RuntimeError:
print("Yay, TEST PASSED")
return # test passed
assert 0 # test failed
def test_shared_input_output():
......
from __future__ import absolute_import, print_function, division
import pytest
from theano.compat import exc_message
from theano.gof.optdb import opt, DB
class TestDB:
def test_0(self):
def test_name_clashes(self):
class Opt(opt.Optimizer): # inheritance buys __hash__
name = "blah"
......@@ -20,35 +20,11 @@ class TestDB:
assert "b" in db
assert "c" in db
try:
with pytest.raises(ValueError, match=r"The name.*"):
db.register("c", Opt()) # name taken
self.fail()
except ValueError as e:
if exc_message(e).startswith("The name"):
pass
else:
raise
except Exception:
self.fail()
try:
with pytest.raises(ValueError, match=r"The name.*"):
db.register("z", Opt()) # name collides with tag
self.fail()
except ValueError as e:
if exc_message(e).startswith("The name"):
pass
else:
raise
except Exception:
self.fail()
try:
with pytest.raises(ValueError, match=r"The tag.*"):
db.register("u", Opt(), "b") # name new but tag collides with name
self.fail()
except ValueError as e:
if exc_message(e).startswith("The tag"):
pass
else:
raise
except Exception:
self.fail()
......@@ -187,38 +187,31 @@ def makeTester(
raise
for i, (variable, expected) in enumerate(izip(variables, expecteds)):
if (
condition = (
variable.dtype != expected.dtype
or variable.shape != expected.shape
or not TensorType.values_eq_approx(variable, expected)
):
self.fail(
(
"Test %s::%s: Output %s gave the wrong "
"value. With inputs %s, expected %s "
"(dtype %s), got %s (dtype %s)."
% (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
)
)
)
assert not condition, (
"Test %s::%s: Output %s gave the wrong "
"value. With inputs %s, expected %s "
"(dtype %s), got %s (dtype %s)."
% (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
)
)
for description, check in iteritems(self.checks):
if not check(inputs, variables):
self.fail(
(
"Test %s::%s: Failed check: %s "
"(inputs were %s, ouputs were %s)"
)
% (self.op, testname, description, inputs, variables)
)
assert check(inputs, variables), (
"Test %s::%s: Failed check: %s " "(inputs were %s, ouputs were %s)"
) % (self.op, testname, description, inputs, variables)
Checker.__name__ = name
if hasattr(Checker, "__qualname__"):
......
......@@ -529,42 +529,35 @@ def makeTester(
expecteds = (expecteds,)
for i, (variable, expected) in enumerate(izip(variables, expecteds)):
if (
condition = (
variable.dtype != expected.dtype
or variable.shape != expected.shape
or not np.allclose(variable, expected, atol=eps, rtol=eps)
):
self.fail(
(
"Test %s::%s: Output %s gave the wrong"
" value. With inputs %s, expected %s (dtype %s),"
" got %s (dtype %s). eps=%f"
" np.allclose returns %s %s"
)
% (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
eps,
np.allclose(variable, expected, atol=eps, rtol=eps),
np.allclose(variable, expected),
)
)
)
assert not condition, (
"Test %s::%s: Output %s gave the wrong"
" value. With inputs %s, expected %s (dtype %s),"
" got %s (dtype %s). eps=%f"
" np.allclose returns %s %s"
) % (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
eps,
np.allclose(variable, expected, atol=eps, rtol=eps),
np.allclose(variable, expected),
)
for description, check in iteritems(self.checks):
if not check(inputs, variables):
self.fail(
(
"Test %s::%s: Failed check: %s (inputs"
" were %s, outputs were %s)"
)
% (self.op, testname, description, inputs, variables)
)
assert check(inputs, variables), (
"Test %s::%s: Failed check: %s (inputs"
" were %s, outputs were %s)"
) % (self.op, testname, description, inputs, variables)
def test_bad_build(self):
if skip:
......@@ -4080,10 +4073,12 @@ class TestMinMax:
n = as_tensor_variable(data)
assert min(n).dtype == "bool"
i = eval_outputs(min(n))
assert i is False
assert i.ndim == 0
assert not np.any(i)
assert max(n).dtype == "bool"
i = eval_outputs(max(n))
assert i is True
assert i.ndim == 0
assert np.all(i)
def test_basic_allclose():
......@@ -5353,10 +5348,8 @@ class TestDivimpl:
class TestMean:
def test_regression_mean_of_ndarray_failure(self):
    # Regression test: calling `tensor.mean` directly on a raw ndarray
    # shouldn't throw an `AttributeError` (or any other, for that matter).
    tensor.mean(np.zeros(1))
def test_mean_f16(self):
x = tensor.vector(dtype="float16")
......
......@@ -482,13 +482,8 @@ class TestCAReduce(unittest_tools.InferShapeTester):
% str(scalar_op)
)
if scalar_op in [scalar.maximum, scalar.minimum] and numpy_raised:
try:
out = f(xv)
assert out.dtype == dtype
except ValueError:
pass
else:
self.fail()
with pytest.raises(ValueError):
f(xv)
else:
if test_nan:
try:
......
......@@ -142,25 +142,15 @@ class TestSubtensor(utt.OptimizationTestMixin):
oldlevel = _logger.level
_logger.setLevel(logging.CRITICAL)
try:
try:
with pytest.raises(IndexError):
self.eval_output_and_check(t)
except IndexError:
return
self.fail()
finally:
_logger.setLevel(oldlevel)
def test_err_subslice(self):
    # Indexing with a slice whose stop is itself a slice is invalid.
    # NOTE(review): `Exception` is deliberately broad here — the constraint
    # on the exception type is relaxed since this might be handled by
    # AdvancedSubtensor and raised as a different type.
    n = self.shared(np.ones(3, dtype=self.dtype))
    with pytest.raises(Exception):
        n[slice(0, slice(1, 2, None), None)]
def test_ok_range_finite(self):
n = self.shared(np.arange(3, dtype=self.dtype))
......@@ -1095,24 +1085,12 @@ class TestSubtensor(utt.OptimizationTestMixin):
a = fscalar()
b = fscalar()
c = vector()
try:
with pytest.raises(TypeError):
c[a:b]
except NotImplementedError:
self.fail()
except TypeError:
pass
try:
with pytest.raises(TypeError):
c[a:]
except NotImplementedError:
self.fail()
except TypeError:
pass
try:
with pytest.raises(TypeError):
c[:b]
except NotImplementedError:
self.fail()
except TypeError:
pass
@pytest.mark.slow
def test_grad_list(self):
......
......@@ -77,18 +77,8 @@ class RopLopChecker:
If your op is not differentiable(so you can't define Rop)
test that an error is raised.
"""
raised = False
try:
with pytest.raises(ValueError):
tensor.Rop(y, self.x, self.v)
except ValueError:
raised = True
if not raised:
self.fail(
(
"Op did not raise an error even though the function"
" is not differentiable"
)
)
def check_mat_rop_lop(self, y, out_shape):
"""
......@@ -162,10 +152,13 @@ class RopLopChecker:
v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv)
assert np.allclose(v1, v2), "ROP mismatch: %s %s" % (v1, v2)
known_fail = False
try:
self.check_nondiff_rop(theano.clone(y, replace={self.x: break_op(self.x)}))
except AssertionError:
tensor.Rop(
theano.clone(y, replace={self.x: break_op(self.x)}), self.x, self.v
)
except ValueError:
known_fail = True
# TEST LOP
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论