提交 ad23d387 authored 作者: Reyhane Askari's avatar Reyhane Askari

expectedFailure decorator added to failing tests with fast flag

上级 8fbf31d6
......@@ -11,6 +11,7 @@ from theano.gof.opt import (OpKeyOptimizer, PatternSub, NavigatorOptimizer,
from theano.gof import destroyhandler
from theano.gof.fg import FunctionGraph, InconsistencyError
from theano.gof.toolbox import ReplaceValidate
from theano.tests.unittest_tools import expectedFailure_fast
from theano.configparser import change_flags
......@@ -169,6 +170,7 @@ def test_misc():
######################
@expectedFailure_fast
def test_aliased_inputs_replacement():
x, y, z = inputs()
tv = transpose_view(x)
......@@ -200,6 +202,7 @@ def test_indestructible():
consistent(g)
@expectedFailure_fast
def test_usage_loop_through_views_2():
x, y, z = inputs()
e0 = transpose_view(transpose_view(sigmoid(x)))
......@@ -210,6 +213,7 @@ def test_usage_loop_through_views_2():
inconsistent(g) # we cut off the path to the sigmoid
@expectedFailure_fast
def test_destroyers_loop():
# AddInPlace(x, y) and AddInPlace(y, x) should not coexist
x, y, z = inputs()
......@@ -259,6 +263,7 @@ def test_aliased_inputs2():
inconsistent(g)
@expectedFailure_fast
def test_aliased_inputs_tolerate():
x, y, z = inputs()
e = add_in_place_2(x, x)
......@@ -273,6 +278,7 @@ def test_aliased_inputs_tolerate2():
inconsistent(g)
@expectedFailure_fast
def test_same_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, x)
......@@ -280,6 +286,7 @@ def test_same_aliased_inputs_ignored():
consistent(g)
@expectedFailure_fast
def test_different_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, transpose_view(x))
......@@ -314,6 +321,7 @@ def test_indirect():
inconsistent(g)
@expectedFailure_fast
def test_indirect_2():
x, y, z = inputs()
e0 = transpose_view(x)
......@@ -325,6 +333,7 @@ def test_indirect_2():
consistent(g)
@expectedFailure_fast
def test_long_destroyers_loop():
x, y, z = inputs()
e = dot(dot(add_in_place(x, y),
......@@ -366,6 +375,7 @@ def test_multi_destroyers():
pass
@expectedFailure_fast
def test_multi_destroyers_through_views():
x, y, z = inputs()
e = dot(add(transpose_view(z), y), add(z, x))
......@@ -408,6 +418,7 @@ def test_usage_loop_through_views():
consistent(g)
@expectedFailure_fast
def test_usage_loop_insert_views():
x, y, z = inputs()
e = dot(add_in_place(x, add(y, z)),
......@@ -442,6 +453,7 @@ def test_value_repl_2():
consistent(g)
@expectedFailure_fast
def test_multiple_inplace():
# this tests issue #5223
# there were some problems with Ops that have more than
......
......@@ -1754,6 +1754,7 @@ def test_without_dnn_batchnorm_train_without_running_averages():
f_abstract(X, Scale, Bias, Dy)
@utt.expectedFailure_fast
def test_dnn_batchnorm_train_inplace():
# test inplace_running_mean and inplace_running_var
if not dnn.dnn_available(test_ctx_name):
......@@ -1876,6 +1877,7 @@ def test_batchnorm_inference():
utt.assert_allclose(outputs_abstract[5], outputs_ref[5], rtol=2e-3, atol=4e-5) # dvar
@utt.expectedFailure_fast
def test_batchnorm_inference_inplace():
# test inplace
if not dnn.dnn_available(test_ctx_name):
......
......@@ -175,6 +175,7 @@ class TestGpuCholesky(unittest.TestCase):
GpuCholesky(lower=True, inplace=False)(A)
self.assertRaises(AssertionError, invalid_input_func)
@utt.expectedFailure_fast
def test_diag_chol(self):
# Diagonal matrix input Cholesky test.
for lower in [True, False]:
......@@ -183,6 +184,7 @@ class TestGpuCholesky(unittest.TestCase):
A_val = np.diag(np.random.uniform(size=5).astype("float32") + 1)
self.compare_gpu_cholesky_to_np(A_val, lower=lower, inplace=inplace)
@utt.expectedFailure_fast
def test_dense_chol_lower(self):
# Dense matrix input lower-triangular Cholesky test.
for lower in [True, False]:
......
......@@ -582,6 +582,7 @@ def test_no_complex():
mode=mode_with_gpu)
@utt.expectedFailure_fast
def test_local_lift_solve():
if not cusolver_available:
raise SkipTest('No cuSolver')
......@@ -616,6 +617,7 @@ def test_gpu_solve_not_inplace():
utt.assert_allclose(f_cpu(A_val, b_val), f_gpu(A_val, b_val))
@utt.expectedFailure_fast
def test_local_lift_cholesky():
if not cusolver_available:
raise SkipTest('No cuSolver')
......
......@@ -3201,6 +3201,7 @@ import theano.tensor.tests.test_sharedvar
theano_fct_=lambda a: dense_from_sparse(a * 2.),
ref_fct_=lambda a: np.asarray((a * 2).todense()),
cast_value_=scipy.sparse.csr_matrix,
expect_fail_fast_shape_inplace=False,
)
class test_shared_options(object):
pass
......
from __future__ import absolute_import, print_function, division
import unittest
import theano
from theano import tensor
from theano.gof.opt import check_stack_trace
......@@ -25,6 +26,9 @@ def test_blocksparse_inplace_gemv_opt():
assert f.maker.fgraph.toposort()[-1].op.inplace
assert check_stack_trace(f, ops_to_check=[sparse_block_gemv_inplace])
if theano.config.cycle_detection == 'fast' and theano.config.mode != 'FAST_COMPILE':
test_blocksparse_inplace_gemv_opt = unittest.expectedFailure(test_blocksparse_inplace_gemv_opt)
def test_blocksparse_inplace_outer_opt():
b = tensor.fmatrix()
......
......@@ -4805,6 +4805,9 @@ class T_exp(unittest.TestCase):
np.asarray([[1.5089518, 1.48439076, -4.7820262],
[2.04832468, 0.50791564, -1.58892269]])])
if theano.config.cycle_detection == 'fast' and theano.config.mode != 'FAST_COMPILE':
test_grad_1 = unittest.expectedFailure(test_grad_1)
def test_int(self):
x = ivector()
f = function([x], exp(x))
......
......@@ -500,6 +500,7 @@ def just_gemm(i, o, ishapes=[(4, 3), (3, 5), (4, 5), (), ()],
raise
@unittest_tools.expectedFailure_fast
def test_gemm_opt0():
# Many subgraphs whose dots can be eliminated
X, Y, Z, a, b = XYZab()
......
......@@ -1989,6 +1989,7 @@ class test_local_subtensor_lift(unittest.TestCase):
assert len(prog) == 3
f([4, 5]) # let debugmode test something
@utt.expectedFailure_fast
def test4(self):
# basic test that the optimization doesn't work with broadcasting
# ... It *could* be extended to,
......
......@@ -27,6 +27,7 @@ def makeSharedTester(shared_constructor_,
theano_fct_,
ref_fct_,
cast_value_=np.asarray,
expect_fail_fast_shape_inplace=True,
):
"""
This is a generic fct to allow reusing the same test function
......@@ -549,6 +550,10 @@ def makeSharedTester(shared_constructor_,
assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
assert all(node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm))
assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
if theano.config.cycle_detection == 'fast' and expect_fail_fast_shape_inplace and theano.config.mode != 'FAST_COMPILE':
test_specify_shape_inplace = unittest.expectedFailure(test_specify_shape_inplace)
def test_values_eq(self):
""" Test the type.values_eq[_approx] function"""
dtype = self.dtype
......
......@@ -447,8 +447,11 @@ class AttemptManyTimes:
return attempt_multiple_times
def expectedFailure_fast(f):
    """Decorator marking a test as an expected failure under fast cycle detection.

    When Theano is run with THEANO_FLAGS=cycle_detection='fast', some
    inplace-optimization tests are known to fail; wrap them with
    unittest.expectedFailure in that case. Otherwise the test function
    is returned unchanged.

    Parameters
    ----------
    f : callable
        The test function (or method) to decorate.

    Returns
    -------
    callable
        ``unittest.expectedFailure(f)`` when ``theano.config.cycle_detection``
        is ``'fast'``; ``f`` itself otherwise.
    """
    # NOTE: this must decorate f directly (not return the bare
    # unittest.expectedFailure decorator), so that @expectedFailure_fast
    # works as a plain decorator without call parentheses.
    if theano.config.cycle_detection == 'fast':
        return unittest.expectedFailure(f)
    else:
        return f
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论