Commit a09228b5, authored by Frederic Bastien

Skip many tests when there is no compiler.

Parent commit: a37a03f1
...@@ -18,6 +18,8 @@ def test_no_output_from_implace(): ...@@ -18,6 +18,8 @@ def test_no_output_from_implace():
op = fct_no_opt.maker.fgraph.outputs[0].owner.op op = fct_no_opt.maker.fgraph.outputs[0].owner.op
assert (hasattr(op, 'destroy_map') and 0 in op.destroy_map) assert (hasattr(op, 'destroy_map') and 0 in op.destroy_map)
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
# Ensure that the elemwise op that produces the output is not inplace when # Ensure that the elemwise op that produces the output is not inplace when
# using a mode that includes the optimization # using a mode that includes the optimization
opt = AddFeatureOptimizer(NoOutputFromInplace()) opt = AddFeatureOptimizer(NoOutputFromInplace())
......
...@@ -390,6 +390,9 @@ def test_shared_input_output(): ...@@ -390,6 +390,9 @@ def test_shared_input_output():
# Test bug reported on the mailing list by Alberto Orlandi # Test bug reported on the mailing list by Alberto Orlandi
# https://groups.google.com/d/topic/theano-users/6dLaEqc2R6g/discussion # https://groups.google.com/d/topic/theano-users/6dLaEqc2R6g/discussion
# The shared variable is both an input and an output of the function. # The shared variable is both an input and an output of the function.
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
inc = theano.tensor.iscalar('inc') inc = theano.tensor.iscalar('inc')
state = theano.shared(0) state = theano.shared(0)
state.name = 'state' state.name = 'state'
......
...@@ -3,6 +3,8 @@ import os ...@@ -3,6 +3,8 @@ import os
import pickle import pickle
import unittest import unittest
from nose.plugins.skip import SkipTest
import theano import theano
from theano.compat import PY3 from theano.compat import PY3
from theano.gof import CachedConstantError, FunctionGraph from theano.gof import CachedConstantError, FunctionGraph
...@@ -32,6 +34,8 @@ class TFunctionGraph(unittest.TestCase): ...@@ -32,6 +34,8 @@ class TFunctionGraph(unittest.TestCase):
# In the past, we where removing some not used variable from # In the past, we where removing some not used variable from
# fgraph.variables event if the apply had other output used in # fgraph.variables event if the apply had other output used in
# the graph. This caused a crash. # the graph. This caused a crash.
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
# This test run the pickle that reproduce this case. # This test run the pickle that reproduce this case.
with open(os.path.join(os.path.dirname(__file__), with open(os.path.join(os.path.dirname(__file__),
......
...@@ -209,6 +209,8 @@ def test_partial_function(): ...@@ -209,6 +209,8 @@ def test_partial_function():
utt.assert_allclose(f(5), np.array([32., 16., 1.7857142857142858])) utt.assert_allclose(f(5), np.array([32., 16., 1.7857142857142858]))
check_partial_function(vm.VM_Linker(allow_partial_eval=True, use_cloop=False)) check_partial_function(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
check_partial_function('cvm') check_partial_function('cvm')
...@@ -223,6 +225,8 @@ def test_partial_function_with_output_keys(): ...@@ -223,6 +225,8 @@ def test_partial_function_with_output_keys():
assert f(5, output_subset=['a'])['a'] == f(5)['a'] assert f(5, output_subset=['a'])['a'] == f(5)['a']
check_partial_function_output_keys(vm.VM_Linker(allow_partial_eval=True, use_cloop=False)) check_partial_function_output_keys(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
check_partial_function_output_keys('cvm') check_partial_function_output_keys('cvm')
...@@ -243,6 +247,8 @@ def test_partial_function_with_updates(): ...@@ -243,6 +247,8 @@ def test_partial_function_with_updates():
assert y.get_value() == 10 assert y.get_value() == 10
check_updates(vm.VM_Linker(allow_partial_eval=True, use_cloop=False)) check_updates(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
check_updates('cvm') check_updates('cvm')
......
from __future__ import absolute_import, print_function, division from __future__ import absolute_import, print_function, division
import theano
from theano.scalar.basic_sympy import SymPyCCode from theano.scalar.basic_sympy import SymPyCCode
from theano.scalar.basic import floats from theano.scalar.basic import floats
import theano
from nose.plugins.skip import SkipTest
try: try:
import sympy import sympy
...@@ -15,6 +18,9 @@ xt, yt = floats('xy') ...@@ -15,6 +18,9 @@ xt, yt = floats('xy')
def test_SymPyCCode(): def test_SymPyCCode():
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
op = SymPyCCode([xs, ys], xs + ys) op = SymPyCCode([xs, ys], xs + ys)
e = op(xt, yt) e = op(xt, yt)
g = theano.gof.FunctionGraph([xt, yt], [e]) g = theano.gof.FunctionGraph([xt, yt], [e])
......
...@@ -456,6 +456,9 @@ class TestAbstractConvNoOptim(BaseTestConv2d): ...@@ -456,6 +456,9 @@ class TestAbstractConvNoOptim(BaseTestConv2d):
o = self.get_output_shape(i, f, s, b, fd) o = self.get_output_shape(i, f, s, b, fd)
mode = theano.Mode(optimizer=None) mode = theano.Mode(optimizer=None)
if not theano.config.cxx:
raise SkipTest("Need cxx to test conv2d")
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s, self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, provide_shape=provide_shape, verify_grad=True, provide_shape=provide_shape,
border_mode=b, filter_flip=flip, border_mode=b, filter_flip=flip,
...@@ -490,6 +493,9 @@ class TestCpuConv2d(BaseTestConv2d): ...@@ -490,6 +493,9 @@ class TestCpuConv2d(BaseTestConv2d):
def tcase(self, i, f, s, b, flip, provide_shape, fd=(1, 1)): def tcase(self, i, f, s, b, flip, provide_shape, fd=(1, 1)):
if fd != (1, 1): if fd != (1, 1):
raise SkipTest("No dilation implementation for basic cpu ConvOp.") raise SkipTest("No dilation implementation for basic cpu ConvOp.")
if not theano.config.cxx:
raise SkipTest("Need cxx to test conv2d")
mode = self.mode mode = self.mode
o = self.get_output_shape(i, f, s, b, fd) o = self.get_output_shape(i, f, s, b, fd)
fwd_OK = True fwd_OK = True
...@@ -679,6 +685,8 @@ class TestCpuConv3d(BaseTestConv3d): ...@@ -679,6 +685,8 @@ class TestCpuConv3d(BaseTestConv3d):
def tcase(self, i, f, s, b, flip, provide_shape, fd=(1, 1, 1)): def tcase(self, i, f, s, b, flip, provide_shape, fd=(1, 1, 1)):
if fd != (1, 1, 1): if fd != (1, 1, 1):
raise SkipTest("No dilation implementation for basic cpu Conv3D.") raise SkipTest("No dilation implementation for basic cpu Conv3D.")
if not theano.config.cxx:
raise SkipTest("Need cxx to test conv2d")
mode = self.mode mode = self.mode
o = self.get_output_shape(i, f, s, b, fd) o = self.get_output_shape(i, f, s, b, fd)
fwd_OK = True fwd_OK = True
......
...@@ -37,6 +37,8 @@ class TestCorr2D(utt.InferShapeTester): ...@@ -37,6 +37,8 @@ class TestCorr2D(utt.InferShapeTester):
:param image_shape: The constant shape info passed to corrMM. :param image_shape: The constant shape info passed to corrMM.
:param filter_shape: The constant shape info passed to corrMM. :param filter_shape: The constant shape info passed to corrMM.
""" """
if not theano.config.cxx:
raise SkipTest("Need cxx to test conv2d")
N_image_shape = [T.get_scalar_constant_value(T.as_tensor_variable(x)) N_image_shape = [T.get_scalar_constant_value(T.as_tensor_variable(x))
for x in image_shape] for x in image_shape]
N_filter_shape = [T.get_scalar_constant_value(T.as_tensor_variable(x)) N_filter_shape = [T.get_scalar_constant_value(T.as_tensor_variable(x))
...@@ -265,6 +267,8 @@ class TestCorr2D(utt.InferShapeTester): ...@@ -265,6 +267,8 @@ class TestCorr2D(utt.InferShapeTester):
def rand(shape, dtype='float64'): def rand(shape, dtype='float64'):
r = numpy.asarray(numpy.random.rand(*shape), dtype=dtype) r = numpy.asarray(numpy.random.rand(*shape), dtype=dtype)
return r * 2 - 1 return r * 2 - 1
if not theano.config.cxx:
raise SkipTest("Need cxx to test conv2d")
ops = [corr.CorrMM, corr.CorrMM_gradWeights, corr.CorrMM_gradInputs] ops = [corr.CorrMM, corr.CorrMM_gradWeights, corr.CorrMM_gradInputs]
a_shapes = [[4, 5, 6, 3], [1, 5, 6, 3], [1, 5, 6, 3]] a_shapes = [[4, 5, 6, 3], [1, 5, 6, 3], [1, 5, 6, 3]]
...@@ -288,6 +292,8 @@ class TestCorr2D(utt.InferShapeTester): ...@@ -288,6 +292,8 @@ class TestCorr2D(utt.InferShapeTester):
def test_infer_shape_forward(self): def test_infer_shape_forward(self):
if theano.config.mode == "FAST_COMPILE": if theano.config.mode == "FAST_COMPILE":
raise SkipTest("CorrMM don't work in FAST_COMPILE") raise SkipTest("CorrMM don't work in FAST_COMPILE")
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
def rand(*shape): def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64') r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
...@@ -319,6 +325,8 @@ class TestCorr2D(utt.InferShapeTester): ...@@ -319,6 +325,8 @@ class TestCorr2D(utt.InferShapeTester):
def test_infer_shape_gradW(self): def test_infer_shape_gradW(self):
if theano.config.mode == "FAST_COMPILE": if theano.config.mode == "FAST_COMPILE":
raise SkipTest("CorrMM don't work in FAST_COMPILE") raise SkipTest("CorrMM don't work in FAST_COMPILE")
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
def rand(*shape): def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64') r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
...@@ -357,6 +365,8 @@ class TestCorr2D(utt.InferShapeTester): ...@@ -357,6 +365,8 @@ class TestCorr2D(utt.InferShapeTester):
def test_infer_shape_gradI(self): def test_infer_shape_gradI(self):
if theano.config.mode == "FAST_COMPILE": if theano.config.mode == "FAST_COMPILE":
raise SkipTest("CorrMM don't work in FAST_COMPILE") raise SkipTest("CorrMM don't work in FAST_COMPILE")
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
def rand(*shape): def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64') r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
......
...@@ -37,6 +37,9 @@ class TestCorr3D(utt.InferShapeTester): ...@@ -37,6 +37,9 @@ class TestCorr3D(utt.InferShapeTester):
:param image_shape: The constant shape info passed to corr3dMM. :param image_shape: The constant shape info passed to corr3dMM.
:param filter_shape: The constant shape info passed to corr3dMM. :param filter_shape: The constant shape info passed to corr3dMM.
""" """
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
N_image_shape = [T.get_scalar_constant_value(T.as_tensor_variable(x)) N_image_shape = [T.get_scalar_constant_value(T.as_tensor_variable(x))
for x in image_shape] for x in image_shape]
N_filter_shape = [T.get_scalar_constant_value(T.as_tensor_variable(x)) N_filter_shape = [T.get_scalar_constant_value(T.as_tensor_variable(x))
...@@ -276,6 +279,9 @@ class TestCorr3D(utt.InferShapeTester): ...@@ -276,6 +279,9 @@ class TestCorr3D(utt.InferShapeTester):
""" """
Checks dtype upcast for Corr3dMM methods. Checks dtype upcast for Corr3dMM methods.
""" """
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
def rand(shape, dtype='float64'): def rand(shape, dtype='float64'):
r = numpy.asarray(numpy.random.rand(*shape), dtype=dtype) r = numpy.asarray(numpy.random.rand(*shape), dtype=dtype)
return r * 2 - 1 return r * 2 - 1
...@@ -302,6 +308,8 @@ class TestCorr3D(utt.InferShapeTester): ...@@ -302,6 +308,8 @@ class TestCorr3D(utt.InferShapeTester):
def test_infer_shape_forward(self): def test_infer_shape_forward(self):
if theano.config.mode == "FAST_COMPILE": if theano.config.mode == "FAST_COMPILE":
raise SkipTest("Corr3dMM don't work in FAST_COMPILE") raise SkipTest("Corr3dMM don't work in FAST_COMPILE")
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
def rand(*shape): def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64') r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
...@@ -333,6 +341,8 @@ class TestCorr3D(utt.InferShapeTester): ...@@ -333,6 +341,8 @@ class TestCorr3D(utt.InferShapeTester):
def test_infer_shape_gradW(self): def test_infer_shape_gradW(self):
if theano.config.mode == "FAST_COMPILE": if theano.config.mode == "FAST_COMPILE":
raise SkipTest("Corr3dMM don't work in FAST_COMPILE") raise SkipTest("Corr3dMM don't work in FAST_COMPILE")
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
def rand(*shape): def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64') r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
...@@ -372,6 +382,8 @@ class TestCorr3D(utt.InferShapeTester): ...@@ -372,6 +382,8 @@ class TestCorr3D(utt.InferShapeTester):
def test_infer_shape_gradI(self): def test_infer_shape_gradI(self):
if theano.config.mode == "FAST_COMPILE": if theano.config.mode == "FAST_COMPILE":
raise SkipTest("Corr3dMM don't work in FAST_COMPILE") raise SkipTest("Corr3dMM don't work in FAST_COMPILE")
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
def rand(*shape): def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64') r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment