Commit 027566e1, authored by Frédéric Bastien

Merge pull request #1723 from abergeron/tag_slow

Tag slow tests
......@@ -885,9 +885,9 @@ if 0: # old code still to be ported from ProfileMode
print 'Theano fct call %.3fs %.1f%%' % (total_fct_time,
total_fct_time / total_time *
100)
print ' Theano Op time (included in fct call, Time spent running thunks) %.3fs %.1f%%(of total) %.1f%%(of fct call)' % (local_time,
local_time / total_time * 100,
time_pr_in_fct)
print (' Theano Op time (included in fct call, Time spent '
'running thunks) %.3fs %.1f%%(of total) %.1f%%(of fct call)' %
(local_time, local_time / total_time * 100, time_pr_in_fct))
print 'Other time since import %.3fs %.1f%%'%(other_time,other_time/total_time*100)
print '%i Theano fct call, %.3fs per call'%(total_fct_call, time_per_call)
......
......@@ -2,6 +2,7 @@
import unittest
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
import theano
from theano.gof.link import PerformLinker
......@@ -191,6 +192,7 @@ def test_clinker_straightforward():
assert fn(2.0, 2.0, 2.0) == 2.0
@attr('slow')
def test_clinker_literal_inlining():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
......@@ -206,6 +208,7 @@ def test_clinker_literal_inlining():
assert "4.12345678" in code # we expect the number to be inlined
@attr('slow')
def test_clinker_single_node():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
......@@ -216,6 +219,7 @@ def test_clinker_single_node():
assert fn(2.0, 7.0) == 9
@attr('slow')
def test_clinker_dups():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
......@@ -228,18 +232,19 @@ def test_clinker_dups():
# note: for now the behavior of fn(2.0, 7.0) is undefined
@attr('slow')
def test_clinker_not_used_inputs():
    """Check that CLinker accepts an Env whose input list contains a
    variable (z) that is never used by any output expression, and that
    the compiled function simply ignores the corresponding argument."""
    # CLinker compiles C code, so skip when no C++ compiler is configured.
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    # Testing that unused inputs are allowed.
    # NOTE(review): the original also carried a "duplicate inputs are
    # allowed" comment, which looks copy-pasted from test_clinker_dups;
    # this test only exercises the unused-input case.
    x, y, z = inputs()
    e = add(x, y)  # z is deliberately absent from the expression graph
    lnk = CLinker().accept(Env([x, y, z], [e]))
    fn = lnk.make_function()
    # 2.0 + 1.5 == 3.5; the third argument (for unused z) is ignored.
    assert fn(2.0, 1.5, 1.0) == 3.5
    # note: for now the behavior of fn(2.0, 7.0) is undefined
@attr('slow')
def test_clinker_dups_inner():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
......@@ -255,6 +260,7 @@ def test_clinker_dups_inner():
# Test OpWiseCLinker #
######################
# slow on linux, but near sole test and very central
def test_opwiseclinker_straightforward():
x, y, z = inputs()
e = add(mul(add(x, y), div(x, y)), bad_sub(bad_sub(x, y), z))
......@@ -267,6 +273,7 @@ def test_opwiseclinker_straightforward():
assert fn(2.0, 2.0, 2.0) == -6
@attr('slow')
def test_opwiseclinker_constant():
x, y, z = inputs()
x = Constant(tdouble, 7.2, name='x')
......@@ -300,6 +307,7 @@ def test_duallinker_straightforward():
assert res == 15.3
@attr('slow')
def test_duallinker_mismatch():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
......
......@@ -34,6 +34,7 @@ from theano.sandbox.linalg.ops import (cholesky,
)
from theano.sandbox.linalg import eig, eigh
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
def check_lower_triangular(pd, ch_f):
......@@ -93,6 +94,7 @@ def test_cholesky_grad():
rng, eps=eps))
@attr('slow')
def test_cholesky_and_cholesky_grad_shape():
if not imported_scipy:
raise SkipTest("Scipy needed for the Cholesky op.")
......@@ -395,6 +397,7 @@ class test_diag(unittest.TestCase):
x = rng.rand(5, 4).astype(self.floatX)
tensor.verify_grad(extract_diag, [x], rng=rng)
@attr('slow')
def test_extract_diag_empty(self):
c = self.shared(numpy.array([[], []], self.floatX))
f = theano.function([], extract_diag(c), mode=self.mode)
......
......@@ -16,6 +16,7 @@ if cuda_available:
import unittest
from theano.tests import unittest_tools as utt
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
#TODO: test gpu
# Done in test_consistency_GPU_{serial,parallel}
......@@ -445,6 +446,7 @@ def test_uniform():
allow_01=True, inputs=input)
@attr('slow')
def test_binomial():
#TODO: test size=None, ndim=X
#TODO: test size=X, ndim!=X.ndim
......@@ -532,6 +534,7 @@ def test_binomial():
inputs=input, target_avg=mean, mean_rtol=rtol)
@attr('slow')
def test_normal0():
steps = 50
......
......@@ -8,6 +8,7 @@ import unittest
import cPickle
import numpy
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
from numpy.testing import dec
import theano
......@@ -1525,6 +1526,7 @@ class T_Scan(unittest.TestCase):
analytic_grad[max_err_pos],
num_grad.gx[max_err_pos]))
@attr('slow')
def test_grad_multiple_outs_taps(self):
l = 5
rng = numpy.random.RandomState(utt.fetch_seed())
......@@ -1618,6 +1620,7 @@ class T_Scan(unittest.TestCase):
analytic_grad[max_err_pos],
num_grad.gx[max_err_pos]))
@attr('slow')
def test_grad_multiple_outs_taps_backwards(self):
l = 5
rng = numpy.random.RandomState(utt.fetch_seed())
......@@ -2599,6 +2602,7 @@ class T_Scan(unittest.TestCase):
f2 = theano.function([], gx)
utt.assert_allclose(f2(), numpy.ones((10,)))
@attr('slow')
def test_rop2(self):
seed = utt.fetch_seed()
rng = numpy.random.RandomState(seed)
......@@ -3659,6 +3663,7 @@ class T_Scan(unittest.TestCase):
inp = scan_node.op.outer_non_seqs(scan_node)
assert len(inp) == 1
@attr('slow')
def test_hessian_bug_grad_grad_two_scans(self):
#Bug reported by Bitton Tenessi
......
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
import sys
import time
import unittest
......@@ -128,6 +129,7 @@ class TestSP(unittest.TestCase):
#profmode.print_summary()
@attr('slow')
def test_sparse(self):
# print '\n\n*************************************************'
......
......@@ -1226,6 +1226,7 @@ class UsmmTests(unittest.TestCase):
self.z = numpy.asarray(self.rng.uniform(-1, 1, z_size),
dtype=theano.config.floatX)
# this is slow, but it's the only test for the op.
def test(self):
def mat(format, name, dtype):
if format == 'dense':
......@@ -2069,6 +2070,7 @@ class CastTester(utt.InferShapeTester):
def setUp(self):
super(CastTester, self).setUp()
# slow but only test
def test_cast(self):
for format in sparse.sparse_formats:
for i_dtype in sparse.all_dtypes:
......
import time
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
import numpy
import theano
......@@ -246,6 +247,7 @@ class TestConv2D(utt.InferShapeTester):
N_image_shape=(2, 3, 3, 3), N_filter_shape=(5, 3, 2, 2),
should_raise=True)
@attr('slow')
def test_subsample(self):
"""
Tests convolution where subsampling != (1,1)
......@@ -282,6 +284,7 @@ class TestConv2D(utt.InferShapeTester):
(3, 2, 8, 8), (4, 3, 5, 5),
'valid')
@attr('slow')
def test_invalid_input_shape(self):
"""
Tests that when the shape gived at build time is not the same as
......
......@@ -12,6 +12,7 @@ import theano.sparse
if theano.sparse.enable_sparse:
from scipy import sparse
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
floatX = theano.config.floatX
......@@ -222,6 +223,7 @@ class TestConv3D(utt.InferShapeTester):
self.randomize()
self.check_c_against_python(self.V.get_value(borrow=True).shape[1:4])
@attr('slow')
def test_c_against_mat_mul(self):
# Use a filter of the same size as the image, so the convolution is
# just a dense matrix multiply.
......
......@@ -13,6 +13,7 @@ import __builtin__
builtin_min = __builtin__.min
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
import numpy
from numpy.testing import dec, assert_array_equal, assert_allclose
from numpy.testing.noseclasses import KnownFailureTest
......@@ -1996,6 +1997,7 @@ class TestAlloc(unittest.TestCase):
numpy.zeros(shp))
# This is slow for the ('int8', 3) version.
def test_eye():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
......@@ -4050,6 +4052,7 @@ class t_dot(unittest.TestCase):
utt.verify_grad(dot, [rand(2, 3, 4), rand(4, 5)])
utt.verify_grad(dot, [rand(2, 3, 4), rand(3, 4, 5)])
@attr('slow')
def test_broadcastable_patterns(self):
#
......
......@@ -6,6 +6,8 @@ from numpy import (arange, array, common_type, complex64, complex128, float32,
float64, newaxis, shape, transpose, zeros)
from numpy.testing import assert_array_almost_equal
from nose.plugins.attrib import attr
import theano
import theano.tensor as T
from theano import tensor, Param, shared, config
......@@ -855,6 +857,7 @@ def test_dot22():
cmp((0, 0), (0, 0))
@attr('slow')
def test_dot22scalar():
## including does not seem to work for 'local_dot_to_dot22' and
## 'local_dot22_to_dot22scalar'
......@@ -1179,6 +1182,7 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin):
assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
@attr('slow')
def test_gemv1(self):
self.t_gemv1((3, 2))
self.t_gemv1((0, 2))
......
......@@ -5,6 +5,7 @@ import unittest
import numpy
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
import theano
from theano.gof.python25 import all, any
......@@ -470,6 +471,7 @@ class test_CAReduce(unittest_tools.InferShapeTester):
self.with_linker(gof.PerformLinker(), scalar.and_, dtype=dtype,
test_nan=True, tensor_op=tensor.all)
@attr('slow')
def test_c(self):
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
......@@ -629,6 +631,7 @@ class test_Prod(unittest.TestCase):
fn_a0 = theano.function([x], pwz_a0, mode=self.mode)
assert numpy.allclose(fn_a0(x_val), [1, 10, 162])
@attr('slow')
def test_other_grad_tests(self):
x = theano.tensor.dmatrix()
x_val1 = numpy.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]],
......@@ -760,6 +763,7 @@ class T_sum_dtype(unittest.TestCase):
data = data.astype(dtype)
f(data)
@attr('slow')
def test_sum_custom_dtype(self):
"""
Test the ability to provide your own output dtype for a sum.
......@@ -860,6 +864,7 @@ class T_mean_dtype(unittest.TestCase):
data = data.astype(dtype)
f(data)
@attr('slow')
def test_mean_custom_dtype(self):
"""
Test the ability to provide your own output dtype for a mean.
......@@ -967,6 +972,7 @@ class T_prod_dtype(unittest.TestCase):
data = data.astype(dtype)
f(data)
@attr('slow')
def test_prod_custom_dtype(self):
"""
Test the ability to provide your own output dtype for a prod.
......@@ -998,6 +1004,7 @@ class T_prod_dtype(unittest.TestCase):
tensor.grad(prod_var.sum(), x,
disconnected_inputs='ignore')
@attr('slow')
def test_prod_custom_acc_dtype(self):
"""
Test the ability to provide your own acc_dtype for a prod.
......@@ -1086,6 +1093,7 @@ class T_prod_without_zeros_dtype(unittest.TestCase):
data = data.astype(dtype)
f(data)
@attr('slow')
def test_prod_without_zeros_custom_dtype(self):
"""
Test ability to provide your own output dtype for a ProdWithoutZeros().
......@@ -1109,6 +1117,7 @@ class T_prod_without_zeros_dtype(unittest.TestCase):
data = data.astype(input_dtype)
f(data)
@attr('slow')
def test_prod_without_zeros_custom_acc_dtype(self):
"""
Test ability to provide your own acc_dtype for a ProdWithoutZeros().
......
......@@ -407,13 +407,6 @@ class TestBartlett(utt.InferShapeTester):
self._compile_and_check([x], [self.op(x)], [1], self.op_class)
if __name__ == "__main__":
t = TestBartlett('setUp')
t.setUp()
t.test_perform()
t.test_infer_shape()
class TestFillDiagonal(utt.InferShapeTester):
rng = numpy.random.RandomState(43)
......@@ -470,11 +463,3 @@ class TestFillDiagonal(utt.InferShapeTester):
numpy.random.rand()],
self.op_class,
warn=False)
if __name__ == "__main__":
utt.unittest.main()
t = TestFillDiagonal('setUp')
t.setUp()
t.test_perform()
t.test_gradient()
t.test_infer_shape()
from nose.plugins.attrib import attr
import numpy
from theano import tensor, function
import unittest
class TestKeepDims:
# this tests other ops to ensure they keep the dimensions of their
# inputs correctly
class TestKeepDims(unittest.TestCase):
def makeKeepDims_local(self, x, y, axis):
x = tensor.as_tensor_variable(x)
......@@ -28,6 +33,7 @@ class TestKeepDims:
return tensor.DimShuffle(y.type.broadcastable, new_dims)(y)
@attr('slow')
def test_keepdims(self):
x = tensor.dtensor3()
......@@ -95,7 +101,3 @@ class TestKeepDims:
assert numpy.allclose(keep_param(a), keep_synth(a))
assert keep_param(a).shape == keep_synth(a).shape
if __name__ == '__main__':
TestKeepDims().test_keepdims()
......@@ -12,6 +12,8 @@ import sys
from theano.tests import unittest_tools
from numpy.testing.noseclasses import KnownFailureTest
from nose.plugins.attrib import attr
def cross_entropy(target, output, axis=1):
"""
......@@ -557,6 +559,7 @@ def create_realistic(window_size=3, # 7,
return model
@attr('slow')
def test_naacl_model(iters_per_unsup=3, iters_per_sup=3,
optimizer=None, realistic=False):
#print "BUILDING MODEL"
......@@ -643,13 +646,7 @@ def jtest_main():
test_naacl_model(optimizer, 10, 10, realistic=False)
def real_main():
    # The original main(), renamed so that profile_main() below can wrap
    # it under cProfile (see the comments in profile_main).  Runs the
    # NAACL model test with all default arguments.
    test_naacl_model()
def profile_main():
# This is the main function for profiling
# We've renamed our original main() above to real_main()
import cProfile
import pstats
from theano.compat.six import StringIO
......@@ -664,5 +661,4 @@ def profile_main():
# stats.print_callers()
if __name__ == '__main__':
#real_main()
profile_main()
......@@ -10,6 +10,7 @@ import unittest
import numpy
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
from numpy.testing import dec
from numpy.testing.noseclasses import KnownFailureTest
......@@ -338,13 +339,11 @@ class test_canonize(unittest.TestCase):
fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=
'float32').reshape(1, shp[0])
fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])
dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')
dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')
dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')
dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=
'float64').reshape(1, shp[0])
dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])
cases = [
(fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),
(fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),
......@@ -422,6 +421,7 @@ class test_canonize(unittest.TestCase):
assert(len(f.maker.fgraph.toposort()) == nb_elemwise)
assert(out_dtype == out.dtype)
@attr('slow')
def test_multiple_case(self):
""" test those case take from the comment in Canonizer
x / x -> 1
......@@ -445,14 +445,12 @@ class test_canonize(unittest.TestCase):
fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=
'float32').reshape(1, shp[0])
fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])
dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')
dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')
dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')
dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')
dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=
'float64').reshape(1, shp[0])
dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])
#We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
......@@ -707,8 +705,7 @@ class test_canonize(unittest.TestCase):
dxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
dyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
dzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=
'float32').reshape(1, shp[0])
fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])
#We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode = compile.mode.get_default_mode()
......@@ -1152,6 +1149,7 @@ class test_fusion(unittest.TestCase):
'local_elemwise_fusion', 'canonicalize')
self.do(mode, shared, shp)
@attr('slow')
def test_elemwise_fusion_4d(self):
shp = (3, 3, 3, 3)
mode = copy.copy(compile.mode.get_default_mode())
......@@ -1176,6 +1174,7 @@ class test_fusion(unittest.TestCase):
self.do(mode, cuda.float32_shared_constructor, shp, gpu=True)
@attr('slow')
def test_gpu_fusion_Xd(self):
#we need the optimisation enabled, debug do this.
if theano.config.mode == "FAST_COMPILE":
......@@ -1229,8 +1228,10 @@ class test_fusion(unittest.TestCase):
mode2 = copy.copy(compile.get_default_mode())
mode2._optimizer = mode2._optimizer.excluding('local_elemwise_fusion')
print "test with linker", str(mode1.linker)
times1 = self.do(mode1, shared_fn, shp, gpu=gpu, nb_repeat= nb_repeat, assert_len_topo=False, slice=s)
times2 = self.do(mode2, shared_fn, shp, gpu=gpu, nb_repeat= nb_repeat, assert_len_topo=False, slice=s)
times1 = self.do(mode1, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,
assert_len_topo=False, slice=s)
times2 = self.do(mode2, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,
assert_len_topo=False, slice=s)
print "times1 with local_elemwise_fusion"
print times1, times1.min(), times1.max(), times1.sum()
print "times2 without local_elemwise_fusion"
......@@ -1268,11 +1269,12 @@ class test_fusion(unittest.TestCase):
linker = gof.OpWiseCLinker
mode = compile.Mode(linker(), copy.copy(compile.mode.OPT_FAST_RUN))
mode = compile.ProfileMode()
print "time", self.do(mode, shared, shp=(1000, 1000), gpu=
False, assert_len_topo=False, slice=s, nb_repeat=100)
print "time", self.do(mode, shared, shp=(1000, 1000), gpu=False,
assert_len_topo=False, slice=s, nb_repeat=100)
def tes_memory_leak(self, mode=compile.mode.Mode('c', 'merge'), shared_fn=shared, shp=(3000,3000), gpu=False, nb_repeat=30, assert_len_topo=True, slice=None):
def tes_memory_leak(self, mode=compile.mode.Mode('c', 'merge'),
shared_fn=shared, shp=(3000,3000), gpu=False,
nb_repeat=30, assert_len_topo=True, slice=None):
"""
param shared_fn: if None, will use compile.function
verify that the elemwise fusion work
......@@ -1873,6 +1875,7 @@ class test_local_subtensor_merge(unittest.TestCase):
self.assertRaises(IndexError, f, x_val, idx)
self.assertRaises(IndexError, g, x_val, idx)
@attr('slow')
def test_const2(self):
# var[::-1][const] -> var[-1]
x = tensor.matrix('x')
......@@ -2155,6 +2158,7 @@ class test_local_subtensor_merge(unittest.TestCase):
print 'shape: %s' % (x_s,)
print '%% OK: %f' % (float(n_ok) * 100 / (n_ok + n_index_err))
@attr('slow')
def test_none_slice(self):
# Test case of two slices, var[b1:e1:s1][b2:e2:s2]
# where any of the b, e, and s can be None
......@@ -3175,6 +3179,7 @@ class T_local_switch_sink(unittest.TestCase):
resm[idx])).sum() == self.resm[idx].size
idx += 1
@attr('slow')
def test_local_div_switch_sink(self):
c = T.dscalar()
idx = 0
......
......@@ -4,6 +4,7 @@ import sys
import unittest
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
import numpy
import theano
......@@ -499,6 +500,7 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
self.assertTrue(isinstance(topo_[0].op, self.adv_sub1))
self.assertTrue(numpy.allclose(f([0]), ones[0] * 5))
@attr('slow')
def test_shape_i_const(self):
# Each axis is treated independently by shape_i/shape operators
......@@ -756,6 +758,7 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
except TypeError:
pass
@attr('slow')
def test_grad_list(self):
data = rand(4)
data = numpy.asarray(data, dtype=self.dtype)
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment