Commit 278525e1 authored by Frédéric Bastien

Merge pull request #2768 from nouiz/faster_test

[MRG] Faster test
......@@ -716,7 +716,8 @@ def clone_get_equiv(inputs, outputs,
return memo
def general_toposort(r_out, deps, debug_print=False):
def general_toposort(r_out, deps, debug_print=False,
_deps=None, deps_cache=None):
"""WRITEME
:note:
......@@ -727,22 +728,29 @@ def general_toposort(r_out, deps, debug_print=False):
:note:
The order of the return value list is determined by the order of nodes returned by the deps() function.
"""
deps_cache = {}
def _deps(io):
if io not in deps_cache:
d = deps(io)
if d:
if not isinstance(d, (list, OrderedSet)):
raise TypeError("Non-deterministic collections here make"
:note: deps should be provided or can be None and the caller
provide _deps and deps_cache. The second option remove a
Python function call, so is faster.
"""
if _deps is None:
deps_cache = {}
def _deps(io):
if io not in deps_cache:
d = deps(io)
if d:
if not isinstance(d, (list, OrderedSet)):
raise TypeError(
"Non-deterministic collections here make"
" toposort non-deterministic.")
deps_cache[io] = list(d)
deps_cache[io] = list(d)
else:
deps_cache[io] = d
return d
else:
deps_cache[io] = d
return d
else:
return deps_cache[io]
return deps_cache[io]
assert isinstance(r_out, (tuple, list, deque))
def io_toposort(inputs, outputs, orderings=None):
    """Sort the Apply nodes between ``inputs`` and ``outputs`` in
    topological (dependency) order.

    :param inputs: list/tuple of Variable instances treated as graph
        boundaries: their owners are never explored.
    :param outputs: list/tuple of Variables whose ancestry is sorted.
    :param orderings: optional dict mapping a node to a list of extra
        predecessors that must be scheduled before it.  Values must be
        deterministic collections (list/OrderedSet) -- no sets allowed!
        Keys must not be members of ``inputs``.
    :return: list of Apply instances in dependency order.
    """
    if orderings is None:
        orderings = {}
    # The inputs are used only to decide which 'predecessors' to explore:
    # anything in iset is a boundary and contributes no dependencies.
    iset = set(inputs)

    # We build 2 functions as a speed up.  When there are no extra
    # orderings we hand general_toposort a specialized dependency
    # function together with its cache, which removes one Python-level
    # function call per node.
    deps_cache = {}
    deps = None
    _deps = None
    if not orderings:  # can be None or an empty dict
        # Specialized function that is faster when there is no ordering.
        # The cache is consulted inside the function itself for speed.
        def _deps(obj):
            if obj in deps_cache:
                # BUG FIX: this previously read deps_cache[io] -- an
                # unbound name copied from general_toposort's _deps(io)
                # -- raising NameError on every cache hit.
                return deps_cache[obj]
            rval = []
            if obj not in iset:
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                if rval:
                    if not isinstance(rval, (list, OrderedSet)):
                        raise TypeError(
                            "Non-deterministic collections here make"
                            " toposort non-deterministic.")
                    # Store a copy so later mutation of rval cannot
                    # corrupt the cache.
                    deps_cache[obj] = list(rval)
                else:
                    deps_cache[obj] = rval
            else:
                deps_cache[obj] = rval
            return rval
    else:
        # General version: merge graph dependencies with the
        # caller-supplied extra orderings.
        def deps(obj):
            rval = []
            if obj not in iset:
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                rval.extend(orderings.get(obj, []))
            else:
                # Graph inputs may not be given extra predecessors.
                assert not orderings.get(obj, [])
            return rval

    topo = general_toposort(outputs, deps=deps, _deps=_deps,
                            deps_cache=deps_cache)
    return [o for o in topo if isinstance(o, Apply)]
......
......@@ -1969,6 +1969,7 @@ class T_Scan(unittest.TestCase):
analytic_grad = reset_rng_grad_fn(v_u, v_x0, vW_in)
utt.assert_allclose(analytic_grad[0][:2], numpy.zeros((2, 2)))
@attr('slow')
def test_grad_multiple_outs_some_disconnected(self):
# Created on Tue Oct 07 13:28:51 2014
# @author: vaneetke
......
......@@ -24,6 +24,7 @@ from theano.sparse.tests.test_basic import sparse_random_inputs
class TestSP(unittest.TestCase):
@attr('slow')
def test_convolution(self):
# print '\n\n*************************************************'
# print ' TEST CONVOLUTION'
......@@ -218,6 +219,7 @@ class TestSP(unittest.TestCase):
# print 'Theano processing time: ', ttot
# profmode.print_summary()
@attr('slow')
def test_multilayer_sparse(self):
# fixed parameters
bsize = 10 # batch size
......
import time
import unittest
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
import numpy
try:
......@@ -2347,6 +2348,7 @@ class CastTester(utt.InferShapeTester):
utt.assert_allclose(expected, t_cls)
utt.assert_allclose(expected, t_prop)
@attr('slow')
def test_infer_shape(self):
for format in sparse.sparse_formats:
for i_dtype in sparse.all_dtypes:
......
......@@ -521,6 +521,7 @@ class test_CAReduce(unittest_tools.InferShapeTester):
self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype)
self.with_linker(gof.CLinker(), scalar.xor, dtype=dtype)
@attr('slow')
def test_c_nan(self):
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
......@@ -568,6 +569,7 @@ class test_Prod(unittest.TestCase):
self.mode = mode
@attr('slow')
def test_verify_grad(self):
# including zeros, as the case with zeros is important
......@@ -624,6 +626,7 @@ class test_Prod(unittest.TestCase):
#unittest_tools.verify_grad(fn5, [x_val])
@attr('slow')
def test_prod_no_zeros_in_input(self):
x = theano.tensor.dmatrix()
x_val = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='float32')
......
from nose.plugins.attrib import attr
import numpy as np
import numpy
import unittest
......@@ -191,6 +192,7 @@ class TestBinCountOp(utt.InferShapeTester):
assert (np.bincount(a, minlength=23) == f3(a)).all()
assert (np.bincount(a, minlength=5) == f4(a)).all()
@attr('slow')
def test_infer_shape(self):
for dtype in tensor.discrete_dtypes:
# uint64 always fails
......@@ -432,6 +434,7 @@ class TestRepeatOp(utt.InferShapeTester):
assert np.allclose(np.repeat(a, r, axis=axis),
f(a, r))
@attr('slow')
def test_infer_shape(self):
for ndim in range(4):
x = T.TensorType(config.floatX, [False] * ndim)()
......@@ -545,6 +548,7 @@ class TestFillDiagonal(utt.InferShapeTester):
assert out[2, 2, 2] == val
assert (out == val).sum() == min(a.shape)
@attr('slow')
def test_gradient(self):
utt.verify_grad(fill_diagonal, [numpy.random.rand(5, 8),
numpy.random.rand()],
......
......@@ -304,23 +304,20 @@ class test_canonize(unittest.TestCase):
# We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode = compile.mode.get_default_mode()
old_optimizer = mode._optimizer
try:
mode._optimizer = gof.Query(["canonicalize"])
mode._optimizer = mode._optimizer.excluding(
'local_elemwise_fusion')
for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
# we need the optimisation enabled, debug do this.
mode=mode)
opt = gof.Query(["canonicalize"])
opt = opt.excluding('local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
for id, [g, sym_inputs, val_inputs,
nb_elemwise, out_dtype] in enumerate(cases):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
# we need the optimisation enabled, debug do this.
mode=mode)
out = f(*val_inputs)
assert(len(f.maker.fgraph.toposort()) == nb_elemwise)
assert(out_dtype == out.dtype)
finally:
mode._optimizer = old_optimizer
out = f(*val_inputs)
assert(len(f.maker.fgraph.toposort()) == nb_elemwise)
assert(out_dtype == out.dtype)
def test_elemwise_multiple_inputs_optimisation2(self):
"""
......@@ -455,13 +452,12 @@ class test_canonize(unittest.TestCase):
# We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode = compile.mode.get_default_mode()
old_optimizer = mode._optimizer
try:
mode._optimizer = gof.Query(["canonicalize"])
mode._optimizer = mode._optimizer.including('ShapeOpt')
mode._optimizer = mode._optimizer.excluding(
opt = gof.Query(["canonicalize"])
opt = opt.including('ShapeOpt')
opt = opt.excluding(
'local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
# test x / x -> 1
for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx, [fx], [fxv], 'float32'),
(dx/dx, [dx], [dxv], 'float64'),
......@@ -644,7 +640,7 @@ class test_canonize(unittest.TestCase):
assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)
assert(out_dtype == out.dtype)
finally:
mode._optimizer = old_optimizer
pass
def test_abs_mul_div(self):
"""
......@@ -705,12 +701,11 @@ class test_canonize(unittest.TestCase):
# We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode = compile.mode.get_default_mode()
old_optimizer = mode._optimizer
try:
mode._optimizer = gof.Query(["canonicalize"])
mode._optimizer = mode._optimizer.excluding(
opt = gof.Query(["canonicalize"])
opt = opt.excluding(
'local_elemwise_fusion')
mode = mode.__class__(linker=mode.linker, optimizer=opt)
# test fail!
# test x / y / z -> x / (y * z)
for (g, sym_inputs, val_inputs, out_dtype) in [
......@@ -749,7 +744,7 @@ class test_canonize(unittest.TestCase):
assert(out_dtype == out.dtype)
finally:
mode._optimizer = old_optimizer
pass
def test_dont_merge_if_multiple_client(self):
""" test those case take from the comment in Canonizer
......@@ -3412,6 +3407,8 @@ class test_shapeoptimizer(unittest.TestCase):
# Register the optimization
opt.register_specialize(local_identity_noshape_to_identity_shape)
mode = theano.compile.get_default_mode().including(
'ShapeOpt', 'specialize')
# With the optimization
# The identity_shape op should not be needed anymore to compute
# the shape
......
......@@ -296,30 +296,30 @@ class TestKron(utt.InferShapeTester):
raise SkipTest('kron tests need the scipy package to be installed')
for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
x = tensor.tensor(dtype='floatX',
broadcastable=(False,) * len(shp0))
a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
if len(shp0) + len(shp1) == 2:
continue
x = tensor.tensor(dtype='floatX',
broadcastable=(False,) * len(shp0))
y = tensor.tensor(dtype='floatX',
broadcastable=(False,) * len(shp1))
f = function([x, y], kron(x, y))
a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
b = self.rng.rand(*shp1).astype(config.floatX)
out = f(a, b)
assert numpy.allclose(out, scipy.linalg.kron(a, b))
def test_numpy_2d(self):
for shp0 in [(2, 3)]:
x = tensor.tensor(dtype='floatX',
broadcastable=(False,) * len(shp0))
a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
for shp1 in [(6, 7)]:
if len(shp0) + len(shp1) == 2:
continue
x = tensor.tensor(dtype='floatX',
broadcastable=(False,) * len(shp0))
y = tensor.tensor(dtype='floatX',
broadcastable=(False,) * len(shp1))
f = function([x, y], kron(x, y))
a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
b = self.rng.rand(*shp1).astype(config.floatX)
out = f(a, b)
assert numpy.allclose(out, numpy.kron(a, b))
......@@ -1432,6 +1432,7 @@ class TestAdvancedSubtensor(unittest.TestCase):
class TestInferShape(utt.InferShapeTester):
@attr('slow')
def test_infer_shape(self):
# IncSubtensor
admat = dmatrix()
......
......@@ -5,6 +5,7 @@ import os
import shutil
import unittest
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
import numpy
from numpy import array
......@@ -724,6 +725,7 @@ class T_examples(unittest.TestCase):
assert numpy.allclose(v3, 0.59044123)
assert numpy.allclose(v4, 0.59044123)
@attr('slow')
def test_examples_real_example(self):
rng = numpy.random
......
Markdown is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment