提交 6e9e6d6b authored 作者: lamblin's avatar lamblin

Merge pull request #854 from nouiz/fix_test

Fix test
......@@ -61,7 +61,8 @@ class T_ProfileMode_WrapLinker(unittest.TestCase):
copy.deepcopy(modified_mode)
# More straightforward test
assert theano.compile.mode.get_default_mode().linker.fgraph is None
linker = theano.compile.mode.get_default_mode().linker
assert not hasattr(linker, "fgraph") or linker.fgraph is None
if __name__ == '__main__':
......
......@@ -858,28 +858,38 @@ class T_subtensor(theano.tensor.tests.test_basic.T_subtensor):
# The variable fast is used to set the member perform_using_take of
# the Op. It is only useful for testing that we use the fast
# version when we should. Users should not use it.
for data, idx, fast in [(rand(70000), range(70000), True),
(rand(70000, 5), range(70000), True),
(rand(70000, 2, 3), range(70000), True),
(rand(1025, 1025), [5, 10], True),
(rand(3, 1025, 1026), [1, 2], True),
(rand(1025, 67000), [5, 10], True),
(rand(3, 10, 68000), [1, 2], True),
(rand(3, 69000, 11), [1, 2], True),
# use too much memory to enable by default.
#(rand(2*10e7), [-1, 199999999], True),
(rand(4, 5), [2, 3], True),
(rand(4, 2, 3), [0, 3], True),
(rand(4, 2, 3), [3, 3, 1, 1, 2,
2, 0, 0], True),
(rand(4, 2, 3), [3, 3, 1, 1, 2, 2, 0,
0, -1, -2, -3, -4], True),
# Test 4 dims as gpu. code use another algo
# in that case. This new algo is not as much
# optimized for that case.
(rand(4, 4, 2, 3), [3, 3, 1, 1, 2, 2, 0, 0,
-1, -2, -3, -4], False),
]:
for shape, idx, fast in [((70000,), range(70000), True),
((70000, 5), range(70000), True),
((70000, 2, 3), range(70000), True),
((1025, 1025), [5, 10], True),
((3, 1025, 1026), [1, 2], True),
((1025, 67000), [5, 10], True),
((3, 10, 68000), [1, 2], True),
((3, 69000, 11), [1, 2], True),
# much memory, will be disabled if needed
((2*10e7,), [-1, 199999999], True),
((4, 5), [2, 3], True),
((4, 2, 3), [0, 3], True),
((4, 2, 3), [3, 3, 1, 1, 2,
2, 0, 0], True),
((4, 2, 3), [3, 3, 1, 1, 2, 2, 0,
0, -1, -2, -3, -4], True),
# Test 4 dims as gpu. code use another algo
# in that case. This new algo is not as much
# optimized for that case.
((4, 4, 2, 3), [3, 3, 1, 1, 2, 2, 0, 0,
-1, -2, -3, -4], False),
]:
# If there is not enought memory on the GPU, skip the test
size_needed = numpy.prod(shape) * (4 + 1)
if isinstance(theano.compile.get_default_mode(),
theano.compile.DebugMode):
size_needed = numpy.prod(shape) * 4 * 4
if size_needed >= theano.sandbox.cuda.mem_info()[0]:
#print "skip", shape
continue
data = rand(*shape)
data = numpy.asarray(data, dtype=self.dtype)
n = self.shared(data, borrow=True)
......@@ -895,7 +905,10 @@ class T_subtensor(theano.tensor.tests.test_basic.T_subtensor):
# Test with input strided
t = self.adv_sub1()(n[::-1], idx)
t.owner.op.perform_using_take = fast
#DebugMode do a copy of the input, so we loose the strides.
if not isinstance(theano.compile.get_default_mode(),
theano.compile.DebugMode):
t.owner.op.perform_using_take = fast
val = theano.function([], t, mode=self.mode)()
val = numpy.asarray(val)
......
......@@ -3341,7 +3341,7 @@ class StructuredDot(gof.Op):
:param a: A sparse matrix.
:param b: A sparse or dense matrix.
:return: The dot product of `a` and `b`.
:return: The dot product of `a` and `b` as a dense matrix.
:note: The grad implemented is structured.
"""
......@@ -4425,8 +4425,8 @@ class Dot(gof.op.Op):
one or all operands is sparse. Supported format are CSC and CSR.
The output of the operation is dense.
:param x: Matrix variable.
:param y: Matrix variable.
:param x: sparse or dense matrix variable.
:param y: sparse or dense matrix variable.
:return: The dot product `x`.`y` in a dense format.
......
......@@ -264,7 +264,7 @@ def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
or numpy.any(abs(variable - expected) > eps)):
self.fail(("Test %s::%s: Output %s gave the wrong"
" value. With inputs %s, expected %s (dtype %s),"
" got %s (dtype %s)."
" got %s (dtype %s). eps=%f"
" numpy.allclose returns %s %s") % (
self.op,
testname,
......@@ -274,6 +274,7 @@ def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
expected.dtype,
variable,
variable.dtype,
eps,
numpy.allclose(variable, expected, atol=eps),
numpy.allclose(variable, expected)))
......@@ -1352,7 +1353,7 @@ _grad_broadcast_unary_gammaln = dict(
normal=(rand_ranged(1e-8, 10, (2, 3)),),)
if theano.config.floatX == 'float32':
gamma_eps = 2e-4
gamma_eps = 3e-4
else:
gamma_eps = 2e-10
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论