提交 1f324a65 · 作者: Pascal Lamblin

Merge pull request #3076 from nouiz/tests

[TEST] fix tests
......@@ -1770,7 +1770,9 @@ def local_gpu_downsample_factor_max_grad(node):
node.op.ds == node.op.st):
assert node.op.__props__ == ('ds', 'ignore_border', 'st', 'padding',
'mode')
if node.op.padding != (0, 0) or node.op.mode != 'max':
if (node.op.padding != (0, 0) or
node.op.mode != 'max' or
node.op.st != node.op.ds):
return
x, z, gz = node.inputs
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
......@@ -1785,8 +1787,10 @@ def local_gpu_downsample_factor_max_grad(node):
@local_optimizer([downsample.DownsampleFactorMaxGradGrad])
def local_gpu_downsample_factor_max_grad_grad(node):
if isinstance(node.op, downsample.DownsampleFactorMaxGradGrad):
assert node.op.__props__ == ('ds', 'ignore_border', 'st')
assert node.op.__props__ == ('ds', 'ignore_border', 'st',
'padding', 'mode')
if node.op.padding != (0, 0) or node.op.mode != 'max':
return
x, z, gx = node.inputs
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
op = GpuDownsampleFactorMaxGradGrad(node.op.ds,
......
......@@ -1069,7 +1069,7 @@ class T_subtensor(theano.tensor.tests.test_subtensor.T_subtensor):
val = numpy.asarray(val)
good = data[idx]
self.assertTrue(val.ndim == data.ndim)
self.assertTrue(numpy.allclose(val, good), (val, good))
utt.assert_allclose(val, good)
# Test with input strided
t = self.adv_sub1()(n[::-1], idx)
......@@ -1082,7 +1082,7 @@ class T_subtensor(theano.tensor.tests.test_subtensor.T_subtensor):
val = numpy.asarray(val)
good = data[::-1][idx]
self.assertTrue(val.ndim == data.ndim)
self.assertTrue(numpy.allclose(val, good), (val, good))
utt.assert_allclose(val, good)
def test_advinc_subtensor1():
......@@ -1103,7 +1103,7 @@ def test_advinc_subtensor1():
rval = f(yval)
rep = xval.copy()
rep[[0, 2]] += yval
assert numpy.allclose(rval, rep)
utt.assert_allclose(rval, rep)
def test_inc_subtensor():
......@@ -1121,8 +1121,8 @@ def test_inc_subtensor():
assert sum([isinstance(node.op, cuda.GpuIncSubtensor) and
node.op.set_instead_of_inc == False
for node in f.maker.fgraph.toposort()]) == 1
assert numpy.allclose(f(xval, yval), [[1., 12., 13.],
[4., 15., 16.], [7., 18., 19.]])
utt.assert_allclose(f(xval, yval), [[1., 12., 13.],
[4., 15., 16.], [7., 18., 19.]])
def test_set_subtensor():
......@@ -1180,11 +1180,11 @@ def test_many_arg_elemwise():
if mode is mode_with_gpu:
assert any([isinstance(node.op, cuda.GpuElemwise)
for node in f.maker.fgraph.apply_nodes])
assert numpy.allclose(out, outputs[-1])
utt.assert_allclose(out, outputs[-1])
results_gpu, results_cpu = outputs
assert numpy.allclose(results_gpu, results_cpu)
utt.assert_allclose(results_gpu, results_cpu)
def test_duplicate_arg_elemwise():
......@@ -1196,7 +1196,7 @@ def test_duplicate_arg_elemwise():
Aval = numpy.random.RandomState([1, 2, 3]).randn(5, 5).astype('float32')
Bval = Aval + Aval
assert numpy.allclose(Bval, f(Aval))
utt.assert_allclose(Bval, f(Aval))
def test_shared_float32():
......@@ -1235,7 +1235,7 @@ def test_gpueye():
B.as_cuda_ndarray_variable(out),
mode=mode_with_gpu)
result = numpy.asarray(f(N, M))
assert numpy.allclose(result, numpy.eye(N, M_, dtype=dtype))
utt.assert_allclose(result, numpy.eye(N, M_, dtype=dtype))
assert result.dtype == numpy.dtype(dtype)
assert any([isinstance(node.op, B.GpuEye)
for node in f.maker.fgraph.toposort()])
......
......@@ -18,6 +18,8 @@ def get_mode(gpu):
'local_gpu_multinomial')
if isinstance(mode.linker, theano.gof.PerformLinker):
mode.linker = predefined_linkers['c|py']
if hasattr(mode.linker, 'c_thunks'):
mode.linker.c_thunks = True
return mode
......
......@@ -1189,12 +1189,15 @@ class T_random_function(utt.InferShapeTester):
# binomial was created by calling RandomFunction on a string,
# random_integers by calling it on a function.
rng_r = random_state_type()
mode = None
if theano.config.mode in ["DEBUG_MODE", "DebugMode"]:
mode = 'FAST_COMPILE'
post_bin_r, bin_sample = binomial(rng_r, (3, 5), 1, .3)
f = theano.function([rng_r], [post_bin_r, bin_sample])
f = theano.function([rng_r], [post_bin_r, bin_sample], mode=mode)
pkl_f = pickle.dumps(f)
post_int_r, int_sample = random_integers(rng_r, (3, 5), -1, 8)
g = theano.function([rng_r], [post_int_r, int_sample])
g = theano.function([rng_r], [post_int_r, int_sample], mode=mode)
pkl_g = pickle.dumps(g)
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论