提交 2fc09a03 authored 作者: Frederic's avatar Frederic

More info in tests error

上级 b9967384
......@@ -225,7 +225,7 @@ def test_conv_nnet1():
rval_cpu = run_conv_nnet1(False)
utt.seed_rng()
rval_gpu = run_conv_nnet1(True)
assert numpy.allclose(rval_cpu, rval_gpu, rtol=1e-4, atol=1e-6)
utt.assert_allclose(rval_cpu, rval_gpu, rtol=1e-4, atol=1e-6)
def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST
......@@ -318,7 +318,7 @@ def test_conv_nnet2():
utt.seed_rng()
rval_cpu = run_conv_nnet2(False)
# print rval_cpu[0], rval_gpu[0],rval_cpu[0]-rval_gpu[0]
assert numpy.allclose(rval_cpu, rval_gpu, rtol=1e-4, atol=1e-4)
utt.assert_allclose(rval_cpu, rval_gpu, rtol=1e-4, atol=1e-4)
def build_conv_nnet2_classif(use_gpu, isize, ksize, n_batch,
......@@ -559,8 +559,8 @@ def cmp_run_conv_nnet2_classif(seed, isize, ksize, bsize,
rval_gpu - rval_cpu) / rval_gpu))
if not ignore_error:
assert numpy.allclose(rval_cpu, rval_gpu,
rtol=1e-5, atol=float_atol)
utt.assert_allclose(rval_cpu, rval_gpu,
rtol=1e-5, atol=float_atol)
# Synchronize parameters to start from the same point next time
if i < n_train - 1:
......
......@@ -7,6 +7,7 @@ from theano import config, function, tensor
from theano.sandbox import multinomial
from theano.compile.mode import get_default_mode, predefined_linkers
import theano.sandbox.cuda as cuda
import theano.tests.unittest_tools as utt
def get_mode(gpu):
......@@ -45,22 +46,22 @@ def test_multinomial_0():
for node in f.maker.fgraph.toposort()])
# test that both first and second samples can be drawn
assert numpy.allclose(f([[1, 0], [0, 1]], [.1, .1]),
utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]),
[[2, 0], [0, 2]])
# test that both second labels can be drawn
r = f([[.2, .8], [.3, .7]], [.31, .31])
assert numpy.allclose(r, [[0, 2], [0, 2]]), r
utt.assert_allclose(r, [[0, 2], [0, 2]])
# test that both first labels can be drawn
r = f([[.2, .8], [.3, .7]], [.21, .21])
assert numpy.allclose(r, [[0, 2], [2, 0]]), r
utt.assert_allclose(r, [[0, 2], [2, 0]])
# change the size to make sure output gets reallocated ok
# and also make sure that the GPU version doesn't screw up the
# transposed-ness
r = f([[.2, .8]], [.25])
assert numpy.allclose(r, [[0, 2]]), r
utt.assert_allclose(r, [[0, 2]])
run_with_c(body)
if cuda.cuda_available:
......@@ -93,9 +94,9 @@ def test_multinomial_large():
assert mval.dtype == 'float64'
else:
raise NotImplementedError(config.cast_policy)
assert numpy.allclose(mval.sum(axis=1), 2)
utt.assert_allclose(mval.sum(axis=1), 2)
asdf = numpy.asarray([0, 0, 2, 0])+0*pval
assert numpy.allclose(mval, asdf) # broadcast over all rows
utt.assert_allclose(mval, asdf) # broadcast over all rows
run_with_c(body)
if cuda.cuda_available:
run_with_c(body, True)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论