提交 c42a18c6 authored 作者: abergeron's avatar abergeron

Merge pull request #3358 from nouiz/tests

Fix tests and improve error message
......@@ -3213,9 +3213,10 @@ CudaNdarray_gpu_init(PyObject* _unused, PyObject* args)
if (cnmem > 1)
mem = cnmem * 1024 * 1024;
else{
// Clip to 98.5% to let memory for the driver.
if (cnmem > .985){
cnmem = .985;
// Clip to 98% to let memory for the driver.
// 98.5% didn't work in some cases.
if (cnmem > .98){
cnmem = .98;
}
size_t free = 0, total = 0;
cudaError_t err = cudaMemGetInfo(&free, &total);
......
......@@ -225,7 +225,7 @@ def test_conv_nnet1():
rval_cpu = run_conv_nnet1(False)
utt.seed_rng()
rval_gpu = run_conv_nnet1(True)
assert numpy.allclose(rval_cpu, rval_gpu, rtol=1e-4, atol=1e-6)
utt.assert_allclose(rval_cpu, rval_gpu, rtol=1e-4, atol=1e-6)
def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST
......@@ -318,7 +318,7 @@ def test_conv_nnet2():
utt.seed_rng()
rval_cpu = run_conv_nnet2(False)
# print rval_cpu[0], rval_gpu[0],rval_cpu[0]-rval_gpu[0]
assert numpy.allclose(rval_cpu, rval_gpu, rtol=1e-4, atol=1e-4)
utt.assert_allclose(rval_cpu, rval_gpu, rtol=1e-4, atol=1e-4)
def build_conv_nnet2_classif(use_gpu, isize, ksize, n_batch,
......@@ -559,8 +559,8 @@ def cmp_run_conv_nnet2_classif(seed, isize, ksize, bsize,
rval_gpu - rval_cpu) / rval_gpu))
if not ignore_error:
assert numpy.allclose(rval_cpu, rval_gpu,
rtol=1e-5, atol=float_atol)
utt.assert_allclose(rval_cpu, rval_gpu,
rtol=1e-5, atol=float_atol)
# Synchronize parameters to start from the same point next time
if i < n_train - 1:
......
......@@ -21,7 +21,10 @@ class BlockSparse_Gemv_and_Outer(unittest.TestCase):
def setUp(self):
utt.seed_rng()
self.mode = theano.compile.get_default_mode().excluding(
mode = None
if theano.config.mode == "FAST_COMPILE":
mode = "FAST_RUN"
self.mode = theano.compile.get_mode(mode).excluding(
'constant_folding'
)
self.gemv_op = sparse_block_gemv
......@@ -304,7 +307,7 @@ class BlockSparse_Gemv_and_Outer(unittest.TestCase):
out = self.outer_op(o, x, y, xIdx, yIdx)
f = theano.function([o, x, y, xIdx, yIdx], out,
on_unused_input="warn")
on_unused_input="warn", mode=self.mode)
o_val, x_val, y_val, xIdx_val, yIdx_val = \
BlockSparse_Gemv_and_Outer.outer_data()
......
......@@ -7,6 +7,7 @@ from theano import config, function, tensor
from theano.sandbox import multinomial
from theano.compile.mode import get_default_mode, predefined_linkers
import theano.sandbox.cuda as cuda
import theano.tests.unittest_tools as utt
def get_mode(gpu):
......@@ -45,22 +46,22 @@ def test_multinomial_0():
for node in f.maker.fgraph.toposort()])
# test that both first and second samples can be drawn
assert numpy.allclose(f([[1, 0], [0, 1]], [.1, .1]),
utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]),
[[2, 0], [0, 2]])
# test that both second labels can be drawn
r = f([[.2, .8], [.3, .7]], [.31, .31])
assert numpy.allclose(r, [[0, 2], [0, 2]]), r
utt.assert_allclose(r, [[0, 2], [0, 2]])
# test that both first labels can be drawn
r = f([[.2, .8], [.3, .7]], [.21, .21])
assert numpy.allclose(r, [[0, 2], [2, 0]]), r
utt.assert_allclose(r, [[0, 2], [2, 0]])
# change the size to make sure output gets reallocated ok
# and also make sure that the GPU version doesn't screw up the
# transposed-ness
r = f([[.2, .8]], [.25])
assert numpy.allclose(r, [[0, 2]]), r
utt.assert_allclose(r, [[0, 2]])
run_with_c(body)
if cuda.cuda_available:
......@@ -93,9 +94,9 @@ def test_multinomial_large():
assert mval.dtype == 'float64'
else:
raise NotImplementedError(config.cast_policy)
assert numpy.allclose(mval.sum(axis=1), 2)
utt.assert_allclose(mval.sum(axis=1), 2)
asdf = numpy.asarray([0, 0, 2, 0])+0*pval
assert numpy.allclose(mval, asdf) # broadcast over all rows
utt.assert_allclose(mval, asdf) # broadcast over all rows
run_with_c(body)
if cuda.cuda_available:
run_with_c(body, True)
......
......@@ -4064,8 +4064,11 @@ def get_vector_length(v):
if ((isinstance(stop, numbers.Integral) and
isinstance(start, numbers.Integral))):
return stop - start
raise ValueError("length not known")
if isinstance(v, Variable):
msg = theano.printing.debugprint(v, file='str')
else:
msg = str(v)
raise ValueError("length not known: %s" % msg)
@constructor
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论