提交 48fd7188 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Add a test to prevent the problem from recurring.

上级 06a836aa
......@@ -361,13 +361,9 @@ def test_GPU_nstreams_limit():
def test_consistency_GPUA_serial():
"""
Verify that the random numbers generated by GPUA_mrg_uniform, serially,
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
from theano.gpuarray.tests.test_basic_ops import \
mode_with_gpu as mode
# Verify that the random numbers generated by GPUA_mrg_uniform, serially,
# are the same as the reference (Java) implementation by L'Ecuyer et al.
from theano.gpuarray.tests.test_basic_ops import mode_with_gpu as mode
from theano.gpuarray.type import gpuarray_shared_constructor
seed = 12345
......@@ -415,14 +411,10 @@ def test_consistency_GPUA_serial():
def test_consistency_GPUA_parallel():
"""
Verify that the random numbers generated by GPUA_mrg_uniform, in
parallel, are the same as the reference (Java) implementation by
L'Ecuyer et al.
"""
from theano.gpuarray.tests.test_basic_ops import \
mode_with_gpu as mode
# Verify that the random numbers generated by GPUA_mrg_uniform, in
# parallel, are the same as the reference (Java) implementation by
# L'Ecuyer et al.
from theano.gpuarray.tests.test_basic_ops import mode_with_gpu as mode
from theano.gpuarray.type import gpuarray_shared_constructor
seed = 12345
......@@ -468,6 +460,30 @@ def test_consistency_GPUA_parallel():
assert(numpy.allclose(samples, java_samples))
def test_GPUA_full_fill():
    """
    Regression test: ensure the whole random-sample buffer is filled on
    the GPU, and that a large GPU sample matches the CPU result.

    Builds the same MRG uniform stream twice — once through the normal
    CPU path and once by constructing a GPUA_mrg_uniform op directly
    from the CPU rng state — and checks the outputs agree.
    """
    # Make sure the whole sample buffer is filled. Also make sure
    # large samples are consistent with CPU results.
    from theano.gpuarray.tests.test_basic_ops import (mode_with_gpu as mode,
                                                      test_ctx_name)
    from theano.gpuarray.type import gpuarray_shared_constructor
    # This needs to be large to trigger the problem on GPU
    size = (10, 1000)
    # CPU reference stream; use_cuda=False keeps sampling on the host.
    R = MRG_RandomStreams(234, use_cuda=False)
    # nstreams=60*256 forces many parallel streams so the GPU kernel must
    # cover the full output buffer (the bug this test guards against).
    uni = R.uniform(size, nstreams=60*256)
    f_cpu = theano.function([], uni)
    # Copy the CPU rng state (registered by R.uniform above) onto the GPU
    # so both paths start from identical stream states.
    rstate_gpu = gpuarray_shared_constructor(R.state_updates[-1][0].get_value())
    new_rstate, sample = rng_mrg.GPUA_mrg_uniform.new(rstate_gpu, ndim=None,
                                                      dtype='float32',
                                                      size=size)
    # Advance the GPU rng state on every call, mirroring the CPU stream.
    rstate_gpu.default_update = new_rstate
    f_gpu = theano.function([], sample)
    # NOTE(review): `mode` and `test_ctx_name` are imported but unused here —
    # presumably leftovers from a copied test; confirm before removing.
    utt.assert_allclose(f_cpu(), f_gpu())
def basictest(f, steps, sample_size, prefix="", allow_01=False, inputs=None,
target_avg=0.5, target_std=None, mean_rtol=0.01, std_tol=0.01):
if inputs is None:
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论