Commit 4be051f3 authored by Frederic Bastien

Fix crash for MRG_RandomStreams.seed on GPU

Parent 39edb0de
......@@ -1133,8 +1133,7 @@ class MRG_RandomStreams(object):
if nstreams is None:
nstreams = self.n_streams(size)
rstates = self.get_substream_rstates(nstreams)
if self.use_cuda and dtype == 'float32':
if self.use_cuda and new_r.owner.outputs[1].dtype == 'float32':
rstates = rstates.flatten()
# HACK - we use fact that int32 and float32 have same size to
# sneak ints into the CudaNdarray type.
......@@ -1142,12 +1141,12 @@ class MRG_RandomStreams(object):
tmp_float_buf = numpy.frombuffer(rstates.data, dtype='float32')
assert tmp_float_buf.shape == rstates.shape
assert (tmp_float_buf.view('int32') == rstates).all()
rstate = tmp_float_buf
rstates = tmp_float_buf
assert (old_r.get_value(borrow=True,
return_internal_type=True).shape ==
rstates.shape)
old_r.set_value(rstates,
borrow=True)
assert rstates.dtype == old_r.dtype
old_r.set_value(rstates, borrow=True)
def inc_rstate(self):
"""Update self.rstate to be skipped 2^134 steps forward to the next stream start"""
......
......@@ -938,12 +938,22 @@ def test_seed_fn():
test_use_cuda.append(True)
idx = tensor.ivector()
for use_cuda in test_use_cuda:
if config.mode == 'FAST_COMPILE' and use_cuda:
mode = 'FAST_RUN'
else:
mode = config.mode
for new_seed, same in [(234, True), (None, True), (23, False)]:
random = MRG_RandomStreams(234)
fn1 = theano.function([], random.uniform((2, 2)))
fn2 = theano.function([], random.uniform((3, 3), nstreams=2))
random = MRG_RandomStreams(234, use_cuda=use_cuda)
fn1 = theano.function([], random.uniform((2, 2), dtype='float32'),
mode=mode)
fn2 = theano.function([], random.uniform((3, 3), nstreams=2,
dtype='float32'),
mode=mode)
fn3 = theano.function([idx],
random.uniform(idx, nstreams=3, ndim=1))
random.uniform(idx, nstreams=3, ndim=1,
dtype='float32'),
mode=mode)
fn1_val0 = fn1()
fn1_val1 = fn1()
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment