Commit 1bac3e5b authored by Olivier Delalleau

Got rid of some warnings in MGR_RandomStreams tests

Parent b2a3ac6c
@@ -350,7 +350,9 @@ def test_uniform():
         print 'ON CPU with size=(%s):'%str(size)
         x = tensor.matrix()
         R = MRG_RandomStreams(234, use_cuda=False)
-        u = R.uniform(size=size)
+        # Note: we specify `nstreams` to avoid a warning.
+        u = R.uniform(size=size,
+                      nstreams=rng_mrg.guess_n_streams(size, warn=False))
         f = theano.function(var_input, u, mode=mode)
         assert any([isinstance(node.op,theano.sandbox.rng_mrg.mrg_uniform)
                     for node in f.maker.env.toposort()])
@@ -366,7 +368,8 @@ def test_uniform():
         print ''
         print 'ON GPU with size=(%s):'%str(size)
         R = MRG_RandomStreams(234, use_cuda=True)
-        u = R.uniform(size=size, dtype='float32')
+        u = R.uniform(size=size, dtype='float32',
+                      nstreams=rng_mrg.guess_n_streams(size, warn=False))
         assert u.dtype == 'float32' #well, it's really that this test w GPU doesn't make sense otw
         f = theano.function(var_input, theano.Out(
                 theano.sandbox.cuda.basic_ops.gpu_from_host(u),
@@ -421,7 +424,9 @@ def test_binomial():
         print ''
         print 'ON CPU with size=(%s) and mean(%d):'%(str(size),mean)
         R = MRG_RandomStreams(234, use_cuda=False)
-        u = R.binomial(size=size, p=mean)
+        # Note: we specify `nstreams` to avoid a warning.
+        u = R.binomial(size=size, p=mean,
+                       nstreams=rng_mrg.guess_n_streams(size, warn=False))
         f = theano.function(var_input, u, mode=mode)
         theano.printing.debugprint(f)
         out = f(*input)
@@ -433,7 +438,9 @@ def test_binomial():
         print ''
         print 'ON GPU with size=(%s) and mean(%d):'%(str(size),mean)
         R = MRG_RandomStreams(234, use_cuda=True)
-        u = R.binomial(size=size, p=mean, dtype='float32')
+        u = R.binomial(size=size, p=mean, dtype='float32',
+                       nstreams=rng_mrg.guess_n_streams(size,
+                                                        warn=False))
         assert u.dtype == 'float32' #well, it's really that this test w GPU doesn't make sense otw
         f = theano.function(var_input, theano.Out(
                 theano.sandbox.cuda.basic_ops.gpu_from_host(u),
@@ -478,7 +485,9 @@ def test_normal0():
         print 'ON CPU:'
         R = MRG_RandomStreams(234, use_cuda=False)
-        n = R.normal(size=size, avg=avg, std=std)
+        # Note: we specify `nstreams` to avoid a warning.
+        n = R.normal(size=size, avg=avg, std=std,
+                     nstreams=rng_mrg.guess_n_streams(size, warn=False))
         f = theano.function(var_input, n, mode=mode)
         theano.printing.debugprint(f)
         out = f(*input)
@@ -491,7 +500,8 @@ def test_normal0():
         print ''
         print 'ON GPU:'
         R = MRG_RandomStreams(234, use_cuda=True)
-        n = R.normal(size=size, avg=avg, std=std, dtype='float32')
+        n = R.normal(size=size, avg=avg, std=std, dtype='float32',
+                     nstreams=rng_mrg.guess_n_streams(size, warn=False))
         assert n.dtype == 'float32' #well, it's really that this test w GPU doesn't make sense otw
         f = theano.function(var_input, theano.Out(
                 theano.sandbox.cuda.basic_ops.gpu_from_host(n),
@@ -557,7 +567,8 @@ def test_multinomial():
         pvals = numpy.asarray(numpy.random.uniform(size=sample_size))
         pvals = numpy.apply_along_axis(lambda row : row/numpy.sum(row), 1, pvals)
         R = MRG_RandomStreams(234, use_cuda=False)
-        m = R.multinomial(pvals=pvals, dtype=config.floatX)
+        # Note: we specify `nstreams` to avoid a warning.
+        m = R.multinomial(pvals=pvals, dtype=config.floatX, nstreams=30 * 256)
         f = theano.function([], m, mode=mode_)
         theano.printing.debugprint(f)
         out = f()
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Please finish editing this comment first!
Register or sign in to comment