提交 7d048062 authored 作者: Amjad Almahairi's avatar Amjad Almahairi

fixing tests

上级 a43d461f
...@@ -1317,11 +1317,12 @@ class MRG_RandomStreams(object): ...@@ -1317,11 +1317,12 @@ class MRG_RandomStreams(object):
def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int64', def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int64',
nstreams=None): nstreams=None):
""" """
Sample `n` (`n` needs to be >= 1) times from a multinomial Sample `n` (`n` needs to be >= 1, default 1) times from a multinomial
distribution defined by probabilities pvals. distribution defined by probabilities pvals.
Example : pvals = [[.98, .01, .01], [.01, .49, .50]] and n=2 will Example : pvals = [[.98, .01, .01], [.01, .49, .50]] and n=1 will
probably result in [[2,0,0],[0,1,1]]. probably result in [[1,0,0],[0,0,1]]. When setting n=2, this
will probably result in [[2,0,0],[0,1,1]].
Notes Notes
----- -----
......
...@@ -74,7 +74,7 @@ def test_multinomial_0(): ...@@ -74,7 +74,7 @@ def test_multinomial_0():
p = tensor.fmatrix() p = tensor.fmatrix()
u = tensor.fvector() u = tensor.fvector()
m = multinomial.MultinomialFromUniform('auto')(p, u, 1) m = multinomial.MultinomialFromUniform('auto')(p, u)
def body(mode, gpu): def body(mode, gpu):
# the m*2 allows the multinomial to reuse output # the m*2 allows the multinomial to reuse output
...@@ -113,7 +113,7 @@ def test_multinomial_large(): ...@@ -113,7 +113,7 @@ def test_multinomial_large():
def body(mode, gpu): def body(mode, gpu):
p = tensor.fmatrix() p = tensor.fmatrix()
u = tensor.fvector() u = tensor.fvector()
m = multinomial.MultinomialFromUniform('auto')(p, u, 1) m = multinomial.MultinomialFromUniform('auto')(p, u)
f = function([p, u], m*2, allow_input_downcast=True, mode=mode) f = function([p, u], m*2, allow_input_downcast=True, mode=mode)
if gpu: if gpu:
assert any([type(node.op) is multinomial.GpuMultinomialFromUniform assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
...@@ -144,17 +144,17 @@ def test_multinomial_large(): ...@@ -144,17 +144,17 @@ def test_multinomial_large():
def test_multinomial_dtypes(): def test_multinomial_dtypes():
p = tensor.dmatrix() p = tensor.dmatrix()
u = tensor.dvector() u = tensor.dvector()
m = multinomial.MultinomialFromUniform('auto')(p, u, 1) m = multinomial.MultinomialFromUniform('auto')(p, u)
assert m.dtype == 'float64', m.dtype assert m.dtype == 'float64', m.dtype
p = tensor.fmatrix() p = tensor.fmatrix()
u = tensor.fvector() u = tensor.fvector()
m = multinomial.MultinomialFromUniform('auto')(p, u, 1) m = multinomial.MultinomialFromUniform('auto')(p, u)
assert m.dtype == 'float32', m.dtype assert m.dtype == 'float32', m.dtype
p = tensor.fmatrix() p = tensor.fmatrix()
u = tensor.fvector() u = tensor.fvector()
m = multinomial.MultinomialFromUniform('float64')(p, u, 1) m = multinomial.MultinomialFromUniform('float64')(p, u)
assert m.dtype == 'float64', m.dtype assert m.dtype == 'float64', m.dtype
...@@ -168,7 +168,7 @@ def test_gpu_opt(): ...@@ -168,7 +168,7 @@ def test_gpu_opt():
# is moved to the gpu. # is moved to the gpu.
p = tensor.fmatrix() p = tensor.fmatrix()
u = tensor.fvector() u = tensor.fvector()
m = multinomial.MultinomialFromUniform('auto')(p, u, 1) m = multinomial.MultinomialFromUniform('auto')(p, u)
assert m.dtype == 'float32', m.dtype assert m.dtype == 'float32', m.dtype
m_gpu = cuda.gpu_from_host(m) m_gpu = cuda.gpu_from_host(m)
...@@ -182,7 +182,7 @@ def test_gpu_opt(): ...@@ -182,7 +182,7 @@ def test_gpu_opt():
# Test with a row, it was failing in the past. # Test with a row, it was failing in the past.
r = tensor.frow() r = tensor.frow()
m = multinomial.MultinomialFromUniform('auto')(r, u, n) m = multinomial.MultinomialFromUniform('auto')(r, u)
assert m.dtype == 'float32', m.dtype assert m.dtype == 'float32', m.dtype
m_gpu = cuda.gpu_from_host(m) m_gpu = cuda.gpu_from_host(m)
......
...@@ -847,7 +847,6 @@ def test_multinomial(): ...@@ -847,7 +847,6 @@ def test_multinomial():
def test_multinomial_n_samples(): def test_multinomial_n_samples():
steps = 100
mode_ = mode mode_ = mode
if mode == 'FAST_COMPILE': if mode == 'FAST_COMPILE':
mode_ = 'FAST_RUN' mode_ = 'FAST_RUN'
...@@ -863,21 +862,16 @@ def test_multinomial_n_samples(): ...@@ -863,21 +862,16 @@ def test_multinomial_n_samples():
pvals = numpy.apply_along_axis(lambda row: row / numpy.sum(row), 1, pvals) pvals = numpy.apply_along_axis(lambda row: row / numpy.sum(row), 1, pvals)
R = MRG_RandomStreams(234, use_cuda=False) R = MRG_RandomStreams(234, use_cuda=False)
for n_samples in [5, 10, 100, 1000]: for n_samples, steps in zip([5, 10, 100, 1000], [20, 10, 1, 1]):
# Note: we specify `nstreams` to avoid a warning.
m = R.multinomial(pvals=pvals, n=n_samples, dtype=config.floatX, nstreams=30 * 256) m = R.multinomial(pvals=pvals, n=n_samples, dtype=config.floatX, nstreams=30 * 256)
f = theano.function([], m, mode=mode_) f = theano.function([], m, mode=mode_)
basic_multinomialtest(f, steps, sample_size, pvals, n_samples, prefix='mrg ') basic_multinomialtest(f, steps, sample_size, pvals, n_samples, prefix='mrg ')
sys.stdout.flush() sys.stdout.flush()
if mode != 'FAST_COMPILE' and cuda_available: if mode != 'FAST_COMPILE' and cuda_available:
# print ''
# print 'ON GPU:'
R = MRG_RandomStreams(234, use_cuda=True) R = MRG_RandomStreams(234, use_cuda=True)
pvals = numpy.asarray(pvals, dtype='float32') pvals = numpy.asarray(pvals, dtype='float32')
# We give the number of streams to avoid a warning.
n = R.multinomial(pvals=pvals, n=n_samples, dtype='float32', nstreams=30 * 256) n = R.multinomial(pvals=pvals, n=n_samples, dtype='float32', nstreams=30 * 256)
# well, it's really that this test w GPU doesn't make sense otw
assert n.dtype == 'float32' assert n.dtype == 'float32'
f = theano.function( f = theano.function(
[], [],
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论