提交 f12e987c，作者：James Bergstra

fixed sandbox/multinomial tests to not require a perform method

上级 a01aca11
import numpy import numpy
import theano
from theano import tensor, shared, function from theano import tensor, shared, function
import multinomial import multinomial
from theano.compile.mode import get_default_mode, predefined_linkers
def run_with_c(f):
    """Invoke *f(mode)* with the default compilation mode, forcing C code.

    If the default mode currently uses the pure-python linker, it is
    temporarily switched to 'c|py' so the C implementation of the ops
    gets exercised.  The mode object returned by ``get_default_mode()``
    is shared global state, so the previous linker is restored in a
    ``finally`` block no matter how *f* exits.
    """
    current_mode = get_default_mode()
    saved_linker = current_mode.linker
    if saved_linker == predefined_linkers['py']:
        current_mode.linker = predefined_linkers['c|py']
    try:
        f(current_mode)
    finally:
        # Always undo the linker swap, even if f raised.
        current_mode.linker = saved_linker
def test_multimomial_0(): def test_multimomial_0():
# This tests the multinomial Op directly, not going through the # This tests the multinomial Op directly, not going through the
...@@ -12,48 +25,53 @@ def test_multimomial_0(): ...@@ -12,48 +25,53 @@ def test_multimomial_0():
m = multinomial.Multinomial('auto')(p,u) m = multinomial.Multinomial('auto')(p,u)
#the m*2 allows the multinomial to reuse output def body(mode):
f = function([p,u], m*2, allow_input_downcast=True) #the m*2 allows the multinomial to reuse output
f = function([p,u], m*2, allow_input_downcast=True, mode=mode)
# test that both first and second samples can be drawn
assert numpy.allclose(f([[1,0], [0,1]], [.1, .1]),
[[2,0], [0,2]])
# test that both first and second samples can be drawn # test that both second labels can be drawn
assert numpy.allclose(f([[1,0], [0,1]], [.1, .1]), r = f([[.2,.8], [.3,.7]], [.31, .31])
[[2,0], [0,2]]) assert numpy.allclose(r, [[0,2], [0,2]]), r
# test that both second labels can be drawn
r = f([[.2,.8], [.3,.7]], [.31, .31])
assert numpy.allclose(r, [[0,2], [0,2]]), r
# test that both first labels can be drawn
r = f([[.2,.8], [.3,.7]], [.21, .21])
assert numpy.allclose(r, [[0,2], [2,0]]), r
# test that both first labels can be drawn #change the size to make sure output gets reallocated ok
r = f([[.2,.8], [.3,.7]], [.21, .21]) # and also make sure that the GPU version doesn't screw up the
assert numpy.allclose(r, [[0,2], [2,0]]), r # transposed-ness
r = f([[.2,.8] ], [.25])
assert numpy.allclose(r, [[0,2]]), r
#change the size to make sure output gets reallocated ok run_with_c(body)
# and also make sure that the GPU version doesn't screw up the
# transposed-ness
r = f([[.2,.8] ], [.25])
assert numpy.allclose(r, [[0,2]]), r
#TODO: check a bigger example (make sure blocking on GPU is handled correctly) #TODO: check a bigger example (make sure blocking on GPU is handled correctly)
def test_multinomial_large():
    """Exercise the Multinomial op on a large (10000 x 4) input.

    The actual work lives in *body* so that ``run_with_c`` can run it
    under a mode whose linker compiles C code.
    """
    # DEBUG_MODE will test this on GPU
    def body(mode):
        p = tensor.fmatrix()
        u = tensor.fvector()
        m = multinomial.Multinomial('auto')(p, u)
        # the m*2 allows the multinomial op to reuse its output storage
        f = function([p, u], m * 2, allow_input_downcast=True, mode=mode)

        # Rows of pval are increasing, normalized probability vectors.
        pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
        pval = pval / pval.sum(axis=1)[:, None]
        uval = numpy.ones_like(pval[:, 0]) * 0.5
        mval = f(pval, uval)

        assert mval.shape == pval.shape
        assert mval.dtype == pval.dtype
        assert numpy.allclose(mval.sum(axis=1), 2)
        # With u=0.5, every row should select its third column (index 2);
        # broadcast the expected one-hot row over all 10000 rows.
        expected = numpy.asarray([0, 0, 2, 0]) + 0 * pval
        assert numpy.allclose(mval, expected)

    run_with_c(body)
def test_multinomial_dtypes(): def test_multinomial_dtypes():
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论