Commit a8ca588c authored by Amjad Almahairi

testing more dtypes

Parent fcc0ae39
@@ -23,31 +23,33 @@ def test_multinomial_0():
     p = tensor.fmatrix()
     u = tensor.fvector()
-    m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(p, u)
+    for dtype in ['int64', 'float32', 'auto']:
+        m = theano.sandbox.multinomial.MultinomialFromUniform(dtype)(p, u)
 
-    # the m*2 allows the multinomial to reuse output
-    f = function([p, u], m * 2, allow_input_downcast=True, mode=mode_with_gpu)
-    assert any([type(node.op) is GPUAMultinomialFromUniform
-                for node in f.maker.fgraph.toposort()])
+        # the m*2 allows the multinomial to reuse output
+        f = function([p, u], m * 2, allow_input_downcast=True, mode=mode_with_gpu)
+        assert any([type(node.op) is GPUAMultinomialFromUniform
+                    for node in f.maker.fgraph.toposort()])
 
-    # test that both first and second samples can be drawn
-    utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]),
-                        [[2, 0], [0, 2]])
+        # test that both first and second samples can be drawn
+        utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]),
+                            [[2, 0], [0, 2]])
 
-    # test that both second labels can be drawn
-    r = f([[.2, .8], [.3, .7]], [.31, .31])
-    utt.assert_allclose(r, [[0, 2], [0, 2]])
+        # test that both second labels can be drawn
+        r = f([[.2, .8], [.3, .7]], [.31, .31])
+        utt.assert_allclose(r, [[0, 2], [0, 2]])
 
-    # test that both first labels can be drawn
-    r = f([[.2, .8], [.3, .7]], [.21, .21])
-    utt.assert_allclose(r, [[0, 2], [2, 0]])
+        # test that both first labels can be drawn
+        r = f([[.2, .8], [.3, .7]], [.21, .21])
+        utt.assert_allclose(r, [[0, 2], [2, 0]])
 
-    # change the size to make sure output gets reallocated ok
-    # and also make sure that the GPU version doesn't screw up the
-    # transposed-ness
-    r = f([[.2, .8]], [.25])
-    utt.assert_allclose(r, [[0, 2]])
+        # change the size to make sure output gets reallocated ok
+        # and also make sure that the GPU version doesn't screw up the
+        # transposed-ness
+        r = f([[.2, .8]], [.25])
+        utt.assert_allclose(r, [[0, 2]])
 
-# TODO: check a bigger example (make sure blocking on GPU is handled correctly)
+        # TODO: check a bigger example (make sure blocking on GPU is handled correctly)
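Why the expected counts in this hunk hold: MultinomialFromUniform draws one sample per row by scanning the running sum of the row's probabilities until it passes the uniform number. A rough NumPy sketch of that rule (the helper name is ours, and this is an assumed model of the op's semantics, not Theano's actual kernel):

import numpy

def multinomial_from_uniform(p, u):
    # For each row i, place a single count at the first index whose
    # cumulative probability exceeds u[i] (clamped for round-off).
    out = numpy.zeros_like(p)
    for i in range(p.shape[0]):
        j = numpy.searchsorted(numpy.cumsum(p[i]), u[i], side='right')
        out[i, min(j, p.shape[1] - 1)] = 1
    return out

# Reproduces the test's expectations (the compiled graph returns m * 2):
print(2 * multinomial_from_uniform(numpy.array([[.2, .8], [.3, .7]]),
                                   numpy.array([.31, .31])))
# [[0. 2.]
#  [0. 2.]]
print(2 * multinomial_from_uniform(numpy.array([[.2, .8], [.3, .7]]),
                                   numpy.array([.21, .21])))
# [[0. 2.]
#  [2. 0.]]

With u = .31 both rows' first cumulative steps (.2 and .3) are below u, so the second label wins in both rows; with u = .21 only the first row's is.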
@@ -81,8 +83,8 @@ def test_multinomial_large():
 def test_gpu_opt_dtypes():
-    # Does have some overlap with test_multinomial_0
-    for dtype in ['float32', 'int64', 'float64']:
+    # Test if the returned samples are of the datatype specified
+    for dtype in ['uint32', 'float32', 'int64', 'float64']:
         p = tensor.fmatrix()
         u = tensor.fvector()
         m = theano.sandbox.multinomial.MultinomialFromUniform(dtype)(p, u)
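A minimal CPU-side sketch of the property this hunk targets: the symbolic output advertises the requested dtype, and 'auto' falls back to the dtype of p. No GPU or compilation is needed, since dtype is graph metadata (assumed from the op's documented behaviour):

import theano.sandbox.multinomial
from theano import tensor

p = tensor.fmatrix()
u = tensor.fvector()
for dtype in ['uint32', 'float32', 'int64', 'float64']:
    m = theano.sandbox.multinomial.MultinomialFromUniform(dtype)(p, u)
    assert m.dtype == dtype, (m.dtype, dtype)

# 'auto' inherits p's dtype (float32 here) -- the same resolution the
# assert enabled in the next hunk checks on the GPU path.
m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(p, u)
assert m.dtype == 'float32', m.dtype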
@@ -104,8 +106,8 @@ def test_gpu_opt():
     # is moved to the gpu.
     p = tensor.fmatrix()
     u = tensor.fvector()
-    m = theano.sandbox.multinomial.MultinomialFromUniform('float32')(p, u)
-    # assert m.dtype == 'float32', m.dtype
+    m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(p, u)
+    assert m.dtype == 'float32', m.dtype
     f = function([p, u], m, allow_input_downcast=True, mode=mode_with_gpu)
     assert any([type(node.op) is GPUAMultinomialFromUniform
@@ -115,18 +117,18 @@ def test_gpu_opt():
     uval = numpy.ones_like(pval[:, 0]) * 0.5
     f(pval, uval)
 
-    # # Test with a row, it was failing in the past.
-    # r = tensor.frow()
-    # m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(r, u)
-    # assert m.dtype == 'float32', m.dtype
-    # f = function([r, u], m, allow_input_downcast=True, mode=mode_with_gpu)
-    # assert any([type(node.op) is GPUAMultinomialFromUniform
-    #            for node in f.maker.fgraph.toposort()])
-    # pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
-    # pval = pval / pval.sum(axis=1)[:, None]
-    # uval = numpy.ones_like(pval[:, 0]) * 0.5
-    # f(pval, uval)
+    # Test with a row, it was failing in the past.
+    r = tensor.frow()
+    m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(r, u)
+    assert m.dtype == 'float32', m.dtype
+    f = function([r, u], m, allow_input_downcast=True, mode=mode_with_gpu)
+    assert any([type(node.op) is GPUAMultinomialFromUniform
+                for node in f.maker.fgraph.toposort()])
+    pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
+    pval = pval / pval.sum(axis=1)[:, None]
+    uval = numpy.ones_like(pval[:, 0]) * 0.5
+    f(pval, uval)
 
 
 class test_OP_wor(unittest.TestCase):
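For context on the re-enabled row case: frow() is a float32 matrix broadcastable along axis 0, i.e. constrained to shape (1, n), which is why pval is built with a leading dimension of 1. A quick check of that convention (assuming stock Theano):

from theano import tensor

r = tensor.frow()
assert r.dtype == 'float32'
assert r.broadcastable == (True, False)  # exactly one row, any width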