Commit de021dcf authored by Arnaud Bergeron

Add tests in float16 for softmax and softmax_with_bias.

Parent f83f03af
...@@ -162,6 +162,14 @@ def test_GpuCrossentropySoftmax1HotWithBiasDx(): ...@@ -162,6 +162,14 @@ def test_GpuCrossentropySoftmax1HotWithBiasDx():
rtol, atol) rtol, atol)
def test_softmax_with_bias_float16():
    """Run the softmax_with_bias template for every mixed-precision
    (input, bias) dtype pairing that involves float16.
    """
    # Cover float16 on the input side, the bias side, and both.
    for dtype_in, dtype_bias in [('float16', 'float32'),
                                 ('float16', 'float16'),
                                 ('float32', 'float16')]:
        softmax_with_bias_unittest_template(dtypeInput=dtype_in,
                                            dtypeBias=dtype_bias)
def test_softmax_with_bias_float32():
    """Run the softmax_with_bias template with float32 input and bias."""
    # NOTE: the diff-view extraction duplicated these lines side by side;
    # this is the single intended definition.
    softmax_with_bias_unittest_template(dtypeInput='float32',
                                        dtypeBias='float32')
...@@ -185,22 +193,12 @@ def softmax_with_bias_unittest_template(dtypeInput, dtypeBias): ...@@ -185,22 +193,12 @@ def softmax_with_bias_unittest_template(dtypeInput, dtypeBias):
TODO: check that we loop when their is too much thread.(THIS IS TODO: check that we loop when their is too much thread.(THIS IS
NOT IMPLEMENTED) NOT IMPLEMENTED)
""" """
assert dtypeInput in ['float32', 'float64'] x = T.matrix('x', dtype=dtypeInput)
assert dtypeBias in ['float32', 'float64']
if dtypeInput == 'float32':
x = T.fmatrix('x')
elif dtypeInput == 'float64':
x = T.dmatrix('x')
# We can't use zeros_like(x[0,::]) as this don't allow to test with # We can't use zeros_like(x[0,::]) as this don't allow to test with
# 0 shape # 0 shape
if dtypeBias == 'float32':
z = T.nnet.softmax_with_bias(x, T.arange(x.shape[1] * 2,
dtype='float32')[::2])
elif dtypeBias == 'float64':
z = T.nnet.softmax_with_bias(x, T.arange(x.shape[1] * 2, z = T.nnet.softmax_with_bias(x, T.arange(x.shape[1] * 2,
dtype='float64')[::2]) dtype=dtypeBias)[::2])
f = theano.function([x], z, mode=mode_without_gpu) f = theano.function([x], z, mode=mode_without_gpu)
f_gpu = theano.function([x], z, mode=mode_with_gpu) f_gpu = theano.function([x], z, mode=mode_with_gpu)
...@@ -209,11 +207,7 @@ def softmax_with_bias_unittest_template(dtypeInput, dtypeBias): ...@@ -209,11 +207,7 @@ def softmax_with_bias_unittest_template(dtypeInput, dtypeBias):
GpuSoftmaxWithBias) GpuSoftmaxWithBias)
def cmp(n, m): def cmp(n, m):
# print "test_softmax",n,m data = numpy.arange(n * m, dtype=dtypeInput).reshape(n, m)
if dtypeInput == 'float32':
data = numpy.arange(n * m, dtype='float32').reshape(n, m)
elif dtypeInput == 'float64':
data = numpy.arange(n * m, dtype='float64').reshape(n, m)
out = f(data) out = f(data)
gout = f_gpu(data) gout = f_gpu(data)
...@@ -237,41 +231,34 @@ def softmax_with_bias_unittest_template(dtypeInput, dtypeBias): ...@@ -237,41 +231,34 @@ def softmax_with_bias_unittest_template(dtypeInput, dtypeBias):
cmp(128, 64 * 1024) cmp(128, 64 * 1024)
def test_softmax_float16():
    """Run the softmax template with a float16 input matrix."""
    softmax_unittest_template(dtypeInput='float16')
def test_softmax_float32():
    """Run the softmax template with a float32 input matrix."""
    # NOTE: reconstructed from diff-view text that duplicated each line.
    softmax_unittest_template('float32')
def test_softmax_float64():
    """Run the softmax template with a float64 input matrix."""
    # NOTE: reconstructed from diff-view text that duplicated each line.
    softmax_unittest_template('float64')
def softmax_unittest_template(dtypeInput): def softmax_unittest_template(dtypeInput):
""" """
This is basic test for GpuSoftmax with float64 variables This is basic test for GpuSoftmax.
We check that we loop when their is too much block We check that we loop when their is too much block
We use slower code when there isn't enough shared memory We use slower code when there isn't enough shared memory
""" """
assert dtypeInput in ['float32', 'float64'] x = T.matrix('x', dtype=dtypeInput)
if dtypeInput == 'float32':
x = T.fmatrix('x')
elif dtypeInput == 'float64':
x = T.dmatrix('x')
z = T.nnet.softmax(x) z = T.nnet.softmax(x)
mode = mode_with_gpu.excluding('cudnn')
f = theano.function([x], z, mode=mode_without_gpu) f = theano.function([x], z, mode=mode_without_gpu)
f_gpu = theano.function([x], z, mode=mode) f_gpu = theano.function([x], z, mode=mode_wo_cudnn)
assert f.maker.fgraph.toposort()[-1].op == T.nnet.softmax_op assert f.maker.fgraph.toposort()[-1].op == T.nnet.softmax_op
assert isinstance(f_gpu.maker.fgraph.toposort()[-2].op, assert isinstance(f_gpu.maker.fgraph.toposort()[-2].op,
GpuSoftmax) GpuSoftmax)
def cmp(n, m): def cmp(n, m):
if dtypeInput == 'float32': data = numpy.arange(n * m, dtype=dtypeInput).reshape(n, m)
data = numpy.arange(n * m, dtype='float32').reshape(n, m)
elif dtypeInput == 'float64':
data = numpy.arange(n * m, dtype='float64').reshape(n, m)
out = f(data) out = f(data)
gout = f_gpu(data) gout = f_gpu(data)
......
Markdown is supported
0%
You are about to add 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment