提交 ba6c605e authored 作者: Frederic Bastien's avatar Frederic Bastien

Fix upcast to float64 when mixing python int/float and numpy ndarray.

上级 beefa939
......@@ -2220,6 +2220,10 @@ def relu(x, alpha=0):
if alpha == 0:
return 0.5 * (x + abs(x))
else:
# We can't use the plain literals 0.5 and 1 for one-half and one:
# if alpha has a numpy dtype, those Python literals would be treated
# as float64 and cause an unwanted upcast to float64.
alpha = tensor.as_tensor_variable(alpha)
f1 = 0.5 * (1 + alpha)
f2 = 0.5 * (1 - alpha)
return f1 * x + f2 * abs(x)
......
......@@ -1598,6 +1598,15 @@ def test_relu():
y = relu(x, alpha).eval({x: X, alpha: A})
assert numpy.allclose(y, numpy.where(X > 0, X, A * X), rtol=3e-5)
# Test that an ndarray alpha does not cause an upcast.
x = matrix('x', dtype='float32')
rng = numpy.random.RandomState(seed)
X = rng.randn(20, 30).astype('float32')
alpha = numpy.asarray(.123, dtype='float32')
y = relu(x, alpha).eval({x: X})
assert numpy.allclose(y, numpy.where(X > 0, X, alpha * X))
assert y.dtype == 'float32'
def test_h_softmax():
"""
......
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论