提交 18e5ee92 authored 作者: abergeron's avatar abergeron

Merge pull request #4484 from nouiz/relu

Relu
......@@ -676,6 +676,20 @@ Creating Tensor
Returns a tensor filled with 1s that has same shape as `x`.
.. function:: zeros(shape, dtype=None)
:param shape: a tuple/list of scalars with the shape information.
:param dtype: the dtype of the new tensor. If None, will use floatX.
Returns a tensor filled with 0s of the provided shape.
.. function:: ones(shape, dtype=None)
:param shape: a tuple/list of scalars with the shape information.
:param dtype: the dtype of the new tensor. If None, will use floatX.
Returns a tensor filled with 1s of the provided shape.
.. function:: fill(a,b)
:param a: tensor that has same shape as output
......
......@@ -54,7 +54,7 @@ PLATFORMS = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"]
MAJOR = 0
MINOR = 9
MICRO = 0
SUFFIX = "dev0" # Should be blank except for rc's, betas, etc.
SUFFIX = "dev1" # Should be blank except for rc's, betas, etc.
ISRELEASED = False
VERSION = '%d.%d.%d%s' % (MAJOR, MINOR, MICRO, SUFFIX)
......
......@@ -2220,6 +2220,10 @@ def relu(x, alpha=0):
if alpha == 0:
return 0.5 * (x + abs(x))
else:
# We can't use the plain Python literals 0.5 and 1 for half and one:
# if alpha has a numpy dtype, those literals would be treated as
# float64 and cause an upcast to float64.
alpha = tensor.as_tensor_variable(alpha)
f1 = 0.5 * (1 + alpha)
f2 = 0.5 * (1 - alpha)
return f1 * x + f2 * abs(x)
......
......@@ -1598,6 +1598,15 @@ def test_relu():
y = relu(x, alpha).eval({x: X, alpha: A})
assert numpy.allclose(y, numpy.where(X > 0, X, A * X), rtol=3e-5)
# test that an ndarray alpha does not cause an upcast.
x = matrix('x', dtype='float32')
rng = numpy.random.RandomState(seed)
X = rng.randn(20, 30).astype('float32')
alpha = numpy.asarray(.123, dtype='float32')
y = relu(x, alpha).eval({x: X})
assert numpy.allclose(y, numpy.where(X > 0, X, alpha * X))
assert y.dtype == 'float32'
def test_h_softmax():
"""
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论