Commit 9d544fb4 authored by Frédéric Bastien

Merge pull request #4324 from yobibyte/fix

Moved theano.sandbox.softsign to tensor/nnet/nnet.py. Added test. #4314
......@@ -228,4 +228,4 @@
<Content Include="theano\sandbox\cuda\cuda_ndarray.cuh" />
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.Common.targets" />
</Project>
\ No newline at end of file
</Project>
......@@ -19,7 +19,7 @@ import theano
from theano.compile import shared, pfunc
from theano import tensor
from theano.tensor.nnet import softplus
from theano.sandbox.softsign import softsign
from theano.tensor.nnet.nnet import softsign
_logger = logging.getLogger('theano.sandbox.cuda.tests.test_bench_loopfusion')
......
from __future__ import absolute_import, print_function, division

import sys
import warnings

import theano
import theano.tensor
from theano.tensor.nnet.nnet import softsign  # noqa
class ScalarSoftsign(theano.scalar.UnaryScalarOp):
    """Scalar softsign op: ``f(x) = x / (1 + |x|)``.

    This copy lives in the deprecated ``theano.sandbox.softsign`` module;
    the maintained implementation is in ``theano.tensor.nnet.nnet``.
    """
    @staticmethod
    def static_impl(x):
        # Pure-Python reference implementation.
        return x / (1.0 + abs(x))

    def impl(self, x):
        # Python fallback used when the C implementation is unavailable.
        return ScalarSoftsign.static_impl(x)

    def grad(self, inp, grads):
        # d/dx [x / (1 + |x|)] = 1 / (1 + |x|)^2; defined for floats only.
        x, = inp
        gz, = grads
        if 'float' not in x.type.dtype:
            return NotImplemented
        denom = 1.0 + abs(x)
        return [gz / (denom * denom)]

    def c_code(self, node, name, inp, out, sub):
        # C implementation supports only float32/float64 inputs.
        x, = inp
        z, = out
        if node.inputs[0].type not in [theano.scalar.float32,
                                       theano.scalar.float64]:
            raise NotImplementedError('only floating point x is implemented')
        return "%(z)s = %(x)s / (1.0+fabs(%(x)s));" % locals()
# Instantiate the scalar op (integer inputs are upgraded to float) and wrap
# it as an element-wise tensor op under the public name `softsign`.
scalar_softsign = ScalarSoftsign(theano.scalar.upgrade_to_float,
                                 name='scalar_softsign')
softsign = theano.tensor.Elemwise(scalar_softsign, name='softsign')

# Use the standard `warnings` machinery rather than a bare print to stderr,
# so downstream code can filter, silence, or escalate this notice.
warnings.warn(
    "softsign was moved from theano.sandbox.softsign to "
    "theano.tensor.nnet.nnet",
    DeprecationWarning,
    stacklevel=2,
)
......@@ -20,7 +20,7 @@ from six.moves import xrange
import theano
from theano import gof
from theano import scalar
from theano.tensor import basic as tensor, subtensor, opt
from theano.tensor import basic as tensor, subtensor, opt, elemwise
from theano.tensor.type import (values_eq_approx_remove_inf,
values_eq_approx_remove_nan)
from theano.tensor.opt import copy_stack_trace
......@@ -2366,3 +2366,38 @@ def elu(x, alpha=1):
Exponential Linear Units (ELUs)" <http://arxiv.org/abs/1511.07289>`.
"""
return tensor.switch(x > 0, x, alpha * (tensor.exp(x) - 1))
class ScalarSoftsign(theano.scalar.UnaryScalarOp):
    """
    Softsign activation function
    :math:`\\varphi(\\mathbf{x}) = \\frac{x}{1+|x|}`
    """
    @staticmethod
    def static_impl(x):
        # Pure-Python reference implementation.
        return x / (1.0 + abs(x))
    def impl(self, x):
        # Python fallback used when the C implementation is unavailable.
        return ScalarSoftsign.static_impl(x)
    def grad(self, inp, grads):
        # d/dx [x / (1 + |x|)] = 1 / (1 + |x|)^2; defined for floats only.
        x, = inp
        gz, = grads
        if 'float' in x.type.dtype:
            d = (1.0 + abs(x))
            return [gz / (d * d)]
        else:
            # Gradient is undefined for non-float inputs.
            return NotImplemented
    def c_code(self, node, name, inp, out, sub):
        # C implementation supports only float32/float64 inputs.
        x, = inp
        z, = out
        if node.inputs[0].type in [theano.scalar.float32,
                                   theano.scalar.float64]:
            return "%(z)s = %(x)s / (1.0+fabs(%(x)s));" % locals()
        raise NotImplementedError('only floating point x is implemented')
# Scalar op instance (integer inputs are upgraded to float), wrapped by
# Elemwise so it applies element-wise over tensors as `softsign`.
scalar_softsign = ScalarSoftsign(theano.scalar.upgrade_to_float,
                                 name='scalar_softsign')
softsign = elemwise.Elemwise(scalar_softsign, name='softsign')
......@@ -33,6 +33,10 @@ from theano.tensor.nnet import (categorical_crossentropy,
elu,
binary_crossentropy)
from theano.tensor import matrix, vector, lvector, scalar
from theano.tensor.nnet.nnet import softsign
from theano.tensor.tests.test_basic import (makeBroadcastTester, check_floatX,
_good_broadcast_unary_normal_float_no_complex,
upcast_int8_nfunc)
class T_sigmoid(unittest.TestCase):
......@@ -1699,3 +1703,11 @@ def test_binary_crossentropy_reshape():
fga = theano.function([a], ga, mode=mode)
utt.assert_allclose(fga(numpy.array([[[[30.]]]], dtype=config.floatX)),
numpy.zeros((1, 1, 1, 1), dtype=config.floatX))
def _softsign_reference(inputs):
    # NumPy reference implementation of softsign: x / (1 + |x|).
    return check_floatX(inputs, inputs / (1.0 + numpy.fabs(inputs)))


# Broadcast tester comparing the `softsign` op against the NumPy reference
# on the standard non-complex float unary test inputs.
SoftsignTester = makeBroadcastTester(
    op=softsign,
    expected=upcast_int8_nfunc(_softsign_reference),
    good=_good_broadcast_unary_normal_float_no_complex,
    name='SoftsignTester',
)
Markdown is supported
0%
You are about to add 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment