提交 16ca0db2，作者：Olivier Breuleux

fanciness

上级 e738df89
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
#from theano import tensor, scalar #from theano import tensor, scalar
from .. import gof from .. import gof
from .. import scalar from .. import scalar
from .. import printing
from ..printing import pprint
import basic as tensor import basic as tensor
import elemwise import elemwise
import numpy import numpy
...@@ -38,6 +40,9 @@ class ScalarSigmoid(scalar.UnaryScalarOp): ...@@ -38,6 +40,9 @@ class ScalarSigmoid(scalar.UnaryScalarOp):
# Scalar sigmoid op (implementation in ScalarSigmoid above, outside this hunk);
# upgrade_to_float promotes integer inputs to a float output type.
scalar_sigmoid = ScalarSigmoid(scalar.upgrade_to_float, name='scalar_sigmoid')

# Broadcast the scalar op elementwise over tensors.
sigmoid = elemwise.Elemwise(scalar_sigmoid, name='sigmoid')

# Render applications of this op as "sigmoid(...)" in pretty-printed graphs.
pprint.assign(sigmoid, printing.FunctionPrinter('sigmoid'))
class ScalarSoftplus(scalar.UnaryScalarOp): class ScalarSoftplus(scalar.UnaryScalarOp):
@staticmethod @staticmethod
def static_impl(x): def static_impl(x):
...@@ -62,6 +67,8 @@ class ScalarSoftplus(scalar.UnaryScalarOp): ...@@ -62,6 +67,8 @@ class ScalarSoftplus(scalar.UnaryScalarOp):
# Scalar softplus op (implementation in ScalarSoftplus above, outside this hunk);
# upgrade_to_float promotes integer inputs to a float output type.
scalar_softplus = ScalarSoftplus(scalar.upgrade_to_float, name='scalar_softplus')

# Broadcast the scalar op elementwise over tensors.
softplus = elemwise.Elemwise(scalar_softplus, name='softplus')

# Render applications of this op as "softplus(...)" in pretty-printed graphs.
pprint.assign(softplus, printing.FunctionPrinter('softplus'))
############ ############
# #
...@@ -624,7 +631,7 @@ def binary_crossentropy(output, target): ...@@ -624,7 +631,7 @@ def binary_crossentropy(output, target):
@todo: This is essentially duplicated as cost.cross_entropy @todo: This is essentially duplicated as cost.cross_entropy
@warning: OUTPUT and TARGET are reversed in cost.cross_entropy @warning: OUTPUT and TARGET are reversed in cost.cross_entropy
""" """
return -(target * tensor.log(output) + (1 - target) * tensor.log(1 - output)) return -(target * tensor.log(output) + (1.0 - target) * tensor.log(1.0 - output))
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论