Commit a06073d6 authored by James Bergstra

init xlogx

Parent 2f69dde7
from xlogx import xlogx
import unittest
from theano import compile
from theano import gradient
from theano.tensor import as_tensor
import theano._test_tensor as TT
import random
import numpy.random
class T_XlogX(unittest.TestCase):
    """Unit tests for the elementwise ``xlogx`` Op."""

    def test0(self):
        # x*log(x) with the 0*log(0) == 0 special case: [1, 0] -> [0., 0.]
        x = as_tensor([1, 0])
        y = xlogx(x)
        y = compile.eval_outputs([y])
        # assertTrue replaces the deprecated failUnless alias
        # (failUnless was removed in Python 3.12).
        self.assertTrue(numpy.all(y == numpy.asarray([0, 0.])))

    def test1(self):
        # Finite-difference check of the symbolic gradient; a column slice
        # makes the graph cover non-trivial indexing as well.
        class Dummy(object):
            def make_node(self, a):
                return [xlogx(a)[:, 2]]
        # rand(3, 4) yields values in (0, 1), keeping log(x) well-defined.
        TT.verify_grad(self, Dummy(), [numpy.random.rand(3, 4)])
if __name__ == '__main__':
    # Discover and run the TestCase classes defined in this module.
    unittest.main()
import theano
from theano import tensor, scalar
import numpy
class XlogX(scalar.UnaryScalarOp):
    """
    Compute x * log(x), with the special case 0 * log(0) = 0.
    """

    @staticmethod
    def st_impl(x):
        # lim_{x->0+} x*log(x) == 0, so define the value at 0 explicitly
        # instead of returning nan from 0 * log(0).
        if x == 0.0:
            return 0.0
        return x * numpy.log(x)

    def impl(self, x):
        # Python-side evaluation delegates to the static implementation.
        return XlogX.st_impl(x)

    def grad(self, inputs, grads):
        # Tuple parameters in the signature (`def grad(self, (x,), (gz,))`)
        # are Python-2-only syntax; unpack inside the body instead.
        # d/dx [x*log(x)] = 1 + log(x); undefined at x == 0.
        (x,) = inputs
        (gz,) = grads
        return [gz * (1 + scalar.log(x))]

    def c_code(self, node, name, inputs, outputs, sub):
        # Same Python 3 fix as in grad: unpack the io lists in the body.
        (x,) = inputs
        (z,) = outputs
        # C implementation mirrors st_impl; only float32/float64 supported.
        if node.inputs[0].type in [scalar.float32, scalar.float64]:
            return """%(z)s =
%(x)s == 0.0
? 0.0
: %(x)s * log(%(x)s);""" % locals()
        raise NotImplementedError('only floatingpoint is implemented')
# Scalar Op instance; upgrade_to_float promotes integer inputs so the
# output is always a floating-point type.
scalar_xlogx = XlogX(scalar.upgrade_to_float, name='scalar_xlogx')
# Elementwise wrapper: applies scalar_xlogx to every element of a tensor.
xlogx = tensor.Elemwise(scalar_xlogx, name='xlogx')
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment