added Argmax Op

Parent commit: 4dedf419
......@@ -67,6 +67,63 @@ def check_eq2_c(self, inputs, output, args_in, arg_out):
val = fn(*args_in)
self.failUnless( numpy.all(val == arg_out), (val, arg_out))
class T_argmax(unittest.TestCase):
    """Tests for the Argmax Op via the argmax() constructor.

    Covers scalar / 1-d / 2-d / 3-d inputs, the default (last) axis,
    explicit axes, negative axes, and out-of-range axis errors.
    """
    def setUp(self):
        # Fixed seed so the random test matrices are reproducible.
        numpy.random.seed(123784)
        Argmax.debug = 0
    def test0(self):
        # Scalar input: the max is the value itself, at index 0.
        n = tinit(5.0)
        v,i = eval_outputs(argmax(n))
        self.failUnless(v == 5.0)
        self.failUnless(i == 0)
    def test1(self):
        # 1-d vector: default axis is the last (only) one.
        n = tinit([1,2,3,2,-6])
        v,i = eval_outputs(argmax(n))
        self.failUnless(v == 3)
        self.failUnless(i == 2)
    def test2(self):
        # 2-d, default axis (last): one index per row.
        # Expected indices depend on the fixed RNG seed set in setUp.
        n = tinit(numpy.random.rand(2,3))
        v,i = eval_outputs(argmax(n))
        self.failUnless(numpy.all(i == [0,1]))
    def test2b(self):
        # 2-d, axis=0: one index per column.
        n = tinit(numpy.random.rand(2,3))
        v,i = eval_outputs(argmax(n,axis=0))
        self.failUnless(numpy.all(i == [0,1,1]))
    def test2_invalid(self):
        # An axis beyond the input's rank must raise ValueError.
        n = tinit(numpy.random.rand(2,3))
        try:
            eval_outputs(argmax(n,axis=3))
            self.fail()
        except ValueError, e:
            return
    def test2_invalid_neg(self):
        # A too-negative axis must raise ValueError as well.
        n = tinit(numpy.random.rand(2,3))
        try:
            eval_outputs(argmax(n,axis=-3))
            self.fail()
        except ValueError, e:
            return
    def test2_valid_neg(self):
        # Negative axes follow the numpy convention: -1 is the last axis.
        n = tinit(numpy.random.rand(2,3))
        v,i = eval_outputs(argmax(n,axis=-1))
        self.failUnless(v.shape == (2,))
        v,i = eval_outputs(argmax(n,axis=-2))
        self.failUnless(v.shape == (3,))
    def test3(self):
        # 3-d: reducing axis k drops dimension k from both outputs.
        n = tinit(numpy.random.rand(2,3,4))
        v,i = eval_outputs(argmax(n,axis=0))
        self.failUnless(v.shape == (3,4))
        self.failUnless(i.shape == (3,4))
        v,i = eval_outputs(argmax(n,axis=1))
        self.failUnless(v.shape == (2,4))
        self.failUnless(i.shape == (2,4))
        v,i = eval_outputs(argmax(n,axis=2))
        self.failUnless(v.shape == (2,3))
        self.failUnless(i.shape == (2,3))
class T_transpose(unittest.TestCase):
def test0(self):
......
......@@ -281,12 +281,44 @@ class Abs(_Elemwise):
return "%(z)s_i = abs(%(x)s_i);"
#Constructor not necessary because builtin abs() does this
class Argmax(Op):
    """Op computing both the maximum values and their indices along an axis.

    Inputs:  tensor, axis (defaults to the last axis of the tensor).
    Outputs: max values, max indices -- each with one dimension fewer
    than the input (the reduced axis is dropped).
    """
    nin=2 # tensor, axis
    nout=2 # max val, max idx
    E_axis = 'invalid axis'
    debug = 0
    def __init__(self, x, axis=None):
        x = _as_tensor(x)
        if axis is None:
            # Default to the last axis, matching max()'s documented behaviour.
            axis = len(x.broadcastable) -1
        axis = _as_tensor(axis)
        self.inputs = [x, axis]
        # Outputs lose one dimension (the reduced axis).
        # NOTE(review): all broadcastable flags are cleared regardless of the
        # input's flags -- presumably conservative; confirm against Tensor.
        broadcastable = [0] * (len(x.broadcastable) - 1)
        self.outputs = [Tensor(x.dtype, broadcastable),
                        Tensor(axis.dtype, broadcastable)]
    def perform(self):
        # Python implementation: walks the data twice (max then argmax).
        axis = self.inputs[1].data
        x = self.inputs[0].data
        self.outputs[0].data = numpy.max(x, axis)
        self.outputs[1].data = numpy.argmax(x,axis)
argmax = _constructor(Argmax)
def max(x, axis=None):
    """Return maximum elements obtained by iterating over the given axis.

    The default axis is the last one.  NOTE: this shadows the builtin
    max() within this module.
    """
    # Argmax yields (max values, max indices); only the values are kept.
    # Using Argmax.perform() this wastefully walks the data twice, but
    # once Argmax.c_impl() exists it should be fine.
    outputs = argmax(x, axis)
    return outputs[0]
class Exp(_Elemwise):
    """Elementwise exponential: z = exp(x)."""
    def impl(self, x): return numpy.exp(x)
    # d/dx exp(x) = exp(x), hence gradient gz * exp(x).
    def grad(self, x, gz): return gz * exp(x)
    def c_foreach(self, (x_i, ), (z_i, )): return "z_i = exp(x_i);"
exp = _constructor(Exp)
class Neg(_Elemwise):
def impl(self, x):
return -x
......@@ -302,6 +334,12 @@ class Log(_Elemwise):
def c_foreach(self, (x_i, ), (z_i, )): return "z_i = log(x_i);"
log = _constructor(Log)
class Log2(_Elemwise):
    """Elementwise base-2 logarithm: z = log2(x)."""
    def impl(self, x): return numpy.log2(x)
    # d/dx log2(x) = 1 / (x * ln 2)
    def grad(self, x, gz): return gz / (x * numpy.log(2.0))
    def c_foreach(self, (x_i, ), (z_i, )): return "%(z)s_i = log2(%(x)s_i);"
log2 = _constructor(Log2)
class Sgn(_Elemwise):
def impl(self, x):
return numpy.abs(x) / x
......@@ -311,6 +349,18 @@ class Sgn(_Elemwise):
return "%(z)s_i = %(x)s_i/abs(%(x)s_i);" # TODO: C use copysign
sgn = _constructor(Sgn)
class Sqr(_Elemwise):
    """Elementwise square: z = x * x."""
    def impl(self, x): return x * x
    # d/dx x^2 = 2x
    def grad(self, x, gz): return 2.0 * x * gz
    def c_foreach(self, (x_i, ), (z_i, )): return "%(z)s_i = %(x)s_i * %(x)s_i;"
sqr = _constructor(Sqr)
class Sqrt(_Elemwise):
    """Elementwise square root: z = sqrt(x)."""
    def impl(self, x): return numpy.sqrt(x)
    # d/dx sqrt(x) = 1 / (2 * sqrt(x))
    def grad(self, x, gz): return 0.5 * gz / sqrt(x)
    def c_foreach(self, (x_i, ), (z_i, )): return "%(z)s_i = sqrt(%(x)s_i);"
sqrt = _constructor(Sqrt)
class Sum(_Elemwise):
def impl(self, x):
return numpy.sum(x)
......@@ -334,6 +384,10 @@ class Fill(_Elemwise):
def c_foreach(self, (model_i, value), (z_i, )):
return "%(z)s_i = %(value)s0;"
fill = _constructor(Fill)
def ones_like(model):
    """Return a tensor of ones shaped like `model` (via the Fill op)."""
    return fill(model, 1.0)
def zeros_like(model):
    """Return a tensor of zeros shaped like `model` (via the Fill op)."""
    return fill(model, 0.0)
class TensorCopy(_Elemwise):
......
......@@ -64,87 +64,6 @@ class Dot(TensorOp):
class NegInplace(Neg.inplace_version()):
    """In-place variant of Neg: negates the input buffer itself."""
    def impl(self, x):
        # Destructive update: x is modified and returned.
        x *= -1
        return x
class InvElemwise(Elemwise):
    """Elementwise reciprocal: z = 1 / x."""
    def impl(self, x):
        # NOTE(review): under Python 2, 1 / x on an integer array
        # performs integer division -- presumably inputs are floats;
        # confirm with callers.
        return 1 / x
    # d/dx (1/x) = -1 / x^2
    def grad(self, x, gz):
        return -gz / (x * x)
    def c_foreach(self, (x_i, ), (z_i, )):
        return "z_i = 1 / x_i;"
class InvElemwiseInplace(InvElemwise.inplace_version()):
    """In-place variant of InvElemwise: writes 1/x back into x's buffer."""
    def impl(self, x):
        x[:] = 1 / x
        return x
class Log2(Elemwise):
    """Elementwise base-2 logarithm: z = log2(x)."""
    def impl(self, x): return numpy.log2(x)
    # d/dx log2(x) = 1 / (x * ln 2)
    def grad(self, x, gz): return gz / (x * numpy.log(2))
    def c_foreach(self, (x_i, ), (z_i, )): return "z_i = log2(x_i);"
class Twice(Elemwise):
def impl(self, x):
return 2.0 * x
def grad(self, x, gz):
return scale(gz, 2.0)
def c_foreach(self, (x_i, ), (z_i, )):
"z_i = x_i + x_i;"
class TwiceInplace(Twice.inplace_version()):
    """In-place variant of Twice: doubles the input buffer itself."""
    def impl(self, x):
        x *= 2.0
        return x
class Sqr(Elemwise):
    """Elementwise square: z = x * x."""
    def impl(self, x):
        return x * x
    # d/dx x^2 = 2x, hence 2 * x * gz.
    def grad(self, x, gz):
        return scale(mul_elemwise(x, gz), 2.0)
    def c_foreach(self, (x_i, ), (z_i, )):
        return "z_i = x_i * x_i;"
class SqrInplace(Sqr.inplace_version()):
    """In-place variant of Sqr: squares the input buffer itself."""
    def impl(self, x):
        # BUG FIX: `self` was missing from the signature, so calling
        # impl on an instance bound the instance to `x` and raised a
        # TypeError (or worse, squared the wrong object).
        x *= x
        return x
class Sqrt(Elemwise):
    """Elementwise square root: z = sqrt(x)."""
    def impl(self, x):
        return numpy.sqrt(x)
    # d/dx sqrt(x) = 1 / (2 * sqrt(x)) = 0.5 * gz / sqrt(x)
    def grad(self, x, gz):
        return scale(div(gz, sqrt(x)), 0.5)
    def c_foreach(self, (x_i, ), (z_i, )):
        return "z_i = pow(x_i, 0.5);"
class SqrtInplace(Sqrt.inplace_version()):
    """In-place variant of Sqrt: replaces x with sqrt(x) in x's buffer."""
    def impl(self, x):
        x **= 0.5
        return x
class OnesLike(Elemwise):
    """Elementwise constant-one op: z has x's shape/dtype, filled with 1."""
    def impl(self, x):
        return numpy.ones_like(x)
    # Output is constant w.r.t. the input, so there is no gradient.
    def grad(self, x, gz):
        return None
class ZerosLike(Elemwise):
    """Elementwise constant-zero op: z has x's shape/dtype, filled with 0."""
    def impl(self, x):
        return numpy.zeros_like(x)
    # Output is constant w.r.t. the input, so there is no gradient.
    def grad(self, x, gz):
        return None
class Min:
    # Placeholder: minimum op not implemented yet.
    pass
......
[Scrape residue: GitLab comment-box UI text, not part of the source.]
Markdown format
You added 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment.