提交 d1819ebf authored 作者: Olivier Breuleux's avatar Olivier Breuleux

merge

......@@ -67,6 +67,63 @@ def check_eq2_c(self, inputs, output, args_in, arg_out):
val = fn(*args_in)
self.failUnless( numpy.all(val == arg_out), (val, arg_out))
class T_argmax(unittest.TestCase):
    """Unit tests for the Argmax Op / argmax() constructor."""
    def setUp(self):
        # Fixed seed so the random test matrices are reproducible.
        numpy.random.seed(123784)
        Argmax.debug = 0
    def test0(self):
        # 0-d (scalar) input: the max is the value itself, index 0.
        n = tinit(5.0)
        v,i = eval_outputs(argmax(n))
        self.failUnless(v == 5.0)
        self.failUnless(i == 0)
    def test1(self):
        # 1-d input: value and position of the largest element.
        n = tinit([1,2,3,2,-6])
        v,i = eval_outputs(argmax(n))
        self.failUnless(v == 3)
        self.failUnless(i == 2)
    def test2(self):
        # 2-d input, default axis (the last one): one index per row.
        # NOTE(review): the expected indices depend on the seeded RNG draw.
        n = tinit(numpy.random.rand(2,3))
        v,i = eval_outputs(argmax(n))
        self.failUnless(numpy.all(i == [0,1]))
    def test2b(self):
        # 2-d input, axis=0: one index per column.
        n = tinit(numpy.random.rand(2,3))
        v,i = eval_outputs(argmax(n,axis=0))
        self.failUnless(numpy.all(i == [0,1,1]))
    def test2_invalid(self):
        # axis beyond ndim-1 must raise ValueError at evaluation time.
        n = tinit(numpy.random.rand(2,3))
        try:
            eval_outputs(argmax(n,axis=3))
            self.fail()
        except ValueError, e:
            return
    def test2_invalid_neg(self):
        # negative axis below -ndim must also raise ValueError.
        n = tinit(numpy.random.rand(2,3))
        try:
            eval_outputs(argmax(n,axis=-3))
            self.fail()
        except ValueError, e:
            return
    def test2_valid_neg(self):
        # in-range negative axes behave like numpy's negative axes.
        n = tinit(numpy.random.rand(2,3))
        v,i = eval_outputs(argmax(n,axis=-1))
        self.failUnless(v.shape == (2,))
        v,i = eval_outputs(argmax(n,axis=-2))
        self.failUnless(v.shape == (3,))
    def test3(self):
        # 3-d input: reducing along each axis drops that dimension.
        n = tinit(numpy.random.rand(2,3,4))
        v,i = eval_outputs(argmax(n,axis=0))
        self.failUnless(v.shape == (3,4))
        self.failUnless(i.shape == (3,4))
        v,i = eval_outputs(argmax(n,axis=1))
        self.failUnless(v.shape == (2,4))
        self.failUnless(i.shape == (2,4))
        v,i = eval_outputs(argmax(n,axis=2))
        self.failUnless(v.shape == (2,3))
        self.failUnless(i.shape == (2,3))
class T_transpose(unittest.TestCase):
def test0(self):
......@@ -129,6 +186,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(t.owner.__class__ is Subtensor)
try:
tval = eval_outputs([t])
self.fail()
except Exception, e:
if e[0] != 'index out of bounds':
raise
......@@ -146,7 +204,6 @@ class T_subtensor(unittest.TestCase):
tval = eval_outputs([t])
self.failUnless(tval.shape == (2,))
self.failUnless(tval[1] == 5.0)
if 0:
def test1_err_invalid(self):
n = tinit(numpy.ones(1))
try:
......@@ -159,8 +216,8 @@ class T_subtensor(unittest.TestCase):
t = n[0]
self.failUnless(t.owner.__class__ is Subtensor)
tval = eval_outputs([t])
self.failUnless(tval.shape == (1,))
self.failUnless(tval[0] == 5.0)
self.failUnless(tval.shape == ())
self.failUnless(tval == 5.0)
def test1_ok_range_infinite(self):
n = tinit(numpy.ones(3)*5)
t = n[1:]
......@@ -173,35 +230,87 @@ class T_subtensor(unittest.TestCase):
t = n[1::2]
self.failUnless(t.owner.__class__ is Subtensor)
tval = eval_outputs([t])
self.failUnless(tval.shape == (3,))
self.failUnless(tval.shape == (2,))
self.failUnless(tval[1] == 5.0)
tval = eval_outputs([n[1:-1:2]])
self.failUnless(tval.shape == (3,))
tval = eval_outputs([n[0:-1:2]]) #0 to 1 from the end stepping by 2
self.failUnless(tval.shape == (2,))
self.failUnless(tval[1] == 5.0)
def test2(self):
    # Placeholder until the disabled 2-d subtensor tests below are
    # brought back.
    raise NotImplementedError() #remember to bring back the rest of tests
if 0:
    # Disabled 2-d / 3-d Subtensor tests, kept for when multi-dimensional
    # indexing is supported again (see test2 above).  Each one also starts
    # by raising NotImplementedError, so none of the code below runs.
    def test2_err_bounds0(self):
        raise NotImplementedError()
        # out-of-bounds scalar index should raise IndexError
        n = tinit(numpy.ones((2,3))*5)
        t = n[0,4]
        self.failUnless(t.owner.__class__ is Subtensor)
        try:
            tval = eval_outputs([t])
            self.fail()
        except IndexError, e:
            return
    def test2_err_bounds1(self):
        raise NotImplementedError()
        # out-of-bounds slice start on axis 0
        n = tinit(numpy.ones((2,3))*5)
        t = n[4:5,2]
        self.failUnless(t.owner.__class__ is Subtensor)
        try:
            tval = eval_outputs([t])
        except Exception, e:
            if e[0] != 'index out of bounds':
                raise
    def test2_ok_elem(self):
        raise NotImplementedError()
        # scalar element lookup: n[0,2] yields a 0-d result
        n = tinit(numpy.asarray(range(6)).reshape((2,3)))
        t = n[0,2]
        self.failUnless(t.owner.__class__ is Subtensor)
        tval = eval_outputs([t])
        self.failUnless(tval.shape == ())
        self.failUnless(numpy.all(tval == 2))
    def test2_ok_row(self):
        raise NotImplementedError()
        # single integer index keeps the remaining row
        n = tinit(numpy.asarray(range(6)).reshape((2,3)))
        t = n[1]
        self.failIf(any(n.broadcastable))
        self.failUnless(t.owner.__class__ is Subtensor)
        tval = eval_outputs([t])
        self.failUnless(tval.shape == (3,))
        self.failUnless(numpy.all(tval == [3,4,5]))
    def test2_ok_col(self):
        raise NotImplementedError()
        # full slice plus integer index selects a column
        n = tinit(numpy.ones((2,3))*5)
        t = n[:,0]
        self.failUnless(t.owner.__class__ is Subtensor)
        self.failIf(any(n.broadcastable))
        tval = eval_outputs([t])
        self.failUnless(tval.shape == (2,))
        self.failUnless(numpy.all(tval == 5.0))
    def test2_ok_rows_finite(self):
        raise NotImplementedError()
        # bounded slice on axis 0 combined with an integer on axis 1
        n = tinit(numpy.ones((4,3))*5)
        t = n[1:3,0]
        self.failUnless(t.owner.__class__ is Subtensor)
        tval = eval_outputs([t])
        self.failUnless(tval.shape == (2,))
        self.failUnless(numpy.all(tval == 5.0))
    def test2_ok_cols_infinite(self):
        raise NotImplementedError()
        # open-ended slice on axis 1: row 1 of range(12) is [3,4,5],
        # so n[1,2:] is the one-element array [5]
        n = tinit(numpy.asarray(range(12)).reshape((4,3)))
        t = n[1,2:]
        self.failUnless(t.owner.__class__ is Subtensor)
        tval = eval_outputs([t])
        self.failUnless(tval.shape == (1,))
        self.failUnless(numpy.all(tval == 5))
    def test2_ok_strided(self):
        raise NotImplementedError()
        # strided slices on both axes: rows 1,3 and cols 1,3
        n = tinit(numpy.asarray(range(20)).reshape((4,5)))
        t = n[1:4:2,1:5:2]
        self.failUnless(t.owner.__class__ is Subtensor)
        tval = eval_outputs([t])
        self.failUnless(tval.shape == (2,2))
        self.failUnless(numpy.all(tval == [[6, 8],[16, 18]]))
    def test3_ok_mat(self):
        raise NotImplementedError()
        # 3-d element lookup yields a 0-d result
        n = tinit(numpy.asarray(range(24)).reshape((2,3,4)))
        t = n[0,0,0]
        self.failUnless(t.owner.__class__ is Subtensor)
        tval = eval_outputs([t])
        self.failUnless(tval.shape == ())
        self.failUnless(numpy.all(tval == 0))
class T_add(unittest.TestCase):
......@@ -332,6 +441,9 @@ class T_div(unittest.TestCase):
verify_grad(self, DivElemwise, [numpy.random.rand(3), numpy.ones(3)])
verify_grad(self, DivElemwise, [numpy.random.rand(3,5), numpy.random.rand(3,5)+0.1])
class T_log2(unittest.TestCase):
    """Gradient check for the Log2 elementwise op."""
    def test0(self):
        # verify_grad compares the symbolic gradient to finite
        # differences; the +0.0001 offset keeps inputs strictly
        # positive (log2 domain).
        verify_grad(self, Log2, [numpy.random.rand(3,1)+0.0001])
class T_pow(unittest.TestCase):
def setUp(self):
......
"""A ResultBase to store numpy.ndarray with basic accompanying Ops"""
import sys # for sys.maxint
import inspect
import numpy
from copy import copy
import inspect
from gof import ResultBase, Op, utils, Destroyer, Viewer, AbstractFunctionError
import gof.result
......@@ -129,31 +130,6 @@ class _Op(BaseTensorOp):
def input_wrapper(cls, obj):
return _as_tensor(obj)
# def upcast(dtype, *dtypes):
# z = numpy.zeros((), dtype = dtype)
# for dtype in dtypes:
# z = z + numpy.zeros((), dtype = dtype)
# return str(z.dtype)
# for dtype in i_dtypes:
# if dtype is None:
# raise TypeError("Expected a Tensor.")
# upcasted = upcast(*i_dtypes)
# return [upcasted] * self.nout
# # try:
# # dmap = self.destroy_map()
# # except AttributeError:
# # dmap = {}
# # rval = []
# # for i in xrange(self.nout):
# # if i in dmap:
# # destroyed = dmap[output]
# # if len(destroyed) != 1:
# # raise TypeError("Cannot infer dtype of output %s because it destroys more than one input." % output)
# # rval.append(destroyed[0])
# # else:
# # rval.append(upcasted)
# # return rval
def impl(self, *inputs):
    # Abstract method: concrete Ops must override this with the python
    # implementation of their computation.
    raise AbstractFunctionError()
......@@ -280,12 +256,44 @@ class Abs(_Elemwise):
return "%(z)s_i = abs(%(x)s_i);"
#Constructor not necessary because builtin abs() does this
class Argmax(Op):
    """Op computing both the maximum and its index along one axis.

    inputs:  [tensor, axis]  (axis defaults to the tensor's last axis)
    outputs: [max values, argmax indices], each with one dimension
             fewer than the input.
    """
    nin=2 # tensor, axis
    nout=2 # max val, max idx
    E_axis = 'invalid axis'
    debug = 0
    def __init__(self, x, axis=None):
        x = _as_tensor(x)
        if axis is None:
            # default to reducing over the last axis
            axis = len(x.broadcastable) -1
        axis = _as_tensor(axis)
        self.inputs = [x, axis]
        # outputs have ndim-1 dimensions, none marked broadcastable.
        # NOTE(review): this drops the broadcastable pattern of the
        # surviving axes regardless of which axis is reduced -- confirm
        # that is intended.
        broadcastable = [0] * (len(x.broadcastable) - 1)
        self.outputs = [Tensor(x.dtype, broadcastable),
                        Tensor(axis.dtype, broadcastable)]
    def perform(self):
        # python fallback: two passes over the data (max, then argmax)
        axis = self.inputs[1].data
        x = self.inputs[0].data
        self.outputs[0].data = numpy.max(x, axis)
        self.outputs[1].data = numpy.argmax(x,axis)
argmax = _constructor(Argmax)
def max(x, axis=None):
    """Return the maximum elements over the given axis.

    When no axis is given, the last one is used.
    """
    # Going through Argmax.perform() computes both the max and its index,
    # scanning the data twice; once Argmax.c_impl() exists this becomes a
    # single pass, so the waste is temporary.
    max_val, max_idx = argmax(x, axis)
    return max_val
class Exp(_Elemwise):
    """Elementwise exponential: z = exp(x)."""
    def impl(self, x): return numpy.exp(x)
    # d/dx exp(x) = exp(x)
    def grad(self, x, gz): return gz * exp(x)
    # NOTE(review): sibling ops (Abs, Log2, Sqr, ...) use the "%(z)s_i"
    # template spelling here; confirm the bare "z_i" form is still
    # accepted by the code generator.
    def c_foreach(self, (x_i, ), (z_i, )): return "z_i = exp(x_i);"
exp = _constructor(Exp)
class Neg(_Elemwise):
def impl(self, x):
return -x
......@@ -301,6 +309,12 @@ class Log(_Elemwise):
def c_foreach(self, (x_i, ), (z_i, )): return "z_i = log(x_i);"
log = _constructor(Log)
class Log2(_Elemwise):
    """Elementwise base-2 logarithm: z = log2(x)."""
    def impl(self, x): return numpy.log2(x)
    # d/dx log2(x) = 1 / (x * ln 2)
    def grad(self, x, gz): return gz / (x * numpy.log(2.0))
    def c_foreach(self, (x_i, ), (z_i, )): return "%(z)s_i = log2(%(x)s_i);"
log2 = _constructor(Log2)
class Sgn(_Elemwise):
def impl(self, x):
return numpy.abs(x) / x
......@@ -310,6 +324,18 @@ class Sgn(_Elemwise):
return "%(z)s_i = %(x)s_i/abs(%(x)s_i);" # TODO: C use copysign
sgn = _constructor(Sgn)
class Sqr(_Elemwise):
    """Elementwise square: z = x * x."""
    def impl(self, x): return x * x
    # d/dx x^2 = 2x
    def grad(self, x, gz): return 2.0 * x * gz
    def c_foreach(self, (x_i, ), (z_i, )): return "%(z)s_i = %(x)s_i * %(x)s_i;"
sqr = _constructor(Sqr)
class Sqrt(_Elemwise):
    """Elementwise square root: z = sqrt(x)."""
    def impl(self, x): return numpy.sqrt(x)
    # d/dx sqrt(x) = 1 / (2 * sqrt(x))
    def grad(self, x, gz): return 0.5 * gz / sqrt(x)
    def c_foreach(self, (x_i, ), (z_i, )): return "%(z)s_i = sqrt(%(x)s_i);"
sqrt = _constructor(Sqrt)
class Sum(_Elemwise):
def impl(self, x):
return numpy.sum(x)
......@@ -333,6 +359,10 @@ class Fill(_Elemwise):
def c_foreach(self, (model_i, value), (z_i, )):
return "%(z)s_i = %(value)s0;"
fill = _constructor(Fill)
def ones_like(model):
    """Return a tensor of ones with the same shape as `model`."""
    return fill(model, 1.0)
def zeros_like(model):
    """Return a tensor of zeros with the same shape as `model`."""
    return fill(model, 0.0)
class TensorCopy(_Elemwise):
......@@ -374,6 +404,7 @@ class Subtensor(Op, Viewer):
nin = 2
nout = 1
e_invalid = 'invalid index'
debug = 0
def __init__(self, *args,**kwargs):
def as_tuple_result(obj):
if isinstance(obj, ResultBase):
......@@ -384,7 +415,13 @@ class Subtensor(Op, Viewer):
else:
r.data = (obj,)
return r
def pad(tplR, N):
l = list(tplR.data)
for i in range(len(l), N):
l.append(slice(0,sys.maxint,1))
tplR.data = tuple(l)
if Subtensor.debug:
print 'Subtensor.__init__', args, kwargs
#Olivier says not to call this
#Op.__init__(self, *args,**kwargs)
......@@ -392,9 +429,16 @@ class Subtensor(Op, Viewer):
t, coord = args
t = _as_tensor(t)
coord = as_tuple_result(coord)
if len(coord.data) != len(t.broadcastable):
if len(coord.data) > len(t.broadcastable):
raise ValueError(Subtensor.e_invalid)
# add the implicit extra unbounded slices
# e.g. n[0] on a 3d tensor pads to n[0,:,:]
pad(coord, len(t.broadcastable))
broadcastable = [0 for c in coord.data if isinstance(c, slice)]
if Subtensor.debug:
print 'brdcstble', broadcastable
print 't', t.data
print 'coord', coord.data
self.inputs = [t, coord]
self.outputs = [Tensor(t.dtype, broadcastable)]
def view_map(self):
......@@ -402,6 +446,9 @@ class Subtensor(Op, Viewer):
def perform(self):
x = self.inputs[0].data
c = self.inputs[1].data
if Subtensor.debug:
print 'perform: x', x
print 'perform: c', c
if len(c) == 1:
self.outputs[0].data = x.__getitem__(c[0])
else:
......@@ -739,3 +786,28 @@ if 0:
return t
# def upcast(dtype, *dtypes):
# z = numpy.zeros((), dtype = dtype)
# for dtype in dtypes:
# z = z + numpy.zeros((), dtype = dtype)
# return str(z.dtype)
# for dtype in i_dtypes:
# if dtype is None:
# raise TypeError("Expected a Tensor.")
# upcasted = upcast(*i_dtypes)
# return [upcasted] * self.nout
# # try:
# # dmap = self.destroy_map()
# # except AttributeError:
# # dmap = {}
# # rval = []
# # for i in xrange(self.nout):
# # if i in dmap:
# # destroyed = dmap[output]
# # if len(destroyed) != 1:
# # raise TypeError("Cannot infer dtype of output %s because it destroys more than one input." % output)
# # rval.append(destroyed[0])
# # else:
# # rval.append(upcasted)
# # return rval
......@@ -62,179 +62,3 @@ class Dot(TensorOp):
class NegInplace(Neg.inplace_version()):
    """In-place variant of Neg: destructively negates the input buffer."""
    def impl(self, x):
        x *= -1
        return x
class InvElemwise(Elemwise):
    """Elementwise reciprocal: z = 1 / x."""
    def impl(self, x):
        return 1 / x
    def grad(self, x, gz):
        # d/dx (1/x) = -1/x^2
        return -gz / (x * x)
    def c_foreach(self, (x_i, ), (z_i, )):
        return "z_i = 1 / x_i;"
class InvElemwiseInplace(InvElemwise.inplace_version()):
    """In-place reciprocal: overwrites the input buffer with 1/x."""
    def impl(self, x):
        x[:] = 1 / x
        return x
class Log2(Elemwise):
    """Elementwise base-2 logarithm: z = log2(x)."""
    def impl(self, x): return numpy.log2(x)
    # d/dx log2(x) = 1 / (x * ln 2)
    def grad(self, x, gz): return gz / (x * numpy.log(2))
    def c_foreach(self, (x_i, ), (z_i, )): return "z_i = log2(x_i);"
class Twice(Elemwise):
def impl(self, x):
return 2.0 * x
def grad(self, x, gz):
return scale(gz, 2.0)
def c_foreach(self, (x_i, ), (z_i, )):
"z_i = x_i + x_i;"
class TwiceInplace(Twice.inplace_version()):
    """In-place doubling: overwrites the input buffer with 2*x."""
    def impl(self, x):
        x *= 2.0
        return x
class Sqr(Elemwise):
    """Elementwise square: z = x * x."""
    def impl(self, x):
        return x * x
    def grad(self, x, gz):
        # d/dx x^2 = 2x
        return scale(mul_elemwise(x, gz), 2.0)
    def c_foreach(self, (x_i, ), (z_i, )):
        return "z_i = x_i * x_i;"
class SqrInplace(Sqr.inplace_version()):
    """In-place square: overwrites the input buffer with x*x."""
    # BUG FIX: impl was declared as `def impl(x):` without `self`, so the
    # instance would have been bound to `x` and any actual argument
    # rejected; every sibling inplace op declares impl(self, x).
    def impl(self, x):
        x *= x
        return x
class Sqrt(Elemwise):
    """Elementwise square root: z = sqrt(x)."""
    def impl(self, x):
        return numpy.sqrt(x)
    def grad(self, x, gz):
        # d/dx sqrt(x) = 1 / (2 * sqrt(x))
        return scale(div(gz, sqrt(x)), 0.5)
    def c_foreach(self, (x_i, ), (z_i, )):
        return "z_i = pow(x_i, 0.5);"
class SqrtInplace(Sqrt.inplace_version()):
    """In-place square root: overwrites the input buffer with x**0.5."""
    def impl(self, x):
        x **= 0.5
        return x
class OnesLike(Elemwise):
    """Tensor of ones with the shape and dtype of the input."""
    def impl(self, x):
        return numpy.ones_like(x)
    def grad(self, x, gz):
        # output is constant with respect to x: no gradient
        return None
class ZerosLike(Elemwise):
    """Tensor of zeros with the shape and dtype of the input."""
    def impl(self, x):
        return numpy.zeros_like(x)
    def grad(self, x, gz):
        # output is constant with respect to x: no gradient
        return None
# Placeholder classes for reduction ops that are not implemented yet
# (see the commented-out MinMax sketch below).
class Min:
    pass
class Max:
    pass
class Argmin:
    pass
class Argmax:
    pass
class MinMax:
    pass
# nout = 2
# def impl(x):
# return x.min, x.max
# def specs(x):
# return [(numpy.ndarray, x[1], ())] * 2
# # def alloc((x, ), (_min, _max)):
# # _min.data = numpy.ndarray((), x.dtype)
# # _max.data = numpy.ndarray((), x.dtype)
# def c_init((x, ), (_min, _max)):
# raise NotImplementedError
# return """
# _x_dtype min = _x[0];
# _x_dtype max = _x[0];
# """
# def c_foreach((x, ), (_min, _max)):
# return """
# if (x < min) min = x;
# if (x > max) max = x;
# """
# def c_finalize((x, ), (_min, _max)):
# return """
# _min[0] = min;
# _max[0] = max;
# """
# class Transpose(UnaryTensorOp):
# def propagate_broadcastable(self, x):
# x2 = copy(x)
# x2.reverse()
# return [x2]
# def impl(self, x):
# return x.T
# def c_impl(self, x, z):
# return """
# PyArrayObject* transposed = (PyArrayObject*)PyArray_Transpose(%(x)s, NULL);
# //if (PyArray_REFCOUNT(transposed) == 1) {
# // printf("lala\\n");
# //}
# //if (%(z)s) {
# // Py_XDECREF(%(z)s);
# //}
# %(z)s = transposed;
# Py_XINCREF(%(z)s);
# """
# # class Transpose(UnaryTensorOp):
# # def propagate_broadcastable(self, x):
# # x2 = copy(x)
# # x2.reverse()
# # return [x2]
# # def impl(self, x):
# # return x.T
# # def c_impl(self, x, z):
# # return """
# # PyArrayObject* transposed = (PyArrayObject*)PyArray_Transpose(%(x)s, NULL);
# # //if (PyArray_REFCOUNT(transposed) == 1) {
# # // printf("lala\\n");
# # //}
# # //if (%(z)s) {
# # // Py_XDECREF(%(z)s);
# # //}
# # %(z)s = transposed;
# # Py_XINCREF(%(z)s);
# # """
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论