提交 fb352f6b authored 作者: bergstrj@iro.umontreal.ca's avatar bergstrj@iro.umontreal.ca

merged and fixed support-code-order bug

......@@ -2,6 +2,8 @@ from base_tensor import *
import unittest
from copy import copy
from compile import Function
import gof
def _tensor(data, broadcastable=None, role=None, name=None):
"""Return a BaseTensor containing given data"""
......@@ -50,6 +52,8 @@ class T_tensor(unittest.TestCase):
self.failUnless(t.dtype == 'complex64')
self.failUnless(t.broadcastable == (0,0))
self.failUnless(isinstance(t.data, numpy.ndarray))
f = Function([t], [t], linker_cls=gof.CLinker)
self.failUnless(numpy.all(t.data == f(t.data)))
def test_data_normal(self): #test that assigning to .data works when it should
t = _tensor(numpy.ones((5,1),dtype='complex64'), broadcastable=0)
o27 = numpy.ones((2,7))
......
......@@ -217,172 +217,6 @@ class gemm(omega_op):
'(_b->descr->type_num == PyArray_FLOAT) ? (REAL)(((float*)_b->data)[0]) : (REAL)(((double*)_b->data)[0])')
class _testCase_transpose(unittest.TestCase):
    """Tests for transpose/transpose_copy.

    transpose must return a *view* aliasing the input's memory;
    transpose_copy must return an independent copy.
    """
    def setUp(self):
        # push an immediate-evaluation mode so ops compute eagerly
        build_eval_mode()
    def tearDown(self):
        pop_mode()
    def test_1d_alias(self):
        # 1-d transpose is shape-identical, but must still alias the input
        a = numpy.ones(10)
        ta = transpose(a)
        self.failUnless(ta.data.shape == a.shape)
        self.failUnless(numpy.all(ta.data == a))
        a[3] *= -1.0
        # mutation of `a` is visible through `ta` => it is a view
        self.failUnless(numpy.all(ta.data == a))
    def test_1d_copy(self):
        a = numpy.ones(10)
        ta = transpose_copy(a)
        self.failUnless(ta.data.shape == a.shape)
        self.failUnless(numpy.all(ta.data == a))
        a[3] *= -1.0
        # mutation of `a` must NOT be visible through `ta` => it is a copy
        self.failIf(numpy.all(ta.data == a))
    def test_2d_alias(self):
        # 2-d transpose reverses the shape
        a = numpy.ones((10,3))
        ta = transpose(a)
        self.failUnless(ta.data.shape == (3,10))
    def test_3d_alias(self):
        a = numpy.ones((10,3,5))
        ta = transpose(a)
        self.failUnless(ta.data.shape == (5,3,10))
        a[9,0,0] = 5.0
        # element written through `a` shows up at the mirrored coordinate
        self.failUnless(ta.data[0,0,9] == 5.0)
    def test_3d_copy(self):
        a = numpy.ones((10,3,5))
        ta = transpose_copy(a)
        self.failUnless(ta.data.shape == (5,3,10))
        a[9,0,0] = 5.0
        # copy keeps the value from before the write
        self.failUnless(ta.data[0,0,9] == 1.0)
class _testCase_power(unittest.TestCase):
    """Tests for elementwise exp/log/pow against their numpy counterparts."""
    def setUp(self):
        build_eval_mode()
        # fixed seed so the random inputs are reproducible across runs
        numpy.random.seed(44)
    def tearDown(self):
        pop_mode()
    def test1(self):
        # exp of a wrapped array exposes a plain ndarray via __array__
        r = numpy.random.rand(50)
        exp_r = exp(r)
        self.failUnless(exp_r.__array__().__class__ is numpy.ndarray)
    def test_0(self):
        # exp matches numpy.exp elementwise (approximate float compare)
        r = numpy.random.rand(50)
        exp_r = exp(r)
        n_exp_r = numpy.exp(r)
        self.failUnless( _approx_eq(exp_r, n_exp_r),
                (exp_r, exp_r.data, n_exp_r,
                 numpy.max(numpy.abs(n_exp_r.__sub__(exp_r.__array__())))))
        # log is the inverse of exp (up to float tolerance)
        log_exp_r = log(exp_r)
        self.failUnless( _approx_eq(log_exp_r, r), log_exp_r)
    def test_1(self):
        # pow(r, 2) agrees with the elementwise product r*r
        r = numpy.random.rand(50)
        r2 = pow(r,2)
        self.failUnless( _approx_eq(r2, r*r))
class _testCase_slicing(unittest.TestCase):
    """Tests for __getitem__/__getslice__ on wrapped arrays.

    Slices are expected to be read-only views: writes through the
    original array are visible, item assignment on the wrapper is not
    supported.
    """
    def setUp(self):
        build_eval_mode()
    def tearDown(self):
        pop_mode()
    def test_getitem0(self):
        # a column slice (4,) cannot be added to the full (4,4) array
        a = numpy.ones((4,4))
        wa1 = wrap(a)[:,1]
        try:
            err = wa1 + a
        except ValueError, e:
            self.failUnless(str(e) == \
                    'The dimensions of the inputs do not match.',
                    'Wrong ValueError')
            return
        self.fail('add should not have succeeded')
    def test_getitem1(self):
        # integer indexing drops the first dimension
        a = numpy.ones((4,4))
        wa1 = wrap(a)[1]
        self.failUnless(wa1.data.shape == (4,))
    def test_getslice_0d_all(self):
        """Test getslice does not work on 0d array """
        a = numpy.ones(())
        try:
            wa1 = wrap(a)[:]
        except IndexError, e:
            # numpy's own message for slicing a 0-d array
            self.failUnless(str(e) == "0-d arrays can't be indexed.")
            return
        self.fail()
    def test_getslice_1d_all(self):
        """Test getslice on 1d array"""
        a = numpy.ones(4)
        wa1 = wrap(a)[:]
        self.failUnless(wa1.data.shape == (4,), 'wrong shape')
        self.failUnless(numpy.all(wa1.data == a), 'unequal value')
        a[1] = 3.4
        # the slice is a view of `a` ...
        self.failUnless(wa1.data[1] == 3.4, 'not a view')
        try:
            # ... but the wrapper itself refuses item assignment
            wa1[2] = 2.5
        except TypeError, e:
            self.failUnless("object does not support item assignment" in str(e))
            return
        self.fail()
    def test_getslice_3d_all(self):
        """Test getslice on 3d array"""
        a = numpy.ones((4,5,6))
        wa1 = wrap(a)[:]
        self.failUnless(wa1.data.shape == (4,5,6), 'wrong shape')
        self.failUnless(numpy.all(wa1.data == a), 'unequal value')
        a[1,1,1] = 3.4
        self.failUnless(wa1.data[1,1,1] == 3.4, 'not a view')
    def test_getslice_1d_some(self):
        """Test getslice on 1d array"""
        a = numpy.ones(5)
        wa1 = wrap(a)[1:3]
        # writes to the sliced region (a[2]) and outside it (a[3])
        a[2] = 5.0
        a[3] = 2.5
        self.failUnless(wa1.data.shape == (2,))
        self.failUnless(a[1] == wa1.data[0])
        self.failUnless(a[2] == wa1.data[1])
    def test_getslice_1d_step(self):
        """Test getslice on 1d array"""
        a = numpy.ones(8)
        wa1 = wrap(a)[0:8:2]
        for i in xrange(8): a[i] = i
        self.failUnless(wa1.shape == (4,))
        # strided view picks up every other element, post-write
        for i in xrange(4):
            self.failUnless(a[i*2] == wa1.data[i])
    def test_getslice_3d_float(self):
        """Test getslice on 3d array"""
        a = numpy.asarray(range(4*5*6))
        a.resize((4,5,6))
        wa1 = wrap(a)[1:3]
        self.failUnless(wa1.shape == (2,5,6))
        self.failUnless(numpy.all(a[1:3] == wa1.data))
        a[1] *= -1.0
        # in-place scaling of a row is visible through the view
        self.failUnless(numpy.all(a[1:3] == wa1.data))
    def test_getslice_3d_one(self):
        """Test getslice on 3d array"""
        a = numpy.asarray(range(4*5*6))
        a.resize((4,5,6))
        wa = wrap(a)
        # full scalar indexing yields a 0-d result
        wa_123 = wa[1,2,3]
        self.failUnless(wa_123.shape == (), wa_123.shape)
......
......@@ -3,7 +3,7 @@ import tensor # for hidden symbols
import unittest
from copy import copy
from compile import Function
from compile import Function, eval_outputs
import gradient
import gof, gof.graph
......@@ -68,6 +68,158 @@ def check_eq2_c(self, inputs, output, args_in, arg_out):
self.failUnless( numpy.all(val == arg_out), (val, arg_out))
class T_transpose(unittest.TestCase):
    """Transpose-op tests: result shape is reversed and, because the op
    is a Viewer, the compiled function's output aliases the input data
    (writing 55.0 into the output makes the ones-filled input read 56.0).
    """
    def test0(self):
        # 0-d tensor: transpose is the identity, but still a view
        n = tinit(numpy.ones(()))
        t = transpose(n)
        self.failUnless(t.owner.__class__ is Transpose)
        f = Function([n], [t])
        tval = f(n.data)
        self.failUnless(tval.shape == n.data.shape)
        #test aliasing
        tval += 55.0
        self.failUnless(n.data == 56.0)
    def test1(self):
        # 1-d tensor: shape unchanged, still aliased
        n = tinit(numpy.ones(5))
        t = transpose(n)
        self.failUnless(t.owner.__class__ is Transpose)
        f = Function([n], [t])
        tval = f(n.data)
        self.failUnless(tval.shape == n.data.shape)
        #test aliasing
        tval += 55.0
        self.failUnless(n.data[0] == 56.0)
    def test2(self):
        # 2-d tensor: (5,3) -> (3,5)
        n = tinit(numpy.ones((5,3)))
        t = transpose(n)
        self.failUnless(t.owner.__class__ is Transpose)
        f = Function([n], [t])
        tval = f(n.data)
        self.failUnless(tval.shape == (3,5))
        #test aliasing
        tval += 55.0
        self.failUnless(n.data[0,0] == 56.0)
    def test3(self):
        # 3-d tensor: (5,3,2) -> (2,3,5)
        n = tinit(numpy.ones((5,3,2)))
        t = transpose(n)
        self.failUnless(t.owner.__class__ is Transpose)
        f = Function([n], [t])
        tval = f(n.data)
        self.failUnless(tval.shape == (2,3,5))
        #test aliasing
        tval += 55.0
        self.failUnless(n.data[0,0,0] == 56.0)
class T_subtensor(unittest.TestCase):
    """Tests for the Subtensor op (reached via Tensor.__getitem__)."""
    def test0_err_invalid(self):
        #it is impossible to retrieve a view of a 0-d tensor
        n = tinit(numpy.ones(()))
        try:
            t = n[0]
            self.fail()
        except ValueError, e:
            # Subtensor.__init__ raises its e_invalid message
            self.failUnless(e[0] is Subtensor.e_invalid)
    def test1_err_bounds(self):
        # out-of-bounds index: graph construction succeeds, the error
        # only surfaces when the graph is evaluated
        n = tinit(numpy.ones(3))
        t = n[7]
        self.failUnless(t.owner.__class__ is Subtensor)
        try:
            tval = eval_outputs([t])
        except Exception, e:
            # NOTE(review): if eval_outputs does NOT raise, the test
            # passes silently -- consider self.fail() after the call
            if e[0] != 'index out of bounds':
                raise
    def test1_ok_range_finite(self):
        # finite slice of a vector: n[0:2] has shape (2,)
        n = tinit(numpy.ones(3)*5)
        t = n[0:2]
        self.failUnless(t.owner.__class__ is Subtensor)
        tval = eval_outputs([t])
        self.failUnless(tval.shape == (2,))
        self.failUnless(tval[1] == 5.0)
    def test2_ok_range_finite(self):
        # mixed slice+integer indexing on a matrix: n[0:2,3] -> (2,)
        n = tinit(numpy.ones((3,4))*5)
        t = n[0:2,3]
        self.failUnless(t.owner.__class__ is Subtensor)
        tval = eval_outputs([t])
        self.failUnless(tval.shape == (2,))
        self.failUnless(tval[1] == 5.0)
    if 0:
        # disabled: tests for Subtensor features not implemented yet
        # (single-element views, open-ended and strided slices)
        def test1_err_invalid(self):
            n = tinit(numpy.ones(1))
            try:
                t = n[0,0]
                self.fail()
            except ValueError, e:
                self.failUnless(e[0] is Subtensor.e_invalid)
        def test1_ok_elem(self):
            n = tinit(numpy.ones(1)*5)
            t = n[0]
            self.failUnless(t.owner.__class__ is Subtensor)
            tval = eval_outputs([t])
            self.failUnless(tval.shape == (1,))
            self.failUnless(tval[0] == 5.0)
        def test1_ok_range_infinite(self):
            n = tinit(numpy.ones(3)*5)
            t = n[1:]
            self.failUnless(t.owner.__class__ is Subtensor)
            tval = eval_outputs([t])
            self.failUnless(tval.shape == (2,))
            self.failUnless(tval[1] == 5.0)
        def test1_ok_strided(self):
            n = tinit(numpy.ones(5)*5)
            t = n[1::2]
            self.failUnless(t.owner.__class__ is Subtensor)
            tval = eval_outputs([t])
            # NOTE(review): ones(5)[1::2] has 2 elements, so the expected
            # shape (3,) below looks wrong -- recheck before re-enabling
            self.failUnless(tval.shape == (3,))
            self.failUnless(tval[1] == 5.0)
            tval = eval_outputs([n[1:-1:2]])
            self.failUnless(tval.shape == (3,))
            self.failUnless(tval[1] == 5.0)
        def test2(self):
            raise NotImplementedError() #remember to bring back the rest of tests
    if 0:
        # disabled: 2-d / matrix indexing tests not written yet
        def test2_err_bounds0(self):
            raise NotImplementedError()
        def test2_err_bounds1(self):
            raise NotImplementedError()
        def test2_ok_elem(self):
            raise NotImplementedError()
        def test2_ok_row(self):
            raise NotImplementedError()
        def test2_ok_col(self):
            raise NotImplementedError()
        def test2_ok_rows_finite(self):
            raise NotImplementedError()
        def test2_ok_cols_infinite(self):
            raise NotImplementedError()
        def test2_ok_strided(self):
            raise NotImplementedError()
        def test3_ok_mat(self):
            raise NotImplementedError()
class T_add(unittest.TestCase):
    """Check that C-linked addition matches numpy addition for
    complex128 operands."""
    def test_complex128(self):
        # both operands are all-ones complex vectors
        x = tinit(numpy.ones(3, dtype='complex128'))
        y = tinit(numpy.ones(3, dtype='complex128'))
        fn = Function([x, y], [x + y], linker_cls=gof.CLinker)
        expected = x.data + y.data
        self.failUnless(numpy.all(expected == fn(x.data, y.data)))
    def test_complex128b(self):
        # first operand carries a non-trivial imaginary part
        x = tinit(numpy.ones(3, dtype='complex128') + 0.5j)
        y = tinit(numpy.ones(3, dtype='complex128'))
        fn = Function([x, y], [x + y], linker_cls=gof.CLinker)
        expected = x.data + y.data
        self.failUnless(numpy.all(expected == fn(x.data, y.data)))
class T_abs(unittest.TestCase):
def test_impl(self):
t = tinit(1.0)
......
......@@ -85,7 +85,9 @@ class BaseTensor(ResultBase):
'int8': (int, 'npy_int8', 'NPY_INT8'),
'int16': (int, 'npy_int16', 'NPY_INT16'),
'int32': (int, 'npy_int32', 'NPY_INT32'),
'int64': (int, 'npy_int64', 'NPY_INT64')}[self.dtype]
'int64': (int, 'npy_int64', 'NPY_INT64'),
'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'),
'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')}[self.dtype]
except KeyError:
raise TypeError("Unsupported dtype for BaseTensor: %s" % self.dtype)
......
......@@ -501,10 +501,10 @@ class CLinker(Linker):
}
""" % dict(struct_name = self.struct_name)
instantiate.customize.add_support_code(self.struct_code)
instantiate.customize.add_support_code(static)
for support_code in self.support_code():
instantiate.customize.add_support_code(support_code)
instantiate.customize.add_support_code(self.struct_code)
instantiate.customize.add_support_code(static)
instantiate.customize.add_extra_compile_arg("-w")
for arg in self.compile_args():
instantiate.customize.add_extra_compile_arg(arg)
......
"""
Contains the Result class, which is the base interface for a
value that is the input or the output of an Op.
"""
import copy
from utils import AbstractFunctionError
......@@ -268,6 +268,7 @@ class ResultBase(object):
    def __copy__(self):
        """Create a new instance of self.__class__ with role None, independent data"""
        # abstract hook: concrete ResultBase subclasses must override
        raise AbstractFunctionError()
......@@ -295,6 +296,13 @@ class PythonResult(ResultBase):
py_%(name)s = %(name)s;
Py_XINCREF(py_%(name)s);
"""
    def same_properties(self, other):
        # Always report "not the same": no two PythonResults are ever
        # treated as interchangeable (the conservative answer).
        return False
    def __copy__(self):
        """Return a fresh PythonResult (role None, same name) holding a
        shallow copy of this result's data."""
        rval = PythonResult(None, self.name)
        rval.data = copy.copy(self.data)
        return rval
......
......@@ -4,6 +4,7 @@ import numpy
from copy import copy
import inspect
from gof import ResultBase, Op, utils, Destroyer, Viewer, AbstractFunctionError
import gof.result
from base_tensor import BaseTensor, BaseTensorOp
from elemwise import Elemwise
......@@ -61,8 +62,8 @@ class Tensor(BaseTensor):
T = property(__get_T)
#SLICING
def __getitem__(self, key): raise NotImplementedError()
def __getslice__(self, key): raise NotImplementedError()
def __getitem__(self, item): return subtensor(self, item)
def __getslice__(self, *args): return subtensor(self, slice(*args))
# alternate Tensor constructor
def tinit(data, broadcastable=None, role=None, name=None):
......@@ -113,6 +114,11 @@ def _assert_tensor_scalar(x, a):
if numpy.product(a.shape) != 1:
raise ValueError("The second argument must be a scalar.")
def _as_tensor(obj):
    """Coerce `obj` to a Tensor.

    Tensor instances pass through unchanged; anything else is wrapped
    via tinit.
    """
    if isinstance(obj, Tensor):
        return obj
    return tinit(obj)
class _Op(BaseTensorOp):
"""A convenient base for the ops in this file"""
......@@ -121,13 +127,7 @@ class _Op(BaseTensorOp):
@classmethod
def input_wrapper(cls, obj):
if isinstance(obj, Tensor):
return obj
else:
return tinit(obj)
# nin = -1
# nout = 1
return _as_tensor(obj)
# def upcast(dtype, *dtypes):
# z = numpy.zeros((), dtype = dtype)
......@@ -344,46 +344,76 @@ class TensorCopy(_Elemwise):
return "%(z)s_i = %(x)s_i;"
tensor_copy = _constructor(TensorCopy)
if 0:
    ##########################
    # View Operations
    ##########################
    # NOTE(review): this whole branch is disabled dead code, superseded by
    # the Transpose/Subtensor classes below; kept only for reference.
    class transpose(_Op, Viewer):
        """Legacy transpose op: output is a view of the input (x.T)."""
        def view_map(self):
            # output aliases the (only) input
            return {self.out: [self.inputs[0]]}
        def impl(self, x):
            return x.T
        def grad(self, x, gz):
            return transpose_copy(gz)
        def propagate_broadcastable(self, x):
            # transposition reverses the broadcastable pattern
            rval = list(x)
            rval.reverse()
            return [rval]
        def c_impl(self, x, z):
            return """
            PyArrayObject* transposed = (PyArrayObject*)PyArray_Transpose(%(x)s, NULL);
            if (%(z)s) {
                Py_XDECREF(%(z)s);
            }
            %(z)s = transposed;
            """
    class Subtensor(_Op, Viewer):
        """Legacy subtensor op: output is a view selected by `item`."""
        def view_map(self):
            return {self.out: [self.inputs[0]]}
        def impl(x, item):
            rval = x.__getitem__(item)
            #print 'get_slice running', rval
            return rval
        def grad(x, gz):
            # - option: allocate a potentially large matrix of zeros, and fill in
            #   the appropriate elements from gz
            # - option: return a sparse matrix
            # - option: return gz, but think about how to include a special addition
            #   function that uses a matching view over the original data
            # BUG FIX: was `raise NotImplemented`, which raises the
            # NotImplemented singleton (a TypeError at raise time), not
            # an exception; raise the exception class instead.
            raise NotImplementedError()
class Transpose(_Op, Viewer):
    """Transpose op: the output is a view of the input with axes reversed."""
    def view_map(self):
        # declare that the output aliases the (only) input's memory
        return {self.out: [self.inputs[0]]}
    def propagate_broadcastable(self, x):
        # reversing the axes reverses the broadcastable flags
        rval = list(x)
        rval.reverse()
        return [rval]
    def impl(self, x):
        return x.T #numpy's transpose
    def grad(self, x, gz):
        # gradient of a transpose is the (copied) transpose of the gradient
        return transpose_copy(gz)
    def c_impl(self, x, z):
        # NOTE(review): the PyArray_Transpose result is not NULL-checked
        # before overwriting %(z)s -- confirm error handling upstream.
        return """
        PyArrayObject* transposed = (PyArrayObject*)PyArray_Transpose(%(x)s, NULL);
        if (%(z)s) {
            Py_XDECREF(%(z)s);
        }
        %(z)s = transposed;
        """
transpose = _constructor(Transpose)
class Subtensor(Op, Viewer):
    """Return a view of a sub-region of a Tensor.

    Takes two inputs: the tensor itself and a PythonResult holding a
    tuple of indices/slices (one entry per tensor dimension).  Only
    slice entries keep a dimension in the output; integer entries drop
    theirs.  Raises ValueError(e_invalid) when the number of index
    components does not match the tensor's rank.
    """
    nin = 2
    nout = 1
    e_invalid = 'invalid index'
    def __init__(self, *args, **kwargs):
        def as_tuple_result(obj):
            # Wrap a raw index (or tuple of indices) in a PythonResult
            # so it can participate in the graph as an input.
            if isinstance(obj, ResultBase):
                return obj
            r = gof.result.PythonResult(None)
            if isinstance(obj, tuple):
                r.data = obj
            else:
                r.data = (obj,)
            return r
        # FIX: removed leftover debug statement
        # (`print 'Subtensor.__init__', args, kwargs`) that spammed
        # stdout on every graph construction.
        #Olivier says not to call this
        #Op.__init__(self, *args,**kwargs)
        #Viewer.__init__(self, *args,**kwargs)
        t, coord = args
        t = _as_tensor(t)
        coord = as_tuple_result(coord)
        # one index component is required per tensor dimension
        if len(coord.data) != len(t.broadcastable):
            raise ValueError(Subtensor.e_invalid)
        # only slice components keep a dimension in the output
        broadcastable = [0 for c in coord.data if isinstance(c, slice)]
        self.inputs = [t, coord]
        self.outputs = [Tensor(t.dtype, broadcastable)]
    def view_map(self):
        # the output aliases the tensor input (not the coordinate input)
        return {self.out: [self.inputs[0]]}
    def perform(self):
        x = self.inputs[0].data
        c = self.inputs[1].data
        if len(c) == 1:
            # single component: index directly rather than with a 1-tuple
            self.outputs[0].data = x.__getitem__(c[0])
        else:
            self.outputs[0].data = x.__getitem__(c)
    def grad(self, x, gz):
        # FIX: added missing `self` parameter for consistency with
        # Transpose.grad; the method only raises, so behavior for any
        # caller is unchanged.
        # - option: allocate a potentially large matrix of zeros, and fill in
        #   the appropriate elements from gz
        # - option: return a sparse matrix
        # - option: return gz, but think about how to include a special addition
        #   function that works on a corresponding view of the original data
        raise NotImplementedError()
subtensor = _constructor(Subtensor)
##########################
......@@ -398,7 +428,7 @@ class AddElemwise(_Elemwise):
def grad(self, (x, y), gz):
return gz, gz
def c_foreach(self, (x_i, y_i), (z_i, )):
return "z_i = x_i + y_i;"
return "%(z)s_i = %(x)s_i + %(y)s_i;"
add_elemwise = _constructor(AddElemwise)
class AddElemwiseInplace(AddElemwise.inplace_version()):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论