提交 67091013 authored 作者: Olivier Breuleux's avatar Olivier Breuleux

moved Tensor -> NDArrayType, TensorResult -> NDArrayResult, as_tensor ->…

moved Tensor -> NDArrayType, TensorResult -> NDArrayResult, as_tensor -> as_tensor_result, Sparse -> SparseType, as_sparse -> as_sparse_result, closes #243
上级 55c5d0b3
...@@ -22,7 +22,7 @@ class BROKEN_ON_PURPOSE_StructuredDotCSC(gof.Op): ...@@ -22,7 +22,7 @@ class BROKEN_ON_PURPOSE_StructuredDotCSC(gof.Op):
def __hash__(self): def __hash__(self):
return 29834 ^ hash(type(self)) ^ hash(self.py_offset) return 29834 ^ hash(type(self)) ^ hash(self.py_offset)
def make_node(self, a_val, a_ind, a_ptr, a_nrows, b): def make_node(self, a_val, a_ind, a_ptr, a_nrows, b):
a_nrows = theano.tensor.as_tensor(a_nrows) a_nrows = theano.tensor.as_ndarray_result(a_nrows)
assert a_val.type.dtype == b.type.dtype assert a_val.type.dtype == b.type.dtype
r = gof.Apply(self, [a_val, a_ind, a_ptr, a_nrows, b], r = gof.Apply(self, [a_val, a_ind, a_ptr, a_nrows, b],
[theano.tensor.tensor(a_val.type.dtype, (False, False))]) [theano.tensor.tensor(a_val.type.dtype, (False, False))])
......
...@@ -18,7 +18,7 @@ class StochasticGradientDescent(module.FancyModule): ...@@ -18,7 +18,7 @@ class StochasticGradientDescent(module.FancyModule):
def __init__(self, args, cost, params, gradients=None, stepsize=None, WEIRD_STUFF=True): def __init__(self, args, cost, params, gradients=None, stepsize=None, WEIRD_STUFF=True):
""" """
:param stepsize: the step to take in (negative) gradient direction :param stepsize: the step to take in (negative) gradient direction
:type stepsize: None, scalar value, or scalar TensorResult :type stepsize: None, scalar value, or scalar NDArrayResult
""" """
super(StochasticGradientDescent, self).__init__() super(StochasticGradientDescent, self).__init__()
self.WEIRD_STUFF = WEIRD_STUFF self.WEIRD_STUFF = WEIRD_STUFF
...@@ -26,7 +26,7 @@ class StochasticGradientDescent(module.FancyModule): ...@@ -26,7 +26,7 @@ class StochasticGradientDescent(module.FancyModule):
if stepsize is None: if stepsize is None:
self.stepsize = (T.dscalar()) self.stepsize = (T.dscalar())
elif isinstance(stepsize, T.TensorResult): elif isinstance(stepsize, T.NDArrayResult):
self.stepsize = stepsize self.stepsize = stepsize
else: else:
if self.WEIRD_STUFF: if self.WEIRD_STUFF:
...@@ -89,9 +89,9 @@ class TanhRnn(Op): ...@@ -89,9 +89,9 @@ class TanhRnn(Op):
:type A: matrix (M by M) :type A: matrix (M by M)
""" """
x = T.as_tensor(x) x = T.as_ndarray_result(x)
z0 = T.as_tensor(z0) z0 = T.as_ndarray_result(z0)
A = T.as_tensor(A) A = T.as_ndarray_result(A)
z = x.type() #make a new symbolic result with the same type as x z = x.type() #make a new symbolic result with the same type as x
return Apply(self, [x, z0, A], [z]) return Apply(self, [x, z0, A], [z])
......
...@@ -289,9 +289,9 @@ class Type(object2, PureType, CLinkerType): ...@@ -289,9 +289,9 @@ class Type(object2, PureType, CLinkerType):
- `Generic`: for any python type - `Generic`: for any python type
- `Tensor`: for numpy.ndarray - `NDArrayType`: for numpy.ndarray
- `Sparse`: for scipy.sparse - `SparseType`: for scipy.sparse
But you are encouraged to write your own, as described in WRITEME. But you are encouraged to write your own, as described in WRITEME.
......
...@@ -35,19 +35,19 @@ if scipy.__version__ != '0.7.0': ...@@ -35,19 +35,19 @@ if scipy.__version__ != '0.7.0':
def _is_sparse_result(x): def _is_sparse_result(x):
""" """
@rtype: boolean @rtype: boolean
@return: True iff x is a L{SparseResult} (and not a L{tensor.Tensor}) @return: True iff x is a L{SparseResult} (and not a L{tensor.NDArrayType})
""" """
if not isinstance(x.type, Sparse) and not isinstance(x.type, tensor.Tensor): if not isinstance(x.type, SparseType) and not isinstance(x.type, tensor.NDArrayType):
raise NotImplementedError("this function should only be called on *results* (of type sparse.Sparse or tensor.Tensor), not,", x) raise NotImplementedError("this function should only be called on *results* (of type sparse.SparseType or tensor.NDArrayType), not,", x)
return isinstance(x.type, Sparse) return isinstance(x.type, SparseType)
def _is_dense_result(x): def _is_dense_result(x):
""" """
@rtype: boolean @rtype: boolean
@return: True unless x is a L{SparseResult} (and not a L{tensor.Tensor}) @return: True unless x is a L{SparseResult} (and not a L{tensor.NDArrayType})
""" """
if not isinstance(x.type, Sparse) and not isinstance(x.type, tensor.Tensor): if not isinstance(x.type, SparseType) and not isinstance(x.type, tensor.NDArrayType):
raise NotImplementedError("this function should only be called on *results* (of type sparse.Sparse or tensor.Tensor), not,", x) raise NotImplementedError("this function should only be called on *results* (of type sparse.SparseType or tensor.NDArrayType), not,", x)
return isinstance(x.type, tensor.Tensor) return isinstance(x.type, tensor.NDArrayType)
def _is_sparse(x): def _is_sparse(x):
""" """
...@@ -78,10 +78,10 @@ def _kmap_hash(a): ...@@ -78,10 +78,10 @@ def _kmap_hash(a):
# Wrapper type # Wrapper type
def as_sparse(x): def as_sparse_result(x):
""" """
Wrapper around SparseResult constructor. Wrapper around SparseResult constructor.
@param x: A sparse matrix. as_sparse reads dtype and format properties @param x: A sparse matrix. as_sparse_result reads dtype and format properties
out of this sparse matrix. out of this sparse matrix.
@return: SparseResult version of sp. @return: SparseResult version of sp.
...@@ -93,38 +93,40 @@ def as_sparse(x): ...@@ -93,38 +93,40 @@ def as_sparse(x):
else: else:
x = x.outputs[0] x = x.outputs[0]
if isinstance(x, gof.Result): if isinstance(x, gof.Result):
if not isinstance(x.type, Sparse): if not isinstance(x.type, SparseType):
raise TypeError("Result type field must be a Sparse.", x, x.type) raise TypeError("Result type field must be a SparseType.", x, x.type)
return x return x
try: try:
return constant(x) return constant(x)
except TypeError: except TypeError:
raise TypeError("Cannot convert %s to Sparse" % x, type(x)) raise TypeError("Cannot convert %s to SparseType" % x, type(x))
as_sparse = as_sparse_result
def constant(x): def constant(x):
if not isinstance(x, sparse.spmatrix): if not isinstance(x, sparse.spmatrix):
raise TypeError("sparse.constant must be called on a scipy.sparse.spmatrix") raise TypeError("sparse.constant must be called on a scipy.sparse.spmatrix")
try: try:
return SparseConstant(Sparse(format = x.format, return SparseConstant(SparseType(format = x.format,
dtype = x.dtype), x) dtype = x.dtype), x)
except TypeError: except TypeError:
raise TypeError("Could not convert %s to Sparse" % x, type(x)) raise TypeError("Could not convert %s to SparseType" % x, type(x))
def value(x): def value(x):
if not isinstance(x, sparse.spmatrix): if not isinstance(x, sparse.spmatrix):
raise TypeError("sparse.value must be called on a scipy.sparse.spmatrix") raise TypeError("sparse.value must be called on a scipy.sparse.spmatrix")
try: try:
return SparseValue(Sparse(format = x.format, return SparseValue(SparseType(format = x.format,
dtype = x.dtype), x) dtype = x.dtype), x)
except TypeError: except TypeError:
raise TypeError("Could not convert %s to Sparse" % x, type(x)) raise TypeError("Could not convert %s to SparseType" % x, type(x))
def sp_ones_like(x): def sp_ones_like(x):
data, indices, indptr, shape = csm_properties(x) #TODO: don't restrict to CSM formats data, indices, indptr, shape = csm_properties(x) #TODO: don't restrict to CSM formats
return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape) return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape)
class Sparse(gof.Type): class SparseType(gof.Type):
""" """
@type dtype: numpy dtype string such as 'int64' or 'float64' (among others) @type dtype: numpy dtype string such as 'int64' or 'float64' (among others)
@type format: string @type format: string
...@@ -196,8 +198,8 @@ class Sparse(gof.Type): ...@@ -196,8 +198,8 @@ class Sparse(gof.Type):
def is_valid_value(self, a): def is_valid_value(self, a):
return scipy.sparse.issparse(a) and (a.format == self.format) return scipy.sparse.issparse(a) and (a.format == self.format)
csc_matrix = Sparse(format='csc') csc_matrix = SparseType(format='csc')
csr_matrix = Sparse(format='csr') csr_matrix = SparseType(format='csr')
class _sparse_py_operators: class _sparse_py_operators:
T = property(lambda self: transpose(self), doc = "Return aliased transpose of self (read-only)") T = property(lambda self: transpose(self), doc = "Return aliased transpose of self (read-only)")
...@@ -248,8 +250,8 @@ class CSMProperties(gof.Op): ...@@ -248,8 +250,8 @@ class CSMProperties(gof.Op):
return 8234 ^ hash(type(self)) ^ _kmap_hash(self.kmap) return 8234 ^ hash(type(self)) ^ _kmap_hash(self.kmap)
def make_node(self, csm): def make_node(self, csm):
csm = as_sparse(csm) csm = as_sparse_result(csm)
data = tensor.Tensor(dtype=csm.type.dtype, broadcastable = (False,)).make_result() data = tensor.NDArrayType(dtype=csm.type.dtype, broadcastable = (False,)).make_result()
return gof.Apply(self, [csm], return gof.Apply(self, [csm],
[data, tensor.ivector(), tensor.ivector(), tensor.ivector()]) [data, tensor.ivector(), tensor.ivector(), tensor.ivector()])
...@@ -319,10 +321,10 @@ class CSM(gof.Op): ...@@ -319,10 +321,10 @@ class CSM(gof.Op):
:type indptr: 1-d tensor of ints :type indptr: 1-d tensor of ints
""" """
data = tensor.as_tensor(data) data = tensor.as_ndarray_result(data)
indices = tensor.as_tensor(indices) indices = tensor.as_ndarray_result(indices)
indptr = tensor.as_tensor(indptr) indptr = tensor.as_ndarray_result(indptr)
shape = tensor.as_tensor(shape) shape = tensor.as_ndarray_result(shape)
if data.type.ndim != 1: if data.type.ndim != 1:
raise TypeError('data argument must be a vector', data.type) raise TypeError('data argument must be a vector', data.type)
...@@ -335,7 +337,7 @@ class CSM(gof.Op): ...@@ -335,7 +337,7 @@ class CSM(gof.Op):
return gof.Apply(self, return gof.Apply(self,
[data, indices, indptr, shape], [data, indices, indptr, shape],
[Sparse(dtype = data.type.dtype, [SparseType(dtype = data.type.dtype,
format = self.format).make_result()]) format = self.format).make_result()])
def perform(self, node, (data, indices, indptr, shape), (out,)): def perform(self, node, (data, indices, indptr, shape), (out,)):
...@@ -366,7 +368,7 @@ class CSM(gof.Op): ...@@ -366,7 +368,7 @@ class CSM(gof.Op):
def grad(self, (data, indices, indptr, shape), (g_out,)): def grad(self, (data, indices, indptr, shape), (g_out,)):
"""Return a gradient on the data vector""" """Return a gradient on the data vector"""
#unpack the data vector and wrap it as a 1d Tensor #unpack the data vector and wrap it as a 1d NDArrayType
g_data = csm_grad(self.kmap)(data, csm_data(g_out),csm_indices(g_out)) g_data = csm_grad(self.kmap)(data, csm_data(g_out),csm_indices(g_out))
return [g_data, None, None, None] return [g_data, None, None, None]
...@@ -423,10 +425,10 @@ class DenseFromSparse(gof.op.Op): ...@@ -423,10 +425,10 @@ class DenseFromSparse(gof.op.Op):
"""WRITEME""" """WRITEME"""
def make_node(self, x): def make_node(self, x):
x = as_sparse(x) x = as_sparse_result(x)
return gof.Apply(self, return gof.Apply(self,
[x], [x],
[tensor.Tensor(dtype = x.type.dtype, [tensor.NDArrayType(dtype = x.type.dtype,
broadcastable = (False, False)).make_result()]) broadcastable = (False, False)).make_result()])
def perform(self, node, (x, ), (out, )): def perform(self, node, (x, ), (out, )):
if _is_dense(x): if _is_dense(x):
...@@ -453,13 +455,13 @@ class SparseFromDense(gof.op.Op): ...@@ -453,13 +455,13 @@ class SparseFromDense(gof.op.Op):
return 982374 ^ hash(self.format) ^ hash(DenseFromSparse) return 982374 ^ hash(self.format) ^ hash(DenseFromSparse)
def make_node(self, x): def make_node(self, x):
x = tensor.as_tensor(x) x = tensor.as_ndarray_result(x)
return gof.Apply(self, return gof.Apply(self,
[x], [x],
[Sparse(dtype = x.type.dtype, [SparseType(dtype = x.type.dtype,
format = self.format).make_result()]) format = self.format).make_result()])
def perform(self, node, (x, ), (out, )): def perform(self, node, (x, ), (out, )):
out[0] = Sparse.format_cls[self.format](x) out[0] = SparseType.format_cls[self.format](x)
def grad(self, (x, ), (gz, )): def grad(self, (x, ), (gz, )):
return dense_from_sparse(gz), return dense_from_sparse(gz),
csr_from_dense = SparseFromDense('csr') csr_from_dense = SparseFromDense('csr')
...@@ -473,10 +475,10 @@ class Transpose(gof.op.Op): ...@@ -473,10 +475,10 @@ class Transpose(gof.op.Op):
format_map = {'csr' : 'csc', format_map = {'csr' : 'csc',
'csc' : 'csr'} 'csc' : 'csr'}
def make_node(self, x): def make_node(self, x):
x = as_sparse(x) x = as_sparse_result(x)
return gof.Apply(self, return gof.Apply(self,
[x], [x],
[Sparse(dtype = x.type.dtype, [SparseType(dtype = x.type.dtype,
format = self.format_map[x.type.format]).make_result()]) format = self.format_map[x.type.format]).make_result()])
def perform(self, node, (x, ), (out, )): def perform(self, node, (x, ), (out, )):
assert _is_sparse(x) assert _is_sparse(x)
...@@ -488,7 +490,7 @@ transpose = Transpose() ...@@ -488,7 +490,7 @@ transpose = Transpose()
class Neg(gof.op.Op): class Neg(gof.op.Op):
def make_node(self, x): def make_node(self, x):
x = as_sparse(x) x = as_sparse_result(x)
return gof.Apply(self, [x], [x.type()]) return gof.Apply(self, [x], [x.type()])
def perform(self, node, (x, ), (out, )): def perform(self, node, (x, ), (out, )):
assert _is_sparse(x) assert _is_sparse(x)
...@@ -501,7 +503,7 @@ neg = Neg() ...@@ -501,7 +503,7 @@ neg = Neg()
class AddSS(gof.op.Op): class AddSS(gof.op.Op):
'''Add two sparse matrices ''' '''Add two sparse matrices '''
def make_node(self, x, y): def make_node(self, x, y):
x, y = map(as_sparse, [x, y]) x, y = map(as_sparse_result, [x, y])
if x.type.dtype != y.type.dtype: if x.type.dtype != y.type.dtype:
raise NotImplementedError() raise NotImplementedError()
if x.type.format != y.type.format: if x.type.format != y.type.format:
...@@ -509,7 +511,7 @@ class AddSS(gof.op.Op): ...@@ -509,7 +511,7 @@ class AddSS(gof.op.Op):
raise NotImplementedError() raise NotImplementedError()
return gof.Apply(self, return gof.Apply(self,
[x, y], [x, y],
[Sparse(dtype = x.type.dtype, [SparseType(dtype = x.type.dtype,
format = x.type.format).make_result()]) format = x.type.format).make_result()])
def perform(self, node, (x, y), (out, )): def perform(self, node, (x, y), (out, )):
assert _is_sparse(x) and _is_sparse(y) assert _is_sparse(x) and _is_sparse(y)
...@@ -523,7 +525,7 @@ add_s_s = AddSS() ...@@ -523,7 +525,7 @@ add_s_s = AddSS()
class AddSD(gof.op.Op): class AddSD(gof.op.Op):
''' Add a sparse and a dense matrix ''' ''' Add a sparse and a dense matrix '''
def make_node(self, x, y): def make_node(self, x, y):
x, y = as_sparse(x), tensor.as_tensor(y) x, y = as_sparse_result(x), tensor.as_ndarray_result(y)
if x.type.dtype != y.type.dtype: if x.type.dtype != y.type.dtype:
raise NotImplementedError() raise NotImplementedError()
# The magic number two here arises because L{scipy.sparse} # The magic number two here arises because L{scipy.sparse}
...@@ -531,7 +533,7 @@ class AddSD(gof.op.Op): ...@@ -531,7 +533,7 @@ class AddSD(gof.op.Op):
assert y.type.ndim == 2 assert y.type.ndim == 2
return gof.Apply(self, return gof.Apply(self,
[x, y], [x, y],
[tensor.Tensor(dtype = y.type.dtype, [tensor.NDArrayType(dtype = y.type.dtype,
broadcastable = y.type.broadcastable).make_result()]) broadcastable = y.type.broadcastable).make_result()])
def perform(self, node, (x, y), (out, )): def perform(self, node, (x, y), (out, )):
assert _is_sparse(x) and _is_dense(y) assert _is_sparse(x) and _is_dense(y)
...@@ -545,8 +547,8 @@ def add(x,y): ...@@ -545,8 +547,8 @@ def add(x,y):
""" """
Add two matrices, at least one of which is sparse. Add two matrices, at least one of which is sparse.
""" """
if hasattr(x, 'getnnz'): x = as_sparse(x) if hasattr(x, 'getnnz'): x = as_sparse_result(x)
if hasattr(y, 'getnnz'): y = as_sparse(y) if hasattr(y, 'getnnz'): y = as_sparse_result(y)
x_is_sparse_result = _is_sparse_result(x) x_is_sparse_result = _is_sparse_result(x)
y_is_sparse_result = _is_sparse_result(y) y_is_sparse_result = _is_sparse_result(y)
...@@ -564,7 +566,7 @@ def sub(x,y): ...@@ -564,7 +566,7 @@ def sub(x,y):
class MulSS(gof.op.Op): class MulSS(gof.op.Op):
''' Elementwise multiply a sparse and a ndarray ''' ''' Elementwise multiply a sparse and a ndarray '''
def make_node(self, x, y): def make_node(self, x, y):
x, y = as_sparse(x), as_sparse(y) x, y = as_sparse_result(x), as_sparse_result(y)
if x.type != y.type: if x.type != y.type:
raise NotImplementedError() raise NotImplementedError()
return gof.Apply(self, [x, y], [x.type()]) return gof.Apply(self, [x, y], [x.type()])
...@@ -583,7 +585,7 @@ mul_s_s = MulSS() ...@@ -583,7 +585,7 @@ mul_s_s = MulSS()
class MulSD(gof.op.Op): class MulSD(gof.op.Op):
''' Elementwise multiply a sparse and a ndarray ''' ''' Elementwise multiply a sparse and a ndarray '''
def make_node(self, x, y): def make_node(self, x, y):
x, y = as_sparse(x), tensor.as_tensor(y) x, y = as_sparse_result(x), tensor.as_ndarray_result(y)
if x.type.dtype != y.type.dtype: if x.type.dtype != y.type.dtype:
raise NotImplementedError() raise NotImplementedError()
# The magic number two here arises because L{scipy.sparse} # The magic number two here arises because L{scipy.sparse}
...@@ -641,8 +643,8 @@ def mul(x,y): ...@@ -641,8 +643,8 @@ def mul(x,y):
""" """
Multiply (elementwise) two matrices, at least one of which is sparse. Multiply (elementwise) two matrices, at least one of which is sparse.
""" """
if hasattr(x, 'getnnz'): x = as_sparse(x) if hasattr(x, 'getnnz'): x = as_sparse_result(x)
if hasattr(y, 'getnnz'): y = as_sparse(y) if hasattr(y, 'getnnz'): y = as_sparse_result(y)
x_is_sparse_result = _is_sparse_result(x) x_is_sparse_result = _is_sparse_result(x)
y_is_sparse_result = _is_sparse_result(y) y_is_sparse_result = _is_sparse_result(y)
...@@ -661,7 +663,7 @@ class StructuredDot(gof.Op): ...@@ -661,7 +663,7 @@ class StructuredDot(gof.Op):
"""Structured Dot is like dot, except that only the gradient wrt non-zero elements of the """Structured Dot is like dot, except that only the gradient wrt non-zero elements of the
sparse matrix A are calculated and propagated. sparse matrix A are calculated and propagated.
The output is presumed to be a dense matrix, and is represented by a Tensor instance. The output is presumed to be a dense matrix, and is represented by a NDArrayType instance.
""" """
def make_node(self, a, b): def make_node(self, a, b):
assert a.type.dtype == b.type.dtype assert a.type.dtype == b.type.dtype
...@@ -710,8 +712,8 @@ def structured_dot(x, y): ...@@ -710,8 +712,8 @@ def structured_dot(x, y):
@todo: Maybe the triple-transposition formulation (when x is dense) @todo: Maybe the triple-transposition formulation (when x is dense)
is slow. See if there is a direct way to do this. is slow. See if there is a direct way to do this.
""" """
if hasattr(x, 'getnnz'): x = as_sparse(x) if hasattr(x, 'getnnz'): x = as_sparse_result(x)
if hasattr(y, 'getnnz'): y = as_sparse(y) if hasattr(y, 'getnnz'): y = as_sparse_result(y)
x_is_sparse_result = _is_sparse_result(x) x_is_sparse_result = _is_sparse_result(x)
y_is_sparse_result = _is_sparse_result(y) y_is_sparse_result = _is_sparse_result(y)
......
...@@ -66,8 +66,8 @@ def true_dot(x, y, grad_preserves_dense=True): ...@@ -66,8 +66,8 @@ def true_dot(x, y, grad_preserves_dense=True):
@todo: Maybe the triple-transposition formulation (when x is dense) @todo: Maybe the triple-transposition formulation (when x is dense)
is slow. See if there is a direct way to do this. is slow. See if there is a direct way to do this.
""" """
if hasattr(x, 'getnnz'): x = as_sparse(x) if hasattr(x, 'getnnz'): x = as_sparse_result(x)
if hasattr(y, 'getnnz'): y = as_sparse(y) if hasattr(y, 'getnnz'): y = as_sparse_result(y)
x_is_sparse_result = _is_sparse_result(x) x_is_sparse_result = _is_sparse_result(x)
y_is_sparse_result = _is_sparse_result(y) y_is_sparse_result = _is_sparse_result(y)
...@@ -86,7 +86,7 @@ class test_true_dot(unittest.TestCase): ...@@ -86,7 +86,7 @@ class test_true_dot(unittest.TestCase):
def test_basicSS(self): def test_basicSS(self):
for mtype in _mtypes: for mtype in _mtypes:
x = as_sparse(mtype((500,3))) x = as_sparse_result(mtype((500,3)))
x.data[(10, 1)] = 1 x.data[(10, 1)] = 1
x.data[(20, 2)] = 2 x.data[(20, 2)] = 2
self.failUnless(_is_sparse_result(x)) self.failUnless(_is_sparse_result(x))
...@@ -117,12 +117,12 @@ class test_true_dot(unittest.TestCase): ...@@ -117,12 +117,12 @@ class test_true_dot(unittest.TestCase):
def test_basicSD(self): def test_basicSD(self):
for mtype in _mtypes: for mtype in _mtypes:
x = as_sparse(mtype((500,3))) x = as_sparse_result(mtype((500,3)))
x.data[(10, 1)] = 1 x.data[(10, 1)] = 1
x.data[(20, 2)] = 2 x.data[(20, 2)] = 2
self.failUnless(_is_sparse_result(x)) self.failUnless(_is_sparse_result(x))
y = tensor.as_tensor([[1., 2], [3, 4], [2, 1]]) y = tensor.as_ndarray_result([[1., 2], [3, 4], [2, 1]])
self.failUnless(_is_dense_result(y)) self.failUnless(_is_dense_result(y))
zop = true_dot(x,y) zop = true_dot(x,y)
...@@ -150,12 +150,12 @@ class test_true_dot(unittest.TestCase): ...@@ -150,12 +150,12 @@ class test_true_dot(unittest.TestCase):
def test_basicDS(self): def test_basicDS(self):
for mtype in _mtypes: for mtype in _mtypes:
x = as_sparse(mtype((500,3))) x = as_sparse_result(mtype((500,3)))
x.data[(10, 1)] = 1 x.data[(10, 1)] = 1
x.data[(20, 2)] = 2 x.data[(20, 2)] = 2
self.failUnless(_is_sparse_result(x)) self.failUnless(_is_sparse_result(x))
y = tensor.as_tensor([[1., 2], [3, 4], [2, 1]]) y = tensor.as_ndarray_result([[1., 2], [3, 4], [2, 1]])
self.failUnless(_is_dense_result(y)) self.failUnless(_is_dense_result(y))
x.data = x.data.T x.data = x.data.T
...@@ -189,7 +189,7 @@ class test_true_dot(unittest.TestCase): ...@@ -189,7 +189,7 @@ class test_true_dot(unittest.TestCase):
def test_graph_bprop0(self): def test_graph_bprop0(self):
for mtype in _mtypes: for mtype in _mtypes:
x = tensor.matrix('x') #Tensor('float64', broadcastable=[False,False], name='x') x = tensor.matrix('x') #NDArrayType('float64', broadcastable=[False,False], name='x')
w = Sparse(dtype = 'float64', format = _mtype_to_str[mtype]).make_result() w = Sparse(dtype = 'float64', format = _mtype_to_str[mtype]).make_result()
xw = dense_from_sparse(true_dot(w, x)) xw = dense_from_sparse(true_dot(w, x))
y = dense_from_sparse(true_dot(w.T, xw)) y = dense_from_sparse(true_dot(w.T, xw))
......
...@@ -22,7 +22,7 @@ class T_transpose(unittest.TestCase): ...@@ -22,7 +22,7 @@ class T_transpose(unittest.TestCase):
def test_transpose_csc(self): def test_transpose_csc(self):
sp = sparse.csc_matrix(sparse.eye(5,3)) sp = sparse.csc_matrix(sparse.eye(5,3))
a = as_sparse(sp) a = as_sparse_result(sp)
self.failUnless(a.data is sp) self.failUnless(a.data is sp)
self.failUnless(a.data.shape == (5,3)) self.failUnless(a.data.shape == (5,3))
self.failUnless(a.type.dtype == 'float64', a.type.dtype) self.failUnless(a.type.dtype == 'float64', a.type.dtype)
...@@ -34,7 +34,7 @@ class T_transpose(unittest.TestCase): ...@@ -34,7 +34,7 @@ class T_transpose(unittest.TestCase):
vta = eval_outputs([ta]) vta = eval_outputs([ta])
self.failUnless(vta.shape == (3,5)) self.failUnless(vta.shape == (3,5))
def test_transpose_csr(self): def test_transpose_csr(self):
a = as_sparse(sparse.csr_matrix(sparse.eye(5,3))) a = as_sparse_result(sparse.csr_matrix(sparse.eye(5,3)))
self.failUnless(a.data.shape == (5,3)) self.failUnless(a.data.shape == (5,3))
self.failUnless(a.type.dtype == 'float64') self.failUnless(a.type.dtype == 'float64')
self.failUnless(a.type.format == 'csr') self.failUnless(a.type.format == 'csr')
...@@ -49,13 +49,13 @@ class T_Add(unittest.TestCase): ...@@ -49,13 +49,13 @@ class T_Add(unittest.TestCase):
def testSS(self): def testSS(self):
for mtype in _mtypes: for mtype in _mtypes:
a = mtype(numpy.array([[1., 0], [3, 0], [0, 6]])) a = mtype(numpy.array([[1., 0], [3, 0], [0, 6]]))
aR = as_sparse(a) aR = as_sparse_result(a)
self.failUnless(aR.data is a) self.failUnless(aR.data is a)
self.failUnless(_is_sparse(a)) self.failUnless(_is_sparse(a))
self.failUnless(_is_sparse_result(aR)) self.failUnless(_is_sparse_result(aR))
b = mtype(numpy.asarray([[0, 2.], [0, 4], [5, 0]])) b = mtype(numpy.asarray([[0, 2.], [0, 4], [5, 0]]))
bR = as_sparse(b) bR = as_sparse_result(b)
self.failUnless(bR.data is b) self.failUnless(bR.data is b)
self.failUnless(_is_sparse(b)) self.failUnless(_is_sparse(b))
self.failUnless(_is_sparse_result(bR)) self.failUnless(_is_sparse_result(bR))
...@@ -76,13 +76,13 @@ class T_Add(unittest.TestCase): ...@@ -76,13 +76,13 @@ class T_Add(unittest.TestCase):
def testSD(self): def testSD(self):
for mtype in _mtypes: for mtype in _mtypes:
a = numpy.array([[1., 0], [3, 0], [0, 6]]) a = numpy.array([[1., 0], [3, 0], [0, 6]])
aR = tensor.as_tensor(a) aR = tensor.as_ndarray_result(a)
self.failUnless(aR.data is a) self.failUnless(aR.data is a)
self.failUnless(_is_dense(a)) self.failUnless(_is_dense(a))
self.failUnless(_is_dense_result(aR)) self.failUnless(_is_dense_result(aR))
b = mtype(numpy.asarray([[0, 2.], [0, 4], [5, 0]])) b = mtype(numpy.asarray([[0, 2.], [0, 4], [5, 0]]))
bR = as_sparse(b) bR = as_sparse_result(b)
self.failUnless(bR.data is b) self.failUnless(bR.data is b)
self.failUnless(_is_sparse(b)) self.failUnless(_is_sparse(b))
self.failUnless(_is_sparse_result(bR)) self.failUnless(_is_sparse_result(bR))
...@@ -101,13 +101,13 @@ class T_Add(unittest.TestCase): ...@@ -101,13 +101,13 @@ class T_Add(unittest.TestCase):
def testDS(self): def testDS(self):
for mtype in _mtypes: for mtype in _mtypes:
a = mtype(numpy.array([[1., 0], [3, 0], [0, 6]])) a = mtype(numpy.array([[1., 0], [3, 0], [0, 6]]))
aR = as_sparse(a) aR = as_sparse_result(a)
self.failUnless(aR.data is a) self.failUnless(aR.data is a)
self.failUnless(_is_sparse(a)) self.failUnless(_is_sparse(a))
self.failUnless(_is_sparse_result(aR)) self.failUnless(_is_sparse_result(aR))
b = numpy.asarray([[0, 2.], [0, 4], [5, 0]]) b = numpy.asarray([[0, 2.], [0, 4], [5, 0]])
bR = tensor.as_tensor(b) bR = tensor.as_ndarray_result(b)
self.failUnless(bR.data is b) self.failUnless(bR.data is b)
self.failUnless(_is_dense(b)) self.failUnless(_is_dense(b))
self.failUnless(_is_dense_result(bR)) self.failUnless(_is_dense_result(bR))
...@@ -128,14 +128,14 @@ class T_conversion(unittest.TestCase): ...@@ -128,14 +128,14 @@ class T_conversion(unittest.TestCase):
unittest_tools.seed_rng() unittest_tools.seed_rng()
def test0(self): def test0(self):
a = tensor.as_tensor(numpy.random.rand(5)) a = tensor.as_ndarray_result(numpy.random.rand(5))
s = csc_from_dense(a) s = csc_from_dense(a)
val = eval_outputs([s]) val = eval_outputs([s])
self.failUnless(str(val.dtype)=='float64') self.failUnless(str(val.dtype)=='float64')
self.failUnless(val.format == 'csc') self.failUnless(val.format == 'csc')
def test1(self): def test1(self):
a = tensor.as_tensor(numpy.random.rand(5)) a = tensor.as_ndarray_result(numpy.random.rand(5))
s = csr_from_dense(a) s = csr_from_dense(a)
val = eval_outputs([s]) val = eval_outputs([s])
self.failUnless(str(val.dtype)=='float64') self.failUnless(str(val.dtype)=='float64')
......
...@@ -59,11 +59,11 @@ def __oplist_tag(thing, tag): ...@@ -59,11 +59,11 @@ def __oplist_tag(thing, tag):
thing.__oplist_tags = tags thing.__oplist_tags = tags
def as_tensor(x, name = None, ndim=None): def as_ndarray_result(x, name = None, ndim=None):
"""Return `x`, transformed into a `Tensor` """Return `x`, transformed into a `NDArrayType`
This function is often used by `make_node` methods of `Op` subclasses to This function is often used by `make_node` methods of `Op` subclasses to
turn ndarrays, numbers, `Scalar` instances, `Apply` instances and `Tensor` turn ndarrays, numbers, `Scalar` instances, `Apply` instances and `NDArrayType`
        instances into valid input list elements.                                               instances into valid input list elements.
:Parameters: :Parameters:
...@@ -78,7 +78,7 @@ def as_tensor(x, name = None, ndim=None): ...@@ -78,7 +78,7 @@ def as_tensor(x, name = None, ndim=None):
:Exceptions: :Exceptions:
- `ValueError`: raised if an `Apply` with no default output is fetched - `ValueError`: raised if an `Apply` with no default output is fetched
- `TypeError`: raised if `x` cannot be converted to a Tensor Result - `TypeError`: raised if `x` cannot be converted to a NDArrayType Result
""" """
...@@ -92,15 +92,15 @@ def as_tensor(x, name = None, ndim=None): ...@@ -92,15 +92,15 @@ def as_tensor(x, name = None, ndim=None):
if isinstance(x.type, scal.Scalar): if isinstance(x.type, scal.Scalar):
x = tensor_from_scalar(x) x = tensor_from_scalar(x)
if not isinstance(x.type, Tensor): if not isinstance(x.type, NDArrayType):
raise TypeError("Result type field must be a Tensor.", x, x.type) raise TypeError("Result type field must be a NDArrayType.", x, x.type)
if ndim is None: if ndim is None:
return x return x
else: else:
if (x.type.ndim > ndim): if (x.type.ndim > ndim):
#TODO: strip off leading broadcastable dimensions #TODO: strip off leading broadcastable dimensions
raise ValueError('Tensor could not be cast to have %i dimensions' % ndim, x.type) raise ValueError('NDArrayType could not be cast to have %i dimensions' % ndim, x.type)
elif (x.type.ndim < ndim): elif (x.type.ndim < ndim):
return shape_padleft(x, n_ones=(ndim - x.type.ndim)) return shape_padleft(x, n_ones=(ndim - x.type.ndim))
else: else:
...@@ -112,11 +112,14 @@ def as_tensor(x, name = None, ndim=None): ...@@ -112,11 +112,14 @@ def as_tensor(x, name = None, ndim=None):
str_x = str(x) str_x = str(x)
except: except:
str_x = repr(x) str_x = repr(x)
raise TypeError("Cannot convert %s to Tensor" % str_x, type(x)) raise TypeError("Cannot convert %s to NDArrayType" % str_x, type(x))
# this has a different name, because _as_tensor is the function which ops use # this has a different name, because _as_ndarray_result is the function which ops use
# to upcast their arguments... this internal-use function is a good place to put debugging stuff, better than the global astensor. # to upcast their arguments... this internal-use function is a good place to put debugging stuff, better than the global astensor.
_as_tensor = as_tensor _as_ndarray_result = as_ndarray_result
as_tensor = as_ndarray_result
def constant_or_value(x, rtype, name=None, ndim=None): def constant_or_value(x, rtype, name=None, ndim=None):
"""Return a symbolic `Constant` with value `x` """Return a symbolic `Constant` with value `x`
...@@ -141,19 +144,19 @@ def constant_or_value(x, rtype, name=None, ndim=None): ...@@ -141,19 +144,19 @@ def constant_or_value(x, rtype, name=None, ndim=None):
assert len(bcastable) == ndim assert len(bcastable) == ndim
try: try:
return rtype(Tensor(dtype = x_.dtype, broadcastable = bcastable), x_, name=name) return rtype(NDArrayType(dtype = x_.dtype, broadcastable = bcastable), x_, name=name)
except: except:
raise TypeError("Could not convert %s to Tensor" % x, type(x)) raise TypeError("Could not convert %s to NDArrayType" % x, type(x))
def constant(x, name=None, ndim=None): def constant(x, name=None, ndim=None):
return constant_or_value(x, rtype=TensorConstant, name=name, ndim=ndim) return constant_or_value(x, rtype=NDArrayConstant, name=name, ndim=ndim)
def value(x, name=None, ndim=None): def value(x, name=None, ndim=None):
return constant_or_value(x, rtype=TensorValue, name=name, ndim=ndim) return constant_or_value(x, rtype=NDArrayValue, name=name, ndim=ndim)
class Tensor(Type): class NDArrayType(Type):
"""Symbolic `Type` representing a numpy.ndarray value.""" """Symbolic `Type` representing a numpy.ndarray value."""
def __init__(self, dtype, broadcastable, name = None): def __init__(self, dtype, broadcastable, name = None):
...@@ -178,7 +181,7 @@ class Tensor(Type): ...@@ -178,7 +181,7 @@ class Tensor(Type):
self.name = name self.name = name
def filter(self, data, strict = False): def filter(self, data, strict = False):
"""Convert `data` to something which can be associated to a `TensorResult`. """Convert `data` to something which can be associated to a `NDArrayResult`.
This function is not meant to be called in user code. It is for This function is not meant to be called in user code. It is for
`Linker` instances to use when running a compiled graph. `Linker` instances to use when running a compiled graph.
...@@ -228,7 +231,7 @@ class Tensor(Type): ...@@ -228,7 +231,7 @@ class Tensor(Type):
return scal.Scalar(dtype = self.dtype) return scal.Scalar(dtype = self.dtype)
def __eq__(self, other): def __eq__(self, other):
"""Compare True iff other is the same kind of Tensor""" """Compare True iff other is the same kind of NDArrayType"""
return type(self) == type(other) and other.dtype == self.dtype and other.broadcastable == self.broadcastable return type(self) == type(other) and other.dtype == self.dtype and other.broadcastable == self.broadcastable
def values_eq_approx(self, a, b): def values_eq_approx(self, a, b):
...@@ -236,26 +239,26 @@ class Tensor(Type): ...@@ -236,26 +239,26 @@ class Tensor(Type):
and (a.shape == b.shape) and numpy.allclose(a, b) and (a.shape == b.shape) and numpy.allclose(a, b)
def __hash__(self): def __hash__(self):
"""Hash equal for same kinds of Tensor""" """Hash equal for same kinds of NDArrayType"""
return hash(self.dtype) ^ hash(self.broadcastable) return hash(self.dtype) ^ hash(self.broadcastable)
ndim = property(lambda self: len(self.broadcastable), doc = "number of dimensions") ndim = property(lambda self: len(self.broadcastable), doc = "number of dimensions")
"""Number of dimensions """Number of dimensions
This read-only property is the preferred way to get the number of dimensions This read-only property is the preferred way to get the number of dimensions
of a `Tensor`. of a `NDArrayType`.
""" """
def make_result(self, name = None): def make_result(self, name = None):
"""Return a `TensorResult` of this type """Return a `NDArrayResult` of this type
:Parameters: :Parameters:
- `name`: str - `name`: str
A pretty name to identify this `Result` when printing and debugging A pretty name to identify this `Result` when printing and debugging
""" """
return TensorResult(self, name = name) return NDArrayResult(self, name = name)
def __str__(self): def __str__(self):
if self.name: if self.name:
...@@ -268,11 +271,11 @@ class Tensor(Type): ...@@ -268,11 +271,11 @@ class Tensor(Type):
(False, True): 'col', (False, True): 'col',
(True, False): 'row', (True, False): 'row',
(False, False): 'matrix'}.get(b, "%iD" % len(b) if not any(b) else str(b)) (False, False): 'matrix'}.get(b, "%iD" % len(b) if not any(b) else str(b))
return "Tensor(%s, %s)" % (str(self.dtype), bcast) return "NDArrayType(%s, %s)" % (str(self.dtype), bcast)
def __repr__(self): def __repr__(self):
return str(self) return str(self)
#"Tensor{%s, %s}" % (str(self.dtype), str(self.broadcastable)) #"NDArrayType{%s, %s}" % (str(self.dtype), str(self.broadcastable))
def c_declare(self, name, sub): def c_declare(self, name, sub):
"""Override `CLinkerOp.c_declare` """ """Override `CLinkerOp.c_declare` """
...@@ -386,7 +389,7 @@ class Tensor(Type): ...@@ -386,7 +389,7 @@ class Tensor(Type):
# Easy constructors # Easy constructors
def tensor(*args, **kwargs): def tensor(*args, **kwargs):
return Tensor(*args, **kwargs).make_result() return NDArrayType(*args, **kwargs).make_result()
def _multi(*fns): def _multi(*fns):
def f2(f, *names): def f2(f, *names):
...@@ -407,16 +410,16 @@ def _multi(*fns): ...@@ -407,16 +410,16 @@ def _multi(*fns):
else: else:
return [partial(f2, f) for f in fns] return [partial(f2, f) for f in fns]
cscalar = Tensor('complex64', ()) cscalar = NDArrayType('complex64', ())
zscalar = Tensor('complex128', ()) zscalar = NDArrayType('complex128', ())
fscalar = Tensor('float32', ()) fscalar = NDArrayType('float32', ())
dscalar = Tensor('float64', ()) dscalar = NDArrayType('float64', ())
bscalar = Tensor('int8', ()) bscalar = NDArrayType('int8', ())
wscalar = Tensor('int16', ()) wscalar = NDArrayType('int16', ())
iscalar = Tensor('int32', ()) iscalar = NDArrayType('int32', ())
lscalar = Tensor('int64', ()) lscalar = NDArrayType('int64', ())
def scalar(name = None, dtype = 'float64'): def scalar(name = None, dtype = 'float64'):
type = Tensor(dtype, ()) type = NDArrayType(dtype, ())
return type(name) return type(name)
scalars, fscalars, dscalars, iscalars, lscalars = _multi(scalar, fscalar, dscalar, iscalar, lscalar) scalars, fscalars, dscalars, iscalars, lscalars = _multi(scalar, fscalar, dscalar, iscalar, lscalar)
...@@ -427,16 +430,16 @@ int_scalar_types = int_types ...@@ -427,16 +430,16 @@ int_scalar_types = int_types
float_scalar_types = float_types float_scalar_types = float_types
complex_scalar_types = complex_types complex_scalar_types = complex_types
cvector = Tensor('complex64', (False, )) cvector = NDArrayType('complex64', (False, ))
zvector = Tensor('complex128', (False, )) zvector = NDArrayType('complex128', (False, ))
fvector = Tensor('float32', (False, )) fvector = NDArrayType('float32', (False, ))
dvector = Tensor('float64', (False, )) dvector = NDArrayType('float64', (False, ))
bvector = Tensor('int8', (False,)) bvector = NDArrayType('int8', (False,))
wvector = Tensor('int16', (False,)) wvector = NDArrayType('int16', (False,))
ivector = Tensor('int32', (False, )) ivector = NDArrayType('int32', (False, ))
lvector = Tensor('int64', (False, )) lvector = NDArrayType('int64', (False, ))
def vector(name = None, dtype = 'float64'): def vector(name = None, dtype = 'float64'):
type = Tensor(dtype, (False, )) type = NDArrayType(dtype, (False, ))
return type(name) return type(name)
vectors, fvectors, dvectors, ivectors, lvectors = _multi(vector, fvector, dvector, ivector, lvector) vectors, fvectors, dvectors, ivectors, lvectors = _multi(vector, fvector, dvector, ivector, lvector)
...@@ -444,16 +447,16 @@ int_vector_types = bvector, wvector, ivector, lvector ...@@ -444,16 +447,16 @@ int_vector_types = bvector, wvector, ivector, lvector
float_vector_types = fvector, dvector float_vector_types = fvector, dvector
complex_vector_types = cvector, zvector complex_vector_types = cvector, zvector
cmatrix = Tensor('complex64', (False, False)) cmatrix = NDArrayType('complex64', (False, False))
zmatrix = Tensor('complex128', (False, False)) zmatrix = NDArrayType('complex128', (False, False))
fmatrix = Tensor('float32', (False, False)) fmatrix = NDArrayType('float32', (False, False))
dmatrix = Tensor('float64', (False, False)) dmatrix = NDArrayType('float64', (False, False))
bmatrix = Tensor('int8', (False, False)) bmatrix = NDArrayType('int8', (False, False))
wmatrix = Tensor('int16', (False, False)) wmatrix = NDArrayType('int16', (False, False))
imatrix = Tensor('int32', (False, False)) imatrix = NDArrayType('int32', (False, False))
lmatrix = Tensor('int64', (False, False)) lmatrix = NDArrayType('int64', (False, False))
def matrix(name = None, dtype = 'float64'): def matrix(name = None, dtype = 'float64'):
type = Tensor(dtype, (False, False)) type = NDArrayType(dtype, (False, False))
return type(name) return type(name)
matrices, fmatrices, dmatrices, imatrices, lmatrices = _multi(matrix, fmatrix, dmatrix, imatrix, lmatrix) matrices, fmatrices, dmatrices, imatrices, lmatrices = _multi(matrix, fmatrix, dmatrix, imatrix, lmatrix)
...@@ -461,29 +464,29 @@ int_matrix_types = bmatrix, wmatrix, imatrix, lmatrix ...@@ -461,29 +464,29 @@ int_matrix_types = bmatrix, wmatrix, imatrix, lmatrix
float_matrix_types = fmatrix, dmatrix float_matrix_types = fmatrix, dmatrix
complex_matrix_types = cmatrix, zmatrix complex_matrix_types = cmatrix, zmatrix
crow = Tensor('complex64', (True, False)) crow = NDArrayType('complex64', (True, False))
zrow = Tensor('complex128', (True, False)) zrow = NDArrayType('complex128', (True, False))
frow = Tensor('float32', (True, False)) frow = NDArrayType('float32', (True, False))
drow = Tensor('float64', (True, False)) drow = NDArrayType('float64', (True, False))
brow = Tensor('int8', (True, False)) brow = NDArrayType('int8', (True, False))
wrow = Tensor('int16', (True, False)) wrow = NDArrayType('int16', (True, False))
irow = Tensor('int32', (True, False)) irow = NDArrayType('int32', (True, False))
lrow = Tensor('int64', (True, False)) lrow = NDArrayType('int64', (True, False))
def row(name = None, dtype = 'float64'): def row(name = None, dtype = 'float64'):
type = Tensor(dtype, (True, False)) type = NDArrayType(dtype, (True, False))
return type(name) return type(name)
rows, frows, drows, irows, lrows = _multi(row, frow, drow, irow, lrow) rows, frows, drows, irows, lrows = _multi(row, frow, drow, irow, lrow)
ccol = Tensor('complex64', (False, True)) ccol = NDArrayType('complex64', (False, True))
zcol = Tensor('complex128', (False, True)) zcol = NDArrayType('complex128', (False, True))
fcol = Tensor('float32', (False, True)) fcol = NDArrayType('float32', (False, True))
dcol = Tensor('float64', (False, True)) dcol = NDArrayType('float64', (False, True))
bcol = Tensor('int8', (False, True)) bcol = NDArrayType('int8', (False, True))
wcol = Tensor('int16', (False, True)) wcol = NDArrayType('int16', (False, True))
icol = Tensor('int32', (False, True)) icol = NDArrayType('int32', (False, True))
lcol = Tensor('int64', (False, True)) lcol = NDArrayType('int64', (False, True))
def col(name = None, dtype = 'float64'): def col(name = None, dtype = 'float64'):
type = Tensor(dtype, (False, True)) type = NDArrayType(dtype, (False, True))
return type(name) return type(name)
cols, fcols, dcols, icols, lcols = _multi(col, fcol, dcol, icol, lcol) cols, fcols, dcols, icols, lcols = _multi(col, fcol, dcol, icol, lcol)
...@@ -588,7 +591,7 @@ class _tensor_py_operators: ...@@ -588,7 +591,7 @@ class _tensor_py_operators:
def __iter__(self): def __iter__(self):
# This prevents accidental iteration via builtin.sum(self) # This prevents accidental iteration via builtin.sum(self)
raise TypeError('Tensor does not support iteration. ' raise TypeError('NDArrayType does not support iteration. '
'Maybe you are using builtin.sum instead of theano.tensor.sum? (Maybe .max?)') 'Maybe you are using builtin.sum instead of theano.tensor.sum? (Maybe .max?)')
...@@ -621,10 +624,10 @@ class _tensor_py_operators: ...@@ -621,10 +624,10 @@ class _tensor_py_operators:
return pow(pow(abs_(self), L).sum(axis=axis), 1.0/L) return pow(pow(abs_(self), L).sum(axis=axis), 1.0/L)
class TensorResult(Result, _tensor_py_operators): class NDArrayResult(Result, _tensor_py_operators):
"""Subclass to add the tensor operators to the basic `Result` class.""" """Subclass to add the tensor operators to the basic `Result` class."""
class TensorConstantSignature(tuple): class NDArrayConstantSignature(tuple):
def __eq__(self, other): def __eq__(self, other):
(a, b), (x,y) = self, other (a, b), (x,y) = self, other
#N.B. compare shape to ensure no broadcasting in == #N.B. compare shape to ensure no broadcasting in ==
...@@ -633,26 +636,33 @@ class TensorConstantSignature(tuple): ...@@ -633,26 +636,33 @@ class TensorConstantSignature(tuple):
a, b = self a, b = self
return hash(type(self)) ^ hash(a) ^ hash(b.shape) return hash(type(self)) ^ hash(a) ^ hash(b.shape)
class TensorConstant(Constant, _tensor_py_operators): class NDArrayConstant(Constant, _tensor_py_operators):
"""Subclass to add the tensor operators to the basic `Constant` class. """Subclass to add the tensor operators to the basic `Constant` class.
To create a TensorConstant, use the `constant` function in this module. To create a NDArrayConstant, use the `constant` function in this module.
""" """
def signature(self): def signature(self):
return TensorConstantSignature((self.type, self.data)) return NDArrayConstantSignature((self.type, self.data))
class TensorValue(Value, _tensor_py_operators): class NDArrayValue(Value, _tensor_py_operators):
"""Subclass to add the tensor operators to the basic `Value` class. """Subclass to add the tensor operators to the basic `Value` class.
To create a TensorValue, use the `value` function in this module. To create a NDArrayValue, use the `value` function in this module.
""" """
Tensor = NDArrayType
TensorResult = NDArrayResult
TensorConstant = NDArrayConstant
TensorValue = NDArrayValue
#QUESTION: why are we doing this!? #QUESTION: why are we doing this!?
elemwise.as_tensor = as_tensor elemwise.as_ndarray_result = as_ndarray_result
elemwise.Tensor = Tensor elemwise.NDArrayType = NDArrayType
elemwise.TensorResult = TensorResult elemwise.NDArrayResult = NDArrayResult
elemwise.TensorConstant = TensorConstant elemwise.NDArrayConstant = NDArrayConstant
elemwise.TensorValue = TensorValue elemwise.NDArrayValue = NDArrayValue
...@@ -724,7 +734,7 @@ def _scal_elemwise(symbol): ...@@ -724,7 +734,7 @@ def _scal_elemwise(symbol):
# Casting Operations # Casting Operations
######################### #########################
class TensorFromScalar(Op): class NDArrayFromScalar(Op):
def make_node(self, s): def make_node(self, s):
assert isinstance(s.type, scal.Scalar) assert isinstance(s.type, scal.Scalar)
return Apply(self, return Apply(self,
...@@ -734,12 +744,12 @@ class TensorFromScalar(Op): ...@@ -734,12 +744,12 @@ class TensorFromScalar(Op):
def perform(self, node, (s, ), (out, )): def perform(self, node, (s, ), (out, )):
out[0] = numpy.asarray(s) out[0] = numpy.asarray(s)
def grad(self, (s,), (dt,)): def grad(self, (s,), (dt,)):
return [ScalarFromTensor(dt)] return [ScalarFromNDArray(dt)]
tensor_from_scalar = TensorFromScalar() tensor_from_scalar = NDArrayFromScalar()
class ScalarFromTensor(Op): class ScalarFromNDArray(Op):
def make_node(self, t): def make_node(self, t):
assert isinstance(t.type, Tensor) assert isinstance(t.type, NDArrayType)
assert t.type.broadcastable == () assert t.type.broadcastable == ()
return Apply(self, return Apply(self,
[t], [t],
...@@ -747,8 +757,8 @@ class ScalarFromTensor(Op): ...@@ -747,8 +757,8 @@ class ScalarFromTensor(Op):
def perform(self, node, (s, ), (out, )): def perform(self, node, (s, ), (out, )):
out[0] = s.flatten()[0] out[0] = s.flatten()[0]
def grad(self, (s,), (dt,)): def grad(self, (s,), (dt,)):
return [TensorFromScalar(dt)] return [NDArrayFromScalar(dt)]
scalar_from_tensor = ScalarFromTensor() scalar_from_tensor = ScalarFromNDArray()
@constructor @constructor
...@@ -807,7 +817,7 @@ class Shape(Op): ...@@ -807,7 +817,7 @@ class Shape(Op):
@note: Non-differentiable. @note: Non-differentiable.
""" """
def make_node(self, x): def make_node(self, x):
x = as_tensor(x) x = as_ndarray_result(x)
return Apply(self, [x], [lvector()]) return Apply(self, [x], [lvector()])
def perform(self, node, (x, ), (out, )): def perform(self, node, (x, ), (out, )):
out[0] = numpy.asarray(x.shape, dtype = 'int64') out[0] = numpy.asarray(x.shape, dtype = 'int64')
...@@ -827,10 +837,10 @@ class MaxAndArgmax(Op): ...@@ -827,10 +837,10 @@ class MaxAndArgmax(Op):
E_axis = 'invalid axis' E_axis = 'invalid axis'
def make_node(self, x, axis=None): def make_node(self, x, axis=None):
x = _as_tensor(x) x = _as_ndarray_result(x)
if axis is None: if axis is None:
axis = x.type.ndim - 1 axis = x.type.ndim - 1
axis = _as_tensor(axis) axis = _as_ndarray_result(axis)
inputs = [x, axis] inputs = [x, axis]
broadcastable = [False] * (x.type.ndim - 1) broadcastable = [False] * (x.type.ndim - 1)
outputs = [tensor(x.type.dtype, broadcastable), outputs = [tensor(x.type.dtype, broadcastable),
...@@ -975,7 +985,7 @@ def invert(a): ...@@ -975,7 +985,7 @@ def invert(a):
def abs_(a): def abs_(a):
"""|`a`| """|`a`|
TensorResult overloads the `TensorResult.__abs__` operator so that NDArrayResult overloads the `NDArrayResult.__abs__` operator so that
this function is called when you type abs(a). this function is called when you type abs(a).
""" """
...@@ -1076,11 +1086,11 @@ class Filler(gof.Op): ...@@ -1076,11 +1086,11 @@ class Filler(gof.Op):
self.value = value self.value = value
self.ndim = ndim self.ndim = ndim
self.dtype = dtype self.dtype = dtype
self.type = Tensor(dtype = dtype, self.type = NDArrayType(dtype = dtype,
broadcastable = (False,)*ndim) broadcastable = (False,)*ndim)
def make_node(self, dims): def make_node(self, dims):
dims = as_tensor(dims) dims = as_ndarray_result(dims)
return gof.Apply(self, [dims], [self.type()]) return gof.Apply(self, [dims], [self.type()])
def perform(self, node, (dims,), (out,)): def perform(self, node, (dims,), (out,)):
...@@ -1165,10 +1175,10 @@ def mean(input, axis = None): ...@@ -1165,10 +1175,10 @@ def mean(input, axis = None):
class Repeat(gof.Op): class Repeat(gof.Op):
def make_node(self, input, repeats, axis): def make_node(self, input, repeats, axis):
assert isinstance(input.type, Tensor) assert isinstance(input.type, NDArrayType)
assert repeats.type == iscalar assert repeats.type == iscalar
assert axis.type == iscalar assert axis.type == iscalar
type = Tensor(dtype = input.type.dtype, type = NDArrayType(dtype = input.type.dtype,
broadcastable = [False if i==axis else x for i, x in enumerate(input.broadcastable)]) broadcastable = [False if i==axis else x for i, x in enumerate(input.broadcastable)])
return gof.Apply(self, [inputs, repeats, axis], [type()]) return gof.Apply(self, [inputs, repeats, axis], [type()])
...@@ -1293,9 +1303,9 @@ class Subtensor(Op): ...@@ -1293,9 +1303,9 @@ class Subtensor(Op):
self.idx_list = map(self.convert, idx_list) self.idx_list = map(self.convert, idx_list)
def make_node(self, x, *inputs): def make_node(self, x, *inputs):
x = as_tensor(x) x = as_ndarray_result(x)
def my_as_scalar(a): def my_as_scalar(a):
if isinstance(a, gof.Result) and isinstance(a.type, Tensor): if isinstance(a, gof.Result) and isinstance(a.type, NDArrayType):
return scalar_from_tensor(a) return scalar_from_tensor(a)
else: else:
return scal.as_scalar(a) return scal.as_scalar(a)
...@@ -1397,7 +1407,7 @@ pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Subtensor), S ...@@ -1397,7 +1407,7 @@ pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Subtensor), S
class SetSubtensor(Op): class SetSubtensor(Op):
"""Set just some elements of a larger Tensor. """Set just some elements of a larger NDArrayType.
This is like numpy's This is like numpy's
...@@ -1434,7 +1444,7 @@ class SetSubtensor(Op): ...@@ -1434,7 +1444,7 @@ class SetSubtensor(Op):
self.__class__.__name__, ", ".join(indices)) self.__class__.__name__, ", ".join(indices))
def make_node(self, x, y, *inputs): def make_node(self, x, y, *inputs):
x, y = map(as_tensor, [x, y]) x, y = map(as_ndarray_result, [x, y])
inputs = tuple(map(scal.as_scalar, inputs)) inputs = tuple(map(scal.as_scalar, inputs))
idx_list = list(self.idx_list) idx_list = list(self.idx_list)
...@@ -1487,7 +1497,7 @@ def split(x, splits_size, n_splits, axis=0): ...@@ -1487,7 +1497,7 @@ def split(x, splits_size, n_splits, axis=0):
return the_split(x, axis, splits_size) return the_split(x, axis, splits_size)
class Split(Op): class Split(Op):
"""Partition a `TensorResult` along some axis. """Partition a `NDArrayResult` along some axis.
.. python:: .. python::
...@@ -1523,9 +1533,9 @@ class Split(Op): ...@@ -1523,9 +1533,9 @@ class Split(Op):
def make_node(self, x, axis, splits): def make_node(self, x, axis, splits):
"""WRITEME""" """WRITEME"""
x = as_tensor(x) x = as_ndarray_result(x)
axis = as_tensor(axis) axis = as_ndarray_result(axis)
splits = as_tensor(splits) splits = as_ndarray_result(splits)
if splits.type not in int_vector_types: if splits.type not in int_vector_types:
raise TypeError('splits must have type tensor.lvector', splits.type) raise TypeError('splits must have type tensor.lvector', splits.type)
...@@ -1567,10 +1577,10 @@ class Split(Op): ...@@ -1567,10 +1577,10 @@ class Split(Op):
class Join(Op): class Join(Op):
""" """
Concatenate two `TensorResult`s along some axis. Concatenate two `NDArrayResult`s along some axis.
These tensors must have the same shape along all dimensions other than this axis. These tensors must have the same shape along all dimensions other than this axis.
Of course, TensorResult instances don't have a shape, so this error can't be caught until Of course, NDArrayResult instances don't have a shape, so this error can't be caught until
runtime. See `perform()`. runtime. See `perform()`.
For joins involving scalar values, see @stack. For joins involving scalar values, see @stack.
...@@ -1600,16 +1610,16 @@ class Join(Op): ...@@ -1600,16 +1610,16 @@ class Join(Op):
axis, tensors = axis_and_tensors[0], axis_and_tensors[1:] axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
if not tensors: if not tensors:
raise ValueError('Cannot join an empty list of tensors') raise ValueError('Cannot join an empty list of tensors')
as_tensor_args= [as_tensor(x) for x in tensors] as_ndarray_result_args= [as_ndarray_result(x) for x in tensors]
dtypes = [x.type.dtype for x in as_tensor_args] dtypes = [x.type.dtype for x in as_ndarray_result_args]
out_dtype = scal.upcast(*dtypes) out_dtype = scal.upcast(*dtypes)
if not all(targs.type.ndim for targs in as_tensor_args): if not all(targs.type.ndim for targs in as_ndarray_result_args):
raise TypeError('Join cannot handle arguments of dimension 0. For joining scalar values, see @stack'); raise TypeError('Join cannot handle arguments of dimension 0. For joining scalar values, see @stack');
# When the axis may vary, no dimension can be guaranteed to be # When the axis may vary, no dimension can be guaranteed to be
# broadcastable. # broadcastable.
bcastable = [False] * len(as_tensor_args[0].type.broadcastable) bcastable = [False] * len(as_ndarray_result_args[0].type.broadcastable)
# When the axis is fixed, the broadcastable dimensions remain, except # When the axis is fixed, the broadcastable dimensions remain, except
# for the axis dimension. # for the axis dimension.
...@@ -1617,17 +1627,17 @@ class Join(Op): ...@@ -1617,17 +1627,17 @@ class Join(Op):
# dimensions. # dimensions.
if isinstance(axis, int): if isinstance(axis, int):
bcasts = [x.type.broadcastable[0:axis] + \ bcasts = [x.type.broadcastable[0:axis] + \
x.type.broadcastable[axis + 1:] for x in as_tensor_args] x.type.broadcastable[axis + 1:] for x in as_ndarray_result_args]
if not all([bcasts[0] == bc for bc in bcasts[1:]]): if not all([bcasts[0] == bc for bc in bcasts[1:]]):
raise ValueError('Dimensions other than the given axis must' raise ValueError('Dimensions other than the given axis must'
' match', tensors) ' match', tensors)
bcastable[:] = as_tensor_args[0].type.broadcastable bcastable[:] = as_ndarray_result_args[0].type.broadcastable
try: try:
bcastable[axis] = False bcastable[axis] = False
except IndexError, e: except IndexError, e:
raise ValueError('Join argument "axis" is out of range (given input dimensions)') raise ValueError('Join argument "axis" is out of range (given input dimensions)')
inputs = [as_tensor(axis)] + as_tensor_args inputs = [as_ndarray_result(axis)] + as_ndarray_result_args
if inputs[0].type not in int_types: if inputs[0].type not in int_types:
raise TypeError('Axis could not be cast to an integer type', axis, inputs[0].type, int_types) raise TypeError('Axis could not be cast to an integer type', axis, inputs[0].type, int_types)
...@@ -1683,7 +1693,7 @@ class Join(Op): ...@@ -1683,7 +1693,7 @@ class Join(Op):
@_redefine_asRoutine(Join()) @_redefine_asRoutine(Join())
def join(axis, *tensors): def join(axis, *tensors):
""" """
Convenience function to concatenate `Tensor`s along the given axis. Convenience function to concatenate `NDArrayType`s along the given axis.
:Parameters: :Parameters:
- `tensors` : list of tensors (or list-like) - `tensors` : list of tensors (or list-like)
...@@ -1711,7 +1721,7 @@ def shape_padleft(t, n_ones=1): ...@@ -1711,7 +1721,7 @@ def shape_padleft(t, n_ones=1):
See also: `shape_padright` and `Dimshuffle` See also: `shape_padright` and `Dimshuffle`
""" """
_t = as_tensor(t) _t = as_ndarray_result(t)
pattern = ['x']*n_ones + [i for i in range(_t.type.ndim)] pattern = ['x']*n_ones + [i for i in range(_t.type.ndim)]
return DimShuffle(_t.broadcastable, pattern)(_t) return DimShuffle(_t.broadcastable, pattern)(_t)
...@@ -1722,7 +1732,7 @@ def shape_padright(t, n_ones=1): ...@@ -1722,7 +1732,7 @@ def shape_padright(t, n_ones=1):
See also: `shape_padleft` and `Dimshuffle` See also: `shape_padleft` and `Dimshuffle`
""" """
_t = as_tensor(t) _t = as_ndarray_result(t)
pattern = [i for i in range(_t.type.ndim)] + ['x']*n_ones pattern = [i for i in range(_t.type.ndim)] + ['x']*n_ones
return DimShuffle(_t.broadcastable, pattern)(_t) return DimShuffle(_t.broadcastable, pattern)(_t)
...@@ -1759,7 +1769,7 @@ def get_vector_length(v): ...@@ -1759,7 +1769,7 @@ def get_vector_length(v):
"""Return the run-time length of a symbolic vector. """Return the run-time length of a symbolic vector.
:Parameters: :Parameters:
- `v` : A rank-1 Tensor result. - `v` : A rank-1 NDArrayType result.
:Exceptions: :Exceptions:
- `TypeError` : `v` hasn't the proper type. - `TypeError` : `v` hasn't the proper type.
...@@ -1788,9 +1798,9 @@ def get_vector_length(v): ...@@ -1788,9 +1798,9 @@ def get_vector_length(v):
@constructor @constructor
def horizontal_stack(*args): def horizontal_stack(*args):
""" """
Horizontally stack two L{Tensor}s. Horizontally stack two L{NDArrayType}s.
Stack two L{Tensor}s along the second axis (column wise). These Stack two L{NDArrayType}s along the second axis (column wise). These
L{Tensor}s must have the same shape along all dimensions but the L{NDArrayType}s must have the same shape along all dimensions but the
second. second.
""" """
assert len(args) >= 2 assert len(args) >= 2
...@@ -1806,17 +1816,17 @@ def vertical_stack(*args): ...@@ -1806,17 +1816,17 @@ def vertical_stack(*args):
if 0: #vertical and horizontal stacking are deprecated. Better to use stack() and join(). if 0: #vertical and horizontal stacking are deprecated. Better to use stack() and join().
class VerticalStack(Op): class VerticalStack(Op):
""" """
Vertically stack two L{Tensor}s. Vertically stack two L{NDArrayType}s.
Stack two L{Tensor}s along the first axis (row wise). These Stack two L{NDArrayType}s along the first axis (row wise). These
L{Tensor}s must have the same shape along all dimensions but the L{NDArrayType}s must have the same shape along all dimensions but the
first. first.
@attention: Because we use vstack as the implementation, if the @attention: Because we use vstack as the implementation, if the
inputs have 1-dimension, the output will have 2-dimensions. inputs have 1-dimension, the output will have 2-dimensions.
""" """
def make_node(self, x, y): def make_node(self, x, y):
x = as_tensor(x) x = as_ndarray_result(x)
y = as_tensor(y) y = as_ndarray_result(y)
assert x.type.dtype == y.type.dtype assert x.type.dtype == y.type.dtype
if x.type.broadcastable[1:] != y.type.broadcastable[1:]: if x.type.broadcastable[1:] != y.type.broadcastable[1:]:
raise NotImplementedError raise NotImplementedError
...@@ -1853,9 +1863,9 @@ class MakeVector(Op): ...@@ -1853,9 +1863,9 @@ class MakeVector(Op):
def __init__(self, stype): def __init__(self, stype):
self.stype = stype self.stype = stype
def make_node(self, *inputs): def make_node(self, *inputs):
inputs = map(as_tensor, inputs) inputs = map(as_ndarray_result, inputs)
assert all(a.type == self.stype for a in inputs) assert all(a.type == self.stype for a in inputs)
return Apply(self, inputs, [Tensor(broadcastable = (False,), return Apply(self, inputs, [NDArrayType(broadcastable = (False,),
dtype = self.stype.dtype)()]) dtype = self.stype.dtype)()])
def perform(self, node, inputs, (out,)): def perform(self, node, inputs, (out,)):
out[0] = numpy.asarray(inputs) out[0] = numpy.asarray(inputs)
...@@ -1891,8 +1901,8 @@ class Reshape(Op): ...@@ -1891,8 +1901,8 @@ class Reshape(Op):
def __hash__(self): def __hash__(self):
return hash(Reshape) ^ hash(self.ndim) return hash(Reshape) ^ hash(self.ndim)
def make_node(self, x, shp): def make_node(self, x, shp):
x = as_tensor(x) x = as_ndarray_result(x)
shp = as_tensor(shp) shp = as_ndarray_result(shp)
return gof.Apply(self, [x, shp], [tensor(x.type.dtype, [False]*self.ndim)]) return gof.Apply(self, [x, shp], [tensor(x.type.dtype, [False]*self.ndim)])
def perform(self, node, (x, shp), (out,)): def perform(self, node, (x, shp), (out,)):
if (len(shp) != self.ndim): if (len(shp) != self.ndim):
...@@ -1928,7 +1938,7 @@ class Flatten(Op): ...@@ -1928,7 +1938,7 @@ class Flatten(Op):
def __hash__(self): def __hash__(self):
return hash(type(self))^hash(self.outdim) return hash(type(self))^hash(self.outdim)
def make_node(self, x): def make_node(self, x):
t_x = as_tensor(x) t_x = as_ndarray_result(x)
if self.outdim < 1 or (x.ndim and self.outdim > x.ndim): if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):
raise ValueError('invalid output ndimensions(%i) for tensor of rank %i' %(self.outdim, t_x.ndim)) raise ValueError('invalid output ndimensions(%i) for tensor of rank %i' %(self.outdim, t_x.ndim))
return gof.Apply(self, [t_x], [tensor(x.type.dtype, (False,)*self.outdim)]) return gof.Apply(self, [t_x], [tensor(x.type.dtype, (False,)*self.outdim)])
...@@ -1974,8 +1984,8 @@ class Tile(Op): ...@@ -1974,8 +1984,8 @@ class Tile(Op):
return hash(Tile) ^ hash(self.ndim) return hash(Tile) ^ hash(self.ndim)
def make_node(self, x, reps): def make_node(self, x, reps):
x = as_tensor(x) x = as_ndarray_result(x)
reps = as_tensor(reps) reps = as_ndarray_result(reps)
return gof.Apply(self, [x, reps], [tensor(x.type.dtype, [False,] * self.ndim)]) return gof.Apply(self, [x, reps], [tensor(x.type.dtype, [False,] * self.ndim)])
def perform(self, node, (x, reps), (out,)): def perform(self, node, (x, reps), (out,)):
out[0] = numpy.tile(x, reps) out[0] = numpy.tile(x, reps)
...@@ -2007,7 +2017,7 @@ class Dot(Op): ...@@ -2007,7 +2017,7 @@ class Dot(Op):
""" """
def make_node(self, *inputs): def make_node(self, *inputs):
inputs = map(as_tensor, inputs) inputs = map(as_ndarray_result, inputs)
numpy_semantics = 0 numpy_semantics = 0
if numpy_semantics: if numpy_semantics:
...@@ -2128,7 +2138,7 @@ class TensorDot(Op): ...@@ -2128,7 +2138,7 @@ class TensorDot(Op):
def make_node(self, x, y): def make_node(self, x, y):
axesdim = numpy.size(self.axes)/2 axesdim = numpy.size(self.axes)/2
x, y = map(as_tensor, [x, y]) x, y = map(as_ndarray_result, [x, y])
if axesdim > x.type.ndim or axesdim > y.type.ndim: if axesdim > x.type.ndim or axesdim > y.type.ndim:
raise TypeError('Cannot sum over more dimensions than input. %i > %i,%i' % raise TypeError('Cannot sum over more dimensions than input. %i > %i,%i' %
...@@ -2159,7 +2169,7 @@ class Outer(Op): ...@@ -2159,7 +2169,7 @@ class Outer(Op):
""" Compute vector-vector outer product """ Compute vector-vector outer product
""" """
def make_node(self, *inputs): def make_node(self, *inputs):
inputs = map(as_tensor, inputs) inputs = map(as_ndarray_result, inputs)
x, y = inputs x, y = inputs
nx = x.type.ndim nx = x.type.ndim
...@@ -2199,12 +2209,12 @@ def grad(cost, wrt, g_cost=None, consider_constant=[]): ...@@ -2199,12 +2209,12 @@ def grad(cost, wrt, g_cost=None, consider_constant=[]):
@return: symbolic expression of gradient of I{cost} with respect to I{wrt}. @return: symbolic expression of gradient of I{cost} with respect to I{wrt}.
If I{wrt} is a list, then return a list containing the gradient of I{cost} wrt If I{wrt} is a list, then return a list containing the gradient of I{cost} wrt
each element of the list. If an element of I{wrt} is not differentiable each element of the list. If an element of I{wrt} is not differentiable
with respect to the output, then a L{TensorConstant} with an appropriate with respect to the output, then a L{NDArrayConstant} with an appropriate
kind of zero is returned. kind of zero is returned.
""" """
if not isinstance(cost, TensorResult): if not isinstance(cost, NDArrayResult):
raise TypeError('In tensor.grad(), cost argument should be a TensorResult.', cost) raise TypeError('In tensor.grad(), cost argument should be a NDArrayResult.', cost)
if g_cost is None: if g_cost is None:
g_cost = ones_like(cost) g_cost = ones_like(cost)
...@@ -2212,8 +2222,8 @@ def grad(cost, wrt, g_cost=None, consider_constant=[]): ...@@ -2212,8 +2222,8 @@ def grad(cost, wrt, g_cost=None, consider_constant=[]):
gmap = gradient.grad_sources_inputs([(cost, g_cost)], inputs + consider_constant) gmap = gradient.grad_sources_inputs([(cost, g_cost)], inputs + consider_constant)
def zero(p): def zero(p):
return TensorConstant( return NDArrayConstant(
Tensor(dtype = p.type.dtype, broadcastable = []), NDArrayType(dtype = p.type.dtype, broadcastable = []),
numpy.asarray(0, dtype=p.type.dtype)) numpy.asarray(0, dtype=p.type.dtype))
try: try:
...@@ -2345,7 +2355,7 @@ def verify_grad(testcase, op, pt, n_tests=1, rng=numpy.random, eps=1.0e-7, tol=0 ...@@ -2345,7 +2355,7 @@ def verify_grad(testcase, op, pt, n_tests=1, rng=numpy.random, eps=1.0e-7, tol=0
o_fn_out = o_fn(*[p.copy() for p in pt]) o_fn_out = o_fn(*[p.copy() for p in pt])
#print "PT C", pt #print "PT C", pt
random_projection = rng.rand(*o_fn_out.shape) random_projection = rng.rand(*o_fn_out.shape)
t_r = as_tensor(random_projection) t_r = as_ndarray_result(random_projection)
#random projection of o onto t_r #random projection of o onto t_r
cost = sum(t_r * o_output) #This sum() is defined above, it's not the builtin sum. cost = sum(t_r * o_output) #This sum() is defined above, it's not the builtin sum.
...@@ -2353,7 +2363,7 @@ def verify_grad(testcase, op, pt, n_tests=1, rng=numpy.random, eps=1.0e-7, tol=0 ...@@ -2353,7 +2363,7 @@ def verify_grad(testcase, op, pt, n_tests=1, rng=numpy.random, eps=1.0e-7, tol=0
num_grad = numeric_grad(cost_fn, [p.copy() for p in pt], eps) num_grad = numeric_grad(cost_fn, [p.copy() for p in pt], eps)
symbolic_grad = grad(cost, tensor_pt,as_tensor(1.0,name='g_cost')) symbolic_grad = grad(cost, tensor_pt,as_ndarray_result(1.0,name='g_cost'))
if 0: if 0:
print '----------' print '----------'
......
...@@ -275,7 +275,7 @@ class Gemm(GemmRelated): ...@@ -275,7 +275,7 @@ class Gemm(GemmRelated):
E_z_uniq = 'argument z aliased to x or y' E_z_uniq = 'argument z aliased to x or y'
destroy_map = {0: [0]} destroy_map = {0: [0]}
def make_node(self, *inputs): def make_node(self, *inputs):
inputs = map(T.as_tensor, inputs) inputs = map(T.as_ndarray_result, inputs)
if len(inputs) != 5: if len(inputs) != 5:
raise TypeError("Wrong number of inputs for %s (expected 5, got %s)" % (self, len(inputs))) raise TypeError("Wrong number of inputs for %s (expected 5, got %s)" % (self, len(inputs)))
z, a, x, y, b = inputs z, a, x, y, b = inputs
...@@ -475,7 +475,7 @@ class GemmLocalOptimizer(LocalOptimizer): ...@@ -475,7 +475,7 @@ class GemmLocalOptimizer(LocalOptimizer):
@staticmethod @staticmethod
def _as_scalar(res): def _as_scalar(res):
"""Return None or a TensorResult whose type is in T.float_scalar_types""" """Return None or a NDArrayResult whose type is in T.float_scalar_types"""
if res.owner and isinstance(res.owner.op, T.DimShuffle): if res.owner and isinstance(res.owner.op, T.DimShuffle):
return GemmLocalOptimizer._as_scalar(res.owner.inputs[0]) return GemmLocalOptimizer._as_scalar(res.owner.inputs[0])
elif res.type in T.float_scalar_types: elif res.type in T.float_scalar_types:
......
...@@ -13,18 +13,18 @@ from copy import copy, deepcopy ...@@ -13,18 +13,18 @@ from copy import copy, deepcopy
# tensor depends on elemwise to provide definitions for several ops # tensor depends on elemwise to provide definitions for several ops
# but elemwise needs to make Tensor instances, so we have these as # but elemwise needs to make NDArrayType instances, so we have these as
# placeholders and the tensor module fills them # placeholders and the tensor module fills them
def as_tensor(data): def as_ndarray_result(data):
raise Exception("Circular dependencies prevent using this here. import tensor before elemwise") raise Exception("Circular dependencies prevent using this here. import tensor before elemwise")
def Tensor(*inputs, **kwargs): def NDArrayType(*inputs, **kwargs):
raise Exception("Circular dependencies prevent using this here. import tensor before elemwise") raise Exception("Circular dependencies prevent using this here. import tensor before elemwise")
def TensorResult(*inputs, **kwargs): def NDArrayResult(*inputs, **kwargs):
raise Exception("Circular dependencies prevent using this here. import tensor before elemwise") raise Exception("Circular dependencies prevent using this here. import tensor before elemwise")
def TensorConstant(*inputs, **kwargs): def NDArrayConstant(*inputs, **kwargs):
raise Exception("Circular dependencies prevent using this here. import tensor before elemwise") raise Exception("Circular dependencies prevent using this here. import tensor before elemwise")
...@@ -137,7 +137,7 @@ class DimShuffle(Op): ...@@ -137,7 +137,7 @@ class DimShuffle(Op):
else: else:
ob.append(ib[value]) ob.append(ib[value])
output = Tensor(dtype = input.type.dtype, output = NDArrayType(dtype = input.type.dtype,
broadcastable = ob).make_result() broadcastable = ob).make_result()
return Apply(self, [input], [output]) return Apply(self, [input], [output])
...@@ -256,7 +256,7 @@ class DimShuffle(Op): ...@@ -256,7 +256,7 @@ class DimShuffle(Op):
return full_code % dict(locals(), **sub) return full_code % dict(locals(), **sub)
def grad(self, (x, ), (gz, )): def grad(self, (x, ), (gz, )):
gz = as_tensor(gz) gz = as_ndarray_result(gz)
grad_order = ['x'] * len(x.type.broadcastable) grad_order = ['x'] * len(x.type.broadcastable)
for i, v in enumerate(self.new_order): for i, v in enumerate(self.new_order):
if v != 'x': if v != 'x':
...@@ -365,7 +365,7 @@ class Elemwise(Op): ...@@ -365,7 +365,7 @@ class Elemwise(Op):
using DimShuffle. using DimShuffle.
""" """
inputs = map(as_tensor, inputs) inputs = map(as_ndarray_result, inputs)
shadow = self.scalar_op.make_node(*[Scalar(dtype = t.type.dtype)() for t in inputs]) shadow = self.scalar_op.make_node(*[Scalar(dtype = t.type.dtype)() for t in inputs])
target_length = max([input.type.ndim for input in inputs]) target_length = max([input.type.ndim for input in inputs])
...@@ -403,7 +403,7 @@ class Elemwise(Op): ...@@ -403,7 +403,7 @@ class Elemwise(Op):
if any(inputs[i].type.dtype != out_dtypes[o] for o, i in inplace_pattern.items()): if any(inputs[i].type.dtype != out_dtypes[o] for o, i in inplace_pattern.items()):
raise TypeError("Cannot do an inplace operation on incompatible data types.", raise TypeError("Cannot do an inplace operation on incompatible data types.",
([i.type.dtype for i in inputs], out_dtypes, inplace_pattern)) ([i.type.dtype for i in inputs], out_dtypes, inplace_pattern))
outputs = [Tensor(dtype = dtype, broadcastable = broadcastable)() for dtype, broadcastable in zip(out_dtypes, out_broadcastables)] outputs = [NDArrayType(dtype = dtype, broadcastable = broadcastable)() for dtype, broadcastable in zip(out_dtypes, out_broadcastables)]
return Apply(self, inputs, outputs) return Apply(self, inputs, outputs)
def __eq__(self, other): def __eq__(self, other):
...@@ -431,7 +431,7 @@ class Elemwise(Op): ...@@ -431,7 +431,7 @@ class Elemwise(Op):
return self.name return self.name
def grad(self, inputs, ograds): def grad(self, inputs, ograds):
ograds = map(as_tensor, ograds) # this shouldn't be necessary... ograds = map(as_ndarray_result, ograds) # this shouldn't be necessary...
scalar_inputs = [Scalar(dtype = t.type.dtype)() for t in inputs] scalar_inputs = [Scalar(dtype = t.type.dtype)() for t in inputs]
scalar_ograds = [Scalar(dtype = ograd.type.dtype)() for ograd in ograds] scalar_ograds = [Scalar(dtype = ograd.type.dtype)() for ograd in ograds]
scalar_igrads = self.scalar_op.grad(scalar_inputs, scalar_ograds) scalar_igrads = self.scalar_op.grad(scalar_inputs, scalar_ograds)
...@@ -445,8 +445,8 @@ class Elemwise(Op): ...@@ -445,8 +445,8 @@ class Elemwise(Op):
node = r.owner node = r.owner
if node is None: if node is None:
# the gradient contains a constant, translate it as # the gradient contains a constant, translate it as
# an equivalent Tensor of size 1 and proper number of dimensions # an equivalent NDArrayType of size 1 and proper number of dimensions
res = TensorConstant(Tensor(dtype = r.type.dtype, res = NDArrayConstant(NDArrayType(dtype = r.type.dtype,
broadcastable = ()), broadcastable = ()),
numpy.asarray(r.data)) # .reshape(b) numpy.asarray(r.data)) # .reshape(b)
return DimShuffle((), ['x']*nd, inplace = True)(res) return DimShuffle((), ['x']*nd, inplace = True)(res)
...@@ -678,12 +678,12 @@ class CAReduce(Op): ...@@ -678,12 +678,12 @@ class CAReduce(Op):
self.ufunc = numpy.frompyfunc(scalar_op.impl, 2, 1) self.ufunc = numpy.frompyfunc(scalar_op.impl, 2, 1)
def make_node(self, input): def make_node(self, input):
input = as_tensor(input) input = as_ndarray_result(input)
axis = self.axis axis = self.axis
if axis is None: if axis is None:
axis = range(len(input.type.broadcastable)) axis = range(len(input.type.broadcastable))
output = Tensor(dtype = input.type.dtype, output = NDArrayType(dtype = input.type.dtype,
broadcastable = [x for i, x in enumerate(input.type.broadcastable) if i not in axis])() broadcastable = [x for i, x in enumerate(input.type.broadcastable) if i not in axis])()
return Apply(self, [input], [output]) return Apply(self, [input], [output])
def __getstate__(self): def __getstate__(self):
...@@ -809,7 +809,7 @@ class Sum(CAReduce): ...@@ -809,7 +809,7 @@ class Sum(CAReduce):
CAReduce.__init__(self, scalar.add, axis) CAReduce.__init__(self, scalar.add, axis)
def grad(self, (x, ), (gz, )): def grad(self, (x, ), (gz, )):
gz = as_tensor(gz) gz = as_ndarray_result(gz)
axis = self.axis axis = self.axis
if axis is None: if axis is None:
axis = range(x.type.ndim) axis = range(x.type.ndim)
......
...@@ -94,8 +94,8 @@ class SoftmaxWithBias(gof.Op): ...@@ -94,8 +94,8 @@ class SoftmaxWithBias(gof.Op):
gof.Op.__init__(self, **kwargs) gof.Op.__init__(self, **kwargs)
def make_node(self, x, b): def make_node(self, x, b):
x = tensor.as_tensor(x) x = tensor.as_ndarray_result(x)
b = tensor.as_tensor(b) b = tensor.as_ndarray_result(b)
if x.type.ndim != 2 \ if x.type.ndim != 2 \
or x.type.dtype not in ['float32', 'float64']: or x.type.dtype not in ['float32', 'float64']:
raise ValueError('x must be 2-d tensor of floats') raise ValueError('x must be 2-d tensor of floats')
...@@ -263,8 +263,8 @@ class SoftmaxWithBiasDx(gof.Op): ...@@ -263,8 +263,8 @@ class SoftmaxWithBiasDx(gof.Op):
gof.Op.__init__(self, **kwargs) gof.Op.__init__(self, **kwargs)
def make_node(self, dy, sm, **kwargs): def make_node(self, dy, sm, **kwargs):
dy = tensor.as_tensor(dy) dy = tensor.as_ndarray_result(dy)
sm = tensor.as_tensor(sm) sm = tensor.as_ndarray_result(sm)
return gof.Apply(self, [dy, sm], [sm.type.make_result()]) return gof.Apply(self, [dy, sm], [sm.type.make_result()])
def perform(self, node, input_storage, output_storage): def perform(self, node, input_storage, output_storage):
...@@ -368,9 +368,9 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op): ...@@ -368,9 +368,9 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op):
gof.Op.__init__(self, **kwargs) gof.Op.__init__(self, **kwargs)
def make_node(self, x, b, y_idx): def make_node(self, x, b, y_idx):
x = tensor.as_tensor(x) x = tensor.as_ndarray_result(x)
b = tensor.as_tensor(b) b = tensor.as_ndarray_result(b)
y_idx = tensor.as_tensor(y_idx) y_idx = tensor.as_ndarray_result(y_idx)
if x.type.ndim != 2 \ if x.type.ndim != 2 \
or x.type.dtype not in ['float32', 'float64']: or x.type.dtype not in ['float32', 'float64']:
raise ValueError('x must be 2-d tensor of floats') raise ValueError('x must be 2-d tensor of floats')
...@@ -382,9 +382,9 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op): ...@@ -382,9 +382,9 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op):
raise ValueError('y_idx must be 1-d tensor of ints') raise ValueError('y_idx must be 1-d tensor of ints')
# TODO: Is this correct? It used to be y, not y_idx # TODO: Is this correct? It used to be y, not y_idx
nll = tensor.Tensor(x.type.dtype, nll = tensor.NDArrayType(x.type.dtype,
y_idx.type.broadcastable).make_result() y_idx.type.broadcastable).make_result()
# nll = Tensor(x.dtype, y.broadcastable) # nll = NDArrayType(x.dtype, y.broadcastable)
sm = x.type.make_result() sm = x.type.make_result()
am = y_idx.type.make_result() am = y_idx.type.make_result()
return gof.Apply(self, [x, b, y_idx], [nll, sm, am]) return gof.Apply(self, [x, b, y_idx], [nll, sm, am])
...@@ -532,9 +532,9 @@ class CrossentropySoftmax1HotWithBiasDx (gof.Op): ...@@ -532,9 +532,9 @@ class CrossentropySoftmax1HotWithBiasDx (gof.Op):
def __init__(self, **kwargs): def __init__(self, **kwargs):
gof.Op.__init__(self,**kwargs) gof.Op.__init__(self,**kwargs)
def make_node(self, dy, sm, y_idx,**kwargs): def make_node(self, dy, sm, y_idx,**kwargs):
dy = tensor.as_tensor(dy) dy = tensor.as_ndarray_result(dy)
sm = tensor.as_tensor(sm) sm = tensor.as_ndarray_result(sm)
y_idx = tensor.as_tensor(y_idx) y_idx = tensor.as_ndarray_result(y_idx)
return gof.Apply(self, [dy, sm, y_idx],[sm.type.make_result()]) return gof.Apply(self, [dy, sm, y_idx],[sm.type.make_result()])
def perform(self, node, input_storage, output_storage): def perform(self, node, input_storage, output_storage):
dy,sm,y_idx = input_storage dy,sm,y_idx = input_storage
...@@ -672,8 +672,8 @@ class Prepend_scalar_constant_to_each_row(gof.Op): ...@@ -672,8 +672,8 @@ class Prepend_scalar_constant_to_each_row(gof.Op):
#check type of input #check type of input
if not isinstance(mat,gof.Result) or not mat.type==tensor.matrix().type: if not isinstance(mat,gof.Result) or not mat.type==tensor.matrix().type:
raise TypeError("Expected a matrix as input") raise TypeError("Expected a matrix as input")
x = tensor.as_tensor(mat) x = tensor.as_ndarray_result(mat)
y = tensor.as_tensor(self.val) y = tensor.as_ndarray_result(self.val)
if x.type.dtype != y.type.dtype: if x.type.dtype != y.type.dtype:
TypeError("the value to prepend don't have the same type as the matrix") TypeError("the value to prepend don't have the same type as the matrix")
...@@ -706,8 +706,8 @@ class Prepend_scalar_to_each_row(gof.Op): ...@@ -706,8 +706,8 @@ class Prepend_scalar_to_each_row(gof.Op):
val = scalar.constant(val) val = scalar.constant(val)
if not isinstance(mat,gof.Result) or not mat.type==tensor.matrix().type: if not isinstance(mat,gof.Result) or not mat.type==tensor.matrix().type:
raise TypeError("Expected a matrix as input") raise TypeError("Expected a matrix as input")
x = tensor.as_tensor(mat) x = tensor.as_ndarray_result(mat)
y = tensor.as_tensor(val) y = tensor.as_ndarray_result(val)
if x.type.dtype != y.type.dtype: if x.type.dtype != y.type.dtype:
TypeError("the value to prepend don't have the same type as the matrix") TypeError("the value to prepend don't have the same type as the matrix")
......
...@@ -534,7 +534,7 @@ class Canonizer(gof.LocalOptimizer): ...@@ -534,7 +534,7 @@ class Canonizer(gof.LocalOptimizer):
ln, ld = len(num), len(denum) ln, ld = len(num), len(denum)
if not ln and not ld: if not ln and not ld:
return T.as_tensor(self.calculate([], [])) return T.as_ndarray_result(self.calculate([], []))
if not ln: if not ln:
if self.use_reciprocal: if self.use_reciprocal:
return self.reciprocal(self.merge_num_denum(denum, [])) return self.reciprocal(self.merge_num_denum(denum, []))
...@@ -545,7 +545,7 @@ class Canonizer(gof.LocalOptimizer): ...@@ -545,7 +545,7 @@ class Canonizer(gof.LocalOptimizer):
if isinstance(num[0], gof.Result): if isinstance(num[0], gof.Result):
return num[0] return num[0]
else: else:
return T.as_tensor(num[0]) return T.as_ndarray_result(num[0])
else: else:
return self.main(*num) return self.main(*num)
return self.inverse(self.merge_num_denum(num, []), return self.inverse(self.merge_num_denum(num, []),
...@@ -844,7 +844,7 @@ def local_mul_specialize(node): ...@@ -844,7 +844,7 @@ def local_mul_specialize(node):
if len(new_inputs) < len(node.inputs): if len(new_inputs) < len(node.inputs):
if len(new_inputs) == 0: if len(new_inputs) == 0:
newval = -y.flatten()[0] if neg else y.flatten()[0] newval = -y.flatten()[0] if neg else y.flatten()[0]
return [T.TensorConstant(T.Tensor(dtype=node.outputs[0].type.dtype, return [T.NDArrayConstant(T.NDArrayType(dtype=node.outputs[0].type.dtype,
broadcastable = [True] * node.outputs[0].ndim), N.asarray(newval))] broadcastable = [True] * node.outputs[0].ndim), N.asarray(newval))]
if len(new_inputs) == 1: if len(new_inputs) == 1:
......
...@@ -131,7 +131,7 @@ class RandomStreams(Component): ...@@ -131,7 +131,7 @@ class RandomStreams(Component):
:returns: The symbolic random draw part of op()'s return value. This function stores :returns: The symbolic random draw part of op()'s return value. This function stores
the updated RandomStateType Result for use at `build` time. the updated RandomStateType Result for use at `build` time.
:rtype: TensorResult :rtype: NDArrayResult
""" """
random_state_result = raw_random.random_state_type() random_state_result = raw_random.random_state_type()
new_r, out = op(random_state_result, *args, **kwargs) new_r, out = op(random_state_result, *args, **kwargs)
......
...@@ -87,7 +87,7 @@ class RandomFunction(gof.Op): ...@@ -87,7 +87,7 @@ class RandomFunction(gof.Op):
fn, outtype, args, kwargs = state fn, outtype, args, kwargs = state
self.fn = getattr(numpy.random.RandomState, fn) if isinstance(fn, str) else fn self.fn = getattr(numpy.random.RandomState, fn) if isinstance(fn, str) else fn
self.outtype = outtype self.outtype = outtype
self.args = tuple(tensor.as_tensor(arg) for arg in args) self.args = tuple(tensor.as_ndarray_result(arg) for arg in args)
self.inplace = kwargs.pop('inplace', False) self.inplace = kwargs.pop('inplace', False)
if self.inplace: if self.inplace:
self.destroy_map = {0: [0]} self.destroy_map = {0: [0]}
...@@ -103,7 +103,7 @@ class RandomFunction(gof.Op): ...@@ -103,7 +103,7 @@ class RandomFunction(gof.Op):
:param args: the values associated with these results will be passed to the RandomState :param args: the values associated with these results will be passed to the RandomState
function during perform as extra "*args"-style arguments. These should be castable to function during perform as extra "*args"-style arguments. These should be castable to
results of Type Tensor. results of Type NDArrayType.
:rtype: Apply :rtype: Apply
...@@ -115,7 +115,7 @@ class RandomFunction(gof.Op): ...@@ -115,7 +115,7 @@ class RandomFunction(gof.Op):
if shape == () or shape == []: if shape == () or shape == []:
shape = tensor.lvector() shape = tensor.lvector()
else: else:
shape = tensor.as_tensor(shape, ndim=1) shape = tensor.as_ndarray_result(shape, ndim=1)
#print 'SHAPE TYPE', shape.type, tensor.lvector #print 'SHAPE TYPE', shape.type, tensor.lvector
assert shape.type.ndim == 1 assert shape.type.ndim == 1
assert (shape.type.dtype == 'int64') or (shape.type.dtype == 'int32') assert (shape.type.dtype == 'int64') or (shape.type.dtype == 'int32')
...@@ -127,9 +127,9 @@ class RandomFunction(gof.Op): ...@@ -127,9 +127,9 @@ class RandomFunction(gof.Op):
# shape.type # shape.type
# assert shape.type == tensor.lvector # assert shape.type == tensor.lvector
# convert args to Tensor instances # convert args to NDArrayType instances
# and append enough None's to match the length of self.args # and append enough None's to match the length of self.args
args = map(tensor.as_tensor, args) args = map(tensor.as_ndarray_result, args)
if len(args) > len(self.args): if len(args) > len(self.args):
raise TypeError('Too many args for this kind of random generator') raise TypeError('Too many args for this kind of random generator')
args += (None,) * (len(self.args) - len(args)) args += (None,) * (len(self.args) - len(args))
...@@ -202,14 +202,14 @@ def random_function(fn, dtype, *rfargs, **rfkwargs): ...@@ -202,14 +202,14 @@ def random_function(fn, dtype, *rfargs, **rfkwargs):
else: else:
r, shape, args = ndim, args[0], args[1:] r, shape, args = ndim, args[0], args[1:]
if shape == () or shape == []: if shape == () or shape == []:
shape = tensor.TensorConstant(type = tensor.lvector, data = shape) shape = tensor.NDArrayConstant(type = tensor.lvector, data = shape)
else: else:
shape = tensor.as_tensor(shape) shape = tensor.as_ndarray_result(shape)
ndim = tensor.get_vector_length(shape) ndim = tensor.get_vector_length(shape)
if ndim is None: if ndim is None:
raise ValueError('Cannot infer the number of dimensions from the shape argument.') raise ValueError('Cannot infer the number of dimensions from the shape argument.')
# note: rf could be cached for future use # note: rf could be cached for future use
rf = RandomFunction(fn, tensor.Tensor(dtype = dtype, broadcastable = (False,)*ndim), *rfargs, **rfkwargs) rf = RandomFunction(fn, tensor.NDArrayType(dtype = dtype, broadcastable = (False,)*ndim), *rfargs, **rfkwargs)
return rf(r, shape, *args, **kwargs) return rf(r, shape, *args, **kwargs)
return f return f
......
...@@ -595,7 +595,7 @@ class T_Shape(unittest.TestCase): ...@@ -595,7 +595,7 @@ class T_Shape(unittest.TestCase):
class T_Cast(unittest.TestCase): class T_Cast(unittest.TestCase):
def test_basic(self): def test_basic(self):
for type1 in ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']: for type1 in ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']:
x = Tensor(dtype = type1, broadcastable = (False, )).make_result() x = NDArrayType(dtype = type1, broadcastable = (False, )).make_result()
for type2, converter in zip(['int8', 'int16', 'int32', 'int64', 'float32', 'float64'], for type2, converter in zip(['int8', 'int16', 'int32', 'int64', 'float32', 'float64'],
[convert_to_int8, convert_to_int16, convert_to_int32, convert_to_int64, [convert_to_int8, convert_to_int16, convert_to_int32, convert_to_int64,
convert_to_float32, convert_to_float64]): convert_to_float32, convert_to_float64]):
...@@ -611,51 +611,51 @@ class T_max_and_argmax(unittest.TestCase): ...@@ -611,51 +611,51 @@ class T_max_and_argmax(unittest.TestCase):
MaxAndArgmax.debug = 0 MaxAndArgmax.debug = 0
def test0(self): def test0(self):
n = as_tensor(5.0) n = as_ndarray_result(5.0)
v,i = eval_outputs(max_and_argmax(n)) v,i = eval_outputs(max_and_argmax(n))
self.failUnless(v == 5.0) self.failUnless(v == 5.0)
self.failUnless(i == 0) self.failUnless(i == 0)
def test1(self): def test1(self):
n = as_tensor([1,2,3,2,-6]) n = as_ndarray_result([1,2,3,2,-6])
v,i = eval_outputs(max_and_argmax(n)) v,i = eval_outputs(max_and_argmax(n))
self.failUnless(v == 3) self.failUnless(v == 3)
self.failUnless(i == 2) self.failUnless(i == 2)
def test2(self): def test2(self):
data = numpy.random.rand(2,3) data = numpy.random.rand(2,3)
n = as_tensor(data) n = as_ndarray_result(data)
v,i = eval_outputs(max_and_argmax(n)) v,i = eval_outputs(max_and_argmax(n))
self.failUnless(numpy.all(v == numpy.max(data,-1))) self.failUnless(numpy.all(v == numpy.max(data,-1)))
self.failUnless(numpy.all(i == numpy.argmax(data,-1))) self.failUnless(numpy.all(i == numpy.argmax(data,-1)))
def test2b(self): def test2b(self):
data = numpy.random.rand(2,3) data = numpy.random.rand(2,3)
n = as_tensor(data) n = as_ndarray_result(data)
v,i = eval_outputs(max_and_argmax(n,0)) v,i = eval_outputs(max_and_argmax(n,0))
self.failUnless(numpy.all(v == numpy.max(data,0))) self.failUnless(numpy.all(v == numpy.max(data,0)))
self.failUnless(numpy.all(i == numpy.argmax(data,0))) self.failUnless(numpy.all(i == numpy.argmax(data,0)))
def test2_invalid(self): def test2_invalid(self):
n = as_tensor(numpy.random.rand(2,3)) n = as_ndarray_result(numpy.random.rand(2,3))
try: try:
eval_outputs(max_and_argmax(n,3)) eval_outputs(max_and_argmax(n,3))
except ValueError, e: except ValueError, e:
return return
self.fail() self.fail()
def test2_invalid_neg(self): def test2_invalid_neg(self):
n = as_tensor(numpy.random.rand(2,3)) n = as_ndarray_result(numpy.random.rand(2,3))
try: try:
eval_outputs(max_and_argmax(n,-3)) eval_outputs(max_and_argmax(n,-3))
except ValueError, e: except ValueError, e:
return return
self.fail() self.fail()
def test2_valid_neg(self): def test2_valid_neg(self):
n = as_tensor(numpy.random.rand(2,3)) n = as_ndarray_result(numpy.random.rand(2,3))
v,i = eval_outputs(max_and_argmax(n,-1)) v,i = eval_outputs(max_and_argmax(n,-1))
self.failUnless(v.shape == (2,)) self.failUnless(v.shape == (2,))
v,i = eval_outputs(max_and_argmax(n,-2)) v,i = eval_outputs(max_and_argmax(n,-2))
self.failUnless(v.shape == (3,)) self.failUnless(v.shape == (3,))
def test3(self): def test3(self):
n = as_tensor(numpy.random.rand(2,3,4)) n = as_ndarray_result(numpy.random.rand(2,3,4))
v,i = eval_outputs(max_and_argmax(n,0)) v,i = eval_outputs(max_and_argmax(n,0))
self.failUnless(v.shape == (3,4)) self.failUnless(v.shape == (3,4))
self.failUnless(i.shape == (3,4)) self.failUnless(i.shape == (3,4))
...@@ -674,7 +674,7 @@ class T_subtensor(unittest.TestCase): ...@@ -674,7 +674,7 @@ class T_subtensor(unittest.TestCase):
def test0_err_invalid(self): def test0_err_invalid(self):
#it is impossible to retrieve a view of a 0-d tensor #it is impossible to retrieve a view of a 0-d tensor
n = as_tensor(numpy.ones(())) n = as_ndarray_result(numpy.ones(()))
try: try:
t = n[0] t = n[0]
except ValueError, e: except ValueError, e:
...@@ -683,7 +683,7 @@ class T_subtensor(unittest.TestCase): ...@@ -683,7 +683,7 @@ class T_subtensor(unittest.TestCase):
self.fail() self.fail()
def test1_err_bounds(self): def test1_err_bounds(self):
n = as_tensor(numpy.ones(3)) n = as_ndarray_result(numpy.ones(3))
t = n[7] t = n[7]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
try: try:
...@@ -694,7 +694,7 @@ class T_subtensor(unittest.TestCase): ...@@ -694,7 +694,7 @@ class T_subtensor(unittest.TestCase):
return return
self.fail() self.fail()
def test1_err_subslice(self): def test1_err_subslice(self):
n = as_tensor(numpy.ones(3)) n = as_ndarray_result(numpy.ones(3))
try: try:
t = n[slice(0,slice(1,2,None),None)] t = n[slice(0,slice(1,2,None),None)]
except Exception, e: except Exception, e:
...@@ -704,21 +704,21 @@ class T_subtensor(unittest.TestCase): ...@@ -704,21 +704,21 @@ class T_subtensor(unittest.TestCase):
self.fail() self.fail()
def test1_ok_range_finite(self): def test1_ok_range_finite(self):
n = as_tensor(numpy.ones(3)*5) n = as_ndarray_result(numpy.ones(3)*5)
t = n[0:2] t = n[0:2]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t]) tval = eval_outputs([t])
self.failUnless(tval.shape == (2,)) self.failUnless(tval.shape == (2,))
self.failUnless(tval[1] == 5.0) self.failUnless(tval[1] == 5.0)
def test2_ok_range_finite(self): def test2_ok_range_finite(self):
n = as_tensor(numpy.ones((3,4))*5) n = as_ndarray_result(numpy.ones((3,4))*5)
t = n[0:2,3] t = n[0:2,3]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t]) tval = eval_outputs([t])
self.failUnless(tval.shape == (2,)) self.failUnless(tval.shape == (2,))
self.failUnless(tval[1] == 5.0) self.failUnless(tval[1] == 5.0)
def test1_err_invalid(self): def test1_err_invalid(self):
n = as_tensor(numpy.ones(1)) n = as_ndarray_result(numpy.ones(1))
try: try:
t = n[0,0] t = n[0,0]
except ValueError, e: except ValueError, e:
...@@ -726,7 +726,7 @@ class T_subtensor(unittest.TestCase): ...@@ -726,7 +726,7 @@ class T_subtensor(unittest.TestCase):
return return
self.fail() self.fail()
def test1_ok_elem(self): def test1_ok_elem(self):
n = as_tensor(numpy.ones(1)*5) n = as_ndarray_result(numpy.ones(1)*5)
t = n[0] t = n[0]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t]) tval = eval_outputs([t])
...@@ -734,14 +734,14 @@ class T_subtensor(unittest.TestCase): ...@@ -734,14 +734,14 @@ class T_subtensor(unittest.TestCase):
self.failUnless(tval == 5.0) self.failUnless(tval == 5.0)
def test1_ok_range_infinite(self): def test1_ok_range_infinite(self):
#Subtensor.debug = True #Subtensor.debug = True
n = as_tensor(numpy.ones(3)*5) n = as_ndarray_result(numpy.ones(3)*5)
t = n[1:] t = n[1:]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t]) tval = eval_outputs([t])
self.failUnless(tval.shape == (2,)) self.failUnless(tval.shape == (2,))
self.failUnless(tval[1] == 5.0) self.failUnless(tval[1] == 5.0)
def test1_ok_strided(self): def test1_ok_strided(self):
n = as_tensor(numpy.ones(5)*5) n = as_ndarray_result(numpy.ones(5)*5)
t = n[1::2] t = n[1::2]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t]) tval = eval_outputs([t])
...@@ -753,7 +753,7 @@ class T_subtensor(unittest.TestCase): ...@@ -753,7 +753,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(tval[1] == 5.0) self.failUnless(tval[1] == 5.0)
def test2_err_bounds0(self): def test2_err_bounds0(self):
n = as_tensor(numpy.ones((2,3))*5) n = as_ndarray_result(numpy.ones((2,3))*5)
t = n[0,4] t = n[0,4]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
try: try:
...@@ -762,7 +762,7 @@ class T_subtensor(unittest.TestCase): ...@@ -762,7 +762,7 @@ class T_subtensor(unittest.TestCase):
return return
self.fail() self.fail()
def test2_err_bounds1(self): def test2_err_bounds1(self):
n = as_tensor(numpy.ones((2,3))*5) n = as_ndarray_result(numpy.ones((2,3))*5)
t = n[4:5,2] t = n[4:5,2]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
try: try:
...@@ -771,14 +771,14 @@ class T_subtensor(unittest.TestCase): ...@@ -771,14 +771,14 @@ class T_subtensor(unittest.TestCase):
if e[0] != 'index out of bounds': if e[0] != 'index out of bounds':
raise raise
def test2_ok_elem(self): def test2_ok_elem(self):
n = as_tensor(numpy.asarray(range(6)).reshape((2,3))) n = as_ndarray_result(numpy.asarray(range(6)).reshape((2,3)))
t = n[0,2] t = n[0,2]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t]) tval = eval_outputs([t])
self.failUnless(tval.shape == ()) self.failUnless(tval.shape == ())
self.failUnless(numpy.all(tval == 2)) self.failUnless(numpy.all(tval == 2))
def test2_ok_row(self): def test2_ok_row(self):
n = as_tensor(numpy.asarray(range(6)).reshape((2,3))) n = as_ndarray_result(numpy.asarray(range(6)).reshape((2,3)))
t = n[1] t = n[1]
self.failIf(any(n.type.broadcastable)) self.failIf(any(n.type.broadcastable))
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
...@@ -787,7 +787,7 @@ class T_subtensor(unittest.TestCase): ...@@ -787,7 +787,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(numpy.all(tval == [3,4,5])) self.failUnless(numpy.all(tval == [3,4,5]))
def test2_ok_col(self): def test2_ok_col(self):
n = as_tensor(numpy.ones((2,3))*5) n = as_ndarray_result(numpy.ones((2,3))*5)
t = n[:,0] t = n[:,0]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
self.failIf(any(n.type.broadcastable)) self.failIf(any(n.type.broadcastable))
...@@ -796,7 +796,7 @@ class T_subtensor(unittest.TestCase): ...@@ -796,7 +796,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(numpy.all(tval == 5.0)) self.failUnless(numpy.all(tval == 5.0))
def test2_ok_rows_finite(self): def test2_ok_rows_finite(self):
n = as_tensor(numpy.ones((4,3))*5) n = as_ndarray_result(numpy.ones((4,3))*5)
t = n[1:3,0] t = n[1:3,0]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t]) tval = eval_outputs([t])
...@@ -804,7 +804,7 @@ class T_subtensor(unittest.TestCase): ...@@ -804,7 +804,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(numpy.all(tval == 5.0)) self.failUnless(numpy.all(tval == 5.0))
def test2_ok_cols_infinite(self): def test2_ok_cols_infinite(self):
n = as_tensor(numpy.asarray(range(12)).reshape((4,3))) n = as_ndarray_result(numpy.asarray(range(12)).reshape((4,3)))
t = n[1,2:] t = n[1,2:]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t]) tval = eval_outputs([t])
...@@ -812,7 +812,7 @@ class T_subtensor(unittest.TestCase): ...@@ -812,7 +812,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(numpy.all(tval == 5)) self.failUnless(numpy.all(tval == 5))
def test2_ok_strided(self): def test2_ok_strided(self):
n = as_tensor(numpy.asarray(range(20)).reshape((4,5))) n = as_ndarray_result(numpy.asarray(range(20)).reshape((4,5)))
t = n[1:4:2,1:5:2] t = n[1:4:2,1:5:2]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t]) tval = eval_outputs([t])
...@@ -820,7 +820,7 @@ class T_subtensor(unittest.TestCase): ...@@ -820,7 +820,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(numpy.all(tval == [[6, 8],[16, 18]])) self.failUnless(numpy.all(tval == [[6, 8],[16, 18]]))
def test3_ok_mat(self): def test3_ok_mat(self):
n = as_tensor(numpy.asarray(range(24)).reshape((2,3,4))) n = as_ndarray_result(numpy.asarray(range(24)).reshape((2,3,4)))
t = n[0,0,0] t = n[0,0,0]
self.failUnless(isinstance(t.owner.op, Subtensor)) self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t]) tval = eval_outputs([t])
...@@ -830,7 +830,7 @@ class T_subtensor(unittest.TestCase): ...@@ -830,7 +830,7 @@ class T_subtensor(unittest.TestCase):
def test_grad_1d(self): def test_grad_1d(self):
subi = 0 subi = 0
data = numpy.random.rand(2,3) data = numpy.random.rand(2,3)
n = as_tensor(data) n = as_ndarray_result(data)
z = scal.constant(subi) z = scal.constant(subi)
t = n[z:,z] t = n[z:,z]
gn = grad(sum(exp(t)), n) gn = grad(sum(exp(t)), n)
...@@ -841,7 +841,7 @@ class T_subtensor(unittest.TestCase): ...@@ -841,7 +841,7 @@ class T_subtensor(unittest.TestCase):
def test_grad_0d(self): def test_grad_0d(self):
data = numpy.random.rand(2,3) data = numpy.random.rand(2,3)
n = as_tensor(data) n = as_ndarray_result(data)
t = n[1,0] t = n[1,0]
gn = grad(sum(exp(t)), n) gn = grad(sum(exp(t)), n)
gval = eval_outputs([gn]) gval = eval_outputs([gn])
...@@ -857,7 +857,7 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -857,7 +857,7 @@ class T_Join_and_Split(unittest.TestCase):
class Join1(Op): class Join1(Op):
def make_node(self, *inputs): def make_node(self, *inputs):
inputs = [as_tensor(t) for t in inputs] inputs = [as_ndarray_result(t) for t in inputs]
outputs = [lscalar()] + [i.type() for i in inputs] outputs = [lscalar()] + [i.type() for i in inputs]
return Apply(self, inputs, outputs) return Apply(self, inputs, outputs)
def perform(self, node, inputs, outputs): def perform(self, node, inputs, outputs):
...@@ -871,8 +871,8 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -871,8 +871,8 @@ class T_Join_and_Split(unittest.TestCase):
Join.debug = False Join.debug = False
def test_join_scalar(self): def test_join_scalar(self):
a = as_tensor(1) a = as_ndarray_result(1)
b = as_tensor(2) b = as_ndarray_result(2)
try: try:
s = join(0, a, b) s = join(0, a, b)
except: except:
...@@ -880,18 +880,18 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -880,18 +880,18 @@ class T_Join_and_Split(unittest.TestCase):
self.fail() self.fail()
def test_stack_mixed_type_constants(self): def test_stack_mixed_type_constants(self):
a = as_tensor(1) a = as_ndarray_result(1)
b = as_tensor(2.0) b = as_ndarray_result(2.0)
c = as_tensor(3.0) c = as_ndarray_result(3.0)
s = stack(a, b, c) s = stack(a, b, c)
want = numpy.array([1, 2, 3]) want = numpy.array([1, 2, 3])
self.failUnless((eval_outputs([s]) == want).all()) self.failUnless((eval_outputs([s]) == want).all())
def test_stack_scalar(self): def test_stack_scalar(self):
a = as_tensor(1) a = as_ndarray_result(1)
b = as_tensor(2) b = as_ndarray_result(2)
c = as_tensor(3) c = as_ndarray_result(3)
s = stack(a, b, c) s = stack(a, b, c)
want = numpy.array([1, 2, 3]) want = numpy.array([1, 2, 3])
...@@ -899,24 +899,24 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -899,24 +899,24 @@ class T_Join_and_Split(unittest.TestCase):
def test_join_vector(self): def test_join_vector(self):
a = as_tensor(numpy.array([1, 2, 3])) a = as_ndarray_result(numpy.array([1, 2, 3]))
b = as_tensor(numpy.array([7, 8, 9])) b = as_ndarray_result(numpy.array([7, 8, 9]))
s = join(0, a, b) s = join(0, a, b)
want = numpy.array([1, 2, 3, 7, 8, 9]) want = numpy.array([1, 2, 3, 7, 8, 9])
self.failUnless((eval_outputs([s]) == want).all()) self.failUnless((eval_outputs([s]) == want).all())
def test_stack_vector(self): def test_stack_vector(self):
a = as_tensor(numpy.array([1, 2, 3])) a = as_ndarray_result(numpy.array([1, 2, 3]))
b = as_tensor(numpy.array([7, 8, 9])) b = as_ndarray_result(numpy.array([7, 8, 9]))
s = stack(a, b) s = stack(a, b)
want = numpy.array([[1, 2, 3],[ 7, 8, 9]]) want = numpy.array([[1, 2, 3],[ 7, 8, 9]])
self.failUnless((eval_outputs([s]) == want).all()) self.failUnless((eval_outputs([s]) == want).all())
def test_join_matrix0(self): def test_join_matrix0(self):
a = as_tensor(numpy.array([[1, 2, 3], [4, 5, 6]])) a = as_ndarray_result(numpy.array([[1, 2, 3], [4, 5, 6]]))
b = as_tensor(numpy.array([[7, 8, 9]])) b = as_ndarray_result(numpy.array([[7, 8, 9]]))
s = join(0, a, b) s = join(0, a, b)
want = numpy.array([[1, 2, 3],[4,5,6],[7, 8, 9]]) want = numpy.array([[1, 2, 3],[4,5,6],[7, 8, 9]])
...@@ -925,8 +925,8 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -925,8 +925,8 @@ class T_Join_and_Split(unittest.TestCase):
def test_join_matrix1(self): def test_join_matrix1(self):
av=numpy.array([[1, 2, 3], [4, 5, 6]], dtype='float32') av=numpy.array([[1, 2, 3], [4, 5, 6]], dtype='float32')
bv= numpy.array([[7], [8]],dtype='float32') bv= numpy.array([[7], [8]],dtype='float32')
a = as_tensor(av) a = as_ndarray_result(av)
b = as_tensor(bv) b = as_ndarray_result(bv)
s = join(1, a, b) s = join(1, a, b)
want = numpy.array([[1, 2, 3, 7], [4, 5, 6, 8]], dtype='float32') want = numpy.array([[1, 2, 3, 7], [4, 5, 6, 8]], dtype='float32')
self.failUnless((eval_outputs([s]) == want).all()) self.failUnless((eval_outputs([s]) == want).all())
...@@ -934,9 +934,9 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -934,9 +934,9 @@ class T_Join_and_Split(unittest.TestCase):
verify_grad(self, lambda a, b: join(1,a,b), [av, bv], eps=1.0e-4, tol=1.0e-3) verify_grad(self, lambda a, b: join(1,a,b), [av, bv], eps=1.0e-4, tol=1.0e-3)
def test_join_matrix1_using_vertical_stack(self): def test_join_matrix1_using_vertical_stack(self):
a = as_tensor(numpy.array([[1, 2, 3], [4, 5, 6]])) a = as_ndarray_result(numpy.array([[1, 2, 3], [4, 5, 6]]))
b = as_tensor(numpy.array([[7, 8, 9]])) b = as_ndarray_result(numpy.array([[7, 8, 9]]))
c = as_tensor(numpy.array([[9, 8, 7]])) c = as_ndarray_result(numpy.array([[9, 8, 7]]))
s = vertical_stack(a, b, c) s = vertical_stack(a, b, c)
want = numpy.array([[1, 2, 3],[4,5,6],[7, 8, 9], [9, 8, 7]]) want = numpy.array([[1, 2, 3],[4,5,6],[7, 8, 9], [9, 8, 7]])
...@@ -946,9 +946,9 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -946,9 +946,9 @@ class T_Join_and_Split(unittest.TestCase):
av=numpy.array([[1, 2, 3], [4, 5, 6]], dtype='float32') av=numpy.array([[1, 2, 3], [4, 5, 6]], dtype='float32')
bv=numpy.array([[7], [8]],dtype='float32') bv=numpy.array([[7], [8]],dtype='float32')
cv=numpy.array([[3, 2, 1], [6, 5, 4]], dtype='float32') cv=numpy.array([[3, 2, 1], [6, 5, 4]], dtype='float32')
a = as_tensor(av) a = as_ndarray_result(av)
b = as_tensor(bv) b = as_ndarray_result(bv)
c = as_tensor(cv) c = as_ndarray_result(cv)
s = horizontal_stack(a, b, c) s = horizontal_stack(a, b, c)
want = numpy.array([[1, 2, 3, 7, 3, 2, 1], [4, 5, 6, 8, 6, 5, 4]], dtype='float32') want = numpy.array([[1, 2, 3, 7, 3, 2, 1], [4, 5, 6, 8, 6, 5, 4]], dtype='float32')
self.failUnless((eval_outputs([s]) == want).all()) self.failUnless((eval_outputs([s]) == want).all())
...@@ -958,8 +958,8 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -958,8 +958,8 @@ class T_Join_and_Split(unittest.TestCase):
def test_join_matrixV(self): def test_join_matrixV(self):
"""variable join axis""" """variable join axis"""
v = numpy.array([[1., 2., 3.], [4., 5., 6.]]) v = numpy.array([[1., 2., 3.], [4., 5., 6.]])
a = as_tensor(v.copy()) a = as_ndarray_result(v.copy())
b = as_tensor(v.copy()) b = as_ndarray_result(v.copy())
ax = lscalar() ax = lscalar()
s = join(ax, a, b) s = join(ax, a, b)
...@@ -1108,12 +1108,12 @@ class T_exp(unittest.TestCase): ...@@ -1108,12 +1108,12 @@ class T_exp(unittest.TestCase):
# class T_abs(unittest.TestCase): # class T_abs(unittest.TestCase):
# def test_impl(self): # def test_impl(self):
# t = as_tensor(1.0) # t = as_ndarray_result(1.0)
# check_eq(self, t, abs(t), 1.0, 1.0) # check_eq(self, t, abs(t), 1.0, 1.0)
# check_eq(self, t, abs(t), -1.0, 1.0) # check_eq(self, t, abs(t), -1.0, 1.0)
# for shape in (2,), (3,4): # for shape in (2,), (3,4):
# t = as_tensor(numpy.ones(shape)) # t = as_ndarray_result(numpy.ones(shape))
# d = numpy.random.rand(*shape)*2-1.0 # d = numpy.random.rand(*shape)*2-1.0
# check_eq(self, t, abs(t), d, abs(d)) # check_eq(self, t, abs(t), d, abs(d))
# check_eq(self, t, abs(t), -d, abs(-d)) # check_eq(self, t, abs(t), -d, abs(-d))
...@@ -1148,7 +1148,7 @@ class T_exp(unittest.TestCase): ...@@ -1148,7 +1148,7 @@ class T_exp(unittest.TestCase):
# self.failUnless(numpy.all(eval_outputs([t]) == [9,9,9])) # self.failUnless(numpy.all(eval_outputs([t]) == [9,9,9]))
# def test1(self): # def test1(self):
# x = as_tensor(numpy.ones((4,5))) # x = as_ndarray_result(numpy.ones((4,5)))
# l = ones_like(x[:,0:1]) # l = ones_like(x[:,0:1])
# r = ones_like(x[0:1,:]) # r = ones_like(x[0:1,:])
# xx = x + dot(l,r) # xx = x + dot(l,r)
...@@ -1156,11 +1156,11 @@ class T_exp(unittest.TestCase): ...@@ -1156,11 +1156,11 @@ class T_exp(unittest.TestCase):
# class T_sum(unittest.TestCase): # class T_sum(unittest.TestCase):
# def test_impl(self): # def test_impl(self):
# t = as_tensor(0.0) # t = as_ndarray_result(0.0)
# check_eq(self, t, Sum(t).out, 1.0, 1.0) # check_eq(self, t, Sum(t).out, 1.0, 1.0)
# check_eq(self, t, Sum(t).out, -1.0, -1.0) # check_eq(self, t, Sum(t).out, -1.0, -1.0)
# t = as_tensor([0.0, 0.0]) # t = as_ndarray_result([0.0, 0.0])
# d = numpy.asarray([-0.4, 1.2]) # d = numpy.asarray([-0.4, 1.2])
# check_eq(self, t, Sum(t).out, d, numpy.sum(d)) # check_eq(self, t, Sum(t).out, d, numpy.sum(d))
# check_eq(self, t, Sum(t).out, -d, -numpy.sum(d)) # check_eq(self, t, Sum(t).out, -d, -numpy.sum(d))
...@@ -1170,13 +1170,13 @@ class T_exp(unittest.TestCase): ...@@ -1170,13 +1170,13 @@ class T_exp(unittest.TestCase):
# unittest_tools.seed_rng() # unittest_tools.seed_rng()
# def test_elemwise(self): # def test_elemwise(self):
# a = as_tensor(0.0) # a = as_ndarray_result(0.0)
# b = as_tensor(0.0) # b = as_ndarray_result(0.0)
# check_eq2_both(self, [a,b], mul(a,b), [3.0, 4.0], 12.0) # check_eq2_both(self, [a,b], mul(a,b), [3.0, 4.0], 12.0)
# check_eq2_both(self, [a,b], mul(b,a), [-1.0,2.0], -2.0) # check_eq2_both(self, [a,b], mul(b,a), [-1.0,2.0], -2.0)
# a = as_tensor(numpy.ones(2)) # a = as_ndarray_result(numpy.ones(2))
# b = as_tensor(numpy.ones(2)) # b = as_ndarray_result(numpy.ones(2))
# aa = numpy.asarray([-0.5, 4.0]) # aa = numpy.asarray([-0.5, 4.0])
# bb = numpy.asarray([-0.5, 2.0]) # bb = numpy.asarray([-0.5, 2.0])
# check_eq2_both(self, [a,b], mul(a,b), [aa,bb], numpy.asarray([0.25, 8.0])) # check_eq2_both(self, [a,b], mul(a,b), [aa,bb], numpy.asarray([0.25, 8.0]))
...@@ -1184,8 +1184,8 @@ class T_exp(unittest.TestCase): ...@@ -1184,8 +1184,8 @@ class T_exp(unittest.TestCase):
# def test_scalar(self): # def test_scalar(self):
# r = numpy.random.rand(2,3) # r = numpy.random.rand(2,3)
# a = as_tensor(r) # a = as_ndarray_result(r)
# b = as_tensor(2.0) # b = as_ndarray_result(2.0)
# check_eq2_both(self, [a,b], mul(a,b), [r, 2.0], r*2.0) # check_eq2_both(self, [a,b], mul(a,b), [r, 2.0], r*2.0)
# check_eq2_both(self, [a,b], mul(a,b), [r, 4.0], r*4.0) # check_eq2_both(self, [a,b], mul(a,b), [r, 4.0], r*4.0)
# self.failUnless(b.data == 2.0) # self.failUnless(b.data == 2.0)
...@@ -1194,7 +1194,7 @@ class T_exp(unittest.TestCase): ...@@ -1194,7 +1194,7 @@ class T_exp(unittest.TestCase):
# r1 = numpy.random.rand(3,5) # r1 = numpy.random.rand(3,5)
# r2 = numpy.random.rand(1,5) # r2 = numpy.random.rand(1,5)
# r3 = numpy.random.rand(3,1) # r3 = numpy.random.rand(3,1)
# a1, a2, a3 = as_tensor(r1), as_tensor(r2), as_tensor(r3) # a1, a2, a3 = as_ndarray_result(r1), as_ndarray_result(r2), as_ndarray_result(r3)
# check_eq2_both(self, [a1,a2], mul(a1,a2), [r1, r2], r1*r2) # check_eq2_both(self, [a1,a2], mul(a1,a2), [r1, r2], r1*r2)
# check_eq2_both(self, [a1,a3], mul(a1,a3), [r1, r3], r1*r3) # check_eq2_both(self, [a1,a3], mul(a1,a3), [r1, r3], r1*r3)
...@@ -1213,8 +1213,8 @@ class T_exp(unittest.TestCase): ...@@ -1213,8 +1213,8 @@ class T_exp(unittest.TestCase):
# verify_grad(self, Mul, [numpy.random.rand(3, 5), numpy.random.rand(3, 1)]) # verify_grad(self, Mul, [numpy.random.rand(3, 5), numpy.random.rand(3, 1)])
# def test_wrong_shapes(self): # def test_wrong_shapes(self):
# a = as_tensor(numpy.ones(3)) # a = as_ndarray_result(numpy.ones(3))
# b = as_tensor(numpy.ones(4)) # b = as_ndarray_result(numpy.ones(4))
# try: # try:
# check_eq2(self, [a,b], Mul(a,b).out, # check_eq2(self, [a,b], Mul(a,b).out,
# [numpy.ones(3), numpy.ones(4)], 1.0) # [numpy.ones(3), numpy.ones(4)], 1.0)
...@@ -1253,8 +1253,8 @@ class T_exp(unittest.TestCase): ...@@ -1253,8 +1253,8 @@ class T_exp(unittest.TestCase):
# def test0(self): # def test0(self):
# verify_grad(self, Log, [numpy.random.rand(3,1)+0.0001]) # verify_grad(self, Log, [numpy.random.rand(3,1)+0.0001])
# def test1(self): # def test1(self):
# a = as_tensor(numpy.ones(2)) # a = as_ndarray_result(numpy.ones(2))
# b = as_tensor(numpy.ones(2)) # b = as_ndarray_result(numpy.ones(2))
# aa = numpy.asarray([0.5, 4.0]) # aa = numpy.asarray([0.5, 4.0])
# bb = numpy.asarray([0.5, 2.0]) # bb = numpy.asarray([0.5, 2.0])
# check_eq2(self, [a], log(a), [aa], numpy.log(numpy.asarray(aa))) # check_eq2(self, [a], log(a), [aa], numpy.log(numpy.asarray(aa)))
...@@ -1283,12 +1283,12 @@ class test_matinv(unittest.TestCase): ...@@ -1283,12 +1283,12 @@ class test_matinv(unittest.TestCase):
# symbolic program # symbolic program
# broadcastable=[False,False] means that the shape of matrix is two dimensional, # broadcastable=[False,False] means that the shape of matrix is two dimensional,
# and none of the dimensions are constrained to have length 1. # and none of the dimensions are constrained to have length 1.
# Note that Tensor's constructor does not actually allocate any memory. # Note that NDArrayType's constructor does not actually allocate any memory.
# TODO: Make Tensor syntax more explicit, and maybe give shape or number of dimensions. # TODO: Make NDArrayType syntax more explicit, and maybe give shape or number of dimensions.
a, b = matrices('ab') a, b = matrices('ab')
ab = a*b ab = a*b
# Here, as_tensor actually uses the data allocated by numpy. # Here, as_ndarray_result actually uses the data allocated by numpy.
diff = ab - as_tensor(numpy.ones((dim,dim))) diff = ab - as_ndarray_result(numpy.ones((dim,dim)))
# Sum of squared errors # Sum of squared errors
ssdiff = sum((diff**2.0)) ssdiff = sum((diff**2.0))
...@@ -1339,7 +1339,7 @@ class t_dot(unittest.TestCase): ...@@ -1339,7 +1339,7 @@ class t_dot(unittest.TestCase):
x = numpy.asarray(x) x = numpy.asarray(x)
return type(x), x.dtype, x.shape return type(x), x.dtype, x.shape
nz = numpy.dot(x,y) nz = numpy.dot(x,y)
tz = eval_outputs([dot(as_tensor(x), as_tensor(y))]) tz = eval_outputs([dot(as_ndarray_result(x), as_ndarray_result(y))])
self.failUnless(tz.dtype == nz.dtype) self.failUnless(tz.dtype == nz.dtype)
self.failUnless(tz.shape == nz.shape) self.failUnless(tz.shape == nz.shape)
self.failUnless(_approx_eq(nz, tz)) self.failUnless(_approx_eq(nz, tz))
...@@ -1406,7 +1406,7 @@ class T_tensorfromscalar(unittest.TestCase): ...@@ -1406,7 +1406,7 @@ class T_tensorfromscalar(unittest.TestCase):
def test1(self): def test1(self):
s = scal.constant(56) s = scal.constant(56)
t = as_tensor(s) t = as_ndarray_result(s)
self.failUnless(t.owner.op is tensor_from_scalar) self.failUnless(t.owner.op is tensor_from_scalar)
self.failUnless(t.type.broadcastable == (), t.type.broadcastable) self.failUnless(t.type.broadcastable == (), t.type.broadcastable)
self.failUnless(t.type.ndim == 0, t.type.ndim) self.failUnless(t.type.ndim == 0, t.type.ndim)
...@@ -1420,13 +1420,13 @@ class T_tensorfromscalar(unittest.TestCase): ...@@ -1420,13 +1420,13 @@ class T_tensorfromscalar(unittest.TestCase):
# def _tensor(data, broadcastable=None, name=None): # def _tensor(data, broadcastable=None, name=None):
# """Return a Tensor containing given data""" # """Return a NDArrayType containing given data"""
# data = numpy.asarray(data) # data = numpy.asarray(data)
# if broadcastable is None: # if broadcastable is None:
# broadcastable = [s==1 for s in data.shape] # broadcastable = [s==1 for s in data.shape]
# elif broadcastable in [0, 1]: # elif broadcastable in [0, 1]:
# broadcastable = [broadcastable] * len(data.shape) # broadcastable = [broadcastable] * len(data.shape)
# rval = Tensor(data.dtype, broadcastable, name) # rval = NDArrayType(data.dtype, broadcastable, name)
# rval.data = data # will raise if broadcastable was mis-specified # rval.data = data # will raise if broadcastable was mis-specified
# return rval # return rval
...@@ -1437,7 +1437,7 @@ class T_tensorfromscalar(unittest.TestCase): ...@@ -1437,7 +1437,7 @@ class T_tensorfromscalar(unittest.TestCase):
# unittest_tools.seed_rng() # unittest_tools.seed_rng()
# def test0(self): # allocate from a scalar float # def test0(self): # allocate from a scalar float
# t = _tensor(1.0) # t = _tensor(1.0)
# self.failUnless(isinstance(t, Tensor)) # self.failUnless(isinstance(t, NDArrayType))
# self.failUnless(t.dtype == 'float64') # self.failUnless(t.dtype == 'float64')
# self.failUnless(t.broadcastable == ()) # self.failUnless(t.broadcastable == ())
# self.failUnless(t.role == None) # self.failUnless(t.role == None)
...@@ -1446,25 +1446,25 @@ class T_tensorfromscalar(unittest.TestCase): ...@@ -1446,25 +1446,25 @@ class T_tensorfromscalar(unittest.TestCase):
# self.failUnless(t.data == 1.0) # self.failUnless(t.data == 1.0)
# def test0_int(self): # allocate from a scalar float # def test0_int(self): # allocate from a scalar float
# t = _tensor(1) # t = _tensor(1)
# self.failUnless(isinstance(t, Tensor)) # self.failUnless(isinstance(t, NDArrayType))
# self.failUnless(t.dtype == 'int64' or t.dtype == 'int32') # self.failUnless(t.dtype == 'int64' or t.dtype == 'int32')
# def test1(self): # allocate from a vector of ints, not broadcastable # def test1(self): # allocate from a vector of ints, not broadcastable
# t = _tensor(numpy.ones(5,dtype='int32')) # t = _tensor(numpy.ones(5,dtype='int32'))
# self.failUnless(isinstance(t, Tensor)) # self.failUnless(isinstance(t, NDArrayType))
# self.failUnless(t.dtype == 'int32') # self.failUnless(t.dtype == 'int32')
# self.failUnless(t.broadcastable == (0,)) # self.failUnless(t.broadcastable == (0,))
# self.failUnless(isinstance(t.data, numpy.ndarray)) # self.failUnless(isinstance(t.data, numpy.ndarray))
# self.failUnless(str(t.data.dtype) == 'int32') # self.failUnless(str(t.data.dtype) == 'int32')
# def test2(self): # allocate from a column matrix of complex with name # def test2(self): # allocate from a column matrix of complex with name
# t = _tensor(numpy.ones((5,1),dtype='complex64'),name='bart') # t = _tensor(numpy.ones((5,1),dtype='complex64'),name='bart')
# self.failUnless(isinstance(t, Tensor)) # self.failUnless(isinstance(t, NDArrayType))
# self.failUnless(t.dtype == 'complex64') # self.failUnless(t.dtype == 'complex64')
# self.failUnless(t.broadcastable == (0,1)) # self.failUnless(t.broadcastable == (0,1))
# self.failUnless(isinstance(t.data, numpy.ndarray)) # self.failUnless(isinstance(t.data, numpy.ndarray))
# self.failUnless(t.name == 'bart') # self.failUnless(t.name == 'bart')
# def test2b(self): # allocate from a column matrix, not broadcastable # def test2b(self): # allocate from a column matrix, not broadcastable
# t = _tensor(numpy.ones((5,1),dtype='complex64'),broadcastable=0) # t = _tensor(numpy.ones((5,1),dtype='complex64'),broadcastable=0)
# self.failUnless(isinstance(t, Tensor)) # self.failUnless(isinstance(t, NDArrayType))
# self.failUnless(t.dtype == 'complex64') # self.failUnless(t.dtype == 'complex64')
# self.failUnless(t.broadcastable == (0,0)) # self.failUnless(t.broadcastable == (0,0))
# self.failUnless(isinstance(t.data, numpy.ndarray)) # self.failUnless(isinstance(t.data, numpy.ndarray))
...@@ -1484,39 +1484,39 @@ class T_tensorfromscalar(unittest.TestCase): ...@@ -1484,39 +1484,39 @@ class T_tensorfromscalar(unittest.TestCase):
# t.data = numpy.ones((2,7,1)) # t.data = numpy.ones((2,7,1))
# self.fail() # self.fail()
# except ValueError, e: # except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_rank) # self.failUnless(e[0] is NDArrayType.filter.E_rank)
# try: # try:
# t.data = numpy.ones(1) # t.data = numpy.ones(1)
# self.fail() # self.fail()
# except ValueError, e: # except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_rank) # self.failUnless(e[0] is NDArrayType.filter.E_rank)
# def test_data_badrank1(self): # def test_data_badrank1(self):
# t = _tensor(numpy.ones((1,1),dtype='complex64'), broadcastable=1) # t = _tensor(numpy.ones((1,1),dtype='complex64'), broadcastable=1)
# try: # try:
# t.data = numpy.ones((1,1,1)) # t.data = numpy.ones((1,1,1))
# self.fail() # self.fail()
# except ValueError, e: # except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_rank) # self.failUnless(e[0] is NDArrayType.filter.E_rank)
# try: # try:
# t.data = numpy.ones(1) # t.data = numpy.ones(1)
# self.fail() # self.fail()
# except ValueError, e: # except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_rank) # self.failUnless(e[0] is NDArrayType.filter.E_rank)
# def test_data_badshape0(self): # def test_data_badshape0(self):
# t = _tensor(numpy.ones((1,1),dtype='complex64'), broadcastable=1) # t = _tensor(numpy.ones((1,1),dtype='complex64'), broadcastable=1)
# try: # try:
# t.data = numpy.ones((1,2)) # t.data = numpy.ones((1,2))
# self.fail() # self.fail()
# except ValueError, e: # except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_shape) # self.failUnless(e[0] is NDArrayType.filter.E_shape)
# try: # try:
# t.data = numpy.ones((0,1)) # t.data = numpy.ones((0,1))
# self.fail() # self.fail()
# except ValueError, e: # except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_shape) # self.failUnless(e[0] is NDArrayType.filter.E_shape)
# def test_cast0(self): # def test_cast0(self):
# t = Tensor('float32', [0]) # t = NDArrayType('float32', [0])
# t.data = numpy.random.rand(4) > 0.5 # t.data = numpy.random.rand(4) > 0.5
# self.failUnless(str(t.data.dtype) == t.dtype) # self.failUnless(str(t.data.dtype) == t.dtype)
...@@ -1585,7 +1585,7 @@ class test_grad(unittest.TestCase): ...@@ -1585,7 +1585,7 @@ class test_grad(unittest.TestCase):
o = test_grad.O() o = test_grad.O()
a1 = o.make_node() a1 = o.make_node()
g = grad(a1.outputs[0], a1.outputs[1]) g = grad(a1.outputs[0], a1.outputs[1])
self.failUnless(isinstance(g, TensorConstant)) self.failUnless(isinstance(g, NDArrayConstant))
self.failUnless(g.data == 0) self.failUnless(g.data == 0)
try: try:
grad(a1.outputs[0], 'wtf') grad(a1.outputs[0], 'wtf')
...@@ -1600,7 +1600,7 @@ class test_grad(unittest.TestCase): ...@@ -1600,7 +1600,7 @@ class test_grad(unittest.TestCase):
g0,g1,g2 = grad(a1.outputs[0], a1.inputs + [scalar('z')]) g0,g1,g2 = grad(a1.outputs[0], a1.inputs + [scalar('z')])
self.failUnless(o.gval0 is g0) self.failUnless(o.gval0 is g0)
self.failUnless(o.gval1 is g1) self.failUnless(o.gval1 is g1)
self.failUnless(isinstance(g2, TensorConstant)) self.failUnless(isinstance(g2, NDArrayConstant))
self.failUnless(g2.data == 0) self.failUnless(g2.data == 0)
class T_op_cache(unittest.TestCase): class T_op_cache(unittest.TestCase):
...@@ -1703,7 +1703,7 @@ def test_flatten_outdim2(): ...@@ -1703,7 +1703,7 @@ def test_flatten_outdim2():
tensor.verify_grad(None, Flatten(2), [a_val]) tensor.verify_grad(None, Flatten(2), [a_val])
def test_flatten_outdim2_of_3(): def test_flatten_outdim2_of_3():
a = Tensor('float64', (False, False, False))() a = NDArrayType('float64', (False, False, False))()
c = flatten(a, 2) c = flatten(a, 2)
f = inplace_func([a], c) f = inplace_func([a], c)
a_val = numpy.asarray([[[0,1],[2,3]], [[4,5],[6,7]]], dtype='float64') a_val = numpy.asarray([[[0,1],[2,3]], [[4,5],[6,7]]], dtype='float64')
...@@ -1774,7 +1774,7 @@ class test_tensordot(unittest.TestCase): ...@@ -1774,7 +1774,7 @@ class test_tensordot(unittest.TestCase):
tensor.verify_grad(None, TensorDot(axes), [aval,bval]) tensor.verify_grad(None, TensorDot(axes), [aval,bval])
# test ndarray-matrix, sum over one dim of matrix # test ndarray-matrix, sum over one dim of matrix
atens = Tensor('float64', broadcastable=(False,)*4)() atens = NDArrayType('float64', broadcastable=(False,)*4)()
axes = ((2,),(1,)) axes = ((2,),(1,))
c = tensordot(axes)(atens, bmat) c = tensordot(axes)(atens, bmat)
f4 = inplace_func([atens,bmat],c) f4 = inplace_func([atens,bmat],c)
...@@ -1785,8 +1785,8 @@ class test_tensordot(unittest.TestCase): ...@@ -1785,8 +1785,8 @@ class test_tensordot(unittest.TestCase):
tensor.verify_grad(None, TensorDot(axes), [aval,bval]) tensor.verify_grad(None, TensorDot(axes), [aval,bval])
# test ndarray-ndarray # test ndarray-ndarray
atens = Tensor('float64', broadcastable=(False,)*4)() atens = NDArrayType('float64', broadcastable=(False,)*4)()
btens = Tensor('float64', broadcastable=(False,)*3)() btens = NDArrayType('float64', broadcastable=(False,)*3)()
axes = ((1,3),(0,2)) axes = ((1,3),(0,2))
c = tensordot(axes)(atens, btens) c = tensordot(axes)(atens, btens)
f5 = inplace_func([atens,btens],c) f5 = inplace_func([atens,btens],c)
......
...@@ -12,7 +12,7 @@ _as_scalar = GemmLocalOptimizer._as_scalar ...@@ -12,7 +12,7 @@ _as_scalar = GemmLocalOptimizer._as_scalar
_is_real_matrix = GemmLocalOptimizer._is_real_matrix _is_real_matrix = GemmLocalOptimizer._is_real_matrix
from theano import In, Out from theano import In, Out
from .test_basic import (_approx_eq, as_tensor, inplace_func, from .test_basic import (_approx_eq, as_ndarray_result, inplace_func,
compile, value, constant, inplace, eval_outputs) compile, value, constant, inplace, eval_outputs)
class t_gemm(TestCase): class t_gemm(TestCase):
...@@ -35,7 +35,7 @@ class t_gemm(TestCase): ...@@ -35,7 +35,7 @@ class t_gemm(TestCase):
def cmp_linker(z, a, x, y, b, l): def cmp_linker(z, a, x, y, b, l):
z,a,x,y,b = [numpy.asarray(p) for p in z,a,x,y,b] z,a,x,y,b = [numpy.asarray(p) for p in z,a,x,y,b]
z_orig = z.copy() z_orig = z.copy()
tz,ta,tx,ty,tb = [as_tensor(p).type() for p in z,a,x,y,b] tz,ta,tx,ty,tb = [as_ndarray_result(p).type() for p in z,a,x,y,b]
f = inplace_func([tz,ta,tx,ty,tb], gemm(tz,ta,tx,ty,tb), mode=compile.Mode(optimizer = None, linker = l)) f = inplace_func([tz,ta,tx,ty,tb], gemm(tz,ta,tx,ty,tb), mode=compile.Mode(optimizer = None, linker = l))
new_z = f(z,a,x,y,b) new_z = f(z,a,x,y,b)
...@@ -100,7 +100,7 @@ class t_gemm(TestCase): ...@@ -100,7 +100,7 @@ class t_gemm(TestCase):
def test_destroy_map0(self): def test_destroy_map0(self):
"""test that only first input can be overwritten""" """test that only first input can be overwritten"""
Z = as_tensor(self.rand(2,2)) Z = as_ndarray_result(self.rand(2,2))
try: try:
gemm(Z, 1.0, Z, Z, 1.0) gemm(Z, 1.0, Z, Z, 1.0)
except ValueError, e: except ValueError, e:
...@@ -109,8 +109,8 @@ class t_gemm(TestCase): ...@@ -109,8 +109,8 @@ class t_gemm(TestCase):
self.fail() self.fail()
def test_destroy_map1(self): def test_destroy_map1(self):
"""test that only first input can be overwritten""" """test that only first input can be overwritten"""
Z = as_tensor(self.rand(2,2)) Z = as_ndarray_result(self.rand(2,2))
A = as_tensor(self.rand(2,2)) A = as_ndarray_result(self.rand(2,2))
try: try:
gemm(Z, 1.0, A, inplace.transpose_inplace(Z), 1.0) gemm(Z, 1.0, A, inplace.transpose_inplace(Z), 1.0)
except ValueError, e: except ValueError, e:
...@@ -119,8 +119,8 @@ class t_gemm(TestCase): ...@@ -119,8 +119,8 @@ class t_gemm(TestCase):
self.fail() self.fail()
def test_destroy_map2(self): def test_destroy_map2(self):
"""test that only first input can be overwritten""" """test that only first input can be overwritten"""
Z = as_tensor(self.rand(2,2)) Z = as_ndarray_result(self.rand(2,2))
A = as_tensor(self.rand(2,2)) A = as_ndarray_result(self.rand(2,2))
try: try:
gemm(Z, 1.0, inplace.transpose_inplace(Z), A, 1.0) gemm(Z, 1.0, inplace.transpose_inplace(Z), A, 1.0)
except ValueError, e: except ValueError, e:
...@@ -129,8 +129,8 @@ class t_gemm(TestCase): ...@@ -129,8 +129,8 @@ class t_gemm(TestCase):
self.fail() self.fail()
def test_destroy_map3(self): def test_destroy_map3(self):
"""test that only first input can be overwritten""" """test that only first input can be overwritten"""
Z = as_tensor(self.rand(2,2)) Z = as_ndarray_result(self.rand(2,2))
A = as_tensor(self.rand(2,2)) A = as_ndarray_result(self.rand(2,2))
try: try:
gemm(Z, 1.0, Z, A, 1.0) gemm(Z, 1.0, Z, A, 1.0)
except ValueError, e: except ValueError, e:
......
...@@ -27,7 +27,7 @@ class test_DimShuffle(unittest.TestCase): ...@@ -27,7 +27,7 @@ class test_DimShuffle(unittest.TestCase):
((1, 4, 3, 2, 1), (3, 2, 1), (2, 3, 4)), ((1, 4, 3, 2, 1), (3, 2, 1), (2, 3, 4)),
((1, 1, 4), (1, 2), (1, 4))]: ((1, 1, 4), (1, 2), (1, 4))]:
ib = [(entry == 1) for entry in xsh] ib = [(entry == 1) for entry in xsh]
x = Tensor('float64', ib)('x') x = NDArrayType('float64', ib)('x')
e = DimShuffle(ib, shuffle)(x) e = DimShuffle(ib, shuffle)(x)
f = copy(linker).accept(Env([x], [e])).make_function() f = copy(linker).accept(Env([x], [e])).make_function()
assert f(numpy.ones(xsh)).shape == zsh assert f(numpy.ones(xsh)).shape == zsh
...@@ -50,8 +50,8 @@ class test_Broadcast(unittest.TestCase): ...@@ -50,8 +50,8 @@ class test_Broadcast(unittest.TestCase):
((2, 3, 4, 5), (1, 3, 1, 5)), ((2, 3, 4, 5), (1, 3, 1, 5)),
((2, 3, 4, 5), (1, 1, 1, 1)), ((2, 3, 4, 5), (1, 1, 1, 1)),
((), ())]: ((), ())]:
x = Tensor('float64', [(entry == 1) for entry in xsh])('x') x = NDArrayType('float64', [(entry == 1) for entry in xsh])('x')
y = Tensor('float64', [(entry == 1) for entry in ysh])('y') y = NDArrayType('float64', [(entry == 1) for entry in ysh])('y')
e = Elemwise(add)(x, y) e = Elemwise(add)(x, y)
f = copy(linker).accept(Env([x, y], [e])).make_function() f = copy(linker).accept(Env([x, y], [e])).make_function()
xv = numpy.asarray(numpy.random.rand(*xsh)) xv = numpy.asarray(numpy.random.rand(*xsh))
...@@ -69,8 +69,8 @@ class test_Broadcast(unittest.TestCase): ...@@ -69,8 +69,8 @@ class test_Broadcast(unittest.TestCase):
((2, 3, 4, 5), (1, 3, 1, 5)), ((2, 3, 4, 5), (1, 3, 1, 5)),
((2, 3, 4, 5), (1, 1, 1, 1)), ((2, 3, 4, 5), (1, 1, 1, 1)),
((), ())]: ((), ())]:
x = Tensor('float64', [(entry == 1) for entry in xsh])('x') x = NDArrayType('float64', [(entry == 1) for entry in xsh])('x')
y = Tensor('float64', [(entry == 1) for entry in ysh])('y') y = NDArrayType('float64', [(entry == 1) for entry in ysh])('y')
e = Elemwise(Add(transfer_type(0)), {0:0})(x, y) e = Elemwise(Add(transfer_type(0)), {0:0})(x, y)
f = copy(linker).accept(Env([x, y], [e])).make_function() f = copy(linker).accept(Env([x, y], [e])).make_function()
xv = numpy.asarray(numpy.random.rand(*xsh)) xv = numpy.asarray(numpy.random.rand(*xsh))
...@@ -94,8 +94,8 @@ class test_Broadcast(unittest.TestCase): ...@@ -94,8 +94,8 @@ class test_Broadcast(unittest.TestCase):
self.with_linker_inplace(gof.CLinker()) self.with_linker_inplace(gof.CLinker())
def test_fill(self): def test_fill(self):
x = Tensor('float64', [0, 0])('x') x = NDArrayType('float64', [0, 0])('x')
y = Tensor('float64', [1, 1])('y') y = NDArrayType('float64', [1, 1])('y')
e = Elemwise(Second(transfer_type(0)), {0:0})(x, y) e = Elemwise(Second(transfer_type(0)), {0:0})(x, y)
f = gof.CLinker().accept(Env([x, y], [e])).make_function() f = gof.CLinker().accept(Env([x, y], [e])).make_function()
xv = numpy.ones((5, 5)) xv = numpy.ones((5, 5))
...@@ -104,8 +104,8 @@ class test_Broadcast(unittest.TestCase): ...@@ -104,8 +104,8 @@ class test_Broadcast(unittest.TestCase):
assert (xv == yv).all() assert (xv == yv).all()
def test_weird_strides(self): def test_weird_strides(self):
x = Tensor('float64', [0, 0, 0, 0, 0])('x') x = NDArrayType('float64', [0, 0, 0, 0, 0])('x')
y = Tensor('float64', [0, 0, 0, 0, 0])('y') y = NDArrayType('float64', [0, 0, 0, 0, 0])('y')
e = Elemwise(add)(x, y) e = Elemwise(add)(x, y)
f = gof.CLinker().accept(Env([x, y], [e])).make_function() f = gof.CLinker().accept(Env([x, y], [e])).make_function()
xv = numpy.random.rand(2, 2, 2, 2, 2) xv = numpy.random.rand(2, 2, 2, 2, 2)
...@@ -114,7 +114,7 @@ class test_Broadcast(unittest.TestCase): ...@@ -114,7 +114,7 @@ class test_Broadcast(unittest.TestCase):
assert (f(xv, yv) == zv).all() assert (f(xv, yv) == zv).all()
def test_same_inputs(self): def test_same_inputs(self):
x = Tensor('float64', [0, 0])('x') x = NDArrayType('float64', [0, 0])('x')
e = Elemwise(add)(x, x) e = Elemwise(add)(x, x)
f = gof.CLinker().accept(Env([x], [e])).make_function() f = gof.CLinker().accept(Env([x], [e])).make_function()
xv = numpy.random.rand(2, 2) xv = numpy.random.rand(2, 2)
...@@ -134,7 +134,7 @@ class test_CAReduce(unittest.TestCase): ...@@ -134,7 +134,7 @@ class test_CAReduce(unittest.TestCase):
((5, 6), ()), ((5, 6), ()),
((2, 3, 4, 5), (0, 1, 3)), ((2, 3, 4, 5), (0, 1, 3)),
((), ())]: ((), ())]:
x = Tensor('float64', [(entry == 1) for entry in xsh])('x') x = NDArrayType('float64', [(entry == 1) for entry in xsh])('x')
e = CAReduce(add, axis = tosum)(x) e = CAReduce(add, axis = tosum)(x)
if tosum is None: tosum = range(len(xsh)) if tosum is None: tosum = range(len(xsh))
f = copy(linker).accept(Env([x], [e])).make_function() f = copy(linker).accept(Env([x], [e])).make_function()
......
...@@ -63,7 +63,7 @@ def test_merge_with_weird_eq(): ...@@ -63,7 +63,7 @@ def test_merge_with_weird_eq():
assert node.inputs[0] is node.inputs[1] assert node.inputs[0] is node.inputs[1]
#NONSCALAR CASE #NONSCALAR CASE
# This was created to test TensorConstantSignature # This was created to test NDArrayConstantSignature
x = T.constant(numpy.ones(5), name='x') x = T.constant(numpy.ones(5), name='x')
y = T.constant(numpy.ones(5), name='y') y = T.constant(numpy.ones(5), name='y')
g = Env([x, y], [x+y]) g = Env([x, y], [x+y])
......
...@@ -6,7 +6,7 @@ import unittest ...@@ -6,7 +6,7 @@ import unittest
from theano import gof from theano import gof
from theano.tensor.opt import * from theano.tensor.opt import *
from theano import tensor from theano import tensor
from theano.tensor import Tensor from theano.tensor import NDArrayType
from theano.gof import Env from theano.gof import Env
from theano.tensor.elemwise import DimShuffle from theano.tensor.elemwise import DimShuffle
from theano import pprint from theano import pprint
...@@ -18,9 +18,9 @@ from theano import function ...@@ -18,9 +18,9 @@ from theano import function
def inputs(xbc = (0, 0), ybc = (0, 0), zbc = (0, 0)): def inputs(xbc = (0, 0), ybc = (0, 0), zbc = (0, 0)):
x = Tensor(broadcastable = xbc, dtype = 'float64')('x') x = NDArrayType(broadcastable = xbc, dtype = 'float64')('x')
y = Tensor(broadcastable = ybc, dtype = 'float64')('y') y = NDArrayType(broadcastable = ybc, dtype = 'float64')('y')
z = Tensor(broadcastable = zbc, dtype = 'float64')('z') z = NDArrayType(broadcastable = zbc, dtype = 'float64')('z')
return x, y, z return x, y, z
......
...@@ -3,7 +3,7 @@ from theano.tensor.xlogx import xlogx ...@@ -3,7 +3,7 @@ from theano.tensor.xlogx import xlogx
import unittest import unittest
import theano import theano
from theano.tensor import as_tensor from theano.tensor import as_ndarray_result
import test_basic as TT import test_basic as TT
import random import random
...@@ -15,7 +15,7 @@ class T_XlogX(unittest.TestCase): ...@@ -15,7 +15,7 @@ class T_XlogX(unittest.TestCase):
unittest_tools.seed_rng() unittest_tools.seed_rng()
def test0(self): def test0(self):
x = as_tensor([1, 0]) x = as_ndarray_result([1, 0])
y = xlogx(x) y = xlogx(x)
f = theano.function([], [y]) f = theano.function([], [y])
self.failUnless(numpy.all(f() == numpy.asarray([0, 0.]))) self.failUnless(numpy.all(f() == numpy.asarray([0, 0.])))
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论