提交 67091013 authored 作者: Olivier Breuleux's avatar Olivier Breuleux

moved Tensor -> NDArrayType, TensorResult -> NDArrayResult, as_tensor ->…

moved Tensor -> NDArrayType, TensorResult -> NDArrayResult, as_tensor -> as_tensor_result, Sparse -> SparseType, as_sparse -> as_sparse_result, closes #243
上级 55c5d0b3
......@@ -22,7 +22,7 @@ class BROKEN_ON_PURPOSE_StructuredDotCSC(gof.Op):
def __hash__(self):
return 29834 ^ hash(type(self)) ^ hash(self.py_offset)
def make_node(self, a_val, a_ind, a_ptr, a_nrows, b):
a_nrows = theano.tensor.as_tensor(a_nrows)
a_nrows = theano.tensor.as_ndarray_result(a_nrows)
assert a_val.type.dtype == b.type.dtype
r = gof.Apply(self, [a_val, a_ind, a_ptr, a_nrows, b],
[theano.tensor.tensor(a_val.type.dtype, (False, False))])
......
......@@ -18,7 +18,7 @@ class StochasticGradientDescent(module.FancyModule):
def __init__(self, args, cost, params, gradients=None, stepsize=None, WEIRD_STUFF=True):
"""
:param stepsize: the step to take in (negative) gradient direction
:type stepsize: None, scalar value, or scalar TensorResult
:type stepsize: None, scalar value, or scalar NDArrayResult
"""
super(StochasticGradientDescent, self).__init__()
self.WEIRD_STUFF = WEIRD_STUFF
......@@ -26,7 +26,7 @@ class StochasticGradientDescent(module.FancyModule):
if stepsize is None:
self.stepsize = (T.dscalar())
elif isinstance(stepsize, T.TensorResult):
elif isinstance(stepsize, T.NDArrayResult):
self.stepsize = stepsize
else:
if self.WEIRD_STUFF:
......@@ -89,9 +89,9 @@ class TanhRnn(Op):
:type A: matrix (M by M)
"""
x = T.as_tensor(x)
z0 = T.as_tensor(z0)
A = T.as_tensor(A)
x = T.as_ndarray_result(x)
z0 = T.as_ndarray_result(z0)
A = T.as_ndarray_result(A)
z = x.type() #make a new symbolic result with the same type as x
return Apply(self, [x, z0, A], [z])
......
......@@ -289,9 +289,9 @@ class Type(object2, PureType, CLinkerType):
- `Generic`: for any python type
- `Tensor`: for numpy.ndarray
- `NDArrayType`: for numpy.ndarray
- `Sparse`: for scipy.sparse
- `SparseType`: for scipy.sparse
But you are encouraged to write your own, as described in WRITEME.
......
......@@ -35,19 +35,19 @@ if scipy.__version__ != '0.7.0':
def _is_sparse_result(x):
"""
@rtype: boolean
@return: True iff x is a L{SparseResult} (and not a L{tensor.Tensor})
@return: True iff x is a L{SparseResult} (and not a L{tensor.NDArrayType})
"""
if not isinstance(x.type, Sparse) and not isinstance(x.type, tensor.Tensor):
raise NotImplementedError("this function should only be called on *results* (of type sparse.Sparse or tensor.Tensor), not,", x)
return isinstance(x.type, Sparse)
if not isinstance(x.type, SparseType) and not isinstance(x.type, tensor.NDArrayType):
raise NotImplementedError("this function should only be called on *results* (of type sparse.SparseType or tensor.NDArrayType), not,", x)
return isinstance(x.type, SparseType)
def _is_dense_result(x):
"""
@rtype: boolean
@return: True unless x is a L{SparseResult} (and not a L{tensor.Tensor})
@return: True unless x is a L{SparseResult} (and not a L{tensor.NDArrayType})
"""
if not isinstance(x.type, Sparse) and not isinstance(x.type, tensor.Tensor):
raise NotImplementedError("this function should only be called on *results* (of type sparse.Sparse or tensor.Tensor), not,", x)
return isinstance(x.type, tensor.Tensor)
if not isinstance(x.type, SparseType) and not isinstance(x.type, tensor.NDArrayType):
raise NotImplementedError("this function should only be called on *results* (of type sparse.SparseType or tensor.NDArrayType), not,", x)
return isinstance(x.type, tensor.NDArrayType)
def _is_sparse(x):
"""
......@@ -78,10 +78,10 @@ def _kmap_hash(a):
# Wrapper type
def as_sparse(x):
def as_sparse_result(x):
"""
Wrapper around SparseResult constructor.
@param x: A sparse matrix. as_sparse reads dtype and format properties
@param x: A sparse matrix. as_sparse_result reads dtype and format properties
out of this sparse matrix.
@return: SparseResult version of sp.
......@@ -93,38 +93,40 @@ def as_sparse(x):
else:
x = x.outputs[0]
if isinstance(x, gof.Result):
if not isinstance(x.type, Sparse):
raise TypeError("Result type field must be a Sparse.", x, x.type)
if not isinstance(x.type, SparseType):
raise TypeError("Result type field must be a SparseType.", x, x.type)
return x
try:
return constant(x)
except TypeError:
raise TypeError("Cannot convert %s to Sparse" % x, type(x))
raise TypeError("Cannot convert %s to SparseType" % x, type(x))
as_sparse = as_sparse_result
def constant(x):
if not isinstance(x, sparse.spmatrix):
raise TypeError("sparse.constant must be called on a scipy.sparse.spmatrix")
try:
return SparseConstant(Sparse(format = x.format,
return SparseConstant(SparseType(format = x.format,
dtype = x.dtype), x)
except TypeError:
raise TypeError("Could not convert %s to Sparse" % x, type(x))
raise TypeError("Could not convert %s to SparseType" % x, type(x))
def value(x):
if not isinstance(x, sparse.spmatrix):
raise TypeError("sparse.value must be called on a scipy.sparse.spmatrix")
try:
return SparseValue(Sparse(format = x.format,
return SparseValue(SparseType(format = x.format,
dtype = x.dtype), x)
except TypeError:
raise TypeError("Could not convert %s to Sparse" % x, type(x))
raise TypeError("Could not convert %s to SparseType" % x, type(x))
def sp_ones_like(x):
data, indices, indptr, shape = csm_properties(x) #TODO: don't restrict to CSM formats
return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape)
class Sparse(gof.Type):
class SparseType(gof.Type):
"""
@type dtype: numpy dtype string such as 'int64' or 'float64' (among others)
@type format: string
......@@ -196,8 +198,8 @@ class Sparse(gof.Type):
def is_valid_value(self, a):
return scipy.sparse.issparse(a) and (a.format == self.format)
csc_matrix = Sparse(format='csc')
csr_matrix = Sparse(format='csr')
csc_matrix = SparseType(format='csc')
csr_matrix = SparseType(format='csr')
class _sparse_py_operators:
T = property(lambda self: transpose(self), doc = "Return aliased transpose of self (read-only)")
......@@ -248,8 +250,8 @@ class CSMProperties(gof.Op):
return 8234 ^ hash(type(self)) ^ _kmap_hash(self.kmap)
def make_node(self, csm):
csm = as_sparse(csm)
data = tensor.Tensor(dtype=csm.type.dtype, broadcastable = (False,)).make_result()
csm = as_sparse_result(csm)
data = tensor.NDArrayType(dtype=csm.type.dtype, broadcastable = (False,)).make_result()
return gof.Apply(self, [csm],
[data, tensor.ivector(), tensor.ivector(), tensor.ivector()])
......@@ -319,10 +321,10 @@ class CSM(gof.Op):
:type indptr: 1-d tensor of ints
"""
data = tensor.as_tensor(data)
indices = tensor.as_tensor(indices)
indptr = tensor.as_tensor(indptr)
shape = tensor.as_tensor(shape)
data = tensor.as_ndarray_result(data)
indices = tensor.as_ndarray_result(indices)
indptr = tensor.as_ndarray_result(indptr)
shape = tensor.as_ndarray_result(shape)
if data.type.ndim != 1:
raise TypeError('data argument must be a vector', data.type)
......@@ -335,7 +337,7 @@ class CSM(gof.Op):
return gof.Apply(self,
[data, indices, indptr, shape],
[Sparse(dtype = data.type.dtype,
[SparseType(dtype = data.type.dtype,
format = self.format).make_result()])
def perform(self, node, (data, indices, indptr, shape), (out,)):
......@@ -366,7 +368,7 @@ class CSM(gof.Op):
def grad(self, (data, indices, indptr, shape), (g_out,)):
"""Return a gradient on the data vector"""
#unpack the data vector and wrap it as a 1d Tensor
#unpack the data vector and wrap it as a 1d NDArrayType
g_data = csm_grad(self.kmap)(data, csm_data(g_out),csm_indices(g_out))
return [g_data, None, None, None]
......@@ -423,10 +425,10 @@ class DenseFromSparse(gof.op.Op):
"""WRITEME"""
def make_node(self, x):
x = as_sparse(x)
x = as_sparse_result(x)
return gof.Apply(self,
[x],
[tensor.Tensor(dtype = x.type.dtype,
[tensor.NDArrayType(dtype = x.type.dtype,
broadcastable = (False, False)).make_result()])
def perform(self, node, (x, ), (out, )):
if _is_dense(x):
......@@ -453,13 +455,13 @@ class SparseFromDense(gof.op.Op):
return 982374 ^ hash(self.format) ^ hash(DenseFromSparse)
def make_node(self, x):
x = tensor.as_tensor(x)
x = tensor.as_ndarray_result(x)
return gof.Apply(self,
[x],
[Sparse(dtype = x.type.dtype,
[SparseType(dtype = x.type.dtype,
format = self.format).make_result()])
def perform(self, node, (x, ), (out, )):
out[0] = Sparse.format_cls[self.format](x)
out[0] = SparseType.format_cls[self.format](x)
def grad(self, (x, ), (gz, )):
return dense_from_sparse(gz),
csr_from_dense = SparseFromDense('csr')
......@@ -473,10 +475,10 @@ class Transpose(gof.op.Op):
format_map = {'csr' : 'csc',
'csc' : 'csr'}
def make_node(self, x):
x = as_sparse(x)
x = as_sparse_result(x)
return gof.Apply(self,
[x],
[Sparse(dtype = x.type.dtype,
[SparseType(dtype = x.type.dtype,
format = self.format_map[x.type.format]).make_result()])
def perform(self, node, (x, ), (out, )):
assert _is_sparse(x)
......@@ -488,7 +490,7 @@ transpose = Transpose()
class Neg(gof.op.Op):
def make_node(self, x):
x = as_sparse(x)
x = as_sparse_result(x)
return gof.Apply(self, [x], [x.type()])
def perform(self, node, (x, ), (out, )):
assert _is_sparse(x)
......@@ -501,7 +503,7 @@ neg = Neg()
class AddSS(gof.op.Op):
'''Add two sparse matrices '''
def make_node(self, x, y):
x, y = map(as_sparse, [x, y])
x, y = map(as_sparse_result, [x, y])
if x.type.dtype != y.type.dtype:
raise NotImplementedError()
if x.type.format != y.type.format:
......@@ -509,7 +511,7 @@ class AddSS(gof.op.Op):
raise NotImplementedError()
return gof.Apply(self,
[x, y],
[Sparse(dtype = x.type.dtype,
[SparseType(dtype = x.type.dtype,
format = x.type.format).make_result()])
def perform(self, node, (x, y), (out, )):
assert _is_sparse(x) and _is_sparse(y)
......@@ -523,7 +525,7 @@ add_s_s = AddSS()
class AddSD(gof.op.Op):
''' Add a sparse and a dense matrix '''
def make_node(self, x, y):
x, y = as_sparse(x), tensor.as_tensor(y)
x, y = as_sparse_result(x), tensor.as_ndarray_result(y)
if x.type.dtype != y.type.dtype:
raise NotImplementedError()
# The magic number two here arises because L{scipy.sparse}
......@@ -531,7 +533,7 @@ class AddSD(gof.op.Op):
assert y.type.ndim == 2
return gof.Apply(self,
[x, y],
[tensor.Tensor(dtype = y.type.dtype,
[tensor.NDArrayType(dtype = y.type.dtype,
broadcastable = y.type.broadcastable).make_result()])
def perform(self, node, (x, y), (out, )):
assert _is_sparse(x) and _is_dense(y)
......@@ -545,8 +547,8 @@ def add(x,y):
"""
Add two matrices, at least one of which is sparse.
"""
if hasattr(x, 'getnnz'): x = as_sparse(x)
if hasattr(y, 'getnnz'): y = as_sparse(y)
if hasattr(x, 'getnnz'): x = as_sparse_result(x)
if hasattr(y, 'getnnz'): y = as_sparse_result(y)
x_is_sparse_result = _is_sparse_result(x)
y_is_sparse_result = _is_sparse_result(y)
......@@ -564,7 +566,7 @@ def sub(x,y):
class MulSS(gof.op.Op):
''' Elementwise multiply a sparse and a ndarray '''
def make_node(self, x, y):
x, y = as_sparse(x), as_sparse(y)
x, y = as_sparse_result(x), as_sparse_result(y)
if x.type != y.type:
raise NotImplementedError()
return gof.Apply(self, [x, y], [x.type()])
......@@ -583,7 +585,7 @@ mul_s_s = MulSS()
class MulSD(gof.op.Op):
''' Elementwise multiply a sparse and a ndarray '''
def make_node(self, x, y):
x, y = as_sparse(x), tensor.as_tensor(y)
x, y = as_sparse_result(x), tensor.as_ndarray_result(y)
if x.type.dtype != y.type.dtype:
raise NotImplementedError()
# The magic number two here arises because L{scipy.sparse}
......@@ -641,8 +643,8 @@ def mul(x,y):
"""
Multiply (elementwise) two matrices, at least one of which is sparse.
"""
if hasattr(x, 'getnnz'): x = as_sparse(x)
if hasattr(y, 'getnnz'): y = as_sparse(y)
if hasattr(x, 'getnnz'): x = as_sparse_result(x)
if hasattr(y, 'getnnz'): y = as_sparse_result(y)
x_is_sparse_result = _is_sparse_result(x)
y_is_sparse_result = _is_sparse_result(y)
......@@ -661,7 +663,7 @@ class StructuredDot(gof.Op):
"""Structured Dot is like dot, except that only the gradient wrt non-zero elements of the
sparse matrix A are calculated and propagated.
The output is presumed to be a dense matrix, and is represented by a Tensor instance.
The output is presumed to be a dense matrix, and is represented by a NDArrayType instance.
"""
def make_node(self, a, b):
assert a.type.dtype == b.type.dtype
......@@ -710,8 +712,8 @@ def structured_dot(x, y):
@todo: Maybe the triple-transposition formulation (when x is dense)
is slow. See if there is a direct way to do this.
"""
if hasattr(x, 'getnnz'): x = as_sparse(x)
if hasattr(y, 'getnnz'): y = as_sparse(y)
if hasattr(x, 'getnnz'): x = as_sparse_result(x)
if hasattr(y, 'getnnz'): y = as_sparse_result(y)
x_is_sparse_result = _is_sparse_result(x)
y_is_sparse_result = _is_sparse_result(y)
......
......@@ -66,8 +66,8 @@ def true_dot(x, y, grad_preserves_dense=True):
@todo: Maybe the triple-transposition formulation (when x is dense)
is slow. See if there is a direct way to do this.
"""
if hasattr(x, 'getnnz'): x = as_sparse(x)
if hasattr(y, 'getnnz'): y = as_sparse(y)
if hasattr(x, 'getnnz'): x = as_sparse_result(x)
if hasattr(y, 'getnnz'): y = as_sparse_result(y)
x_is_sparse_result = _is_sparse_result(x)
y_is_sparse_result = _is_sparse_result(y)
......@@ -86,7 +86,7 @@ class test_true_dot(unittest.TestCase):
def test_basicSS(self):
for mtype in _mtypes:
x = as_sparse(mtype((500,3)))
x = as_sparse_result(mtype((500,3)))
x.data[(10, 1)] = 1
x.data[(20, 2)] = 2
self.failUnless(_is_sparse_result(x))
......@@ -117,12 +117,12 @@ class test_true_dot(unittest.TestCase):
def test_basicSD(self):
for mtype in _mtypes:
x = as_sparse(mtype((500,3)))
x = as_sparse_result(mtype((500,3)))
x.data[(10, 1)] = 1
x.data[(20, 2)] = 2
self.failUnless(_is_sparse_result(x))
y = tensor.as_tensor([[1., 2], [3, 4], [2, 1]])
y = tensor.as_ndarray_result([[1., 2], [3, 4], [2, 1]])
self.failUnless(_is_dense_result(y))
zop = true_dot(x,y)
......@@ -150,12 +150,12 @@ class test_true_dot(unittest.TestCase):
def test_basicDS(self):
for mtype in _mtypes:
x = as_sparse(mtype((500,3)))
x = as_sparse_result(mtype((500,3)))
x.data[(10, 1)] = 1
x.data[(20, 2)] = 2
self.failUnless(_is_sparse_result(x))
y = tensor.as_tensor([[1., 2], [3, 4], [2, 1]])
y = tensor.as_ndarray_result([[1., 2], [3, 4], [2, 1]])
self.failUnless(_is_dense_result(y))
x.data = x.data.T
......@@ -189,7 +189,7 @@ class test_true_dot(unittest.TestCase):
def test_graph_bprop0(self):
for mtype in _mtypes:
x = tensor.matrix('x') #Tensor('float64', broadcastable=[False,False], name='x')
x = tensor.matrix('x') #NDArrayType('float64', broadcastable=[False,False], name='x')
w = Sparse(dtype = 'float64', format = _mtype_to_str[mtype]).make_result()
xw = dense_from_sparse(true_dot(w, x))
y = dense_from_sparse(true_dot(w.T, xw))
......
......@@ -22,7 +22,7 @@ class T_transpose(unittest.TestCase):
def test_transpose_csc(self):
sp = sparse.csc_matrix(sparse.eye(5,3))
a = as_sparse(sp)
a = as_sparse_result(sp)
self.failUnless(a.data is sp)
self.failUnless(a.data.shape == (5,3))
self.failUnless(a.type.dtype == 'float64', a.type.dtype)
......@@ -34,7 +34,7 @@ class T_transpose(unittest.TestCase):
vta = eval_outputs([ta])
self.failUnless(vta.shape == (3,5))
def test_transpose_csr(self):
a = as_sparse(sparse.csr_matrix(sparse.eye(5,3)))
a = as_sparse_result(sparse.csr_matrix(sparse.eye(5,3)))
self.failUnless(a.data.shape == (5,3))
self.failUnless(a.type.dtype == 'float64')
self.failUnless(a.type.format == 'csr')
......@@ -49,13 +49,13 @@ class T_Add(unittest.TestCase):
def testSS(self):
for mtype in _mtypes:
a = mtype(numpy.array([[1., 0], [3, 0], [0, 6]]))
aR = as_sparse(a)
aR = as_sparse_result(a)
self.failUnless(aR.data is a)
self.failUnless(_is_sparse(a))
self.failUnless(_is_sparse_result(aR))
b = mtype(numpy.asarray([[0, 2.], [0, 4], [5, 0]]))
bR = as_sparse(b)
bR = as_sparse_result(b)
self.failUnless(bR.data is b)
self.failUnless(_is_sparse(b))
self.failUnless(_is_sparse_result(bR))
......@@ -76,13 +76,13 @@ class T_Add(unittest.TestCase):
def testSD(self):
for mtype in _mtypes:
a = numpy.array([[1., 0], [3, 0], [0, 6]])
aR = tensor.as_tensor(a)
aR = tensor.as_ndarray_result(a)
self.failUnless(aR.data is a)
self.failUnless(_is_dense(a))
self.failUnless(_is_dense_result(aR))
b = mtype(numpy.asarray([[0, 2.], [0, 4], [5, 0]]))
bR = as_sparse(b)
bR = as_sparse_result(b)
self.failUnless(bR.data is b)
self.failUnless(_is_sparse(b))
self.failUnless(_is_sparse_result(bR))
......@@ -101,13 +101,13 @@ class T_Add(unittest.TestCase):
def testDS(self):
for mtype in _mtypes:
a = mtype(numpy.array([[1., 0], [3, 0], [0, 6]]))
aR = as_sparse(a)
aR = as_sparse_result(a)
self.failUnless(aR.data is a)
self.failUnless(_is_sparse(a))
self.failUnless(_is_sparse_result(aR))
b = numpy.asarray([[0, 2.], [0, 4], [5, 0]])
bR = tensor.as_tensor(b)
bR = tensor.as_ndarray_result(b)
self.failUnless(bR.data is b)
self.failUnless(_is_dense(b))
self.failUnless(_is_dense_result(bR))
......@@ -128,14 +128,14 @@ class T_conversion(unittest.TestCase):
unittest_tools.seed_rng()
def test0(self):
a = tensor.as_tensor(numpy.random.rand(5))
a = tensor.as_ndarray_result(numpy.random.rand(5))
s = csc_from_dense(a)
val = eval_outputs([s])
self.failUnless(str(val.dtype)=='float64')
self.failUnless(val.format == 'csc')
def test1(self):
a = tensor.as_tensor(numpy.random.rand(5))
a = tensor.as_ndarray_result(numpy.random.rand(5))
s = csr_from_dense(a)
val = eval_outputs([s])
self.failUnless(str(val.dtype)=='float64')
......
......@@ -59,11 +59,11 @@ def __oplist_tag(thing, tag):
thing.__oplist_tags = tags
def as_tensor(x, name = None, ndim=None):
"""Return `x`, transformed into a `Tensor`
def as_ndarray_result(x, name = None, ndim=None):
"""Return `x`, transformed into a `NDArrayType`
This function is often used by `make_node` methods of `Op` subclasses to
turn ndarrays, numbers, `Scalar` instances, `Apply` instances and `Tensor`
turn ndarrays, numbers, `Scalar` instances, `Apply` instances and `NDArrayType`
    instances into valid input list elements.
:Parameters:
......@@ -78,7 +78,7 @@ def as_tensor(x, name = None, ndim=None):
:Exceptions:
- `ValueError`: raised if an `Apply` with no default output is fetched
- `TypeError`: raised if `x` cannot be converted to a Tensor Result
        - `TypeError`: raised if `x` cannot be converted to an NDArrayType Result
"""
......@@ -92,15 +92,15 @@ def as_tensor(x, name = None, ndim=None):
if isinstance(x.type, scal.Scalar):
x = tensor_from_scalar(x)
if not isinstance(x.type, Tensor):
raise TypeError("Result type field must be a Tensor.", x, x.type)
if not isinstance(x.type, NDArrayType):
raise TypeError("Result type field must be a NDArrayType.", x, x.type)
if ndim is None:
return x
else:
if (x.type.ndim > ndim):
#TODO: strip off leading broadcastable dimensions
raise ValueError('Tensor could not be cast to have %i dimensions' % ndim, x.type)
raise ValueError('NDArrayType could not be cast to have %i dimensions' % ndim, x.type)
elif (x.type.ndim < ndim):
return shape_padleft(x, n_ones=(ndim - x.type.ndim))
else:
......@@ -112,11 +112,14 @@ def as_tensor(x, name = None, ndim=None):
str_x = str(x)
except:
str_x = repr(x)
raise TypeError("Cannot convert %s to Tensor" % str_x, type(x))
raise TypeError("Cannot convert %s to NDArrayType" % str_x, type(x))
# this has a different name, because _as_tensor is the function which ops use
# this has a different name, because _as_ndarray_result is the function which ops use
# to upcast their arguments... this internal-use function is a good place to put debugging stuff, better than the global astensor.
_as_tensor = as_tensor
_as_ndarray_result = as_ndarray_result
as_tensor = as_ndarray_result
def constant_or_value(x, rtype, name=None, ndim=None):
"""Return a symbolic `Constant` with value `x`
......@@ -141,19 +144,19 @@ def constant_or_value(x, rtype, name=None, ndim=None):
assert len(bcastable) == ndim
try:
return rtype(Tensor(dtype = x_.dtype, broadcastable = bcastable), x_, name=name)
return rtype(NDArrayType(dtype = x_.dtype, broadcastable = bcastable), x_, name=name)
except:
raise TypeError("Could not convert %s to Tensor" % x, type(x))
raise TypeError("Could not convert %s to NDArrayType" % x, type(x))
def constant(x, name=None, ndim=None):
return constant_or_value(x, rtype=TensorConstant, name=name, ndim=ndim)
return constant_or_value(x, rtype=NDArrayConstant, name=name, ndim=ndim)
def value(x, name=None, ndim=None):
return constant_or_value(x, rtype=TensorValue, name=name, ndim=ndim)
return constant_or_value(x, rtype=NDArrayValue, name=name, ndim=ndim)
class Tensor(Type):
class NDArrayType(Type):
"""Symbolic `Type` representing a numpy.ndarray value."""
def __init__(self, dtype, broadcastable, name = None):
......@@ -178,7 +181,7 @@ class Tensor(Type):
self.name = name
def filter(self, data, strict = False):
"""Convert `data` to something which can be associated to a `TensorResult`.
"""Convert `data` to something which can be associated to a `NDArrayResult`.
This function is not meant to be called in user code. It is for
`Linker` instances to use when running a compiled graph.
......@@ -228,7 +231,7 @@ class Tensor(Type):
return scal.Scalar(dtype = self.dtype)
def __eq__(self, other):
"""Compare True iff other is the same kind of Tensor"""
"""Compare True iff other is the same kind of NDArrayType"""
return type(self) == type(other) and other.dtype == self.dtype and other.broadcastable == self.broadcastable
def values_eq_approx(self, a, b):
......@@ -236,26 +239,26 @@ class Tensor(Type):
and (a.shape == b.shape) and numpy.allclose(a, b)
def __hash__(self):
"""Hash equal for same kinds of Tensor"""
"""Hash equal for same kinds of NDArrayType"""
return hash(self.dtype) ^ hash(self.broadcastable)
ndim = property(lambda self: len(self.broadcastable), doc = "number of dimensions")
"""Number of dimensions
This read-only property is the preferred way to get the number of dimensions
of a `Tensor`.
of a `NDArrayType`.
"""
def make_result(self, name = None):
"""Return a `TensorResult` of this type
"""Return a `NDArrayResult` of this type
:Parameters:
- `name`: str
A pretty name to identify this `Result` when printing and debugging
"""
return TensorResult(self, name = name)
return NDArrayResult(self, name = name)
def __str__(self):
if self.name:
......@@ -268,11 +271,11 @@ class Tensor(Type):
(False, True): 'col',
(True, False): 'row',
(False, False): 'matrix'}.get(b, "%iD" % len(b) if not any(b) else str(b))
return "Tensor(%s, %s)" % (str(self.dtype), bcast)
return "NDArrayType(%s, %s)" % (str(self.dtype), bcast)
def __repr__(self):
return str(self)
#"Tensor{%s, %s}" % (str(self.dtype), str(self.broadcastable))
#"NDArrayType{%s, %s}" % (str(self.dtype), str(self.broadcastable))
def c_declare(self, name, sub):
"""Override `CLinkerOp.c_declare` """
......@@ -386,7 +389,7 @@ class Tensor(Type):
# Easy constructors
def tensor(*args, **kwargs):
return Tensor(*args, **kwargs).make_result()
return NDArrayType(*args, **kwargs).make_result()
def _multi(*fns):
def f2(f, *names):
......@@ -407,16 +410,16 @@ def _multi(*fns):
else:
return [partial(f2, f) for f in fns]
cscalar = Tensor('complex64', ())
zscalar = Tensor('complex128', ())
fscalar = Tensor('float32', ())
dscalar = Tensor('float64', ())
bscalar = Tensor('int8', ())
wscalar = Tensor('int16', ())
iscalar = Tensor('int32', ())
lscalar = Tensor('int64', ())
cscalar = NDArrayType('complex64', ())
zscalar = NDArrayType('complex128', ())
fscalar = NDArrayType('float32', ())
dscalar = NDArrayType('float64', ())
bscalar = NDArrayType('int8', ())
wscalar = NDArrayType('int16', ())
iscalar = NDArrayType('int32', ())
lscalar = NDArrayType('int64', ())
def scalar(name = None, dtype = 'float64'):
type = Tensor(dtype, ())
type = NDArrayType(dtype, ())
return type(name)
scalars, fscalars, dscalars, iscalars, lscalars = _multi(scalar, fscalar, dscalar, iscalar, lscalar)
......@@ -427,16 +430,16 @@ int_scalar_types = int_types
float_scalar_types = float_types
complex_scalar_types = complex_types
cvector = Tensor('complex64', (False, ))
zvector = Tensor('complex128', (False, ))
fvector = Tensor('float32', (False, ))
dvector = Tensor('float64', (False, ))
bvector = Tensor('int8', (False,))
wvector = Tensor('int16', (False,))
ivector = Tensor('int32', (False, ))
lvector = Tensor('int64', (False, ))
cvector = NDArrayType('complex64', (False, ))
zvector = NDArrayType('complex128', (False, ))
fvector = NDArrayType('float32', (False, ))
dvector = NDArrayType('float64', (False, ))
bvector = NDArrayType('int8', (False,))
wvector = NDArrayType('int16', (False,))
ivector = NDArrayType('int32', (False, ))
lvector = NDArrayType('int64', (False, ))
def vector(name = None, dtype = 'float64'):
type = Tensor(dtype, (False, ))
type = NDArrayType(dtype, (False, ))
return type(name)
vectors, fvectors, dvectors, ivectors, lvectors = _multi(vector, fvector, dvector, ivector, lvector)
......@@ -444,16 +447,16 @@ int_vector_types = bvector, wvector, ivector, lvector
float_vector_types = fvector, dvector
complex_vector_types = cvector, zvector
cmatrix = Tensor('complex64', (False, False))
zmatrix = Tensor('complex128', (False, False))
fmatrix = Tensor('float32', (False, False))
dmatrix = Tensor('float64', (False, False))
bmatrix = Tensor('int8', (False, False))
wmatrix = Tensor('int16', (False, False))
imatrix = Tensor('int32', (False, False))
lmatrix = Tensor('int64', (False, False))
cmatrix = NDArrayType('complex64', (False, False))
zmatrix = NDArrayType('complex128', (False, False))
fmatrix = NDArrayType('float32', (False, False))
dmatrix = NDArrayType('float64', (False, False))
bmatrix = NDArrayType('int8', (False, False))
wmatrix = NDArrayType('int16', (False, False))
imatrix = NDArrayType('int32', (False, False))
lmatrix = NDArrayType('int64', (False, False))
def matrix(name = None, dtype = 'float64'):
type = Tensor(dtype, (False, False))
type = NDArrayType(dtype, (False, False))
return type(name)
matrices, fmatrices, dmatrices, imatrices, lmatrices = _multi(matrix, fmatrix, dmatrix, imatrix, lmatrix)
......@@ -461,29 +464,29 @@ int_matrix_types = bmatrix, wmatrix, imatrix, lmatrix
float_matrix_types = fmatrix, dmatrix
complex_matrix_types = cmatrix, zmatrix
crow = Tensor('complex64', (True, False))
zrow = Tensor('complex128', (True, False))
frow = Tensor('float32', (True, False))
drow = Tensor('float64', (True, False))
brow = Tensor('int8', (True, False))
wrow = Tensor('int16', (True, False))
irow = Tensor('int32', (True, False))
lrow = Tensor('int64', (True, False))
crow = NDArrayType('complex64', (True, False))
zrow = NDArrayType('complex128', (True, False))
frow = NDArrayType('float32', (True, False))
drow = NDArrayType('float64', (True, False))
brow = NDArrayType('int8', (True, False))
wrow = NDArrayType('int16', (True, False))
irow = NDArrayType('int32', (True, False))
lrow = NDArrayType('int64', (True, False))
def row(name = None, dtype = 'float64'):
type = Tensor(dtype, (True, False))
type = NDArrayType(dtype, (True, False))
return type(name)
rows, frows, drows, irows, lrows = _multi(row, frow, drow, irow, lrow)
ccol = Tensor('complex64', (False, True))
zcol = Tensor('complex128', (False, True))
fcol = Tensor('float32', (False, True))
dcol = Tensor('float64', (False, True))
bcol = Tensor('int8', (False, True))
wcol = Tensor('int16', (False, True))
icol = Tensor('int32', (False, True))
lcol = Tensor('int64', (False, True))
ccol = NDArrayType('complex64', (False, True))
zcol = NDArrayType('complex128', (False, True))
fcol = NDArrayType('float32', (False, True))
dcol = NDArrayType('float64', (False, True))
bcol = NDArrayType('int8', (False, True))
wcol = NDArrayType('int16', (False, True))
icol = NDArrayType('int32', (False, True))
lcol = NDArrayType('int64', (False, True))
def col(name = None, dtype = 'float64'):
type = Tensor(dtype, (False, True))
type = NDArrayType(dtype, (False, True))
return type(name)
cols, fcols, dcols, icols, lcols = _multi(col, fcol, dcol, icol, lcol)
......@@ -588,7 +591,7 @@ class _tensor_py_operators:
def __iter__(self):
# This prevents accidental iteration via builtin.sum(self)
raise TypeError('Tensor does not support iteration. '
raise TypeError('NDArrayType does not support iteration. '
'Maybe you are using builtin.sum instead of theano.tensor.sum? (Maybe .max?)')
......@@ -621,10 +624,10 @@ class _tensor_py_operators:
return pow(pow(abs_(self), L).sum(axis=axis), 1.0/L)
class TensorResult(Result, _tensor_py_operators):
class NDArrayResult(Result, _tensor_py_operators):
"""Subclass to add the tensor operators to the basic `Result` class."""
class TensorConstantSignature(tuple):
class NDArrayConstantSignature(tuple):
def __eq__(self, other):
(a, b), (x,y) = self, other
#N.B. compare shape to ensure no broadcasting in ==
......@@ -633,26 +636,33 @@ class TensorConstantSignature(tuple):
a, b = self
return hash(type(self)) ^ hash(a) ^ hash(b.shape)
class TensorConstant(Constant, _tensor_py_operators):
class NDArrayConstant(Constant, _tensor_py_operators):
"""Subclass to add the tensor operators to the basic `Constant` class.
To create a TensorConstant, use the `constant` function in this module.
To create a NDArrayConstant, use the `constant` function in this module.
"""
def signature(self):
return TensorConstantSignature((self.type, self.data))
return NDArrayConstantSignature((self.type, self.data))
class TensorValue(Value, _tensor_py_operators):
class NDArrayValue(Value, _tensor_py_operators):
"""Subclass to add the tensor operators to the basic `Value` class.
To create a TensorValue, use the `value` function in this module.
To create a NDArrayValue, use the `value` function in this module.
"""
Tensor = NDArrayType
TensorResult = NDArrayResult
TensorConstant = NDArrayConstant
TensorValue = NDArrayValue
#QUESTION: why are we doing this!?
elemwise.as_tensor = as_tensor
elemwise.Tensor = Tensor
elemwise.TensorResult = TensorResult
elemwise.TensorConstant = TensorConstant
elemwise.TensorValue = TensorValue
elemwise.as_ndarray_result = as_ndarray_result
elemwise.NDArrayType = NDArrayType
elemwise.NDArrayResult = NDArrayResult
elemwise.NDArrayConstant = NDArrayConstant
elemwise.NDArrayValue = NDArrayValue
......@@ -724,7 +734,7 @@ def _scal_elemwise(symbol):
# Casting Operations
#########################
class TensorFromScalar(Op):
class NDArrayFromScalar(Op):
def make_node(self, s):
assert isinstance(s.type, scal.Scalar)
return Apply(self,
......@@ -734,12 +744,12 @@ class TensorFromScalar(Op):
def perform(self, node, (s, ), (out, )):
out[0] = numpy.asarray(s)
def grad(self, (s,), (dt,)):
return [ScalarFromTensor(dt)]
tensor_from_scalar = TensorFromScalar()
return [ScalarFromNDArray(dt)]
tensor_from_scalar = NDArrayFromScalar()
class ScalarFromTensor(Op):
class ScalarFromNDArray(Op):
def make_node(self, t):
assert isinstance(t.type, Tensor)
assert isinstance(t.type, NDArrayType)
assert t.type.broadcastable == ()
return Apply(self,
[t],
......@@ -747,8 +757,8 @@ class ScalarFromTensor(Op):
def perform(self, node, (s, ), (out, )):
out[0] = s.flatten()[0]
def grad(self, (s,), (dt,)):
return [TensorFromScalar(dt)]
scalar_from_tensor = ScalarFromTensor()
return [NDArrayFromScalar(dt)]
scalar_from_tensor = ScalarFromNDArray()
@constructor
......@@ -807,7 +817,7 @@ class Shape(Op):
@note: Non-differentiable.
"""
def make_node(self, x):
x = as_tensor(x)
x = as_ndarray_result(x)
return Apply(self, [x], [lvector()])
def perform(self, node, (x, ), (out, )):
out[0] = numpy.asarray(x.shape, dtype = 'int64')
......@@ -827,10 +837,10 @@ class MaxAndArgmax(Op):
E_axis = 'invalid axis'
def make_node(self, x, axis=None):
x = _as_tensor(x)
x = _as_ndarray_result(x)
if axis is None:
axis = x.type.ndim - 1
axis = _as_tensor(axis)
axis = _as_ndarray_result(axis)
inputs = [x, axis]
broadcastable = [False] * (x.type.ndim - 1)
outputs = [tensor(x.type.dtype, broadcastable),
......@@ -975,7 +985,7 @@ def invert(a):
def abs_(a):
"""|`a`|
TensorResult overloads the `TensorResult.__abs__` operator so that
NDArrayResult overloads the `NDArrayResult.__abs__` operator so that
this function is called when you type abs(a).
"""
......@@ -1076,11 +1086,11 @@ class Filler(gof.Op):
self.value = value
self.ndim = ndim
self.dtype = dtype
self.type = Tensor(dtype = dtype,
self.type = NDArrayType(dtype = dtype,
broadcastable = (False,)*ndim)
# Build the graph node: `dims` (the requested output shape) is coerced to
# a symbolic result, and the single output has the type fixed at
# construction time (self.type, set in __init__ from dtype/ndim).
# NOTE(review): diff artifact — the two `dims = ...` lines are the
# pre-/post-rename versions of one statement (as_tensor ->
# as_ndarray_result); only one belongs in either revision.
def make_node(self, dims):
dims = as_tensor(dims)
dims = as_ndarray_result(dims)
return gof.Apply(self, [dims], [self.type()])
def perform(self, node, (dims,), (out,)):
......@@ -1165,10 +1175,10 @@ def mean(input, axis = None):
class Repeat(gof.Op):
def make_node(self, input, repeats, axis):
assert isinstance(input.type, Tensor)
assert isinstance(input.type, NDArrayType)
assert repeats.type == iscalar
assert axis.type == iscalar
type = Tensor(dtype = input.type.dtype,
type = NDArrayType(dtype = input.type.dtype,
broadcastable = [False if i==axis else x for i, x in enumerate(input.broadcastable)])
return gof.Apply(self, [inputs, repeats, axis], [type()])
......@@ -1293,9 +1303,9 @@ class Subtensor(Op):
self.idx_list = map(self.convert, idx_list)
def make_node(self, x, *inputs):
x = as_tensor(x)
x = as_ndarray_result(x)
def my_as_scalar(a):
if isinstance(a, gof.Result) and isinstance(a.type, Tensor):
if isinstance(a, gof.Result) and isinstance(a.type, NDArrayType):
return scalar_from_tensor(a)
else:
return scal.as_scalar(a)
......@@ -1397,7 +1407,7 @@ pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Subtensor), S
class SetSubtensor(Op):
"""Set just some elements of a larger Tensor.
"""Set just some elements of a larger NDArrayType.
This is like numpy's
......@@ -1434,7 +1444,7 @@ class SetSubtensor(Op):
self.__class__.__name__, ", ".join(indices))
def make_node(self, x, y, *inputs):
x, y = map(as_tensor, [x, y])
x, y = map(as_ndarray_result, [x, y])
inputs = tuple(map(scal.as_scalar, inputs))
idx_list = list(self.idx_list)
......@@ -1487,7 +1497,7 @@ def split(x, splits_size, n_splits, axis=0):
return the_split(x, axis, splits_size)
class Split(Op):
"""Partition a `TensorResult` along some axis.
"""Partition a `NDArrayResult` along some axis.
.. python::
......@@ -1523,9 +1533,9 @@ class Split(Op):
def make_node(self, x, axis, splits):
"""WRITEME"""
x = as_tensor(x)
axis = as_tensor(axis)
splits = as_tensor(splits)
x = as_ndarray_result(x)
axis = as_ndarray_result(axis)
splits = as_ndarray_result(splits)
if splits.type not in int_vector_types:
raise TypeError('splits must have type tensor.lvector', splits.type)
......@@ -1567,10 +1577,10 @@ class Split(Op):
class Join(Op):
"""
Concatenate two `TensorResult`s along some axis.
Concatenate two `NDArrayResult`s along some axis.
These tensors must have the same shape along all dimensions other than this axis.
Of course, TensorResult instances don't have a shape, so this error can't be caught until
Of course, NDArrayResult instances don't have a shape, so this error can't be caught until
runtime. See `perform()`.
For joins involving scalar values, see @stack.
......@@ -1600,16 +1610,16 @@ class Join(Op):
axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
if not tensors:
raise ValueError('Cannot join an empty list of tensors')
as_tensor_args= [as_tensor(x) for x in tensors]
dtypes = [x.type.dtype for x in as_tensor_args]
as_ndarray_result_args= [as_ndarray_result(x) for x in tensors]
dtypes = [x.type.dtype for x in as_ndarray_result_args]
out_dtype = scal.upcast(*dtypes)
if not all(targs.type.ndim for targs in as_tensor_args):
if not all(targs.type.ndim for targs in as_ndarray_result_args):
raise TypeError('Join cannot handle arguments of dimension 0. For joining scalar values, see @stack');
# When the axis may vary, no dimension can be guaranteed to be
# broadcastable.
bcastable = [False] * len(as_tensor_args[0].type.broadcastable)
bcastable = [False] * len(as_ndarray_result_args[0].type.broadcastable)
# When the axis is fixed, the broadcastable dimensions remain, except
# for the axis dimension.
......@@ -1617,17 +1627,17 @@ class Join(Op):
# dimensions.
if isinstance(axis, int):
bcasts = [x.type.broadcastable[0:axis] + \
x.type.broadcastable[axis + 1:] for x in as_tensor_args]
x.type.broadcastable[axis + 1:] for x in as_ndarray_result_args]
if not all([bcasts[0] == bc for bc in bcasts[1:]]):
raise ValueError('Dimensions other than the given axis must'
' match', tensors)
bcastable[:] = as_tensor_args[0].type.broadcastable
bcastable[:] = as_ndarray_result_args[0].type.broadcastable
try:
bcastable[axis] = False
except IndexError, e:
raise ValueError('Join argument "axis" is out of range (given input dimensions)')
inputs = [as_tensor(axis)] + as_tensor_args
inputs = [as_ndarray_result(axis)] + as_ndarray_result_args
if inputs[0].type not in int_types:
raise TypeError('Axis could not be cast to an integer type', axis, inputs[0].type, int_types)
......@@ -1683,7 +1693,7 @@ class Join(Op):
@_redefine_asRoutine(Join())
def join(axis, *tensors):
"""
Convenience function to concatenate `Tensor`s along the given axis.
Convenience function to concatenate `NDArrayType`s along the given axis.
:Parameters:
- `tensors` : list of tensors (or list-like)
......@@ -1711,7 +1721,7 @@ def shape_padleft(t, n_ones=1):
See also: `shape_padright` and `Dimshuffle`
"""
_t = as_tensor(t)
_t = as_ndarray_result(t)
pattern = ['x']*n_ones + [i for i in range(_t.type.ndim)]
return DimShuffle(_t.broadcastable, pattern)(_t)
......@@ -1722,7 +1732,7 @@ def shape_padright(t, n_ones=1):
See also: `shape_padleft` and `Dimshuffle`
"""
_t = as_tensor(t)
_t = as_ndarray_result(t)
pattern = [i for i in range(_t.type.ndim)] + ['x']*n_ones
return DimShuffle(_t.broadcastable, pattern)(_t)
......@@ -1759,7 +1769,7 @@ def get_vector_length(v):
"""Return the run-time length of a symbolic vector.
:Parameters:
- `v` : A rank-1 Tensor result.
- `v` : A rank-1 NDArrayType result.
:Exceptions:
- `TypeError` : `v` hasn't the proper type.
......@@ -1788,9 +1798,9 @@ def get_vector_length(v):
@constructor
def horizontal_stack(*args):
"""
Horizontally stack two L{Tensor}s.
Stack two L{Tensor}s along the second axis (column wise). These
L{Tensor}s must have the same shape along all dimensions but the
Horizontally stack two L{NDArrayType}s.
Stack two L{NDArrayType}s along the second axis (column wise). These
L{NDArrayType}s must have the same shape along all dimensions but the
second.
"""
assert len(args) >= 2
......@@ -1806,17 +1816,17 @@ def vertical_stack(*args):
if 0: #vertical and horizontal stacking are deprecated. Better to use stack() and join().
class VerticalStack(Op):
"""
Vertically stack two L{Tensor}s.
Stack two L{Tensor}s along the first axis (row wise). These
L{Tensor}s must have the same shape along all dimensions but the
Vertically stack two L{NDArrayType}s.
Stack two L{NDArrayType}s along the first axis (row wise). These
L{NDArrayType}s must have the same shape along all dimensions but the
first.
@attention: Because we use vstack as the implementation, if the
inputs have 1-dimension, the output will have 2-dimensions.
"""
def make_node(self, x, y):
x = as_tensor(x)
y = as_tensor(y)
x = as_ndarray_result(x)
y = as_ndarray_result(y)
assert x.type.dtype == y.type.dtype
if x.type.broadcastable[1:] != y.type.broadcastable[1:]:
raise NotImplementedError
......@@ -1853,9 +1863,9 @@ class MakeVector(Op):
def __init__(self, stype):
self.stype = stype
def make_node(self, *inputs):
inputs = map(as_tensor, inputs)
inputs = map(as_ndarray_result, inputs)
assert all(a.type == self.stype for a in inputs)
return Apply(self, inputs, [Tensor(broadcastable = (False,),
return Apply(self, inputs, [NDArrayType(broadcastable = (False,),
dtype = self.stype.dtype)()])
def perform(self, node, inputs, (out,)):
out[0] = numpy.asarray(inputs)
......@@ -1891,8 +1901,8 @@ class Reshape(Op):
def __hash__(self):
return hash(Reshape) ^ hash(self.ndim)
def make_node(self, x, shp):
x = as_tensor(x)
shp = as_tensor(shp)
x = as_ndarray_result(x)
shp = as_ndarray_result(shp)
return gof.Apply(self, [x, shp], [tensor(x.type.dtype, [False]*self.ndim)])
def perform(self, node, (x, shp), (out,)):
if (len(shp) != self.ndim):
......@@ -1928,7 +1938,7 @@ class Flatten(Op):
def __hash__(self):
return hash(type(self))^hash(self.outdim)
def make_node(self, x):
t_x = as_tensor(x)
t_x = as_ndarray_result(x)
if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):
raise ValueError('invalid output ndimensions(%i) for tensor of rank %i' %(self.outdim, t_x.ndim))
return gof.Apply(self, [t_x], [tensor(x.type.dtype, (False,)*self.outdim)])
......@@ -1974,8 +1984,8 @@ class Tile(Op):
return hash(Tile) ^ hash(self.ndim)
def make_node(self, x, reps):
x = as_tensor(x)
reps = as_tensor(reps)
x = as_ndarray_result(x)
reps = as_ndarray_result(reps)
return gof.Apply(self, [x, reps], [tensor(x.type.dtype, [False,] * self.ndim)])
def perform(self, node, (x, reps), (out,)):
out[0] = numpy.tile(x, reps)
......@@ -2007,7 +2017,7 @@ class Dot(Op):
"""
def make_node(self, *inputs):
inputs = map(as_tensor, inputs)
inputs = map(as_ndarray_result, inputs)
numpy_semantics = 0
if numpy_semantics:
......@@ -2128,7 +2138,7 @@ class TensorDot(Op):
def make_node(self, x, y):
axesdim = numpy.size(self.axes)/2
x, y = map(as_tensor, [x, y])
x, y = map(as_ndarray_result, [x, y])
if axesdim > x.type.ndim or axesdim > y.type.ndim:
raise TypeError('Cannot sum over more dimensions than input. %i > %i,%i' %
......@@ -2159,7 +2169,7 @@ class Outer(Op):
""" Compute vector-vector outer product
"""
def make_node(self, *inputs):
inputs = map(as_tensor, inputs)
inputs = map(as_ndarray_result, inputs)
x, y = inputs
nx = x.type.ndim
......@@ -2199,12 +2209,12 @@ def grad(cost, wrt, g_cost=None, consider_constant=[]):
@return: symbolic expression of gradient of I{cost} with respect to I{wrt}.
If I{wrt} is a list, then return a list containing the gradient of I{cost} wrt
each element of the list. If an element of I{wrt} is not differentiable
with respect to the output, then a L{TensorConstant} with an appropriate
with respect to the output, then a L{NDArrayConstant} with an appropriate
kind of zero is returned.
"""
if not isinstance(cost, TensorResult):
raise TypeError('In tensor.grad(), cost argument should be a TensorResult.', cost)
if not isinstance(cost, NDArrayResult):
raise TypeError('In tensor.grad(), cost argument should be a NDArrayResult.', cost)
if g_cost is None:
g_cost = ones_like(cost)
......@@ -2212,8 +2222,8 @@ def grad(cost, wrt, g_cost=None, consider_constant=[]):
gmap = gradient.grad_sources_inputs([(cost, g_cost)], inputs + consider_constant)
def zero(p):
return TensorConstant(
Tensor(dtype = p.type.dtype, broadcastable = []),
return NDArrayConstant(
NDArrayType(dtype = p.type.dtype, broadcastable = []),
numpy.asarray(0, dtype=p.type.dtype))
try:
......@@ -2345,7 +2355,7 @@ def verify_grad(testcase, op, pt, n_tests=1, rng=numpy.random, eps=1.0e-7, tol=0
o_fn_out = o_fn(*[p.copy() for p in pt])
#print "PT C", pt
random_projection = rng.rand(*o_fn_out.shape)
t_r = as_tensor(random_projection)
t_r = as_ndarray_result(random_projection)
#random projection of o onto t_r
cost = sum(t_r * o_output) #This sum() is defined above, it's not the builtin sum.
......@@ -2353,7 +2363,7 @@ def verify_grad(testcase, op, pt, n_tests=1, rng=numpy.random, eps=1.0e-7, tol=0
num_grad = numeric_grad(cost_fn, [p.copy() for p in pt], eps)
symbolic_grad = grad(cost, tensor_pt,as_tensor(1.0,name='g_cost'))
symbolic_grad = grad(cost, tensor_pt,as_ndarray_result(1.0,name='g_cost'))
if 0:
print '----------'
......
......@@ -275,7 +275,7 @@ class Gemm(GemmRelated):
E_z_uniq = 'argument z aliased to x or y'
destroy_map = {0: [0]}
def make_node(self, *inputs):
inputs = map(T.as_tensor, inputs)
inputs = map(T.as_ndarray_result, inputs)
if len(inputs) != 5:
raise TypeError("Wrong number of inputs for %s (expected 5, got %s)" % (self, len(inputs)))
z, a, x, y, b = inputs
......@@ -475,7 +475,7 @@ class GemmLocalOptimizer(LocalOptimizer):
@staticmethod
def _as_scalar(res):
"""Return None or a TensorResult whose type is in T.float_scalar_types"""
"""Return None or a NDArrayResult whose type is in T.float_scalar_types"""
if res.owner and isinstance(res.owner.op, T.DimShuffle):
return GemmLocalOptimizer._as_scalar(res.owner.inputs[0])
elif res.type in T.float_scalar_types:
......
......@@ -13,18 +13,18 @@ from copy import copy, deepcopy
# tensor depends on elemwise to provide definitions for several ops
# but elemwise needs to make Tensor instances, so we have these as
# but elemwise needs to make NDArrayType instances, so we have these as
# placeholders and the tensor module fills them
def as_tensor(data):
def as_ndarray_result(data):
raise Exception("Circular dependencies prevent using this here. import tensor before elemwise")
def Tensor(*inputs, **kwargs):
def NDArrayType(*inputs, **kwargs):
raise Exception("Circular dependencies prevent using this here. import tensor before elemwise")
def TensorResult(*inputs, **kwargs):
def NDArrayResult(*inputs, **kwargs):
raise Exception("Circular dependencies prevent using this here. import tensor before elemwise")
def TensorConstant(*inputs, **kwargs):
def NDArrayConstant(*inputs, **kwargs):
raise Exception("Circular dependencies prevent using this here. import tensor before elemwise")
......@@ -137,7 +137,7 @@ class DimShuffle(Op):
else:
ob.append(ib[value])
output = Tensor(dtype = input.type.dtype,
output = NDArrayType(dtype = input.type.dtype,
broadcastable = ob).make_result()
return Apply(self, [input], [output])
......@@ -256,7 +256,7 @@ class DimShuffle(Op):
return full_code % dict(locals(), **sub)
def grad(self, (x, ), (gz, )):
gz = as_tensor(gz)
gz = as_ndarray_result(gz)
grad_order = ['x'] * len(x.type.broadcastable)
for i, v in enumerate(self.new_order):
if v != 'x':
......@@ -365,7 +365,7 @@ class Elemwise(Op):
using DimShuffle.
"""
inputs = map(as_tensor, inputs)
inputs = map(as_ndarray_result, inputs)
shadow = self.scalar_op.make_node(*[Scalar(dtype = t.type.dtype)() for t in inputs])
target_length = max([input.type.ndim for input in inputs])
......@@ -403,7 +403,7 @@ class Elemwise(Op):
if any(inputs[i].type.dtype != out_dtypes[o] for o, i in inplace_pattern.items()):
raise TypeError("Cannot do an inplace operation on incompatible data types.",
([i.type.dtype for i in inputs], out_dtypes, inplace_pattern))
outputs = [Tensor(dtype = dtype, broadcastable = broadcastable)() for dtype, broadcastable in zip(out_dtypes, out_broadcastables)]
outputs = [NDArrayType(dtype = dtype, broadcastable = broadcastable)() for dtype, broadcastable in zip(out_dtypes, out_broadcastables)]
return Apply(self, inputs, outputs)
def __eq__(self, other):
......@@ -431,7 +431,7 @@ class Elemwise(Op):
return self.name
def grad(self, inputs, ograds):
ograds = map(as_tensor, ograds) # this shouldn't be necessary...
ograds = map(as_ndarray_result, ograds) # this shouldn't be necessary...
scalar_inputs = [Scalar(dtype = t.type.dtype)() for t in inputs]
scalar_ograds = [Scalar(dtype = ograd.type.dtype)() for ograd in ograds]
scalar_igrads = self.scalar_op.grad(scalar_inputs, scalar_ograds)
......@@ -445,8 +445,8 @@ class Elemwise(Op):
node = r.owner
if node is None:
# the gradient contains a constant, translate it as
# an equivalent Tensor of size 1 and proper number of dimensions
res = TensorConstant(Tensor(dtype = r.type.dtype,
# an equivalent NDArrayType of size 1 and proper number of dimensions
res = NDArrayConstant(NDArrayType(dtype = r.type.dtype,
broadcastable = ()),
numpy.asarray(r.data)) # .reshape(b)
return DimShuffle((), ['x']*nd, inplace = True)(res)
......@@ -678,12 +678,12 @@ class CAReduce(Op):
self.ufunc = numpy.frompyfunc(scalar_op.impl, 2, 1)
def make_node(self, input):
input = as_tensor(input)
input = as_ndarray_result(input)
axis = self.axis
if axis is None:
axis = range(len(input.type.broadcastable))
output = Tensor(dtype = input.type.dtype,
broadcastable = [x for i, x in enumerate(input.type.broadcastable) if i not in axis])()
output = NDArrayType(dtype = input.type.dtype,
broadcastable = [x for i, x in enumerate(input.type.broadcastable) if i not in axis])()
return Apply(self, [input], [output])
def __getstate__(self):
......@@ -809,7 +809,7 @@ class Sum(CAReduce):
CAReduce.__init__(self, scalar.add, axis)
def grad(self, (x, ), (gz, )):
gz = as_tensor(gz)
gz = as_ndarray_result(gz)
axis = self.axis
if axis is None:
axis = range(x.type.ndim)
......
......@@ -94,8 +94,8 @@ class SoftmaxWithBias(gof.Op):
gof.Op.__init__(self, **kwargs)
def make_node(self, x, b):
x = tensor.as_tensor(x)
b = tensor.as_tensor(b)
x = tensor.as_ndarray_result(x)
b = tensor.as_ndarray_result(b)
if x.type.ndim != 2 \
or x.type.dtype not in ['float32', 'float64']:
raise ValueError('x must be 2-d tensor of floats')
......@@ -263,8 +263,8 @@ class SoftmaxWithBiasDx(gof.Op):
gof.Op.__init__(self, **kwargs)
def make_node(self, dy, sm, **kwargs):
dy = tensor.as_tensor(dy)
sm = tensor.as_tensor(sm)
dy = tensor.as_ndarray_result(dy)
sm = tensor.as_ndarray_result(sm)
return gof.Apply(self, [dy, sm], [sm.type.make_result()])
def perform(self, node, input_storage, output_storage):
......@@ -368,9 +368,9 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op):
gof.Op.__init__(self, **kwargs)
def make_node(self, x, b, y_idx):
x = tensor.as_tensor(x)
b = tensor.as_tensor(b)
y_idx = tensor.as_tensor(y_idx)
x = tensor.as_ndarray_result(x)
b = tensor.as_ndarray_result(b)
y_idx = tensor.as_ndarray_result(y_idx)
if x.type.ndim != 2 \
or x.type.dtype not in ['float32', 'float64']:
raise ValueError('x must be 2-d tensor of floats')
......@@ -382,9 +382,9 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op):
raise ValueError('y_idx must be 1-d tensor of ints')
# TODO: Is this correct? It used to be y, not y_idx
nll = tensor.Tensor(x.type.dtype,
nll = tensor.NDArrayType(x.type.dtype,
y_idx.type.broadcastable).make_result()
# nll = Tensor(x.dtype, y.broadcastable)
# nll = NDArrayType(x.dtype, y.broadcastable)
sm = x.type.make_result()
am = y_idx.type.make_result()
return gof.Apply(self, [x, b, y_idx], [nll, sm, am])
......@@ -532,9 +532,9 @@ class CrossentropySoftmax1HotWithBiasDx (gof.Op):
def __init__(self, **kwargs):
gof.Op.__init__(self,**kwargs)
def make_node(self, dy, sm, y_idx,**kwargs):
dy = tensor.as_tensor(dy)
sm = tensor.as_tensor(sm)
y_idx = tensor.as_tensor(y_idx)
dy = tensor.as_ndarray_result(dy)
sm = tensor.as_ndarray_result(sm)
y_idx = tensor.as_ndarray_result(y_idx)
return gof.Apply(self, [dy, sm, y_idx],[sm.type.make_result()])
def perform(self, node, input_storage, output_storage):
dy,sm,y_idx = input_storage
......@@ -672,8 +672,8 @@ class Prepend_scalar_constant_to_each_row(gof.Op):
#check type of input
if not isinstance(mat,gof.Result) or not mat.type==tensor.matrix().type:
raise TypeError("Expected a matrix as input")
x = tensor.as_tensor(mat)
y = tensor.as_tensor(self.val)
x = tensor.as_ndarray_result(mat)
y = tensor.as_ndarray_result(self.val)
if x.type.dtype != y.type.dtype:
TypeError("the value to prepend don't have the same type as the matrix")
......@@ -706,8 +706,8 @@ class Prepend_scalar_to_each_row(gof.Op):
val = scalar.constant(val)
if not isinstance(mat,gof.Result) or not mat.type==tensor.matrix().type:
raise TypeError("Expected a matrix as input")
x = tensor.as_tensor(mat)
y = tensor.as_tensor(val)
x = tensor.as_ndarray_result(mat)
y = tensor.as_ndarray_result(val)
if x.type.dtype != y.type.dtype:
TypeError("the value to prepend don't have the same type as the matrix")
......
......@@ -534,7 +534,7 @@ class Canonizer(gof.LocalOptimizer):
ln, ld = len(num), len(denum)
if not ln and not ld:
return T.as_tensor(self.calculate([], []))
return T.as_ndarray_result(self.calculate([], []))
if not ln:
if self.use_reciprocal:
return self.reciprocal(self.merge_num_denum(denum, []))
......@@ -545,7 +545,7 @@ class Canonizer(gof.LocalOptimizer):
if isinstance(num[0], gof.Result):
return num[0]
else:
return T.as_tensor(num[0])
return T.as_ndarray_result(num[0])
else:
return self.main(*num)
return self.inverse(self.merge_num_denum(num, []),
......@@ -844,7 +844,7 @@ def local_mul_specialize(node):
if len(new_inputs) < len(node.inputs):
if len(new_inputs) == 0:
newval = -y.flatten()[0] if neg else y.flatten()[0]
return [T.TensorConstant(T.Tensor(dtype=node.outputs[0].type.dtype,
return [T.NDArrayConstant(T.NDArrayType(dtype=node.outputs[0].type.dtype,
broadcastable = [True] * node.outputs[0].ndim), N.asarray(newval))]
if len(new_inputs) == 1:
......
......@@ -131,7 +131,7 @@ class RandomStreams(Component):
:returns: The symbolic random draw part of op()'s return value. This function stores
the updated RandomStateType Result for use at `build` time.
:rtype: TensorResult
:rtype: NDArrayResult
"""
random_state_result = raw_random.random_state_type()
new_r, out = op(random_state_result, *args, **kwargs)
......
......@@ -87,7 +87,7 @@ class RandomFunction(gof.Op):
fn, outtype, args, kwargs = state
self.fn = getattr(numpy.random.RandomState, fn) if isinstance(fn, str) else fn
self.outtype = outtype
self.args = tuple(tensor.as_tensor(arg) for arg in args)
self.args = tuple(tensor.as_ndarray_result(arg) for arg in args)
self.inplace = kwargs.pop('inplace', False)
if self.inplace:
self.destroy_map = {0: [0]}
......@@ -103,7 +103,7 @@ class RandomFunction(gof.Op):
:param args: the values associated with these results will be passed to the RandomState
function during perform as extra "*args"-style arguments. These should be castable to
results of Type Tensor.
results of Type NDArrayType.
:rtype: Apply
......@@ -115,7 +115,7 @@ class RandomFunction(gof.Op):
if shape == () or shape == []:
shape = tensor.lvector()
else:
shape = tensor.as_tensor(shape, ndim=1)
shape = tensor.as_ndarray_result(shape, ndim=1)
#print 'SHAPE TYPE', shape.type, tensor.lvector
assert shape.type.ndim == 1
assert (shape.type.dtype == 'int64') or (shape.type.dtype == 'int32')
......@@ -127,9 +127,9 @@ class RandomFunction(gof.Op):
# shape.type
# assert shape.type == tensor.lvector
# convert args to Tensor instances
# convert args to NDArrayType instances
# and append enough None's to match the length of self.args
args = map(tensor.as_tensor, args)
args = map(tensor.as_ndarray_result, args)
if len(args) > len(self.args):
raise TypeError('Too many args for this kind of random generator')
args += (None,) * (len(self.args) - len(args))
......@@ -202,14 +202,14 @@ def random_function(fn, dtype, *rfargs, **rfkwargs):
else:
r, shape, args = ndim, args[0], args[1:]
if shape == () or shape == []:
shape = tensor.TensorConstant(type = tensor.lvector, data = shape)
shape = tensor.NDArrayConstant(type = tensor.lvector, data = shape)
else:
shape = tensor.as_tensor(shape)
shape = tensor.as_ndarray_result(shape)
ndim = tensor.get_vector_length(shape)
if ndim is None:
raise ValueError('Cannot infer the number of dimensions from the shape argument.')
# note: rf could be cached for future use
rf = RandomFunction(fn, tensor.Tensor(dtype = dtype, broadcastable = (False,)*ndim), *rfargs, **rfkwargs)
rf = RandomFunction(fn, tensor.NDArrayType(dtype = dtype, broadcastable = (False,)*ndim), *rfargs, **rfkwargs)
return rf(r, shape, *args, **kwargs)
return f
......
......@@ -595,7 +595,7 @@ class T_Shape(unittest.TestCase):
class T_Cast(unittest.TestCase):
def test_basic(self):
for type1 in ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']:
x = Tensor(dtype = type1, broadcastable = (False, )).make_result()
x = NDArrayType(dtype = type1, broadcastable = (False, )).make_result()
for type2, converter in zip(['int8', 'int16', 'int32', 'int64', 'float32', 'float64'],
[convert_to_int8, convert_to_int16, convert_to_int32, convert_to_int64,
convert_to_float32, convert_to_float64]):
......@@ -611,51 +611,51 @@ class T_max_and_argmax(unittest.TestCase):
MaxAndArgmax.debug = 0
def test0(self):
n = as_tensor(5.0)
n = as_ndarray_result(5.0)
v,i = eval_outputs(max_and_argmax(n))
self.failUnless(v == 5.0)
self.failUnless(i == 0)
def test1(self):
n = as_tensor([1,2,3,2,-6])
n = as_ndarray_result([1,2,3,2,-6])
v,i = eval_outputs(max_and_argmax(n))
self.failUnless(v == 3)
self.failUnless(i == 2)
def test2(self):
data = numpy.random.rand(2,3)
n = as_tensor(data)
n = as_ndarray_result(data)
v,i = eval_outputs(max_and_argmax(n))
self.failUnless(numpy.all(v == numpy.max(data,-1)))
self.failUnless(numpy.all(i == numpy.argmax(data,-1)))
def test2b(self):
data = numpy.random.rand(2,3)
n = as_tensor(data)
n = as_ndarray_result(data)
v,i = eval_outputs(max_and_argmax(n,0))
self.failUnless(numpy.all(v == numpy.max(data,0)))
self.failUnless(numpy.all(i == numpy.argmax(data,0)))
def test2_invalid(self):
n = as_tensor(numpy.random.rand(2,3))
n = as_ndarray_result(numpy.random.rand(2,3))
try:
eval_outputs(max_and_argmax(n,3))
except ValueError, e:
return
self.fail()
def test2_invalid_neg(self):
n = as_tensor(numpy.random.rand(2,3))
n = as_ndarray_result(numpy.random.rand(2,3))
try:
eval_outputs(max_and_argmax(n,-3))
except ValueError, e:
return
self.fail()
def test2_valid_neg(self):
n = as_tensor(numpy.random.rand(2,3))
n = as_ndarray_result(numpy.random.rand(2,3))
v,i = eval_outputs(max_and_argmax(n,-1))
self.failUnless(v.shape == (2,))
v,i = eval_outputs(max_and_argmax(n,-2))
self.failUnless(v.shape == (3,))
def test3(self):
n = as_tensor(numpy.random.rand(2,3,4))
n = as_ndarray_result(numpy.random.rand(2,3,4))
v,i = eval_outputs(max_and_argmax(n,0))
self.failUnless(v.shape == (3,4))
self.failUnless(i.shape == (3,4))
......@@ -674,7 +674,7 @@ class T_subtensor(unittest.TestCase):
def test0_err_invalid(self):
#it is impossible to retrieve a view of a 0-d tensor
n = as_tensor(numpy.ones(()))
n = as_ndarray_result(numpy.ones(()))
try:
t = n[0]
except ValueError, e:
......@@ -683,7 +683,7 @@ class T_subtensor(unittest.TestCase):
self.fail()
def test1_err_bounds(self):
n = as_tensor(numpy.ones(3))
n = as_ndarray_result(numpy.ones(3))
t = n[7]
self.failUnless(isinstance(t.owner.op, Subtensor))
try:
......@@ -694,7 +694,7 @@ class T_subtensor(unittest.TestCase):
return
self.fail()
def test1_err_subslice(self):
n = as_tensor(numpy.ones(3))
n = as_ndarray_result(numpy.ones(3))
try:
t = n[slice(0,slice(1,2,None),None)]
except Exception, e:
......@@ -704,21 +704,21 @@ class T_subtensor(unittest.TestCase):
self.fail()
# Slicing a vector of fives with a finite range n[0:2] must produce a
# Subtensor node whose evaluated value has shape (2,) and elements 5.0.
# NOTE(review): diff artifact — the two `n = ...` lines are the
# pre-/post-rename versions of one statement.
def test1_ok_range_finite(self):
n = as_tensor(numpy.ones(3)*5)
n = as_ndarray_result(numpy.ones(3)*5)
t = n[0:2]
self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t])
self.failUnless(tval.shape == (2,))
self.failUnless(tval[1] == 5.0)
# Mixed range/index subscript on a (3,4) matrix of fives: n[0:2,3] is a
# Subtensor whose value has shape (2,) with elements 5.0.
# NOTE(review): diff artifact — duplicated `n = ...` assignment is the
# removed/added pair for the as_tensor -> as_ndarray_result rename.
def test2_ok_range_finite(self):
n = as_tensor(numpy.ones((3,4))*5)
n = as_ndarray_result(numpy.ones((3,4))*5)
t = n[0:2,3]
self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t])
self.failUnless(tval.shape == (2,))
self.failUnless(tval[1] == 5.0)
def test1_err_invalid(self):
n = as_tensor(numpy.ones(1))
n = as_ndarray_result(numpy.ones(1))
try:
t = n[0,0]
except ValueError, e:
......@@ -726,7 +726,7 @@ class T_subtensor(unittest.TestCase):
return
self.fail()
def test1_ok_elem(self):
n = as_tensor(numpy.ones(1)*5)
n = as_ndarray_result(numpy.ones(1)*5)
t = n[0]
self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t])
......@@ -734,14 +734,14 @@ class T_subtensor(unittest.TestCase):
self.failUnless(tval == 5.0)
# Open-ended slice n[1:] on a 3-vector of fives: result is a Subtensor
# with evaluated shape (2,) and elements 5.0.
# NOTE(review): diff artifact — the two `n = ...` lines are the
# pre-/post-rename versions of one statement.
def test1_ok_range_infinite(self):
#Subtensor.debug = True
n = as_tensor(numpy.ones(3)*5)
n = as_ndarray_result(numpy.ones(3)*5)
t = n[1:]
self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t])
self.failUnless(tval.shape == (2,))
self.failUnless(tval[1] == 5.0)
def test1_ok_strided(self):
n = as_tensor(numpy.ones(5)*5)
n = as_ndarray_result(numpy.ones(5)*5)
t = n[1::2]
self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t])
......@@ -753,7 +753,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(tval[1] == 5.0)
def test2_err_bounds0(self):
n = as_tensor(numpy.ones((2,3))*5)
n = as_ndarray_result(numpy.ones((2,3))*5)
t = n[0,4]
self.failUnless(isinstance(t.owner.op, Subtensor))
try:
......@@ -762,7 +762,7 @@ class T_subtensor(unittest.TestCase):
return
self.fail()
def test2_err_bounds1(self):
n = as_tensor(numpy.ones((2,3))*5)
n = as_ndarray_result(numpy.ones((2,3))*5)
t = n[4:5,2]
self.failUnless(isinstance(t.owner.op, Subtensor))
try:
......@@ -771,14 +771,14 @@ class T_subtensor(unittest.TestCase):
if e[0] != 'index out of bounds':
raise
# Double-index n[0,2] on a (2,3) matrix holding 0..5: yields a Subtensor
# whose evaluated value is the 0-d scalar 2.
# NOTE(review): diff artifact — duplicated `n = ...` assignment is the
# removed/added pair for the as_tensor -> as_ndarray_result rename.
def test2_ok_elem(self):
n = as_tensor(numpy.asarray(range(6)).reshape((2,3)))
n = as_ndarray_result(numpy.asarray(range(6)).reshape((2,3)))
t = n[0,2]
self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t])
self.failUnless(tval.shape == ())
self.failUnless(numpy.all(tval == 2))
def test2_ok_row(self):
n = as_tensor(numpy.asarray(range(6)).reshape((2,3)))
n = as_ndarray_result(numpy.asarray(range(6)).reshape((2,3)))
t = n[1]
self.failIf(any(n.type.broadcastable))
self.failUnless(isinstance(t.owner.op, Subtensor))
......@@ -787,7 +787,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(numpy.all(tval == [3,4,5]))
def test2_ok_col(self):
n = as_tensor(numpy.ones((2,3))*5)
n = as_ndarray_result(numpy.ones((2,3))*5)
t = n[:,0]
self.failUnless(isinstance(t.owner.op, Subtensor))
self.failIf(any(n.type.broadcastable))
......@@ -796,7 +796,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(numpy.all(tval == 5.0))
def test2_ok_rows_finite(self):
n = as_tensor(numpy.ones((4,3))*5)
n = as_ndarray_result(numpy.ones((4,3))*5)
t = n[1:3,0]
self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t])
......@@ -804,7 +804,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(numpy.all(tval == 5.0))
def test2_ok_cols_infinite(self):
n = as_tensor(numpy.asarray(range(12)).reshape((4,3)))
n = as_ndarray_result(numpy.asarray(range(12)).reshape((4,3)))
t = n[1,2:]
self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t])
......@@ -812,7 +812,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(numpy.all(tval == 5))
def test2_ok_strided(self):
n = as_tensor(numpy.asarray(range(20)).reshape((4,5)))
n = as_ndarray_result(numpy.asarray(range(20)).reshape((4,5)))
t = n[1:4:2,1:5:2]
self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t])
......@@ -820,7 +820,7 @@ class T_subtensor(unittest.TestCase):
self.failUnless(numpy.all(tval == [[6, 8],[16, 18]]))
def test3_ok_mat(self):
n = as_tensor(numpy.asarray(range(24)).reshape((2,3,4)))
n = as_ndarray_result(numpy.asarray(range(24)).reshape((2,3,4)))
t = n[0,0,0]
self.failUnless(isinstance(t.owner.op, Subtensor))
tval = eval_outputs([t])
......@@ -830,7 +830,7 @@ class T_subtensor(unittest.TestCase):
def test_grad_1d(self):
subi = 0
data = numpy.random.rand(2,3)
n = as_tensor(data)
n = as_ndarray_result(data)
z = scal.constant(subi)
t = n[z:,z]
gn = grad(sum(exp(t)), n)
......@@ -841,7 +841,7 @@ class T_subtensor(unittest.TestCase):
def test_grad_0d(self):
data = numpy.random.rand(2,3)
n = as_tensor(data)
n = as_ndarray_result(data)
t = n[1,0]
gn = grad(sum(exp(t)), n)
gval = eval_outputs([gn])
......@@ -857,7 +857,7 @@ class T_Join_and_Split(unittest.TestCase):
class Join1(Op):
# Test helper op: accepts any number of inputs, coerces each to a tensor
# result, and declares one lscalar output followed by one output of the
# same type as each input.
# NOTE(review): diff artifact — the two `inputs = [...]` comprehensions
# are the removed/added sides of the same line (as_tensor ->
# as_ndarray_result).
def make_node(self, *inputs):
inputs = [as_tensor(t) for t in inputs]
inputs = [as_ndarray_result(t) for t in inputs]
outputs = [lscalar()] + [i.type() for i in inputs]
return Apply(self, inputs, outputs)
def perform(self, node, inputs, outputs):
......@@ -871,8 +871,8 @@ class T_Join_and_Split(unittest.TestCase):
Join.debug = False
def test_join_scalar(self):
a = as_tensor(1)
b = as_tensor(2)
a = as_ndarray_result(1)
b = as_ndarray_result(2)
try:
s = join(0, a, b)
except:
......@@ -880,18 +880,18 @@ class T_Join_and_Split(unittest.TestCase):
self.fail()
# stack() over an int constant and two float constants must evaluate to
# the 1-d array [1, 2, 3].
# NOTE(review): diff artifact — the first three assignments (as_tensor)
# and the next three (as_ndarray_result) are the removed/added sides of
# the same three lines; only one triple exists in either revision.
def test_stack_mixed_type_constants(self):
a = as_tensor(1)
b = as_tensor(2.0)
c = as_tensor(3.0)
a = as_ndarray_result(1)
b = as_ndarray_result(2.0)
c = as_ndarray_result(3.0)
s = stack(a, b, c)
want = numpy.array([1, 2, 3])
self.failUnless((eval_outputs([s]) == want).all())
def test_stack_scalar(self):
a = as_tensor(1)
b = as_tensor(2)
c = as_tensor(3)
a = as_ndarray_result(1)
b = as_ndarray_result(2)
c = as_ndarray_result(3)
s = stack(a, b, c)
want = numpy.array([1, 2, 3])
......@@ -899,24 +899,24 @@ class T_Join_and_Split(unittest.TestCase):
# join(0, a, b) on two 3-vectors concatenates along axis 0, giving the
# 6-vector [1, 2, 3, 7, 8, 9].
# NOTE(review): diff artifact — the as_tensor pair and as_ndarray_result
# pair are the removed/added sides of the same two lines.
def test_join_vector(self):
a = as_tensor(numpy.array([1, 2, 3]))
b = as_tensor(numpy.array([7, 8, 9]))
a = as_ndarray_result(numpy.array([1, 2, 3]))
b = as_ndarray_result(numpy.array([7, 8, 9]))
s = join(0, a, b)
want = numpy.array([1, 2, 3, 7, 8, 9])
self.failUnless((eval_outputs([s]) == want).all())
def test_stack_vector(self):
a = as_tensor(numpy.array([1, 2, 3]))
b = as_tensor(numpy.array([7, 8, 9]))
a = as_ndarray_result(numpy.array([1, 2, 3]))
b = as_ndarray_result(numpy.array([7, 8, 9]))
s = stack(a, b)
want = numpy.array([[1, 2, 3],[ 7, 8, 9]])
self.failUnless((eval_outputs([s]) == want).all())
def test_join_matrix0(self):
a = as_tensor(numpy.array([[1, 2, 3], [4, 5, 6]]))
b = as_tensor(numpy.array([[7, 8, 9]]))
a = as_ndarray_result(numpy.array([[1, 2, 3], [4, 5, 6]]))
b = as_ndarray_result(numpy.array([[7, 8, 9]]))
s = join(0, a, b)
want = numpy.array([[1, 2, 3],[4,5,6],[7, 8, 9]])
......@@ -925,8 +925,8 @@ class T_Join_and_Split(unittest.TestCase):
def test_join_matrix1(self):
av=numpy.array([[1, 2, 3], [4, 5, 6]], dtype='float32')
bv= numpy.array([[7], [8]],dtype='float32')
a = as_tensor(av)
b = as_tensor(bv)
a = as_ndarray_result(av)
b = as_ndarray_result(bv)
s = join(1, a, b)
want = numpy.array([[1, 2, 3, 7], [4, 5, 6, 8]], dtype='float32')
self.failUnless((eval_outputs([s]) == want).all())
......@@ -934,9 +934,9 @@ class T_Join_and_Split(unittest.TestCase):
verify_grad(self, lambda a, b: join(1,a,b), [av, bv], eps=1.0e-4, tol=1.0e-3)
def test_join_matrix1_using_vertical_stack(self):
a = as_tensor(numpy.array([[1, 2, 3], [4, 5, 6]]))
b = as_tensor(numpy.array([[7, 8, 9]]))
c = as_tensor(numpy.array([[9, 8, 7]]))
a = as_ndarray_result(numpy.array([[1, 2, 3], [4, 5, 6]]))
b = as_ndarray_result(numpy.array([[7, 8, 9]]))
c = as_ndarray_result(numpy.array([[9, 8, 7]]))
s = vertical_stack(a, b, c)
want = numpy.array([[1, 2, 3],[4,5,6],[7, 8, 9], [9, 8, 7]])
......@@ -946,9 +946,9 @@ class T_Join_and_Split(unittest.TestCase):
av=numpy.array([[1, 2, 3], [4, 5, 6]], dtype='float32')
bv=numpy.array([[7], [8]],dtype='float32')
cv=numpy.array([[3, 2, 1], [6, 5, 4]], dtype='float32')
a = as_tensor(av)
b = as_tensor(bv)
c = as_tensor(cv)
a = as_ndarray_result(av)
b = as_ndarray_result(bv)
c = as_ndarray_result(cv)
s = horizontal_stack(a, b, c)
want = numpy.array([[1, 2, 3, 7, 3, 2, 1], [4, 5, 6, 8, 6, 5, 4]], dtype='float32')
self.failUnless((eval_outputs([s]) == want).all())
......@@ -958,8 +958,8 @@ class T_Join_and_Split(unittest.TestCase):
def test_join_matrixV(self):
"""variable join axis"""
v = numpy.array([[1., 2., 3.], [4., 5., 6.]])
a = as_tensor(v.copy())
b = as_tensor(v.copy())
a = as_ndarray_result(v.copy())
b = as_ndarray_result(v.copy())
ax = lscalar()
s = join(ax, a, b)
......@@ -1108,12 +1108,12 @@ class T_exp(unittest.TestCase):
# class T_abs(unittest.TestCase):
# def test_impl(self):
# t = as_tensor(1.0)
# t = as_ndarray_result(1.0)
# check_eq(self, t, abs(t), 1.0, 1.0)
# check_eq(self, t, abs(t), -1.0, 1.0)
# for shape in (2,), (3,4):
# t = as_tensor(numpy.ones(shape))
# t = as_ndarray_result(numpy.ones(shape))
# d = numpy.random.rand(*shape)*2-1.0
# check_eq(self, t, abs(t), d, abs(d))
# check_eq(self, t, abs(t), -d, abs(-d))
......@@ -1148,7 +1148,7 @@ class T_exp(unittest.TestCase):
# self.failUnless(numpy.all(eval_outputs([t]) == [9,9,9]))
# def test1(self):
# x = as_tensor(numpy.ones((4,5)))
# x = as_ndarray_result(numpy.ones((4,5)))
# l = ones_like(x[:,0:1])
# r = ones_like(x[0:1,:])
# xx = x + dot(l,r)
......@@ -1156,11 +1156,11 @@ class T_exp(unittest.TestCase):
# class T_sum(unittest.TestCase):
# def test_impl(self):
# t = as_tensor(0.0)
# t = as_ndarray_result(0.0)
# check_eq(self, t, Sum(t).out, 1.0, 1.0)
# check_eq(self, t, Sum(t).out, -1.0, -1.0)
# t = as_tensor([0.0, 0.0])
# t = as_ndarray_result([0.0, 0.0])
# d = numpy.asarray([-0.4, 1.2])
# check_eq(self, t, Sum(t).out, d, numpy.sum(d))
# check_eq(self, t, Sum(t).out, -d, -numpy.sum(d))
......@@ -1170,13 +1170,13 @@ class T_exp(unittest.TestCase):
# unittest_tools.seed_rng()
# def test_elemwise(self):
# a = as_tensor(0.0)
# b = as_tensor(0.0)
# a = as_ndarray_result(0.0)
# b = as_ndarray_result(0.0)
# check_eq2_both(self, [a,b], mul(a,b), [3.0, 4.0], 12.0)
# check_eq2_both(self, [a,b], mul(b,a), [-1.0,2.0], -2.0)
# a = as_tensor(numpy.ones(2))
# b = as_tensor(numpy.ones(2))
# a = as_ndarray_result(numpy.ones(2))
# b = as_ndarray_result(numpy.ones(2))
# aa = numpy.asarray([-0.5, 4.0])
# bb = numpy.asarray([-0.5, 2.0])
# check_eq2_both(self, [a,b], mul(a,b), [aa,bb], numpy.asarray([0.25, 8.0]))
......@@ -1184,8 +1184,8 @@ class T_exp(unittest.TestCase):
# def test_scalar(self):
# r = numpy.random.rand(2,3)
# a = as_tensor(r)
# b = as_tensor(2.0)
# a = as_ndarray_result(r)
# b = as_ndarray_result(2.0)
# check_eq2_both(self, [a,b], mul(a,b), [r, 2.0], r*2.0)
# check_eq2_both(self, [a,b], mul(a,b), [r, 4.0], r*4.0)
# self.failUnless(b.data == 2.0)
......@@ -1194,7 +1194,7 @@ class T_exp(unittest.TestCase):
# r1 = numpy.random.rand(3,5)
# r2 = numpy.random.rand(1,5)
# r3 = numpy.random.rand(3,1)
# a1, a2, a3 = as_tensor(r1), as_tensor(r2), as_tensor(r3)
# a1, a2, a3 = as_ndarray_result(r1), as_ndarray_result(r2), as_ndarray_result(r3)
# check_eq2_both(self, [a1,a2], mul(a1,a2), [r1, r2], r1*r2)
# check_eq2_both(self, [a1,a3], mul(a1,a3), [r1, r3], r1*r3)
......@@ -1213,8 +1213,8 @@ class T_exp(unittest.TestCase):
# verify_grad(self, Mul, [numpy.random.rand(3, 5), numpy.random.rand(3, 1)])
# def test_wrong_shapes(self):
# a = as_tensor(numpy.ones(3))
# b = as_tensor(numpy.ones(4))
# a = as_ndarray_result(numpy.ones(3))
# b = as_ndarray_result(numpy.ones(4))
# try:
# check_eq2(self, [a,b], Mul(a,b).out,
# [numpy.ones(3), numpy.ones(4)], 1.0)
......@@ -1253,8 +1253,8 @@ class T_exp(unittest.TestCase):
# def test0(self):
# verify_grad(self, Log, [numpy.random.rand(3,1)+0.0001])
# def test1(self):
# a = as_tensor(numpy.ones(2))
# b = as_tensor(numpy.ones(2))
# a = as_ndarray_result(numpy.ones(2))
# b = as_ndarray_result(numpy.ones(2))
# aa = numpy.asarray([0.5, 4.0])
# bb = numpy.asarray([0.5, 2.0])
# check_eq2(self, [a], log(a), [aa], numpy.log(numpy.asarray(aa)))
......@@ -1283,12 +1283,12 @@ class test_matinv(unittest.TestCase):
# symbolic program
# broadcastable=[False,False] means that the shape of matrix is two dimensional,
# and none of the dimensions are constrained to have length 1.
# Note that Tensor's constructor does not actually allocate any memory.
# TODO: Make Tensor syntax more explicit, and maybe give shape or number of dimensions.
# Note that NDArrayType's constructor does not actually allocate any memory.
# TODO: Make NDArrayType syntax more explicit, and maybe give shape or number of dimensions.
a, b = matrices('ab')
ab = a*b
# Here, as_tensor actually uses the data allocated by numpy.
diff = ab - as_tensor(numpy.ones((dim,dim)))
# Here, as_ndarray_result actually uses the data allocated by numpy.
diff = ab - as_ndarray_result(numpy.ones((dim,dim)))
# Sum of squared errors
ssdiff = sum((diff**2.0))
......@@ -1339,7 +1339,7 @@ class t_dot(unittest.TestCase):
x = numpy.asarray(x)
return type(x), x.dtype, x.shape
nz = numpy.dot(x,y)
tz = eval_outputs([dot(as_tensor(x), as_tensor(y))])
tz = eval_outputs([dot(as_ndarray_result(x), as_ndarray_result(y))])
self.failUnless(tz.dtype == nz.dtype)
self.failUnless(tz.shape == nz.shape)
self.failUnless(_approx_eq(nz, tz))
......@@ -1406,7 +1406,7 @@ class T_tensorfromscalar(unittest.TestCase):
def test1(self):
s = scal.constant(56)
t = as_tensor(s)
t = as_ndarray_result(s)
self.failUnless(t.owner.op is tensor_from_scalar)
self.failUnless(t.type.broadcastable == (), t.type.broadcastable)
self.failUnless(t.type.ndim == 0, t.type.ndim)
......@@ -1420,13 +1420,13 @@ class T_tensorfromscalar(unittest.TestCase):
# def _tensor(data, broadcastable=None, name=None):
# """Return a Tensor containing given data"""
# """Return a NDArrayType containing given data"""
# data = numpy.asarray(data)
# if broadcastable is None:
# broadcastable = [s==1 for s in data.shape]
# elif broadcastable in [0, 1]:
# broadcastable = [broadcastable] * len(data.shape)
# rval = Tensor(data.dtype, broadcastable, name)
# rval = NDArrayType(data.dtype, broadcastable, name)
# rval.data = data # will raise if broadcastable was mis-specified
# return rval
......@@ -1437,7 +1437,7 @@ class T_tensorfromscalar(unittest.TestCase):
# unittest_tools.seed_rng()
# def test0(self): # allocate from a scalar float
# t = _tensor(1.0)
# self.failUnless(isinstance(t, Tensor))
# self.failUnless(isinstance(t, NDArrayType))
# self.failUnless(t.dtype == 'float64')
# self.failUnless(t.broadcastable == ())
# self.failUnless(t.role == None)
......@@ -1446,25 +1446,25 @@ class T_tensorfromscalar(unittest.TestCase):
# self.failUnless(t.data == 1.0)
# def test0_int(self): # allocate from a scalar float
# t = _tensor(1)
# self.failUnless(isinstance(t, Tensor))
# self.failUnless(isinstance(t, NDArrayType))
# self.failUnless(t.dtype == 'int64' or t.dtype == 'int32')
# def test1(self): # allocate from a vector of ints, not broadcastable
# t = _tensor(numpy.ones(5,dtype='int32'))
# self.failUnless(isinstance(t, Tensor))
# self.failUnless(isinstance(t, NDArrayType))
# self.failUnless(t.dtype == 'int32')
# self.failUnless(t.broadcastable == (0,))
# self.failUnless(isinstance(t.data, numpy.ndarray))
# self.failUnless(str(t.data.dtype) == 'int32')
# def test2(self): # allocate from a column matrix of complex with name
# t = _tensor(numpy.ones((5,1),dtype='complex64'),name='bart')
# self.failUnless(isinstance(t, Tensor))
# self.failUnless(isinstance(t, NDArrayType))
# self.failUnless(t.dtype == 'complex64')
# self.failUnless(t.broadcastable == (0,1))
# self.failUnless(isinstance(t.data, numpy.ndarray))
# self.failUnless(t.name == 'bart')
# def test2b(self): # allocate from a column matrix, not broadcastable
# t = _tensor(numpy.ones((5,1),dtype='complex64'),broadcastable=0)
# self.failUnless(isinstance(t, Tensor))
# self.failUnless(isinstance(t, NDArrayType))
# self.failUnless(t.dtype == 'complex64')
# self.failUnless(t.broadcastable == (0,0))
# self.failUnless(isinstance(t.data, numpy.ndarray))
......@@ -1484,39 +1484,39 @@ class T_tensorfromscalar(unittest.TestCase):
# t.data = numpy.ones((2,7,1))
# self.fail()
# except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_rank)
# self.failUnless(e[0] is NDArrayType.filter.E_rank)
# try:
# t.data = numpy.ones(1)
# self.fail()
# except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_rank)
# self.failUnless(e[0] is NDArrayType.filter.E_rank)
# def test_data_badrank1(self):
# t = _tensor(numpy.ones((1,1),dtype='complex64'), broadcastable=1)
# try:
# t.data = numpy.ones((1,1,1))
# self.fail()
# except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_rank)
# self.failUnless(e[0] is NDArrayType.filter.E_rank)
# try:
# t.data = numpy.ones(1)
# self.fail()
# except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_rank)
# self.failUnless(e[0] is NDArrayType.filter.E_rank)
# def test_data_badshape0(self):
# t = _tensor(numpy.ones((1,1),dtype='complex64'), broadcastable=1)
# try:
# t.data = numpy.ones((1,2))
# self.fail()
# except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_shape)
# self.failUnless(e[0] is NDArrayType.filter.E_shape)
# try:
# t.data = numpy.ones((0,1))
# self.fail()
# except ValueError, e:
# self.failUnless(e[0] is Tensor.filter.E_shape)
# self.failUnless(e[0] is NDArrayType.filter.E_shape)
# def test_cast0(self):
# t = Tensor('float32', [0])
# t = NDArrayType('float32', [0])
# t.data = numpy.random.rand(4) > 0.5
# self.failUnless(str(t.data.dtype) == t.dtype)
......@@ -1585,7 +1585,7 @@ class test_grad(unittest.TestCase):
o = test_grad.O()
a1 = o.make_node()
g = grad(a1.outputs[0], a1.outputs[1])
self.failUnless(isinstance(g, TensorConstant))
self.failUnless(isinstance(g, NDArrayConstant))
self.failUnless(g.data == 0)
try:
grad(a1.outputs[0], 'wtf')
......@@ -1600,7 +1600,7 @@ class test_grad(unittest.TestCase):
g0,g1,g2 = grad(a1.outputs[0], a1.inputs + [scalar('z')])
self.failUnless(o.gval0 is g0)
self.failUnless(o.gval1 is g1)
self.failUnless(isinstance(g2, TensorConstant))
self.failUnless(isinstance(g2, NDArrayConstant))
self.failUnless(g2.data == 0)
class T_op_cache(unittest.TestCase):
......@@ -1703,7 +1703,7 @@ def test_flatten_outdim2():
tensor.verify_grad(None, Flatten(2), [a_val])
def test_flatten_outdim2_of_3():
a = Tensor('float64', (False, False, False))()
a = NDArrayType('float64', (False, False, False))()
c = flatten(a, 2)
f = inplace_func([a], c)
a_val = numpy.asarray([[[0,1],[2,3]], [[4,5],[6,7]]], dtype='float64')
......@@ -1774,7 +1774,7 @@ class test_tensordot(unittest.TestCase):
tensor.verify_grad(None, TensorDot(axes), [aval,bval])
# test ndarray-matrix, sum over one dim of matrix
atens = Tensor('float64', broadcastable=(False,)*4)()
atens = NDArrayType('float64', broadcastable=(False,)*4)()
axes = ((2,),(1,))
c = tensordot(axes)(atens, bmat)
f4 = inplace_func([atens,bmat],c)
......@@ -1785,8 +1785,8 @@ class test_tensordot(unittest.TestCase):
tensor.verify_grad(None, TensorDot(axes), [aval,bval])
# test ndarray-ndarray
atens = Tensor('float64', broadcastable=(False,)*4)()
btens = Tensor('float64', broadcastable=(False,)*3)()
atens = NDArrayType('float64', broadcastable=(False,)*4)()
btens = NDArrayType('float64', broadcastable=(False,)*3)()
axes = ((1,3),(0,2))
c = tensordot(axes)(atens, btens)
f5 = inplace_func([atens,btens],c)
......
......@@ -12,7 +12,7 @@ _as_scalar = GemmLocalOptimizer._as_scalar
_is_real_matrix = GemmLocalOptimizer._is_real_matrix
from theano import In, Out
from .test_basic import (_approx_eq, as_tensor, inplace_func,
from .test_basic import (_approx_eq, as_ndarray_result, inplace_func,
compile, value, constant, inplace, eval_outputs)
class t_gemm(TestCase):
......@@ -35,7 +35,7 @@ class t_gemm(TestCase):
def cmp_linker(z, a, x, y, b, l):
z,a,x,y,b = [numpy.asarray(p) for p in z,a,x,y,b]
z_orig = z.copy()
tz,ta,tx,ty,tb = [as_tensor(p).type() for p in z,a,x,y,b]
tz,ta,tx,ty,tb = [as_ndarray_result(p).type() for p in z,a,x,y,b]
f = inplace_func([tz,ta,tx,ty,tb], gemm(tz,ta,tx,ty,tb), mode=compile.Mode(optimizer = None, linker = l))
new_z = f(z,a,x,y,b)
......@@ -100,7 +100,7 @@ class t_gemm(TestCase):
def test_destroy_map0(self):
"""test that only first input can be overwritten"""
Z = as_tensor(self.rand(2,2))
Z = as_ndarray_result(self.rand(2,2))
try:
gemm(Z, 1.0, Z, Z, 1.0)
except ValueError, e:
......@@ -109,8 +109,8 @@ class t_gemm(TestCase):
self.fail()
def test_destroy_map1(self):
"""test that only first input can be overwritten"""
Z = as_tensor(self.rand(2,2))
A = as_tensor(self.rand(2,2))
Z = as_ndarray_result(self.rand(2,2))
A = as_ndarray_result(self.rand(2,2))
try:
gemm(Z, 1.0, A, inplace.transpose_inplace(Z), 1.0)
except ValueError, e:
......@@ -119,8 +119,8 @@ class t_gemm(TestCase):
self.fail()
def test_destroy_map2(self):
"""test that only first input can be overwritten"""
Z = as_tensor(self.rand(2,2))
A = as_tensor(self.rand(2,2))
Z = as_ndarray_result(self.rand(2,2))
A = as_ndarray_result(self.rand(2,2))
try:
gemm(Z, 1.0, inplace.transpose_inplace(Z), A, 1.0)
except ValueError, e:
......@@ -129,8 +129,8 @@ class t_gemm(TestCase):
self.fail()
def test_destroy_map3(self):
"""test that only first input can be overwritten"""
Z = as_tensor(self.rand(2,2))
A = as_tensor(self.rand(2,2))
Z = as_ndarray_result(self.rand(2,2))
A = as_ndarray_result(self.rand(2,2))
try:
gemm(Z, 1.0, Z, A, 1.0)
except ValueError, e:
......
......@@ -27,7 +27,7 @@ class test_DimShuffle(unittest.TestCase):
((1, 4, 3, 2, 1), (3, 2, 1), (2, 3, 4)),
((1, 1, 4), (1, 2), (1, 4))]:
ib = [(entry == 1) for entry in xsh]
x = Tensor('float64', ib)('x')
x = NDArrayType('float64', ib)('x')
e = DimShuffle(ib, shuffle)(x)
f = copy(linker).accept(Env([x], [e])).make_function()
assert f(numpy.ones(xsh)).shape == zsh
......@@ -50,8 +50,8 @@ class test_Broadcast(unittest.TestCase):
((2, 3, 4, 5), (1, 3, 1, 5)),
((2, 3, 4, 5), (1, 1, 1, 1)),
((), ())]:
x = Tensor('float64', [(entry == 1) for entry in xsh])('x')
y = Tensor('float64', [(entry == 1) for entry in ysh])('y')
x = NDArrayType('float64', [(entry == 1) for entry in xsh])('x')
y = NDArrayType('float64', [(entry == 1) for entry in ysh])('y')
e = Elemwise(add)(x, y)
f = copy(linker).accept(Env([x, y], [e])).make_function()
xv = numpy.asarray(numpy.random.rand(*xsh))
......@@ -69,8 +69,8 @@ class test_Broadcast(unittest.TestCase):
((2, 3, 4, 5), (1, 3, 1, 5)),
((2, 3, 4, 5), (1, 1, 1, 1)),
((), ())]:
x = Tensor('float64', [(entry == 1) for entry in xsh])('x')
y = Tensor('float64', [(entry == 1) for entry in ysh])('y')
x = NDArrayType('float64', [(entry == 1) for entry in xsh])('x')
y = NDArrayType('float64', [(entry == 1) for entry in ysh])('y')
e = Elemwise(Add(transfer_type(0)), {0:0})(x, y)
f = copy(linker).accept(Env([x, y], [e])).make_function()
xv = numpy.asarray(numpy.random.rand(*xsh))
......@@ -94,8 +94,8 @@ class test_Broadcast(unittest.TestCase):
self.with_linker_inplace(gof.CLinker())
def test_fill(self):
x = Tensor('float64', [0, 0])('x')
y = Tensor('float64', [1, 1])('y')
x = NDArrayType('float64', [0, 0])('x')
y = NDArrayType('float64', [1, 1])('y')
e = Elemwise(Second(transfer_type(0)), {0:0})(x, y)
f = gof.CLinker().accept(Env([x, y], [e])).make_function()
xv = numpy.ones((5, 5))
......@@ -104,8 +104,8 @@ class test_Broadcast(unittest.TestCase):
assert (xv == yv).all()
def test_weird_strides(self):
x = Tensor('float64', [0, 0, 0, 0, 0])('x')
y = Tensor('float64', [0, 0, 0, 0, 0])('y')
x = NDArrayType('float64', [0, 0, 0, 0, 0])('x')
y = NDArrayType('float64', [0, 0, 0, 0, 0])('y')
e = Elemwise(add)(x, y)
f = gof.CLinker().accept(Env([x, y], [e])).make_function()
xv = numpy.random.rand(2, 2, 2, 2, 2)
......@@ -114,7 +114,7 @@ class test_Broadcast(unittest.TestCase):
assert (f(xv, yv) == zv).all()
def test_same_inputs(self):
x = Tensor('float64', [0, 0])('x')
x = NDArrayType('float64', [0, 0])('x')
e = Elemwise(add)(x, x)
f = gof.CLinker().accept(Env([x], [e])).make_function()
xv = numpy.random.rand(2, 2)
......@@ -134,7 +134,7 @@ class test_CAReduce(unittest.TestCase):
((5, 6), ()),
((2, 3, 4, 5), (0, 1, 3)),
((), ())]:
x = Tensor('float64', [(entry == 1) for entry in xsh])('x')
x = NDArrayType('float64', [(entry == 1) for entry in xsh])('x')
e = CAReduce(add, axis = tosum)(x)
if tosum is None: tosum = range(len(xsh))
f = copy(linker).accept(Env([x], [e])).make_function()
......
......@@ -63,7 +63,7 @@ def test_merge_with_weird_eq():
assert node.inputs[0] is node.inputs[1]
#NONSCALAR CASE
# This was created to test TensorConstantSignature
# This was created to test NDArrayConstantSignature
x = T.constant(numpy.ones(5), name='x')
y = T.constant(numpy.ones(5), name='y')
g = Env([x, y], [x+y])
......
......@@ -6,7 +6,7 @@ import unittest
from theano import gof
from theano.tensor.opt import *
from theano import tensor
from theano.tensor import Tensor
from theano.tensor import NDArrayType
from theano.gof import Env
from theano.tensor.elemwise import DimShuffle
from theano import pprint
......@@ -18,9 +18,9 @@ from theano import function
def inputs(xbc = (0, 0), ybc = (0, 0), zbc = (0, 0)):
x = Tensor(broadcastable = xbc, dtype = 'float64')('x')
y = Tensor(broadcastable = ybc, dtype = 'float64')('y')
z = Tensor(broadcastable = zbc, dtype = 'float64')('z')
x = NDArrayType(broadcastable = xbc, dtype = 'float64')('x')
y = NDArrayType(broadcastable = ybc, dtype = 'float64')('y')
z = NDArrayType(broadcastable = zbc, dtype = 'float64')('z')
return x, y, z
......
......@@ -3,7 +3,7 @@ from theano.tensor.xlogx import xlogx
import unittest
import theano
from theano.tensor import as_tensor
from theano.tensor import as_ndarray_result
import test_basic as TT
import random
......@@ -15,7 +15,7 @@ class T_XlogX(unittest.TestCase):
unittest_tools.seed_rng()
def test0(self):
x = as_tensor([1, 0])
x = as_ndarray_result([1, 0])
y = xlogx(x)
f = theano.function([], [y])
self.failUnless(numpy.all(f() == numpy.asarray([0, 0.])))
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论