Commit ae9825c5 authored by Razvan Pascanu

Removed trailing spaces

Parent af0fbb52
@@ -8,11 +8,11 @@ class SymbolicInput(object):
    """
    Represents a symbolic input for use with function or FunctionMaker.

    variable: a Variable instance.
        This will be assigned a value before running the function,
        not computed from its owner.

    name: Any type. (If autoname=True, defaults to variable.name).
        If name is a valid Python identifier, this input can be set by kwarg, and its value
        can be accessed by self.<name>.
@@ -41,9 +41,9 @@ class SymbolicInput(object):
        assert implicit is not None  # Safety check.
        self.variable = variable
        if (autoname and name is None):
            self.name = variable.name
        else:
            self.name = name
        #backport
        #self.name = variable.name if (autoname and name is None) else name
@@ -131,11 +131,11 @@ class In(SymbolicInput):
    """
    Represents a symbolic input for use with function or FunctionMaker.

    variable: a Variable instance.
        This will be assigned a value before running the function,
        not computed from its owner.

    name: Any type. (If autoname=True, defaults to variable.name).
        If name is a valid Python identifier, this input can be set by kwarg, and its value
        can be accessed by self.<name>.
@@ -194,7 +194,7 @@ class SymbolicOutput(object):
        returned for this output might be clobbered by running
        the function again, but the function might be faster.
    """
    def __init__(self, variable, borrow=False):
        self.variable = variable
        self.borrow = borrow
@@ -99,6 +99,7 @@ def as_sparse_variable(x, name=None):
    except TypeError:
        raise TypeError("Cannot convert %s to SparseType" % x, type(x))

as_sparse = as_sparse_variable

def constant(x, name=None):
@@ -147,13 +148,12 @@ class SparseType(gof.Type):
        @param format: The sparse storage strategy.

        @return An empty SparseVariable instance.
        """
        dtype = str(dtype)
        if dtype in self.dtype_set:
            self.dtype = dtype
        else:
            raise NotImplementedError('unsupported dtype "%s" not in list' % dtype, list(self.dtype_set))

        assert isinstance(format, str)
        if format in self.format_cls:
            self.format = format
@@ -259,7 +259,7 @@ class SparseValue(gof.Value, _sparse_py_operators):
    dtype = property(lambda self: self.type.dtype)
    format = property(lambda self: self.type.format)

# CONSTRUCTION
class CSMProperties(gof.Op):
    """Extract all of .data .indices and .indptr"""
@@ -276,14 +276,14 @@ class CSMProperties(gof.Op):
        return type(self) == type(other) and _kmap_eq(self.kmap, other.kmap)

    def __ne__(self, other): return not (self == other)

    def __hash__(self):
        return 8234 ^ hash(type(self)) ^ _kmap_hash(self.kmap)

    def make_node(self, csm):
        csm = as_sparse_variable(csm)
        data = tensor.TensorType(dtype=csm.type.dtype, broadcastable = (False,)).make_variable()
        return gof.Apply(self, [csm],
                [data, tensor.ivector(), tensor.ivector(), tensor.ivector()])

    def perform(self, node, (csm,), out):
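For context, the three arrays CSMProperties extracts are exactly scipy's internal CSC/CSR parametrization; a minimal sketch (scipy and numpy assumed available, values illustrative):

import numpy
import scipy.sparse

# a small CSC matrix; .data/.indices/.indptr are what CSMProperties returns
m = scipy.sparse.csc_matrix(numpy.array([[1., 0.], [0., 2.], [3., 0.]]))
print m.data     # stored non-zero values: [ 1.  3.  2.]
print m.indices  # row index of each stored value: [0 2 1]
print m.indptr   # per-column slices into data/indices: [0 2 3]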
@@ -347,10 +347,10 @@ class CSM(gof.Op):
    def __hash__(self):
        return self._hashval

    def make_node(self, data, indices, indptr, shape):
        """Build a SparseVariable from the internal parametrization

        :param data:
        :param indices:
        :param indptr:
        :type data: 1-d tensor
...@@ -397,13 +397,13 @@ class CSM(gof.Op): ...@@ -397,13 +397,13 @@ class CSM(gof.Op):
'as indices (shape'+`indices.shape`+') or elements as kmap ('+`numpy.size(self.kmap)`+')' 'as indices (shape'+`indices.shape`+') or elements as kmap ('+`numpy.size(self.kmap)`+')'
raise ValueError(errmsg) raise ValueError(errmsg)
if self.format == 'csc': if self.format == 'csc':
out[0] = scipy.sparse.csc_matrix((data, indices.copy(), indptr.copy()), out[0] = scipy.sparse.csc_matrix((data, indices.copy(), indptr.copy()),
numpy.asarray(shape), numpy.asarray(shape),
copy = False #1000*len(data.flatten()) copy = False #1000*len(data.flatten())
) )
else: else:
assert self.format == 'csr' assert self.format == 'csr'
out[0] = scipy.sparse.csr_matrix((data, indices.copy(), indptr.copy()), out[0] = scipy.sparse.csr_matrix((data, indices.copy(), indptr.copy()),
shape.copy(), shape.copy(),
copy = False #1000*len(data.flatten()) copy = False #1000*len(data.flatten())
) )
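The branch above is the inverse of CSMProperties; a sketch of the same (data, indices, indptr) plus shape construction in plain scipy (values carried over from the decomposition example earlier):

import numpy
import scipy.sparse

data = numpy.array([1., 3., 2.])
indices = numpy.array([0, 2, 1], dtype='int32')
indptr = numpy.array([0, 2, 3], dtype='int32')
# the same triple CSM.perform passes to scipy; shape is (nrows, ncols)
m = scipy.sparse.csc_matrix((data, indices, indptr), shape=(3, 2))
assert m[2, 0] == 3.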
@@ -427,7 +427,7 @@ class CSMGrad(gof.op.Op):
        return type(self) == type(other) and _kmap_eq(self.kmap, other.kmap)

    def __ne__(self, other): return not (self == other)

    def __hash__(self):
        return 82345 ^ hash(type(self)) ^ _kmap_hash(self.kmap)
@@ -456,7 +456,7 @@ def skip_pack_csc01(node):
register_specialize(skip_pack_csc01)

#
# Conversion
#
@@ -479,7 +479,7 @@ class DenseFromSparse(gof.op.Op):
                broadcastable = (False, False)).make_variable()])

    def perform(self, node, (x, ), (out, )):
        if _is_dense(x):
            print >> sys.stderr, "WARNING: You just called DenseFromSparse on a dense matrix."
            out[0] = x
        else:
            out[0] = x.toarray()
@@ -572,7 +572,7 @@ class AddSS(gof.op.Op):
                [x, y],
                [SparseType(dtype = x.type.dtype,
                            format = x.type.format).make_variable()])

    def perform(self, node, (x, y), (out, )):
        assert _is_sparse(x) and _is_sparse(y)
        assert x.shape == y.shape
        out[0] = x + y
@@ -598,7 +598,7 @@ class AddSD(gof.op.Op):
                [x, y],
                [tensor.TensorType(dtype = y.type.dtype,
                                   broadcastable = y.type.broadcastable).make_variable()])

    def perform(self, node, (x, y), (out, )):
        assert _is_sparse(x) and _is_dense(y)
        out[0] = x + y

    def grad(self, (x, y), (gz,)):
@@ -612,7 +612,7 @@ def add(x,y):
    """
    if hasattr(x, 'getnnz'): x = as_sparse_variable(x)
    if hasattr(y, 'getnnz'): y = as_sparse_variable(y)

    x_is_sparse_variable = _is_sparse_variable(x)
    y_is_sparse_variable = _is_sparse_variable(y)
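These flags feed the dispatch (elided from the hunk) that picks AddSS for two sparse arguments and AddSD for mixed ones; the output-type difference is visible directly in scipy, which the perform methods above delegate to (a sketch):

import numpy
import scipy.sparse

s = scipy.sparse.csr_matrix(numpy.eye(2))
d = numpy.ones((2, 2))
print type(s + s)  # stays sparse, as AddSS's SparseType output declares
print type(s + d)  # comes back dense, matching AddSD's TensorType output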
@@ -637,7 +637,7 @@ class MulSS(gof.op.Op):
        if x.type != y.type:
            raise NotImplementedError()
        return gof.Apply(self, [x, y], [x.type()])

    def perform(self, node, (x, y), (out, )):
        assert _is_sparse(x) and _is_sparse(y)
        assert len(x.shape) == 2
        assert y.shape == x.shape
@@ -664,7 +664,7 @@ class MulSD(gof.op.Op):
        # Broadcasting of the sparse matrix is not supported.
        assert y.type.ndim <= 2
        return gof.Apply(self, [x, y], [x.type()])

    def perform(self, node, (x, y), (out, )):
        assert _is_sparse(x) and _is_dense(y)
        if len(y.shape) == 0:
            out[0] = x.copy()
@@ -716,7 +716,7 @@ def mul(x,y):
    """
    if hasattr(x, 'getnnz'): x = as_sparse_variable(x)
    if hasattr(y, 'getnnz'): y = as_sparse_variable(y)

    x_is_sparse_variable = _is_sparse_variable(x)
    y_is_sparse_variable = _is_sparse_variable(y)
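One gotcha worth keeping in mind when reading the Mul ops above: MulSS's shape assertion suggests an elementwise product, but for scipy sparse matrices the * operator is matrix multiplication, so elementwise work goes through .multiply(). A sketch:

import numpy
import scipy.sparse

x = scipy.sparse.csr_matrix(numpy.array([[1., 2.], [0., 3.]]))
y = scipy.sparse.csr_matrix(numpy.array([[2., 0.], [0., 4.]]))
elementwise = x.multiply(y)  # elementwise product; shapes must match
matmul = x * y               # matrix product -- a different operation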
@@ -770,7 +770,7 @@ class StructuredDot(gof.Op):
        else:
            raise Exception("a.shape=%s, b.shape=%s, variable.shape=%s ??? I have no idea why")

        #The cast is needed as otherwise we hit the bug mentioned in
        #the theano._asarray function documentation.
        out[0] = theano._asarray(variable, str(variable.dtype))
@@ -809,12 +809,12 @@ class StructuredDotCSC(gof.Op):
        return hash(type(self))

    def make_node(self, a_val, a_ind, a_ptr, a_nrows, b):
        dtype_out = scalar.upcast(a_val.type.dtype, b.type.dtype)
        r = gof.Apply(self, [a_val, a_ind, a_ptr, a_nrows, b],
                [tensor.tensor(dtype_out, (False, b.type.broadcastable[1]))])
        return r

    def perform(self, node, (a_val, a_ind, a_ptr, a_nrows, b), (out,)):
        a = scipy.sparse.csc_matrix((a_val, a_ind, a_ptr),
                (a_nrows, b.shape[0]),
                copy = False)
        #out[0] = a.dot(b)
@@ -852,7 +852,7 @@ class StructuredDotCSC(gof.Op):
        if (%(a_val)s->descr->type_num != %(typenum_a_val)s) {
        PyErr_SetString(PyExc_NotImplementedError, "Invalid type for a_val"); %(fail)s;}

        if (%(b)s->descr->type_num != %(typenum_b)s) {
        PyErr_SetString(PyExc_NotImplementedError, "Invalid type for b"); %(fail)s;}
@@ -908,7 +908,7 @@ class StructuredDotCSC(gof.Op):
        //clear the output array
        memset(Dz, 0, M*N*sizeof(dtype_%(z)s));

        //iterate over the sparse array, making the most of an entry wherever we find it.
        //
        // Normal matrix matrix multiply: A MxK, B KxN => Z = AB
@@ -916,7 +916,7 @@ class StructuredDotCSC(gof.Op):
        //   for n
        //     for k
        //       z[m,n] += a[m,k] * b[k,n]
        // Here instead: Z =
        // for k
        //   for m (sparse)
        //     for n
@@ -927,20 +927,20 @@ class StructuredDotCSC(gof.Op):
        {
            // get pointer to k-th row of dense matrix
            const dtype_%(b)s* __restrict__ bk = (dtype_%(b)s*)(%(b)s->data + %(b)s->strides[0] * k);

            // loop over sparse column indices through index pointer array
            // (amounts to looping over rows M of sparse matrix)
            for (npy_int32 m_idx = Dptr[k * Sptr]; m_idx < Dptr[(k+1) * Sptr]; ++m_idx)
            {
                npy_int32 m = Dind[m_idx * Sind]; // row index of non-null value for column K
                const dtype_%(a_val)s Amk = Dval[m_idx * Sval]; // actual value at that location

                // pointer to m-th row of the output matrix Z
                dtype_%(z)s* __restrict__ zm = (dtype_%(z)s*)(%(z)s->data + %(z)s->strides[0] * m);

                //RESOLVE: a.shape[0] equals z.shape[0], why is this not an equality constraint?
                if (m >= %(z)s->dimensions[0])
                {PyErr_SetString(PyExc_NotImplementedError, "illegal row index in a"); %(fail)s;}

                // loop over final dimension (cols of dense matrix) and perform dot product
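A pure-Python rendering of the CSC loop nest above may help when reading the C code; this is a sketch only (the function name is mine, and it ignores the dtype upcasting and stride handling the C version does):

import numpy

def structured_dot_csc_sketch(a_val, a_ind, a_ptr, a_nrows, b):
    # Z = A.B with A given in CSC form: k outer, stored entries of
    # column k in the middle (rows m), dense columns n innermost
    z = numpy.zeros((a_nrows, b.shape[1]), dtype=a_val.dtype)
    for k in xrange(b.shape[0]):                 # columns of A / rows of B
        for m_idx in xrange(a_ptr[k], a_ptr[k + 1]):
            m = a_ind[m_idx]                     # row of this stored entry
            z[m, :] += a_val[m_idx] * b[k, :]    # z[m,n] += a[m,k] * b[k,n]
    return z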
@@ -975,12 +975,12 @@ class StructuredDotCSR(gof.Op):
        return hash(type(self))

    def make_node(self, a_val, a_ind, a_ptr, b):
        self.dtype_out = scalar.upcast(a_val.type.dtype, b.type.dtype)
        r = gof.Apply(self, [a_val, a_ind, a_ptr, b],
                [tensor.tensor(self.dtype_out, (False, b.type.broadcastable[1]))])
        return r

    def perform(self, node, (a_val, a_ind, a_ptr, b), (out,)):
        a = scipy.sparse.csr_matrix((a_val, a_ind, a_ptr),
                (len(a_ptr)-1, b.shape[0]),
                copy = True) #use view_map before setting this to False
        #out[0] = a.dot(b)
@@ -1056,7 +1056,7 @@ class StructuredDotCSR(gof.Op):
        //clear the output array
        memset(Dz, 0, M*N*sizeof(dtype_%(z)s));

        //iterate over the sparse array, making the most of an entry wherever we find it.
        // Normal matrix matrix multiply:
        // for m
@@ -1075,16 +1075,16 @@ class StructuredDotCSR(gof.Op):
            // pointer to m-th row of the output matrix Z
            dtype_%(z)s* __restrict__ zm = (dtype_%(z)s*)(%(z)s->data + %(z)s->strides[0] * m);

            // loop over sparse row indices through index pointer array
            // (amounts to looping over cols k of sparse matrix)
            for (npy_int32 k_idx = Dptr[m * Sptr]; k_idx < Dptr[(m+1) * Sptr]; ++k_idx)
            {
                npy_int32 k = Dind[k_idx * Sind]; // col index of non-null value for row m
                const dtype_%(a_val)s Amk = Dval[k_idx * Sval]; // actual value at that location

                // get pointer to k-th row of dense matrix
                const dtype_%(b)s* __restrict__ bk = (dtype_%(b)s*)(%(b)s->data + %(b)s->strides[0] * k);

                // loop over final dimension (cols of dense matrix) and perform dot product
                for(npy_int32 n = 0; n < N; ++n)
                {
@@ -1093,7 +1093,7 @@ class StructuredDotCSR(gof.Op):
            }
        }
    }
    """% dict(locals(), **sub)

    def c_code_cache_version(self):
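For symmetry with the CSC sketch above, the CSR variant walks rows first; again a sketch under the same assumptions (name mine, no upcasting or stride handling):

import numpy

def structured_dot_csr_sketch(a_val, a_ind, a_ptr, b):
    M = len(a_ptr) - 1
    z = numpy.zeros((M, b.shape[1]), dtype=a_val.dtype)
    for m in xrange(M):                          # rows of A
        for k_idx in xrange(a_ptr[m], a_ptr[m + 1]):
            k = a_ind[k_idx]                     # column of this stored entry
            z[m, :] += a_val[k_idx] * b[k, :]
    return z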
@@ -1114,7 +1114,7 @@ def local_structured_dot(node):
            return [sd_csr(a_val, a_ind, a_ptr, b)]
    return False

# Commented out because
# a) it is only slightly faster than scipy these days, and sometimes a little slower, and
# b) the resulting graphs make it very difficult for an op to do size checking on the matrices
#    involved. dimension mismatches are hard to detect sensibly.
@@ -1152,7 +1152,7 @@ class StructuredDotGradCSC(gof.Op):
    def __hash__(self):
        return hash(type(self))

    def make_node(self, a_indices, a_indptr, b, g_ab):
        return gof.Apply(self, [a_indices, a_indptr, b, g_ab],
                [tensor.tensor(g_ab.dtype, (False,))])

    def perform(self, node, (a_indices, a_indptr, b, g_ab), (out,)):
        g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
@@ -1195,7 +1195,7 @@ class StructuredDotGradCSC(gof.Op):
            PyErr_SetString(PyExc_NotImplementedError, "somehow _zout got the wrong size.. and I don't know how to resize it.");
            %(fail)s;
        }

        { //makes it compile even though labels jump over variable definitions.
        npy_intp nnz = %(_indices)s->dimensions[0];
        npy_intp N = %(_indptr)s->dimensions[0]-1; //TODO: error checking with this
@@ -1223,15 +1223,15 @@ class StructuredDotGradCSC(gof.Op):
            {
                // extract row index of non-null value
                npy_int32 i = indices[i_idx * Sindices];

                // extract corresponding row in gradient
                const dtype_%(_g)s* __restrict__ g_row = (dtype_%(_g)s*)(%(_g)s->data + %(_g)s->strides[0] * i);
                double ip = 0.0;

                // make sure that row index is not bigger than actual number of rows
                // Note: wouldn't the above operation fail if that were the case ?
                //       when would this ever be true anyway ?
                if (i >= %(_g)s->dimensions[0])
                {PyErr_SetString(PyExc_NotImplementedError, "H"); %(fail)s;}

                // perform dot product of dense and sparse rows
@@ -1266,7 +1266,7 @@ class StructuredDotGradCSR(gof.Op):
            ind1 = a_indptr[i+1]
            for j_idx in xrange(ind0, ind1): # loop over values in that row (columns)
                j = a_indices[j_idx]

                # grad is dot product of i-th row of gradient with j-th row of b
                g_a_data[j_idx] = numpy.dot(g_ab[i], b[j])
        out[0] = g_a_data
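Pulled together, the loop above computes one gradient value per stored entry (i, j) of the sparse operand; a self-contained numpy version for reference (helper name is mine):

import numpy

def sdg_csr_sketch(a_indices, a_indptr, b, g_ab):
    # d(cost)/d(a_val)[j_idx] = <g_ab[i], b[j]> for each stored entry (i, j)
    g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
    for i in xrange(len(a_indptr) - 1):          # rows of the sparse matrix
        for j_idx in xrange(a_indptr[i], a_indptr[i + 1]):
            j = a_indices[j_idx]
            g_a_data[j_idx] = numpy.dot(g_ab[i], b[j])
    return g_a_data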
@@ -1302,7 +1302,7 @@ class StructuredDotGradCSR(gof.Op):
            PyErr_SetString(PyExc_NotImplementedError, "somehow _zout got the wrong size.. and I don't know how to resize it.");
            %(fail)s;
        }

        { //makes it compile even though labels jump over variable definitions.
        npy_intp nnz = %(_indices)s->dimensions[0];
        // extract number of rows
@@ -1327,7 +1327,7 @@ class StructuredDotGradCSR(gof.Op):
            {
                // extract column index of non-null value
                npy_int32 j = indices[j_idx * Sindices];

                // extract j-th row of dense matrix
                const dtype_%(_d)s* __restrict__ d_row = (dtype_%(_d)s*)(%(_d)s->data + %(_d)s->strides[0] * j);
                if(j >= %(_d)s->dimensions[0]) {PyErr_SetString(PyExc_NotImplementedError, "G"); %(fail)s;}
@@ -1337,9 +1337,9 @@ class StructuredDotGradCSR(gof.Op):
                double ip = 0.0;

                // make sure that row index is not bigger than actual number of rows
                // Note: wouldn't the above operation fail if that were the case ?
                //       when would this ever be true anyway ?
                if (i >= %(_g)s->dimensions[0])
                {PyErr_SetString(PyExc_NotImplementedError, "H"); %(fail)s;}

                // perform dot product of dense and sparse rows
@@ -1353,7 +1353,7 @@ class StructuredDotGradCSR(gof.Op):
                }
            }
        }
        """% dict(locals(), **sub)

sdg_csr = StructuredDotGradCSR()
@@ -898,24 +898,24 @@ class _tensor_py_operators:
    #COMPARISONS
    _is_nonzero = True

    def __lt__(self,other):
        rval = lt(self, other)
        rval._is_nonzero=False
        return rval

    def __le__(self,other):
        rval = le(self, other)
        rval._is_nonzero=False
        return rval

    def __gt__(self,other):
        rval = gt(self, other)
        rval._is_nonzero=False
        return rval

    def __ge__(self,other):
        rval = ge(self, other)
        rval._is_nonzero=False
        return rval

    def __nonzero__(self):
        # This is meant to prohibit stuff like a < b < c, which is internally implemented as
        # (a < b) and (b < c). The trouble with this is the side-effect that checking for a
        # non-NULL a by typing "if a: ..." uses the same __nonzero__ method. We want these
        # both to work, but it seems impossible. Currently, all vars evaluate to nonzero
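The comment's point in concrete terms (a sketch; the exact failure mode depends on what __nonzero__ returns in this version):

import theano.tensor as T

a, b, c = T.dscalars('a', 'b', 'c')
ok = a < b                 # a symbolic lt, flagged _is_nonzero=False above
# a < b < c expands to (a < b) and (b < c): Python calls __nonzero__ on the
# first symbolic result, which has no meaningful truth value at graph-build
# time. Chain the comparisons explicitly instead:
chained = T.and_(a < b, b < c)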
@@ -3962,7 +3962,7 @@ def tensordot(x, y, axes=2):
        raise ValueError('Cannot perform tensordot of 0-d inputs.')

    axes = TensorDot.parse_axes(axes)

    # check whether axes is valid given the dimensions of x and y
    if numpy.isscalar(axes):
        if axes >= x.ndim or axes >= y.ndim:
...@@ -3979,12 +3979,12 @@ def tensordot(x, y, axes=2): ...@@ -3979,12 +3979,12 @@ def tensordot(x, y, axes=2):
if isinstance(axes[1],(list,tuple)) and \ if isinstance(axes[1],(list,tuple)) and \
(len(axes[1]) > y.ndim or (numpy.array(axes[1]) >= y.ndim).any()): (len(axes[1]) > y.ndim or (numpy.array(axes[1]) >= y.ndim).any()):
raise ValueError('axes[1] should be array_like, of length smaller'\ raise ValueError('axes[1] should be array_like, of length smaller'\
'than the dimension of y (y.ndim=%i, len(axes[1])=%i).' % 'than the dimension of y (y.ndim=%i, len(axes[1])=%i).' %
(y.ndim, len(axes[1]))) (y.ndim, len(axes[1])))
if not hasattr(tensordot, 'op'): if not hasattr(tensordot, 'op'):
tensordot.op = {} tensordot.op = {}
if axes not in tensordot.op: if axes not in tensordot.op:
tensordot.op[axes] = TensorDot(axes) tensordot.op[axes] = TensorDot(axes)
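For intuition on the axes forms being validated here, the numpy counterpart (which this function broadly mirrors) accepts the same scalar and list forms:

import numpy

x = numpy.random.rand(2, 3, 4)
y = numpy.random.rand(4, 3, 5)
# list form: contract x's axes (1, 2) against y's axes (1, 0); a scalar
# axes=2 would instead pair x's last two axes with y's first two, in order
z = numpy.tensordot(x, y, axes=([1, 2], [1, 0]))
print z.shape  # (2, 5)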