Commit a1e290b7 authored by Frédéric Bastien

Merge pull request #3128 from harlouci/flake8_sparse

Flake8 sparse
......@@ -16,7 +16,7 @@ from six.moves import xrange
import scipy.sparse
import theano
from theano import gof, tensor, compile, scalar, config
from theano import gof, tensor, scalar, config
from theano.gradient import DisconnectedType
from theano.sparse.utils import hash_from_sparse
import theano.tests.unittest_tools as utt
......@@ -28,10 +28,10 @@ sparse_formats = ['csc', 'csr']
""" Types of sparse matrices to use for testing """
_mtypes = [scipy.sparse.csc_matrix, scipy.sparse.csr_matrix]
#_mtypes = [sparse.csc_matrix, sparse.csr_matrix, sparse.dok_matrix,
# _mtypes = [sparse.csc_matrix, sparse.csr_matrix, sparse.dok_matrix,
# sparse.lil_matrix, sparse.coo_matrix]
#* new class ``dia_matrix`` : the sparse DIAgonal format
#* new class ``bsr_matrix`` : the Block CSR format
# * new class ``dia_matrix`` : the sparse DIAgonal format
# * new class ``bsr_matrix`` : the Block CSR format
_mtype_to_str = {scipy.sparse.csc_matrix: "csc",
scipy.sparse.csr_matrix: "csr"}
......@@ -153,7 +153,8 @@ def verify_grad_sparse(op, pt, structured=False, *args, **kwargs):
:return: None
"""
conv_none = lambda x: x
def conv_none(x):
return x
def conv_csr(ind, indptr, shp):
def f(spdata):
......@@ -369,11 +370,11 @@ class SparseVariable(_sparse_py_operators, gof.Variable):
class SparseConstantSignature(tuple):
def __eq__(self, other):
(a, b), (x, y) = self, other
return a == x \
and (b.dtype == y.dtype)\
and (type(b) == type(y))\
and (b.shape == y.shape)\
and (abs(b - y).sum() < 1e-6 * b.nnz)
return (a == x and
(b.dtype == y.dtype) and
(type(b) == type(y)) and
(b.shape == y.shape) and
(abs(b - y).sum() < 1e-6 * b.nnz))
def __hash__(self):
(a, b) = self
......@@ -488,9 +489,10 @@ class CSMProperties(gof.Op):
csm = as_sparse_variable(csm)
assert csm.format in ["csr", "csc"]
data = tensor.TensorType(dtype=csm.type.dtype,
broadcastable=(False,)).make_variable()
broadcastable=(False,))()
return gof.Apply(self, [csm],
[data, tensor.ivector(), tensor.ivector(), tensor.ivector()])
[data, tensor.ivector(),
tensor.ivector(), tensor.ivector()])
def perform(self, node, inputs, out):
(csm,) = inputs
......@@ -646,7 +648,7 @@ class CSM(gof.Op):
return gof.Apply(self,
[data, indices, indptr, shape],
[SparseType(dtype=data.type.dtype,
format=self.format).make_variable()])
format=self.format)()])
def perform(self, node, inputs, outputs):
# for efficiency, if remap does nothing, then do not apply it
......@@ -834,7 +836,7 @@ class Cast(gof.op.Op):
assert x.format in ["csr", "csc"]
return gof.Apply(
self, [x],
[SparseType(dtype=self.out_type, format=x.format).make_variable()])
[SparseType(dtype=self.out_type, format=x.format)()])
def perform(self, node, inputs, outputs):
(x,) = inputs
......@@ -902,8 +904,8 @@ class DenseFromSparse(gof.op.Op):
self.sparse_grad = structured
def __eq__(self, other):
return (type(self) == type(other)) and \
(self.sparse_grad == other.sparse_grad)
return ((type(self) == type(other)) and
(self.sparse_grad == other.sparse_grad))
def __hash__(self):
return hash(type(self)) ^ hash(self.sparse_grad)
......@@ -918,8 +920,7 @@ class DenseFromSparse(gof.op.Op):
return gof.Apply(self,
[x],
[tensor.TensorType(dtype=x.type.dtype,
broadcastable=(False, False)
).make_variable()])
broadcastable=(False, False))()])
def perform(self, node, inputs, outputs):
(x,) = inputs
......@@ -1002,8 +1003,7 @@ class SparseFromDense(gof.op.Op):
return gof.Apply(self,
[x],
[SparseType(dtype=x.type.dtype,
format=self.format
).make_variable()])
format=self.format)()])
def perform(self, node, inputs, outputs):
(x,) = inputs
......@@ -1243,7 +1243,7 @@ class GetItem2d(gof.op.Op):
# def infer_shape(self, node, i0_shapes):
# return i0_shapes
def make_node(self, x, index):
scipy_ver = [ int(n) for n in scipy.__version__.split('.')[:2]]
scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]]
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
assert len(index) in [1, 2]
......@@ -1260,7 +1260,7 @@ class GetItem2d(gof.op.Op):
# If start or stop or step are None, make them a Generic
# constant. Else, they should be converted to Tensor Variables
# of dimension 1 and int/uint dtype.
if scipy_ver < [0, 14] and ind.step != None:
if scipy_ver < [0, 14] and ind.step is not None:
raise ValueError(
'Slice with step is not support with current'
' version of Scipy.')
......@@ -1301,8 +1301,8 @@ class GetItem2d(gof.op.Op):
stop.ndim, stop.dtype)
elif ((isinstance(ind, gof.Variable) and
getattr(ind, 'ndim', -1) == 0)
or numpy.isscalar(ind)):
getattr(ind, 'ndim', -1) == 0) or
numpy.isscalar(ind)):
raise NotImplementedError(
'Theano has no sparse vector' +
'Use X[a:b, c:d], X[a:b, c:c+1] or X[a:b] instead.')
......@@ -1438,8 +1438,7 @@ class Transpose(gof.op.Op):
return gof.Apply(self,
[x],
[SparseType(dtype=x.type.dtype,
format=self.format_map[x.type.format]
).make_variable()])
format=self.format_map[x.type.format])()])
def perform(self, node, inputs, outputs):
(x,) = inputs
......@@ -1836,7 +1835,7 @@ class SquareDiagonal(gof.op.Op):
def perform(self, node, inputs, outputs):
(z,) = outputs
diag, o_shape = inputs[0], inputs[0].shape * 2
diag = inputs[0]
N = len(diag)
data = diag[:N]
......@@ -1959,8 +1958,7 @@ class AddSS(gof.op.Op):
return gof.Apply(self,
[x, y],
[SparseType(dtype=out_dtype,
format=x.type.format
).make_variable()])
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
......@@ -2001,7 +1999,7 @@ class AddSSData(gof.op.Op):
return gof.Apply(self,
[x, y],
[SparseType(dtype=x.type.dtype,
format=x.type.format).make_variable()])
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
......@@ -2067,7 +2065,7 @@ class AddSD(gof.op.Op):
[x, y],
[tensor.TensorType(dtype=out_dtype,
broadcastable=y.type.broadcastable
).make_variable()])
)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
......@@ -2110,7 +2108,7 @@ class StructuredAddSV(gof.op.Op):
return gof.Apply(self,
[x, y],
[SparseType(dtype=x.type.dtype,
format=x.type.format).make_variable()])
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
......@@ -2222,10 +2220,10 @@ class MulSS(gof.op.Op):
assert x.format in ["csr", "csc"]
assert y.format in ["csr", "csc"]
out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)
return gof.Apply(self, [x, y],
return gof.Apply(self,
[x, y],
[SparseType(dtype=out_dtype,
format=x.type.format
)()])
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
......@@ -2300,7 +2298,6 @@ class MulSD(gof.op.Op):
out_dtype = node.outputs[0].dtype
if x.format == 'csc':
x_data = x.data
indices = x.indices
indptr = x.indptr
if x.dtype == out_dtype:
......@@ -2315,7 +2312,6 @@ class MulSD(gof.op.Op):
z_data[i_idx] *= y[i, j]
out[0] = z
elif x.format == 'csr':
x_data = x.data
indices = x.indices
indptr = x.indptr
if x.dtype == out_dtype:
......@@ -2368,7 +2364,7 @@ class MulSV(gof.op.Op):
return gof.Apply(self,
[x, y],
[SparseType(dtype=x.type.dtype,
format=x.type.format).make_variable()])
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
......@@ -2483,7 +2479,7 @@ class __ComparisonOpSS(gof.op.Op):
return gof.Apply(self,
[x, y],
[SparseType(dtype='uint8',
format=x.type.format).make_variable()])
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
......@@ -2527,7 +2523,7 @@ class __ComparisonOpSD(gof.op.Op):
return gof.Apply(self,
[x, y],
[SparseType(dtype='uint8',
format=x.type.format).make_variable()])
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
......@@ -2765,9 +2761,10 @@ class HStack(gof.op.Op):
for x in var:
assert x.format in ["csr", "csc"]
return gof.Apply(
self, var,
[SparseType(dtype=self.dtype, format=self.format).make_variable()])
return gof.Apply(self,
var,
[SparseType(dtype=self.dtype,
format=self.format)()])
def perform(self, node, block, outputs):
(out,) = outputs
......@@ -3213,8 +3210,7 @@ class TrueDot(gof.op.Op):
raise NotImplementedError()
inputs = [x, y] # Need to convert? e.g. assparse
outputs = [SparseType(dtype=x.type.dtype,
format=myformat).make_variable()]
outputs = [SparseType(dtype=x.type.dtype, format=myformat)()]
return gof.Apply(self, inputs, outputs)
def perform(self, node, inp, out_):
......@@ -3729,11 +3725,10 @@ def structured_dot_grad(sparse_A, dense_B, ga):
sdgcsx = sdg_csr
CSx = CSR
g_A_data = sdgcsx(csm_indices(sparse_A), \
g_A_data = sdgcsx(csm_indices(sparse_A),
csm_indptr(sparse_A), dense_B, ga)
return CSx(g_A_data, csm_indices(sparse_A), \
csm_indptr(sparse_A), \
csm_shape(sparse_A))
return CSx(g_A_data, csm_indices(sparse_A),
csm_indptr(sparse_A), csm_shape(sparse_A))
else:
raise NotImplementedError()
......@@ -3756,7 +3751,7 @@ class SamplingDot(gof.op.Op):
raise TypeError(p)
# TODO: use it.
dtype_out = scalar.upcast(x.type.dtype, y.type.dtype, p.type.dtype)
dtype_out = scalar.upcast(x.type.dtype, y.type.dtype, p.type.dtype) # noqa
return gof.Apply(self, [x, y, p], [p.type()])
......@@ -3857,7 +3852,8 @@ class Dot(gof.op.Op):
y_is_sparse_var = _is_sparse_variable(y)
if not x_is_sparse_var and not y_is_sparse_var:
raise TypeError("Sparse dot product should have at least one "
raise TypeError(
"Sparse dot product should have at least one "
"sparse variable as inputs, but the inputs are "
"%s (%s) and %s (%s)." % (x, x.type, y, y.type))
......
......@@ -12,6 +12,7 @@ from theano.sparse import (CSC, CSR, csm_properties,
from theano.sparse import basic as sparse
_is_sparse_variable = sparse._is_sparse_variable
_is_dense = sparse._is_dense
# This is tested in tests/test_opt.py:test_local_csm_properties_csm
......@@ -47,7 +48,8 @@ def local_inplace_remove0(node):
return [new_node]
return False
theano.compile.optdb.register('local_inplace_remove0',
theano.compile.optdb.register(
'local_inplace_remove0',
gof.TopoOptimizer(local_inplace_remove0,
failure_callback=gof.TopoOptimizer.warn_inplace),
60, 'fast_run', 'inplace')
......@@ -174,7 +176,8 @@ def local_inplace_addsd_ccode(node):
inplace=True)(*node.inputs)
return [new_node]
return False
theano.compile.optdb.register('local_inplace_addsd_ccode',
theano.compile.optdb.register(
'local_inplace_addsd_ccode',
gof.TopoOptimizer(local_inplace_addsd_ccode,
failure_callback=gof.TopoOptimizer.warn_inplace),
60, 'fast_run', 'inplace')
......@@ -234,7 +237,8 @@ class StructuredDotCSC(gof.Op):
def make_node(self, a_val, a_ind, a_ptr, a_nrows, b):
dtype_out = scalar.upcast(a_val.type.dtype, b.type.dtype)
r = gof.Apply(self, [a_val, a_ind, a_ptr, a_nrows, b],
[tensor.tensor(dtype_out, (False, b.type.broadcastable[1]))])
[tensor.tensor(dtype_out,
(False, b.type.broadcastable[1]))])
return r
def perform(self, node, inputs, outputs):
......@@ -243,7 +247,7 @@ class StructuredDotCSC(gof.Op):
a = scipy.sparse.csc_matrix((a_val, a_ind, a_ptr),
(a_nrows, b.shape[0]),
copy=False)
#out[0] = a.dot(b)
# out[0] = a.dot(b)
out[0] = theano._asarray(a * b, dtype=node.outputs[0].type.dtype)
assert _is_dense(out[0]) # scipy 0.7 automatically converts to dense
......@@ -427,17 +431,18 @@ class StructuredDotCSR(gof.Op):
def make_node(self, a_val, a_ind, a_ptr, b):
self.dtype_out = scalar.upcast(a_val.type.dtype, b.type.dtype)
r = gof.Apply(self, [a_val, a_ind, a_ptr, b],
[tensor.tensor(self.dtype_out, (False,
b.type.broadcastable[1]))])
[tensor.tensor(self.dtype_out,
(False, b.type.broadcastable[1]))])
return r
def perform(self, node, inputs, outputs):
(a_val, a_ind, a_ptr, b) = inputs
(out,) = outputs
a = scipy.sparse.csr_matrix((a_val, a_ind, a_ptr),
a = scipy.sparse.csr_matrix(
(a_val, a_ind, a_ptr),
(len(a_ptr) - 1, b.shape[0]),
copy=True) # use view_map before setting this to False
#out[0] = a.dot(b)
# out[0] = a.dot(b)
out[0] = a * b
# scipy 0.7 automatically converts to dense, but not .6 sometimes
assert _is_dense(out[0])
......@@ -653,7 +658,8 @@ class UsmmCscDense(gof.Op):
if dtype_out != z.type.dtype:
z = tensor.cast(z, dtype_out)
r = gof.Apply(self, [alpha, x_val, x_ind, x_ptr, x_nrows, y, z],
r = gof.Apply(
self, [alpha, x_val, x_ind, x_ptr, x_nrows, y, z],
[tensor.tensor(dtype_out, (False, y.type.broadcastable[1]))])
return r
......@@ -1019,7 +1025,7 @@ def local_csm_grad_c(node):
return [csm_grad_c(*node.inputs)]
return False
# DISABLED AS IT IS BROKEN FOR UNSORTED INDICES!
#register_specialize(local_csm_grad_c, 'cxx_only')
# register_specialize(local_csm_grad_c, 'cxx_only')
class MulSDCSC(gof.Op):
......@@ -1572,7 +1578,7 @@ def local_structured_add_s_v(node):
x, y = node.inputs
x_is_sparse_variable = _is_sparse_variable(x)
#y_is_sparse_variable = _is_sparse_variable(y)
# y_is_sparse_variable = _is_sparse_variable(y)
if x_is_sparse_variable:
svar = x
......
......@@ -228,10 +228,7 @@ whitelist_flake8 = [
"misc/tests/test_pycuda_example.py",
"misc/hooks/reindent.py",
"misc/hooks/check_whitespace.py",
"sparse/type.py",
"sparse/__init__.py",
"sparse/opt.py",
"sparse/basic.py",
"sparse/tests/test_utils.py",
"sparse/tests/test_opt.py",
"sparse/tests/test_basic.py",
......
Markdown format
0%
You have attached 0 files to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment