Commit 19092641 authored by Frederic

pep8 fix.

Parent fce0a1e8
from theano.sparse.basic import * # To facilitate later merge into sparse module
from theano.sparse.basic import _is_sparse, _is_sparse_variable, \
_is_dense_variable, _is_sparse, _is_dense, _kmap_eq, _kmap_hash
from theano.sparse.basic import (
_is_sparse, _is_sparse_variable, _is_dense_variable,
_is_sparse, _is_dense, _kmap_eq, _kmap_hash)
class Cast(gof.op.Op):
    # Cast a sparse variable's data to `out_type`, keeping its sparse
    # format.  NOTE(review): this class is truncated by the diff hunk
    # right after `out[0] = x` — presumably the hidden part performs the
    # actual astype conversion; confirm against the full file.

    def __init__(self, out_type):
        # Target dtype name (e.g. 'float32') — takes part in eq/hash so
        # casts to different dtypes are distinct ops.
        self.out_type = out_type

    def __eq__(self, other):
        return (type(self) == type(other)) and self.out_type == other.out_type

    def __hash__(self):
        return hash(type(self)) ^ hash(self.out_type)

    def make_node(self, x):
        # Output keeps the input's sparse format; only the dtype changes.
        x = as_sparse_variable(x)
        return gof.Apply(self, [x],
                [SparseType(dtype=self.out_type, format=x.format).make_variable()])

    def perform(self, node, (x, ), (out, )):
        assert _is_sparse(x)
        out[0] = x
......@@ -20,31 +26,40 @@ class Cast(gof.op.Op):
# Pre-instantiated casts to the two common float dtypes.
fcast = Cast('float32')
dcast = Cast('float64')
class Poisson(gof.op.Op):
    """Replace every explicitly-stored element of a sparse matrix by a
    draw from a Poisson distribution with that element as its mean.

    Stateless op: equality and hash depend only on the type.
    """

    def __eq__(self, other):
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))

    def make_node(self, x):
        # Output has exactly the same sparse type/format as the input.
        x = as_sparse_variable(x)
        return gof.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, outputs):
        # Explicit unpacking instead of Python-2-only tuple parameters
        # (removed by PEP 3113).
        (x,) = inputs
        (out,) = outputs
        assert _is_sparse(x)
        out[0] = x.copy()
        # The diff residue carried this assignment twice (old + pep8
        # reformatting); the sampler must be applied exactly once.
        out[0].data = numpy.asarray(numpy.random.poisson(out[0].data),
                                    dtype=x.dtype)
        # Poisson draws of 0 become explicit zeros; drop them to keep
        # the matrix genuinely sparse.
        out[0].eliminate_zeros()
poisson = Poisson()
class Multinomial(gof.op.Op):
    # Row-wise multinomial sampling: each row of the sparse matrix p is a
    # probability vector, n gives the count of draws per row.
    # NOTE(review): perform's body is truncated by the diff hunk below.

    def __eq__(self, other):
        # Stateless op: all instances compare equal.
        return (type(self) == type(other))

    def __hash__(self):
        return hash(type(self))

    def make_node(self, n, p):
        # n: tensor of draw counts; p: sparse matrix of probabilities.
        # Output has the same sparse type as p.
        n = tensor.as_tensor_variable(n)
        p = as_sparse_variable(p)
        return gof.Apply(self, [n, p], [p.type()])

    def perform(self, node, (n, p), (out, )):
        assert _is_sparse(p)
......@@ -53,54 +68,68 @@ class Multinomial(gof.op.Op):
out[0] = p.copy()
# Row i's explicitly-stored probabilities live in data[k:l]; sample a
# multinomial over them with n[i] draws.  (The diff residue repeated
# the k, l assignment twice — old and pep8 versions; keep one.)
for i in xrange(p.shape[0]):
    k, l = p.indptr[i], p.indptr[i + 1]
    out[0].data[k:l] = numpy.random.multinomial(n[i], p.data[k:l])
multinomial = Multinomial()
class EliminateZeros(gof.op.Op):
    """Return a copy of a sparse matrix with explicitly-stored zeros
    removed from its index structure (the represented values are
    unchanged).

    Stateless op.
    """

    def __eq__(self, other):
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))

    def make_node(self, x):
        x = as_sparse_variable(x)
        return gof.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, outputs):
        # Explicit unpacking instead of Python-2-only tuple parameters
        # (removed by PEP 3113).
        (x,) = inputs
        (out,) = outputs
        assert _is_sparse(x)
        out[0] = x.copy()
        out[0].eliminate_zeros()
eliminate_zeros = EliminateZeros()
class Sum(gof.op.Op):
    """Sum a sparse matrix along axis `a`, returning a dense 1-d tensor
    of the input's dtype.

    Stateless op.
    """

    def __eq__(self, other):
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))

    def make_node(self, x, a):
        x = as_sparse_variable(x)
        a = tensor.as_tensor_variable(a)
        # The diff residue carried this return twice (old formatting and
        # pep8 formatting); exactly one is kept.
        return gof.Apply(self, [x, a], [tensor.TensorType(dtype=x.type.dtype,
                broadcastable=(False,)).make_variable()])

    def perform(self, node, inputs, outputs):
        # Explicit unpacking instead of Python-2-only tuple parameters.
        (x, a) = inputs
        (out,) = outputs
        assert _is_sparse(x)
        # scipy's .sum() returns a dense matrix; flatten to 1-d.
        out[0] = numpy.asarray(x.sum(a), dtype=x.dtype).flatten()
sum = Sum()
class Binomial(gof.op.Op):
    """Generate a random sparse matrix in the given format/dtype whose
    nonzero structure is drawn at random (see perform, truncated here).

    Parametrized op: `format` and `dtype` take part in equality/hash so
    different instantiations are not merged by the graph optimizer.
    """

    def __init__(self, format, dtype):
        self.format = format  # sparse format of the output ('csr'/'csc')
        self.dtype = dtype    # dtype of the output data

    def __eq__(self, other):
        # The diff residue carried both the old and the pep8 version of
        # this return; exactly one is kept.
        return ((type(self) == type(other)) and
                self.format == other.format and
                self.dtype == other.dtype)

    def __hash__(self):
        return hash(type(self)) ^ hash(self.format) ^ hash(self.dtype)

    def make_node(self, n, p, shape):
        # n: number of trials; p: success probability; shape: 2-d output
        # shape.  All are converted to tensor variables.
        n = tensor.as_tensor_variable(n)
        p = tensor.as_tensor_variable(p)
        shape = tensor.as_tensor_variable(shape)
        return gof.Apply(self, [n, p, shape], [SparseType(dtype=self.dtype,
                format=self.format).make_variable()])

    def perform(self, node, inputs, outputs):
        # Explicit unpacking instead of Python-2-only tuple parameters.
        (n, p, shape) = inputs
        (out,) = outputs
        # Expected number of nonzero entries.
        N = n * p * shape[0] * shape[1]
        data = numpy.ones(N, dtype=self.dtype)
......@@ -116,6 +145,7 @@ csc_fbinomial = Binomial('csc', 'float32')
csr_dbinomial = Binomial('csr', 'float64')
csc_dbinomial = Binomial('csc', 'float64')
def structured_sigmoid(x):
"""
Element-wise sigmoid function only to the non-zero elements.
......@@ -179,8 +209,10 @@ class StructuredAddSV(gof.op.Op):
matrix.'''
def __eq__(self, other):
    # Stateless op: all instances compare equal.
    return (type(self) == type(other))

def __hash__(self):
    return hash(type(self))
def make_node(self, x, y):
x = as_sparse_variable(x)
y = tensor.as_tensor_variable(y)
......@@ -191,12 +223,14 @@ class StructuredAddSV(gof.op.Op):
raise NotImplementedError()
return gof.Apply(self,
[x, y],
[SparseType(dtype = x.type.dtype,
format = x.type.format).make_variable()])
[SparseType(dtype=x.type.dtype,
format=x.type.format).make_variable()])
def perform(self, node, inputs, outputs):
    """Add the dense vector y to the structurally nonzero elements of
    sparse matrix x, keeping zeros zero."""
    # Explicit unpacking instead of Python-2-only tuple parameters
    # (removed by PEP 3113).
    (x, y) = inputs
    (out,) = outputs
    assert _is_sparse(x) and not _is_sparse(y)
    # y is broadcast across rows, so it must match the column count.
    assert x.shape[1] == y.shape[0]
    # (x.toarray() != 0) masks y so only positions that are explicitly
    # stored in x receive the addition; rebuild in x's sparse class.
    out[0] = x.__class__(x + (x.toarray() != 0) * y)
def grad(self, (x, y), (gz,)):
assert _is_sparse_variable(x) and _is_sparse_variable(y)
assert _is_sparse_variable(gz)
......@@ -207,14 +241,18 @@ structured_add_s_v = StructuredAddSV()
class StrucutedAddSVCSR(gof.Op):
def __eq__(self, other):
    # Stateless op: all instances compare equal.
    return (type(self) == type(other))

def __hash__(self):
    return hash(type(self))
def make_node(self, a_data, a_indices, a_indptr, b):
    # a_data/a_indices/a_indptr: the raw CSR component tensors of the
    # sparse operand; b: the dense vector to add (must be 1-d).
    # The output is a dense 1-d tensor with b's dtype.
    assert b.type.ndim == 1
    return gof.Apply(self, [a_data, a_indices, a_indptr, b],
            [tensor.tensor(b.dtype, (False,))])
def c_code(self, node, name, inputs, outputs, sub):
    # The diff residue kept the old Python-2 tuple-parameter signature
    # alongside the new one; only the new-style signature remains.
    _data, _indices, _indptr, _b, = inputs
    _zout, = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
......@@ -272,16 +310,17 @@ class StrucutedAddSVCSR(gof.Op):
}
}
"""% dict(locals(), **sub)
""" % dict(locals(), **sub)
structured_add_s_v_csr = StrucutedAddSVCSR()
@gof.local_optimizer([structured_add_s_v])
def local_structured_add_s_v(node):
    # Graph optimization: rewrite a structured_add_s_v node into its
    # CSR-specialized form.  NOTE(review): the function body is
    # truncated by the diff hunk below; both the active and the
    # commented-out y_is_sparse_variable lines come from the diff's
    # old/new versions (the new version comments it out as unused).
    if node.op == structured_add_s_v:
        x, y = node.inputs
        x_is_sparse_variable = _is_sparse_variable(x)
        y_is_sparse_variable = _is_sparse_variable(y)
        #y_is_sparse_variable = _is_sparse_variable(y)
        if x_is_sparse_variable:
            svar = x
......@@ -310,14 +349,19 @@ register_specialize(local_structured_add_s_v)
class SamplingDot(gof.op.Op):
    """
    Operand for calculating the dot product DOT(X, Y) = Z when you
    only want to calculate a subset of Z.  It is equivalent to
    P o (X . Y) where o is the element-wise product, X and Y operands
    of the dot product and P is a matrix that contains 1 when the
    corresponding element of Z should be calculated and 0 when it
    shouldn't.  Note that SamplingDot has a different interface than
    DOT because SamplingDot requires X to be a MxK matrix while Y is
    a NxK matrix instead of the usual KxN matrix.

    It will work if the pattern is not binary value, but if the
    pattern doesn't have a high sparsity proportion it will be slower
    then a more optimized dot followed by a normal elemwise
    multiplication.
    """
def __eq__(self, other):
......@@ -364,6 +408,7 @@ class SamplingDot(gof.op.Op):
return rval
sampling_dot = SamplingDot()
class SamplingDotCsr(gof.Op):
"""
Optimized SamplingDot when the pattern P is a CSR matrix.
......@@ -391,7 +436,8 @@ class SamplingDotCsr(gof.Op):
assert p_ncols.dtype == 'int32'
dtype_out = scalar.upcast(x.type.dtype, y.type.dtype, p_data.type.dtype)
dtype_out = scalar.upcast(x.type.dtype, y.type.dtype,
p_data.type.dtype)
dot_out = scalar.upcast(x.type.dtype, y.type.dtype)
# We call blas ?dot function that take only param of the same type
......@@ -420,15 +466,20 @@ class SamplingDotCsr(gof.Op):
def c_header_dirs(self):
    # Include directories for the BLAS headers needed by the generated
    # C code (the ?dot_sub_ calls below).
    return blas.ldflags(libs=False, include_dir=True)
def c_code(self, node, name, inputs, outputs, sub):
    # The diff residue kept the old Python-2 tuple-parameter signature
    # and the old single-line statements alongside their pep8
    # replacements; only the new versions remain.
    x, y, p_data, p_ind, p_ptr, p_ncols = inputs
    z_data, z_ind, z_ptr = outputs
    if node.inputs[0].type.dtype in ('complex64', 'complex128'):
        raise NotImplementedError('Complex types are not supported for x')
    if node.inputs[1].type.dtype in ('complex64', 'complex128'):
        raise NotImplementedError('Complex types are not supported for y')
    if node.inputs[2].type.dtype in ('complex64', 'complex128'):
        raise NotImplementedError(
            'Complex types are not supported for pattern')

    # TODO: why 2 times the same inputs?
    dot_out = scalar.upcast(node.inputs[0].type.dtype,
                            node.inputs[0].type.dtype)
if dot_out == "float32":
conv_type = "float"
......@@ -437,12 +488,16 @@ class SamplingDotCsr(gof.Op):
conv_type = "double"
cdot = "ddot_sub_"
# retrieve dtype number (the diff residue duplicated each of these
# lines in old and pep8 form; one copy of each is kept)
typenum_x = node.inputs[0].type.dtype_specs()[-1]
typenum_y = node.inputs[1].type.dtype_specs()[-1]
typenum_p = node.inputs[2].type.dtype_specs()[-1]
typenum_zd = tensor.TensorType(node.outputs[0].dtype,
                               []).dtype_specs()[-1]
typenum_zi = tensor.TensorType(node.outputs[1].dtype,
                               []).dtype_specs()[-1]
typenum_zp = tensor.TensorType(node.outputs[2].dtype,
                               []).dtype_specs()[-1]
rval = """
if (%(x)s->nd != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(x) != 2"); %(fail)s;}
......@@ -531,11 +586,12 @@ class SamplingDotCsr(gof.Op):
}
}
}
"""% dict(locals(), **sub)
""" % dict(locals(), **sub)
return rval
sampling_dot_csr = SamplingDotCsr()
# register a specialization to replace SamplingDot -> SamplingDotCsr
@gof.local_optimizer([sampling_dot])
def local_sampling_dot_csr(node):
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment