提交 554cde1c authored 作者: abergeron

Merge pull request #2864 from dwf/tuple_params

Respect PEP3113 (no more tuple unpacking arguments)
......@@ -36,8 +36,9 @@ def memodict(f):
def make_depends():
@memodict
def depends((a, b)):
def depends(pair):
""" Returns True if a depends on b """
a, b = pair
return (any(bout in a.inputs for bout in b.outputs)
or any(depends((ainp.owner, b)) for ainp in a.inputs
if ainp.owner))
......
......@@ -84,7 +84,9 @@ class DotModulo(Op):
def make_node(self, A, s, m, A2, s2, m2):
return Apply(self, [A, s, m, A2, s2, m2], [s.type()])
def perform(self, node, (A, s, m, A2, s2, m2), (out, )):
def perform(self, node, inputs, outputs):
(A, s, m, A2, s2, m2) = inputs
(out,) = outputs
o1 = matVecModM(A, s, m)
o2 = matVecModM(A2, s2, m2)
out[0] = numpy.concatenate((o1, o2))
......@@ -92,7 +94,9 @@ class DotModulo(Op):
def c_code_cache_version(self):
return (6,)
def c_code(self, node, name, (_A, _s, _m, _A2, _s2, _m2), (_z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(_A, _s, _m, _A2, _s2, _m2) = inputs
(_z,) = outputs
return """
int osize = -1;
if (PyArray_NDIM(%(_A)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(A) != 2"); %(fail)s;}
......
差异被折叠。
......@@ -171,7 +171,9 @@ class Gamma(UnaryScalarOp):
else:
super(Gamma, self).impl(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -182,7 +184,9 @@ class Gamma(UnaryScalarOp):
return gz * gamma(x) * psi(x),
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in float_types:
return """%(z)s = tgamma(%(x)s);""" % locals()
raise NotImplementedError('only floating point is implemented')
......
差异被折叠。
......@@ -105,7 +105,9 @@ class AddSD_ccode(gof.op.Op):
[data, indices, indptr, y],
[out])
def c_code(self, node, name, (_data, _indices, _indptr, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(_data, _indices, _indptr, y) = inputs
(z,) = outputs
inplace = int(self.inplace)
format = {'csc': 0, 'csr': 1}[self.format]
out_typenum = node.outputs[0].type.dtype_specs()[2]
......@@ -236,7 +238,9 @@ class StructuredDotCSC(gof.Op):
[tensor.tensor(dtype_out, (False, b.type.broadcastable[1]))])
return r
def perform(self, node, (a_val, a_ind, a_ptr, a_nrows, b), (out,)):
def perform(self, node, inputs, outputs):
(a_val, a_ind, a_ptr, a_nrows, b) = inputs
(out,) = outputs
a = scipy.sparse.csc_matrix((a_val, a_ind, a_ptr),
(a_nrows, b.shape[0]),
copy=False)
......@@ -244,7 +248,7 @@ class StructuredDotCSC(gof.Op):
out[0] = theano._asarray(a * b, dtype=node.outputs[0].type.dtype)
assert _is_dense(out[0]) # scipy 0.7 automatically converts to dense
def c_code(self, node, name, (a_val, a_ind, a_ptr, a_nrows, b), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
# C-implementation of the dot product of the sparse matrix A and matrix
# B.
# @param a_val: non-zero values of the sparse matrix
......@@ -257,6 +261,8 @@ class StructuredDotCSC(gof.Op):
# @param z: return value
# @param sub: TODO, not too sure, something to do with weave probably
(a_val, a_ind, a_ptr, a_nrows, b) = inputs
(z,) = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
if node.inputs[4].type.dtype in ('complex64', 'complex128'):
......@@ -426,7 +432,9 @@ class StructuredDotCSR(gof.Op):
b.type.broadcastable[1]))])
return r
def perform(self, node, (a_val, a_ind, a_ptr, b), (out,)):
def perform(self, node, inputs, outputs):
(a_val, a_ind, a_ptr, b) = inputs
(out,) = outputs
a = scipy.sparse.csr_matrix((a_val, a_ind, a_ptr),
(len(a_ptr) - 1, b.shape[0]),
copy=True) # use view_map before setting this to False
......@@ -435,7 +443,7 @@ class StructuredDotCSR(gof.Op):
# scipy 0.7 automatically converts to dense, but not .6 sometimes
assert _is_dense(out[0])
def c_code(self, node, name, (a_val, a_ind, a_ptr, b), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
"""
C-implementation of the dot product of the sparse matrix A and matrix
B.
......@@ -449,7 +457,8 @@ class StructuredDotCSR(gof.Op):
@param z: return value
@param sub: TODO, not too sure, something to do with weave probably
"""
# retrieve dtype number
(a_val, a_ind, a_ptr, b) = inputs
(z,) = outputs
typenum_z = tensor.TensorType(self.dtype_out, []).dtype_specs()[2]
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
......@@ -890,9 +899,11 @@ class CSMGradC(gof.Op):
return gof.Apply(self, [a_val, a_ind, a_ptr, a_dim,
b_val, b_ind, b_ptr, b_dim], [b_val.type()])
def c_code(self, node, name, (a_val, a_ind, a_ptr, a_dim,
b_val, b_ind, b_ptr, b_dim), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
# retrieve dtype number
(a_val, a_ind, a_ptr, a_dim,
b_val, b_ind, b_ptr, b_dim) = inputs
(z,) = outputs
typenum_z = node.outputs[0].type.dtype_specs()[2]
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
......@@ -1047,9 +1058,10 @@ class MulSDCSC(gof.Op):
# def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):
# return NotImplementedError()
def c_code(self, node, name, (_data, _indices, _indptr, _b,),
(_zout, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(_data, _indices, _indptr, _b,) = inputs
(_zout,) = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
......@@ -1163,9 +1175,10 @@ class MulSDCSR(gof.Op):
# def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):
# return NotImplemented()
def c_code(self, node, name, (_data, _indices, _indptr, _b,),
(_zout, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(_data, _indices, _indptr, _b,) = inputs
(_zout,) = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
......
......@@ -42,18 +42,20 @@ class ConvolutionIndices(Op):
"""
@staticmethod
def sparse_eval(inshp, kshp, nkern, (dx, dy)=(1, 1), mode='valid'):
def sparse_eval(inshp, kshp, nkern, strides=(1, 1), mode='valid'):
(dx, dy) = strides
return convolution_indices.evaluate(inshp, kshp, (dx, dy),
nkern, mode=mode, ws=False)
@staticmethod
def conv_eval(inshp, kshp, (dx, dy)=(1, 1), mode='valid'):
def conv_eval(inshp, kshp, strides=(1, 1), mode='valid'):
(dx, dy) = strides
return convolution_indices.evaluate(inshp, kshp, (dx, dy),
mode=mode, ws=True)
# img_shape and ker_shape are (height,width)
@staticmethod
def evaluate(inshp, kshp, (dx, dy)=(1, 1), nkern=1, mode='valid', ws=True):
def evaluate(inshp, kshp, strides=(1, 1), nkern=1, mode='valid', ws=True):
"""Build a sparse matrix which can be used for performing...
* convolution: in this case, the dot product of this matrix
with the input images will generate a stack of images
......@@ -79,6 +81,7 @@ class ConvolutionIndices(Op):
:returns: the structure of a sparse matrix, and the logical dimensions
of the image which will be the result of filtering.
"""
(dx, dy) = strides
N = numpy
# inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
......@@ -251,8 +254,9 @@ class ConvolutionIndices(Op):
return rval
def perform(self, node, (inshp, kshp),\
(out_indices, out_indptr, spmat_shape)):
def perform(self, node, inputs, outputs):
(inshp, kshp) = inputs
(out_indices, out_indptr, spmat_shape) = outputs
indices, indptr, spmatshp, outshp = self.evaluate(inshp, kshp)
out_indices[0] = indices
out_indptr[0] = indptr
......
......@@ -71,7 +71,9 @@ class Poisson(gof.op.Op):
x = as_sparse_variable(x)
return gof.Apply(self, [x], [x.type()])
def perform(self, node, (x, ), (out, )):
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
assert _is_sparse(x)
assert x.format in ["csr", "csc"]
out[0] = x.copy()
......@@ -130,7 +132,9 @@ class Binomial(gof.op.Op):
[SparseType(dtype=self.dtype,
format=self.format).make_variable()])
def perform(self, node, (n, p, shape, ), (out, )):
def perform(self, node, inputs, outputs):
(n, p, shape) = inputs
(out,) = outputs
binomial = numpy.random.binomial(n, p, size=shape)
csx_matrix = getattr(scipy.sparse, self.format + '_matrix')
out[0] = csx_matrix(binomial, dtype=self.dtype)
......@@ -138,7 +142,9 @@ class Binomial(gof.op.Op):
def connection_pattern(self, node):
return [[True], [True], [False]]
def grad(self, (n, p, shape, ), (gz,)):
def grad(self, inputs, gout):
(n, p, shape) = inputs
(gz,) = gout
comment_n = "No gradient exists for the number of samples in class\
Binomial of theano/sparse/sandbox/sp2.py"
comment_p = "No gradient exists for the prob of success in class\
......@@ -196,7 +202,9 @@ class Multinomial(gof.op.Op):
return gof.Apply(self, [n, p], [p.type()])
def perform(self, node, (n, p), (out, )):
def perform(self, node, inputs, outputs):
(n, p) = inputs
(out,) = outputs
assert _is_sparse(p)
if p.format != 'csr':
......
......@@ -186,11 +186,15 @@ class T_verify_grad_sparse(unittest.TestCase):
x = as_sparse_variable(x)
return gof.Apply(self, [x], [x.type()])
def perform(self, node, (x, ), (out, )):
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
assert _is_sparse(x)
out[0] = -x
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
assert _is_sparse_variable(x) and _is_sparse_variable(gz)
if self.structured:
return sp_ones_like(x) * dense_from_sparse(gz),
......
......@@ -5159,10 +5159,14 @@ class Diagonal(Op):
return Apply(self, [x], [tensor(dtype=x.dtype,
broadcastable=[False] * (x.ndim - 1))])
def perform(self, node, (x,), (z,)):
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
z[0] = x.diagonal(self.offset, self.axis1, self.axis2)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
return [grad_not_implemented(self, 0, x)]
def infer_shape(self, node, shapes):
......@@ -5207,10 +5211,12 @@ class Diag(Op):
return Apply(self, [diag], [matrix(dtype=diag.dtype)])
def perform(self, node, inputs, (z,)):
def perform(self, node, inputs, outputs):
(z,) = outputs
z[0] = numpy.diag(inputs[0])
def grad(self, inputs, (gz,)):
def grad(self, inputs, gout):
(gz,) = gout
return [diagonal(gz)]
def infer_shape(self, nodes, shapes):
......@@ -5435,7 +5441,8 @@ class Choose(Op):
o = TensorType(choice.dtype, bcast)
return Apply(self, [a, choice], [o()])
def perform(self, node, inputs, (z, )):
def perform(self, node, inputs, outputs):
(z,) = outputs
a = inputs[0]
choice = inputs[1]
# TODO reuse out?
......
......@@ -593,7 +593,9 @@ class RepeatOp(theano.Op):
return [[True], [False]]
def grad(self, (x, repeats), (gz, )):
def grad(self, inputs, gout):
(x, repeats) = inputs
(gz,) = gout
if repeats.ndim == 0:
if self.axis is None:
axis = x.ndim
......
......@@ -42,7 +42,9 @@ class MatrixPinv(Op):
assert x.ndim == 2
return Apply(self, [x], [x.type()])
def perform(self, node, (x,), (z, )):
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
z[0] = numpy.linalg.pinv(x).astype(x.dtype)
pinv = MatrixPinv()
......@@ -69,7 +71,9 @@ class MatrixInverse(Op):
assert x.ndim == 2
return Apply(self, [x], [x.type()])
def perform(self, node, (x,), (z, )):
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
z[0] = numpy.linalg.inv(x).astype(x.dtype)
def grad(self, inputs, g_outputs):
......@@ -149,7 +153,9 @@ class AllocDiag(Op):
def grad(self, inputs, g_outputs):
return [extract_diag(g_outputs[0])]
def perform(self, node, (x,), (z,)):
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
if x.ndim != 1:
raise TypeError(x)
z[0] = numpy.diag(x)
......@@ -264,7 +270,9 @@ class Det(Op):
o = theano.tensor.scalar(dtype=x.dtype)
return Apply(self, [x], [o])
def perform(self, node, (x,), (z, )):
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
try:
z[0] = numpy.asarray(numpy.linalg.det(x), dtype=x.dtype)
except Exception:
......@@ -298,7 +306,9 @@ class Eig(Op):
v = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [w, v])
def perform(self, node, (x,), (w, v)):
def perform(self, node, inputs, outputs):
(x,) = inputs
(w, v) = outputs
w[0], v[0] = [z.astype(x.dtype) for z in self._numop(x)]
def infer_shape(self, node, shapes):
......@@ -333,7 +343,9 @@ class Eigh(Eig):
v = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [w, v])
def perform(self, node, (x,), (w, v)):
def perform(self, node, inputs, outputs):
(x,) = inputs
(w, v) = outputs
w[0], v[0] = self._numop(x, self.UPLO)
def grad(self, inputs, g_outputs):
......@@ -466,7 +478,9 @@ class QRFull(Op):
return Apply(self, [x], [q, r])
def perform(self, node, (x,), (q, r)):
def perform(self, node, inputs, outputs):
(x,) = inputs
(q, r) = outputs
assert x.ndim == 2, "The input of qr function should be a matrix."
q[0], r[0] = self._numop(x, self.mode)
......@@ -489,7 +503,9 @@ class QRIncomplete(Op):
q = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [q])
def perform(self, node, (x,), (q,)):
def perform(self, node, inputs, outputs):
(x,) = inputs
(q,) = outputs
assert x.ndim == 2, "The input of qr function should be a matrix."
q[0] = self._numop(x,
self.mode)
......@@ -594,7 +610,9 @@ class SVD(Op):
v = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [w, u, v])
def perform(self, node, (x,), (w, u, v)):
def perform(self, node, inputs, outputs):
(x,) = inputs
(w, u, v) = outputs
assert x.ndim == 2, "The input of svd function should be a matrix."
w[0], u[0], v[0] = self._numop(x,
self.full_matrices,
......
......@@ -232,7 +232,8 @@ class Eigvalsh(Op):
w = theano.tensor.vector(dtype=out_dtype)
return Apply(self, [a, b], [w])
def perform(self, node, inputs, (w,)):
def perform(self, node, inputs, outputs):
(w,) = outputs
if len(inputs) == 2:
w[0] = scipy.linalg.eigvalsh(a=inputs[0], b=inputs[1], lower=self.lower)
else:
......@@ -288,7 +289,8 @@ class EigvalshGrad(Op):
out2 = theano.tensor.matrix(dtype=out_dtype)
return Apply(self, [a, b, gw], [out1, out2])
def perform(self, node, (a, b, gw), outputs):
def perform(self, node, inputs, outputs):
(a, b, gw) = inputs
w, v = scipy.linalg.eigh(a, b, lower=self.lower)
gA = v.dot(numpy.diag(gw).dot(v.T))
gB = - v.dot(numpy.diag(gw*w).dot(v.T))
......@@ -353,10 +355,14 @@ class Expm(Op):
expm = theano.tensor.matrix(dtype=A.dtype)
return Apply(self, [A, ], [expm, ])
def perform(self, node, (A,), (expm,)):
def perform(self, node, inputs, outputs):
(A,) = inputs
(expm,) = outputs
expm[0] = scipy.linalg.expm(A)
def grad(self, (A,), (g_out,)):
def grad(self, inputs, outputs):
(A,) = inputs
(g_out,) = outputs
return [ExpmGrad()(A, g_out)]
def infer_shape(self, node, shapes):
......@@ -378,10 +384,12 @@ class ExpmGrad(Op):
def infer_shape(self, node, shapes):
return [shapes[0]]
def perform(self, node, (A, gA), (out,)):
def perform(self, node, inputs, outputs):
# Kalbfleisch and Lawless, J. Am. Stat. Assoc. 80 (1985) Equation 3.4
# Kind of... You need to do some algebra from there to arrive at
# this expression.
(A, gA) = inputs
(out,) = outputs
w, V = scipy.linalg.eig(A, right=True)
U = scipy.linalg.inv(V).T
......
......@@ -1233,7 +1233,9 @@ def test_not_implemented_elemwise_grad():
def impl(self, n, x):
return x * n
def grad(self, (n, x), (gz,)):
def grad(self, inputs, gout):
(n, x) = inputs
(gz,) = gout
dy_dx = n
return [theano.gradient.grad_not_implemented(self, 0, n),
gz * dy_dx]
......
......@@ -1421,7 +1421,9 @@ class TimesN(theano.scalar.basic.UnaryScalarOp):
float %(nodename)s_timesn(float x) { return x * %(n)s; }
""" % locals()
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
return "%(z)s = %(name)s_timesn(%(x)s);" % locals()
......
......@@ -80,7 +80,9 @@ class GetItem(Op):
else:
raise TypeError('Expected scalar or slice as index.')
def perform(self, node, (x, index), (out, )):
def perform(self, node, inputs, outputs):
(x, index) = inputs
(out,) = outputs
if not isinstance(index, slice):
index = int(index)
out[0] = x[index]
......@@ -137,7 +139,9 @@ class Append(Op):
assert x.ttype == toAppend.type, (x.ttype, toAppend.type)
return Apply(self, [x, toAppend], [x.type()])
def perform(self, node, (x, toAppend), (out, )):
def perform(self, node, inputs, outputs):
(x, toAppend) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
......@@ -209,7 +213,9 @@ class Extend(Op):
assert x.type == toAppend.type
return Apply(self, [x, toAppend], [x.type()])
def perform(self, node, (x, toAppend), (out, )):
def perform(self, node, inputs, outputs):
(x, toAppend) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
......@@ -292,7 +298,9 @@ class Insert(Op):
assert isinstance(index, T.TensorVariable) and index.ndim == 0
return Apply(self, [x, index, toInsert], [x.type()])
def perform(self, node, (x, index, toInsert), (out, )):
def perform(self, node, inputs, outputs):
(x, index, toInsert) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
......@@ -360,8 +368,9 @@ class Remove(Op):
assert x.ttype == toRemove.type
return Apply(self, [x, toRemove], [x.type()])
def perform(self, node, (x, toRemove), (out, )):
def perform(self, node, inputs, outputs):
(x, toRemove) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
......@@ -413,8 +422,8 @@ class Reverse(Op):
assert isinstance(x.type, TypedListType)
return Apply(self, [x], [x.type()])
def perform(self, node, inp, (out, )):
def perform(self, node, inp, outputs):
(out,) = outputs
if not self.inplace:
out[0] = list(inp[0])
else:
......@@ -470,12 +479,14 @@ class Index(Op):
assert x.ttype == elem.type
return Apply(self, [x, elem], [T.scalar()])
def perform(self, node, (x, elem), (out, )):
def perform(self, node, inputs, outputs):
"""
inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list
"""
(x, elem) = inputs
(out,) = outputs
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
out[0] = numpy.asarray(y, dtype=theano.config.floatX)
......@@ -500,12 +511,14 @@ class Count(Op):
assert x.ttype == elem.type
return Apply(self, [x, elem], [T.scalar()])
def perform(self, node, (x, elem), (out, )):
def perform(self, node, inputs, outputs):
"""
inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list
"""
(x, elem) = inputs
(out,) = outputs
out[0] = 0
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
......@@ -543,7 +556,8 @@ class Length(Op):
assert isinstance(x.type, TypedListType)
return Apply(self, [x], [T.scalar(dtype='int64')])
def perform(self, node, x, (out, )):
def perform(self, node, x, outputs):
(out,) = outputs
out[0] = numpy.asarray(len(x[0]), 'int64')
def __str__(self):
......@@ -593,7 +607,8 @@ class MakeList(Op):
return Apply(self, a2, [tl])
def perform(self, node, inputs, (out, )):
def perform(self, node, inputs, outputs):
(out,) = outputs
out[0] = list(inputs)
make_list = MakeList()
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论