提交 554cde1c authored 作者: abergeron's avatar abergeron

Merge pull request #2864 from dwf/tuple_params

Respect PEP3113 (no more tuple unpacking arguments)
......@@ -36,8 +36,9 @@ def memodict(f):
def make_depends():
@memodict
def depends((a, b)):
def depends(pair):
""" Returns True if a depends on b """
a, b = pair
return (any(bout in a.inputs for bout in b.outputs)
or any(depends((ainp.owner, b)) for ainp in a.inputs
if ainp.owner))
......
......@@ -84,7 +84,9 @@ class DotModulo(Op):
def make_node(self, A, s, m, A2, s2, m2):
return Apply(self, [A, s, m, A2, s2, m2], [s.type()])
def perform(self, node, (A, s, m, A2, s2, m2), (out, )):
def perform(self, node, inputs, outputs):
(A, s, m, A2, s2, m2) = inputs
(out,) = outputs
o1 = matVecModM(A, s, m)
o2 = matVecModM(A2, s2, m2)
out[0] = numpy.concatenate((o1, o2))
......@@ -92,7 +94,9 @@ class DotModulo(Op):
def c_code_cache_version(self):
return (6,)
def c_code(self, node, name, (_A, _s, _m, _A2, _s2, _m2), (_z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(_A, _s, _m, _A2, _s2, _m2) = inputs
(_z,) = outputs
return """
int osize = -1;
if (PyArray_NDIM(%(_A)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(A) != 2"); %(fail)s;}
......
......@@ -925,7 +925,9 @@ class UnaryScalarOp(ScalarOp):
amd_float32 = None
amd_float64 = None
def c_code_contiguous(self, node, name, (x, ), (z, ), sub):
def c_code_contiguous(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if (not theano.config.lib.amdlibm or
# We compare the dtype AND the broadcast flag
# as this function do not broadcast
......@@ -1008,7 +1010,9 @@ class LT(LogicalComparison):
# built-in < don't support complex
return numpy.less(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s < %(y)s);" % locals()
......@@ -1024,7 +1028,9 @@ class GT(LogicalComparison):
# built-in > don't support complex
return numpy.greater(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s > %(y)s);" % locals()
......@@ -1040,7 +1046,9 @@ class LE(LogicalComparison):
# built-in <= don't support complex
return numpy.less_equal(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s <= %(y)s);" % locals()
......@@ -1056,7 +1064,9 @@ class GE(LogicalComparison):
# built-in >= don't support complex
return numpy.greater_equal(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s >= %(y)s);" % locals()
......@@ -1071,7 +1081,9 @@ class EQ(LogicalComparison):
def impl(self, x, y):
return x == y
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s == %(y)s);" % locals()
......@@ -1086,7 +1098,9 @@ class NEQ(LogicalComparison):
def impl(self, x, y):
return x != y
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s != %(y)s);" % locals()
......@@ -1097,7 +1111,9 @@ class IsNan(FixedLogicalComparison):
def impl(self, x):
return numpy.isnan(x)
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = isnan(%(x)s);" % locals()
......@@ -1108,7 +1124,9 @@ class IsInf(FixedLogicalComparison):
def impl(self, x):
return numpy.isinf(x)
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError()
# Note that the C isinf returns -1 for -Inf and +1 for +Inf, while
......@@ -1136,7 +1154,9 @@ class InRange(LogicalComparison):
return False
return True
def c_code(self, node, name, (x, low, hi), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, low, hi) = inputs
(z,) = outputs
if self.openlow:
cmp1 = '>'
else:
......@@ -1165,7 +1185,9 @@ class InRange(LogicalComparison):
else:
return elem.zeros_like()
def grad(self, (x, low, hi), (gz, )):
def grad(self, inputs, gout):
(x, low, hi) = inputs
(gz,) = gout
grads = []
for elem in [x, low, hi]:
grads.append(get_grad(elem))
......@@ -1186,10 +1208,14 @@ class Switch(ScalarOp):
# backport
# return ift if cond else iff
def c_code(self, node, name, (cond, ift, iff), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(cond, ift, iff) = inputs
(z,) = outputs
return "%(z)s = %(cond)s ? %(ift)s : %(iff)s;" % locals()
def grad(self, (cond, ift, iff), (gz, )):
def grad(self, inputs, gout):
(cond, ift, iff) = inputs
(gz,) = gout
first_part = switch(cond, gz, 0.)
second_part = switch(cond, 0., gz)
......@@ -1205,7 +1231,8 @@ class Switch(ScalarOp):
return (condition_grad, first_part, second_part)
def output_types(self, (cond_t, ift_t, iff_t)):
def output_types(self, types):
(cond_t, ift_t, iff_t) = types
return upcast_out(ift_t, iff_t)
switch = Switch()
......@@ -1249,7 +1276,9 @@ class OR(BinaryBitOp):
def impl(self, x, y):
return x | y
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
return "%(z)s = (%(x)s | %(y)s);" % locals()
or_ = OR()
......@@ -1262,7 +1291,9 @@ class XOR(BinaryBitOp):
def impl(self, x, y):
return x ^ y
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
return "%(z)s = (%(x)s ^ %(y)s);" % locals()
xor = XOR()
......@@ -1275,7 +1306,9 @@ class AND(BinaryBitOp):
def impl(self, x, y):
return x & y
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
return "%(z)s = (%(x)s & %(y)s);" % locals()
and_ = AND()
......@@ -1284,7 +1317,9 @@ class Invert(UnaryBitOp):
def impl(self, x):
return ~x
def c_code(self, node, name, (x,), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
return "%(z)s = (~%(x)s);" % locals()
invert = Invert()
......@@ -1300,14 +1335,18 @@ class Maximum(BinaryScalarOp):
# The built-in max function don't support complex type
return numpy.maximum(*inputs)
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
if any([i.type in complex_types for i in node.inputs]):
raise NotImplementedError()
# Test for both y>x and x>=y to detect NaN
return ('%(z)s = ((%(y)s)>(%(x)s)? (%(y)s): '
'((%(x)s)>=(%(y)s)? (%(x)s): nan("")));' % locals())
def grad(self, (x, y), (gz, )):
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
if gz.type in complex_types:
# max is currently defined for complex_types,
# but the gradient for complex is not.
......@@ -1334,13 +1373,17 @@ class Minimum(BinaryScalarOp):
# The built-in min function don't support complex type
return numpy.minimum(*inputs)
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
if any([i.type in complex_types for i in node.inputs]):
raise NotImplementedError()
return ('%(z)s = ((%(y)s)<(%(x)s)? (%(y)s): '
'((%(x)s)<=(%(y)s)? (%(x)s): nan("")));' % locals())
def grad(self, (x, y), (gz, )):
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
if gz.type in complex_types:
# min is currently defined for complex_types,
# but the gradient for complex is not.
......@@ -1364,13 +1407,15 @@ class Add(ScalarOp):
def impl(self, *inputs):
return sum(inputs)
def c_code(self, node, name, inputs, (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(z,) = outputs
if not inputs:
return z + " = 0;"
else:
return z + " = " + " + ".join(inputs) + ";"
def grad(self, inputs, (gz, )):
def grad(self, inputs, gout):
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
if self(*inputs).type in discrete_types:
......@@ -1400,13 +1445,15 @@ class Mul(ScalarOp):
def impl(self, *inputs):
return numpy.product(inputs)
def c_code(self, node, name, inputs, (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(z,) = outputs
if not inputs:
return z + " = 1;"
else:
return z + " = " + " * ".join(inputs) + ";"
def grad(self, inputs, (gz, )):
def grad(self, inputs, gout):
(gz,) = gout
retval = []
# The following 3 lines verify that gz is complex when the
......@@ -1448,10 +1495,14 @@ class Sub(BinaryScalarOp):
def impl(self, x, y):
return x - y
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
return "%(z)s = %(x)s - %(y)s;" % locals()
def grad(self, (x, y), (gz, )):
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
......@@ -1528,8 +1579,10 @@ class TrueDiv(BinaryScalarOp):
else:
return x / y
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
# we generate good c code only when both are complex!
(x, y) = inputs
(z,) = outputs
if sum([node.inputs[0].type in complex_types,
node.inputs[1].type in complex_types]) == 1:
raise NotImplementedError('type not supported', type)
......@@ -1538,8 +1591,10 @@ class TrueDiv(BinaryScalarOp):
return "%(z)s = ((double)%(x)s) / %(y)s;" % locals()
return "%(z)s = %(x)s / %(y)s;" % locals()
def grad(self, (x, y), (gz, )):
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
......@@ -1578,7 +1633,9 @@ class IntDiv(BinaryScalarOp):
# of string formatting.
return "#define THEANO_MACRO_MOD(x,y) (x % y)"
def c_code(self, node, name, (x, y), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
t = node.inputs[0].type.upcast(*[i.type for i in node.inputs[1:]])
if t in imap(str, discrete_types):
x_div_y_pp = '(%(x)s / %(y)s)' % locals()
......@@ -1666,13 +1723,13 @@ class Mod(BinaryScalarOp):
# of string formatting.
return "#define THEANO_MACRO_MOD(x,y) (x % y)"
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
"""
We want the result to have the same sign as python, not the other
implementation of mod.
"""
# raise NotImplementedError("Unlike Python, C's modulo returns negative
# modulo on negative dividend (to implement)")
(x, y) = inputs
(z,) = outputs
t = node.inputs[0].type.upcast(*[i.type for i in node.inputs[1:]])
if (str(t) in imap(str, discrete_types) or
t in ['uint8', 'int8', 'uint16', 'int16'] or
......@@ -1716,7 +1773,9 @@ class Mod(BinaryScalarOp):
}
""") % locals()
def grad(self, (x, y), (gz, )):
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
z = self(x, y)
if z.type.dtype in discrete_types:
# The gradient does not flow in if the output is discrete
......@@ -1732,13 +1791,17 @@ class Pow(BinaryScalarOp):
def impl(self, x, y):
return x ** y
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
if (node.inputs[0].type in complex_types or
node.inputs[1].type in complex_types):
raise NotImplementedError('type not supported', type)
return "%(z)s = pow(%(x)s, %(y)s);" % locals()
def grad(self, (x, y), (gz, )):
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
......@@ -1753,7 +1816,9 @@ class Pow(BinaryScalarOp):
return (first_part, second_part)
def c_code_contiguous(self, node, name, (x, y), (z, ), sub):
def c_code_contiguous(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
if not theano.config.lib.amdlibm:
raise theano.gof.utils.MethodNotDefined()
......@@ -1807,10 +1872,14 @@ class Clip(ScalarOp):
else:
return x
def c_code(self, node, name, (x, min, max), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, min, max) = inputs
(z,) = outputs
return "%(z)s = %(x)s < %(min)s ? %(min)s : %(x)s > %(max)s ? %(max)s : %(x)s;" % locals()
def grad(self, (x, mn, mx), (gz, )):
def grad(self, inputs, gout):
(x, mn, mx) = inputs
(gz,) = gout
assert gz.type not in complex_types
gx = ((x >= mn) & (x <= mx)) * gz
gmn = (x < mn) * gz
......@@ -1834,7 +1903,9 @@ class Second(BinaryScalarOp):
def impl(self, x, y):
return y
def c_code(self, node, name, (x, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x, y) = inputs
(z,) = outputs
return "%(z)s = %(y)s;" % locals()
def connection_pattern(self, node):
......@@ -1844,8 +1915,10 @@ class Second(BinaryScalarOp):
return [[False], [True]]
def grad(self, (x, y), (gz, )):
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
if y.type in continuous_types:
# x is disconnected because the elements of x are not used
return DisconnectedType()(), gz
......@@ -1863,10 +1936,14 @@ class Identity(UnaryScalarOp):
def impl(self, input):
return input
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
return "%(z)s = %(x)s;" % locals()
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in continuous_types:
return gz,
else:
......@@ -1889,10 +1966,14 @@ class Cast(UnaryScalarOp):
def impl(self, input):
return self.ctor(input)
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
return "%s = (%s)%s;" % (z, node.outputs[0].type.dtype_specs()[1], x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if self.o_type in continuous_types:
return [gz]
else:
......@@ -1962,7 +2043,9 @@ class Abs(UnaryScalarOp):
def impl(self, x):
return numpy.abs(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if self(x).type in discrete_types:
if x.type in discrete_types:
return [x.zeros_like(dtype=theano.config.floatX)]
......@@ -1971,7 +2054,9 @@ class Abs(UnaryScalarOp):
return gz * x / abs(x), # formula works for complex and real
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
type = node.inputs[0].type
if type in int_types:
return "%(z)s = abs(%(x)s);" % locals()
......@@ -1988,8 +2073,9 @@ class Sgn(UnaryScalarOp):
# casting to output type is handled by filter
return numpy.sign(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
......@@ -1997,9 +2083,11 @@ class Sgn(UnaryScalarOp):
return [rval]
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
# casting is done by compiler
# TODO: use copysign
(x,) = inputs
(z,) = outputs
type = node.inputs[0].type
if type in float_types:
return "%(z)s = (%(x)s >= 0) ? (%(x)s == 0) ? 0.0 : 1.0 : -1.0;" % locals()
......@@ -2020,7 +2108,9 @@ class Ceil(UnaryScalarOp):
def impl(self, x):
return numpy.ceil(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
......@@ -2028,7 +2118,9 @@ class Ceil(UnaryScalarOp):
return [rval]
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
return "%(z)s = ceil(%(x)s);" % locals()
ceil = Ceil(same_out_nocomplex, name='ceil')
......@@ -2037,7 +2129,9 @@ class Floor(UnaryScalarOp):
def impl(self, x):
return numpy.floor(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
......@@ -2045,7 +2139,9 @@ class Floor(UnaryScalarOp):
return [rval]
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
return "%(z)s = floor(%(x)s);" % locals()
floor = Floor(same_out_nocomplex, name='floor')
......@@ -2054,10 +2150,14 @@ class Trunc(UnaryScalarOp):
def impl(self, x):
return numpy.trunc(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
return [x.zeros_like().astype(theano.config.floatX)]
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
return "%(z)s = %(x)s >= 0? floor(%(x)s): -floor(-%(x)s);" % locals()
trunc = Trunc(same_out_nocomplex, name='trunc')
......@@ -2072,7 +2172,9 @@ class RoundHalfToEven(UnaryScalarOp):
def impl(self, x):
return numpy.round(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
......@@ -2080,7 +2182,9 @@ class RoundHalfToEven(UnaryScalarOp):
return [rval]
def c_code___(self, node, name, (x, ), (z, ), sub):
def c_code___(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
typ = node.outputs[0].type.dtype
if not typ in ['float32', 'float64']:
Exception("The output should be float32 or float64")
......@@ -2164,7 +2268,9 @@ class RoundHalfAwayFromZero(UnaryScalarOp):
def impl(self, x):
return round_half_away_from_zero_vec(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
......@@ -2172,7 +2278,9 @@ class RoundHalfAwayFromZero(UnaryScalarOp):
return [rval]
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.outputs[0].type.dtype in ['float32', 'float64']:
return "%(z)s = round(%(x)s);" % locals()
else:
......@@ -2184,7 +2292,9 @@ class Neg(UnaryScalarOp):
def impl(self, x):
return -x
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if self(x).type in discrete_types:
if x.type in discrete_types:
return [x.zeros_like(dtype=theano.config.floatX)]
......@@ -2193,7 +2303,9 @@ class Neg(UnaryScalarOp):
return -gz,
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
return "%(z)s = -%(x)s;" % locals()
neg = Neg(same_out, name='neg')
......@@ -2212,7 +2324,9 @@ class Inv(UnaryScalarOp):
def impl(self, x):
return numpy.float32(1.0) / x
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2223,7 +2337,9 @@ class Inv(UnaryScalarOp):
return -gz / (x * x),
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = 1.0 / %(x)s;" % locals()
......@@ -2243,7 +2359,9 @@ class Log(UnaryScalarOp):
return numpy.log(x, sig='f')
return numpy.log(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2254,10 +2372,12 @@ class Log(UnaryScalarOp):
return gz / x,
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
# todo: the version using log2 seems to be very slightly faster
# on some machines for some reason, check if it's worth switching
# return "%(z)s = log2(%(x)s) * 0.69314718055994529;" % locals()
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log(%(x)s);" % locals()
......@@ -2277,7 +2397,9 @@ class Log2(UnaryScalarOp):
return numpy.log2(x, sig='f')
return numpy.log2(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2288,7 +2410,9 @@ class Log2(UnaryScalarOp):
return gz / (x * math.log(2.0)),
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log2(%(x)s);" % locals()
......@@ -2308,7 +2432,9 @@ class Log10(UnaryScalarOp):
return numpy.log10(x, sig='f')
return numpy.log10(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2319,7 +2445,9 @@ class Log10(UnaryScalarOp):
return gz / (x * numpy.log(10.0)),
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log10(%(x)s);" % locals()
......@@ -2336,7 +2464,9 @@ class Log1p(UnaryScalarOp):
return numpy.log1p(x, sig='f')
return numpy.log1p(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2347,7 +2477,9 @@ class Log1p(UnaryScalarOp):
return [gz / (1 + x)]
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log1p(%(x)s);" % locals()
......@@ -2366,7 +2498,9 @@ class Exp(UnaryScalarOp):
return numpy.exp(x, sig='f')
return numpy.exp(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2377,7 +2511,9 @@ class Exp(UnaryScalarOp):
return gz * exp(x),
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = exp(%(x)s);" % locals()
......@@ -2393,7 +2529,9 @@ class Exp2(UnaryScalarOp):
return numpy.exp2(x, sig='f')
return numpy.exp2(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2404,7 +2542,9 @@ class Exp2(UnaryScalarOp):
return gz * exp2(x) * log(numpy.cast[x.type](2)),
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = exp2(%(x)s);" % locals()
......@@ -2420,7 +2560,9 @@ class Expm1(UnaryScalarOp):
return numpy.expm1(x, sig='f')
return numpy.expm1(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2431,7 +2573,9 @@ class Expm1(UnaryScalarOp):
return gz * exp(x),
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = expm1(%(x)s);" % locals()
......@@ -2445,7 +2589,9 @@ class Sqr(UnaryScalarOp):
def impl(self, x):
return x * x
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2456,7 +2602,9 @@ class Sqr(UnaryScalarOp):
return gz * x * 2,
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
return "%(z)s = %(x)s * %(x)s;" % locals()
sqr = Sqr(same_out, name='sqr')
......@@ -2470,7 +2618,9 @@ class Sqrt(UnaryScalarOp):
return numpy.sqrt(x, sig='f')
return numpy.sqrt(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2481,7 +2631,9 @@ class Sqrt(UnaryScalarOp):
return (gz * 0.5) / sqrt(x),
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = sqrt(%(x)s);" % locals()
......@@ -2497,7 +2649,9 @@ class Deg2Rad(UnaryScalarOp):
return numpy.deg2rad(x, sig='f')
return numpy.deg2rad(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2508,7 +2662,9 @@ class Deg2Rad(UnaryScalarOp):
return gz * numpy.asarray(numpy.pi / 180, gz.type),
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = %(x)s * (M_PI / 180.0);" % locals()
......@@ -2524,7 +2680,9 @@ class Rad2Deg(UnaryScalarOp):
return numpy.rad2deg(x, sig='f')
return numpy.rad2deg(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2535,7 +2693,9 @@ class Rad2Deg(UnaryScalarOp):
return gz * numpy.asarray(180. / numpy.pi, gz.type),
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = %(x)s * (180.0 / M_PI);" % locals()
......@@ -2554,7 +2714,9 @@ class Cos(UnaryScalarOp):
return numpy.cos(x, sig='f')
return numpy.cos(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2565,7 +2727,9 @@ class Cos(UnaryScalarOp):
return -gz * sin(x),
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = cos(%(x)s);" % locals()
......@@ -2581,7 +2745,9 @@ class ArcCos(UnaryScalarOp):
return numpy.arccos(x, sig='f')
return numpy.arccos(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2592,7 +2758,9 @@ class ArcCos(UnaryScalarOp):
return - gz / sqrt(numpy.cast[x.type](1) - sqr(x)),
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = acos(%(x)s);" % locals()
......@@ -2611,7 +2779,9 @@ class Sin(UnaryScalarOp):
return numpy.sin(x, sig='f')
return numpy.sin(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2622,7 +2792,9 @@ class Sin(UnaryScalarOp):
return gz * cos(x),
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = sin(%(x)s);" % locals()
......@@ -2638,7 +2810,9 @@ class ArcSin(UnaryScalarOp):
return numpy.arcsin(x, sig='f')
return numpy.arcsin(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2649,7 +2823,9 @@ class ArcSin(UnaryScalarOp):
return gz / sqrt(numpy.cast[x.type](1) - sqr(x)),
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = asin(%(x)s);" % locals()
......@@ -2665,7 +2841,9 @@ class Tan(UnaryScalarOp):
return numpy.tan(x, sig='f')
return numpy.tan(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2676,7 +2854,9 @@ class Tan(UnaryScalarOp):
return gz / sqr(cos(x)),
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = tan(%(x)s);" % locals()
......@@ -2692,7 +2872,9 @@ class ArcTan(UnaryScalarOp):
return numpy.arctan(x, sig='f')
return numpy.arctan(x)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2703,7 +2885,9 @@ class ArcTan(UnaryScalarOp):
return gz / (numpy.cast[x.type](1) + sqr(x)),
def c_code(self, node, name, (x,), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = atan(%(x)s);" % locals()
......@@ -2721,7 +2905,9 @@ class ArcTan2(BinaryScalarOp):
return numpy.arctan2(y, x, sig='f')
return numpy.arctan2(y, x)
def grad(self, (y, x), (gz,)):
def grad(self, inputs, gout):
(y, x) = inputs
(gz,) = gout
if gz.type in complex_types:
raise NotImplementedError()
else:
......@@ -2741,7 +2927,9 @@ class ArcTan2(BinaryScalarOp):
return [gz * x / (sqr(x) + sqr(y)),
gz * neg(y) / (sqr(x) + sqr(y))]
def c_code(self, node, name, (y, x), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
(y, x) = inputs
(z,) = outputs
if (node.inputs[0].type in complex_types or
node.inputs[1].type in complex_types):
raise NotImplementedError('type not supported', type)
......@@ -2761,7 +2949,9 @@ class Cosh(UnaryScalarOp):
return numpy.cosh(x, sig='f')
return numpy.cosh(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2772,7 +2962,9 @@ class Cosh(UnaryScalarOp):
return gz * sinh(x),
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = cosh(%(x)s);" % locals()
......@@ -2788,7 +2980,9 @@ class ArcCosh(UnaryScalarOp):
return numpy.arccosh(x, sig='f')
return numpy.arccosh(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2799,7 +2993,9 @@ class ArcCosh(UnaryScalarOp):
return gz / sqrt(sqr(x) - numpy.cast[x.type](1)),
def c_code(self, node, name, inputs, outputs, sub):
    """Return C code applying libm's ``acosh`` elementwise.

    Complex inputs are not supported by the C implementation.
    """
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (z,) = outputs
    if node.inputs[0].type in complex_types:
        raise NotImplementedError('type not supported', type)
    return "%(z)s = acosh(%(x)s);" % locals()
......@@ -2818,7 +3014,9 @@ class Sinh(UnaryScalarOp):
return numpy.sinh(x, sig='f')
return numpy.sinh(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2829,7 +3027,9 @@ class Sinh(UnaryScalarOp):
return gz * cosh(x),
def c_code(self, node, name, inputs, outputs, sub):
    """Return C code applying libm's ``sinh`` elementwise.

    Complex inputs are not supported by the C implementation.
    """
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (z,) = outputs
    if node.inputs[0].type in complex_types:
        raise NotImplementedError('type not supported', type)
    return "%(z)s = sinh(%(x)s);" % locals()
......@@ -2845,7 +3045,9 @@ class ArcSinh(UnaryScalarOp):
return numpy.arcsinh(x, sig='f')
return numpy.arcsinh(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2856,7 +3058,9 @@ class ArcSinh(UnaryScalarOp):
return gz / sqrt(sqr(x) + numpy.cast[x.type](1)),
def c_code(self, node, name, inputs, outputs, sub):
    """Return C code applying libm's ``asinh`` elementwise.

    Complex inputs are not supported by the C implementation.
    """
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (z,) = outputs
    if node.inputs[0].type in complex_types:
        raise NotImplementedError('type not supported', type)
    return "%(z)s = asinh(%(x)s);" % locals()
......@@ -2876,7 +3080,9 @@ class Tanh(UnaryScalarOp):
return numpy.tanh(x, sig='f')
return numpy.tanh(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2887,7 +3093,9 @@ class Tanh(UnaryScalarOp):
return gz * (1 - sqr(tanh(x))),
def c_code(self, node, name, inputs, outputs, sub):
    """Return C code applying libm's ``tanh`` elementwise.

    Complex inputs are not supported by the C implementation.
    """
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (z,) = outputs
    if node.inputs[0].type in complex_types:
        raise NotImplementedError('type not supported', type)
    return "%(z)s = tanh(%(x)s);" % locals()
......@@ -2903,7 +3111,9 @@ class ArcTanh(UnaryScalarOp):
return numpy.arctanh(x, sig='f')
return numpy.arctanh(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -2914,7 +3124,9 @@ class ArcTanh(UnaryScalarOp):
return gz / (numpy.cast[x.type](1) - sqr(x)),
def c_code(self, node, name, inputs, outputs, sub):
    """Return C code applying libm's ``atanh`` elementwise.

    Complex inputs are not supported by the C implementation.
    """
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (z,) = outputs
    if node.inputs[0].type in complex_types:
        raise NotImplementedError('type not supported', type)
    return "%(z)s = atanh(%(x)s);" % locals()
......@@ -2926,7 +3138,9 @@ class Real(UnaryScalarOp):
def impl(self, x):
return numpy.real(x)
def grad(self, inputs, gout):
    """Gradient of the real-part op: re-inject gz as the real component."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (gz,) = gout
    return [complex(gz, 0)]
real = Real(real_out, name='real')
......@@ -2936,7 +3150,9 @@ class Imag(UnaryScalarOp):
def impl(self, x):
return numpy.imag(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
return [complex(0, gz)]
elif x.type in float_types:
......@@ -2951,7 +3167,7 @@ class Angle(UnaryScalarOp):
def impl(self, x):
return numpy.angle(x)
def grad(self, (c, ), (gtheta, )):
def grad(self, inputs, gout):
# y = x.imag
# r = sqrt(y**2 + x.real**2)
# g = y/r
......@@ -2962,6 +3178,8 @@ class Angle(UnaryScalarOp):
# else:
# theta = -numpy.arcsin(g)+numpy.pi
(c,) = inputs
(gtheta,) = gout
x = real(c)
y = imag(c)
r = abs(c)
......@@ -2996,7 +3214,9 @@ class Complex(BinaryScalarOp):
def impl(self, x, y):
return numpy.complex(x, y)
def grad(self, inputs, gout):
    """Gradients w.r.t. the real (x) and imaginary (y) inputs.

    Each component of gz is cast back to the dtype of its input.
    """
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (gz,) = gout
    return [cast(real(gz), x.type.dtype),
            cast(imag(gz), y.type.dtype)]
complex = Complex(name='complex')
......@@ -3023,7 +3243,9 @@ class ComplexFromPolar(BinaryScalarOp):
else:
return numpy.complex128(numpy.complex(x, y))
def grad(self, inputs, gout):
    """Gradients w.r.t. the polar inputs: modulus r and angle theta."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (r, theta) = inputs
    (gz,) = gout
    gr = gz * complex_from_polar(1, theta)
    gtheta = gz * complex_from_polar(r, -theta)
    return [gr, gtheta]
......
......@@ -171,7 +171,9 @@ class Gamma(UnaryScalarOp):
else:
super(Gamma, self).impl(x)
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.type in complex_types:
raise NotImplementedError()
if self(x).type in discrete_types:
......@@ -182,7 +184,9 @@ class Gamma(UnaryScalarOp):
return gz * gamma(x) * psi(x),
def c_code(self, node, name, inputs, outputs, sub):
    """Return C code using libm's ``tgamma``; floating point only."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (z,) = outputs
    if node.inputs[0].type in float_types:
        return """%(z)s = tgamma(%(x)s);""" % locals()
    raise NotImplementedError('only floating point is implemented')
......
......@@ -490,7 +490,8 @@ class CSMProperties(gof.Op):
return gof.Apply(self, [csm],
[data, tensor.ivector(), tensor.ivector(), tensor.ivector()])
def perform(self, node, (csm,), out):
def perform(self, node, inputs, out):
(csm,) = inputs
if self.kmap is None:
out[0][0] = csm.data
else:
......@@ -503,7 +504,7 @@ class CSMProperties(gof.Op):
out[2][0] = theano._asarray(csm.indptr, dtype='int32')
out[3][0] = theano._asarray(csm.shape, dtype='int32')
def grad(self, (csm,), g):
def grad(self, inputs, g):
# g[1:] is all integers, so their Jacobian in this op
# is 0. We thus don't need to worry about what their values
......@@ -513,6 +514,7 @@ class CSMProperties(gof.Op):
# any gradient anywhere. but we know that at least one of
# g[1:] is connected, or this grad method wouldn't have been
# called, so we should report zeros
(csm,) = inputs
if isinstance(g[0].type, DisconnectedType):
return [csm.zeros_like()]
......@@ -644,8 +646,10 @@ class CSM(gof.Op):
[SparseType(dtype=data.type.dtype,
format=self.format).make_variable()])
def perform(self, node, (data, indices, indptr, shape), (out,)):
def perform(self, node, inputs, outputs):
# for efficiency, if remap does nothing, then do not apply it
(data, indices, indptr, shape) = inputs
(out,) = outputs
if self.kmap is not None:
data = data[self.kmap]
......@@ -672,7 +676,9 @@ class CSM(gof.Op):
def connection_pattern(self, node):
return [[True], [False], [False], [False]]
def grad(self, (x_data, x_indices, x_indptr, x_shape), (g_out,)):
def grad(self, inputs, gout):
(x_data, x_indices, x_indptr, x_shape) = inputs
(g_out,) = gout
g_data, g_indices, g_indptr, g_shape = csm_properties(g_out)
# unpack the data vector and wrap it as a 1d TensorType
g_data = csm_grad(self.kmap)(x_data, x_indices, x_indptr, x_shape,
......@@ -773,8 +779,10 @@ class CSMGrad(gof.op.Op):
return gof.Apply(self, [x_data, x_indices, x_indptr, x_shape,
g_data, g_indices, g_indptr, g_shape], [gout_data])
def perform(self, node, (x_data, x_indices, x_indptr, x_shape,
g_data, g_indices, g_indptr, g_shape), (g_out,)):
def perform(self, node, inputs, outputs):
(x_data, x_indices, x_indptr, x_shape,
g_data, g_indices, g_indptr, g_shape) = inputs
(g_out,) = outputs
if len(x_indptr) - 1 == x_shape[0]:
sp_dim = x_shape[1]
else:
......@@ -826,7 +834,9 @@ class Cast(gof.op.Op):
self, [x],
[SparseType(dtype=self.out_type, format=x.format).make_variable()])
def perform(self, node, inputs, outputs):
    """Cast the sparse input to ``self.out_type``."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (out,) = outputs
    assert _is_sparse(x)
    out[0] = x.astype(self.out_type)
......@@ -848,7 +858,7 @@ class Cast(gof.op.Op):
return [gz]
else:
return [Cast(inputs[0].dtype)(gz)]
def infer_shape(self, node, ins_shapes):
    # Casting changes dtype only; shapes pass through unchanged.
    return ins_shapes
......@@ -909,7 +919,9 @@ class DenseFromSparse(gof.op.Op):
broadcastable=(False, False)
).make_variable()])
def perform(self, node, (x, ), (out, )):
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
if _is_dense(x):
print >> sys.stderr, (
"WARNING: You just called DenseFromSparse on a dense matrix."
......@@ -919,7 +931,9 @@ class DenseFromSparse(gof.op.Op):
out[0] = x.toarray()
assert _is_dense(out[0])
def grad(self, (x, ), (gz, )):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if self.sparse_grad:
left = sp_ones_like(x)
right = gz
......@@ -989,10 +1003,14 @@ class SparseFromDense(gof.op.Op):
format=self.format
).make_variable()])
def perform(self, node, inputs, outputs):
    """Wrap the dense input in the sparse matrix class for ``self.format``."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (out,) = outputs
    out[0] = SparseType.format_cls[self.format](x)
def grad(self, inputs, gout):
    """Gradient is densified and rebroadcast to the input's pattern."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (gz,) = gout
    gx = dense_from_sparse(gz)
    gx = tensor.patternbroadcast(gx, x.broadcastable)
    return gx,
......@@ -1035,7 +1053,8 @@ class GetItemList(gof.op.Op):
return gof.Apply(self, [x, ind], [x.type()])
def perform(self, node, inp, (out, )):
def perform(self, node, inp, outputs):
(out,) = outputs
x = inp[0]
indices = inp[1]
assert _is_sparse(x)
......@@ -1051,7 +1070,7 @@ class GetItemList(gof.op.Op):
return self.__class__.__name__
get_item_list = GetItemList()
"""Select row of sparse matrix,
"""Select row of sparse matrix,
returning them as a new sparse matrix.
:param x: Sparse matrix.
......@@ -1090,7 +1109,8 @@ class GetItemListGrad(gof.op.Op):
return gof.Apply(self, [x, ind, gz], [x.type()])
def perform(self, node, inp, (out, )):
def perform(self, node, inp, outputs):
(out,) = outputs
x = inp[0]
indices = inp[1]
gz = inp[2]
......@@ -1129,7 +1149,8 @@ class GetItem2Lists(gof.op.Op):
return gof.Apply(self, [x, ind1, ind2],
[theano.tensor.vector()])
def perform(self, node, inp, (out, )):
def perform(self, node, inp, outputs):
(out,) = outputs
x = inp[0]
ind1 = inp[1]
ind2 = inp[2]
......@@ -1184,7 +1205,8 @@ class GetItem2ListsGrad(gof.op.Op):
return gof.Apply(self, [x, ind1, ind2, gz], [x.type()])
def perform(self, node, inp, (out, )):
def perform(self, node, inp, outputs):
(out,) = outputs
x = inp[0]
ind1 = inp[1]
ind2 = inp[2]
......@@ -1292,7 +1314,9 @@ class GetItem2d(gof.op.Op):
return gof.Apply(self, input_op, [x.type()])
def perform(self, node, inputs, outputs):
    """Slice the sparse matrix along both dimensions."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, start1, stop1, step1, start2, stop2, step2) = inputs
    (out,) = outputs
    assert _is_sparse(x)
    out[0] = x[start1:stop1:step1, start2:stop2:step2]
......@@ -1364,7 +1388,9 @@ class GetItemScalar(gof.op.Op):
return gof.Apply(self, input_op, [tensor.scalar(dtype=x.dtype)])
def perform(self, node, inputs, outputs):
    """Fetch a single element of the sparse matrix as a 0-d array."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, ind1, ind2) = inputs
    (out,) = outputs
    assert _is_sparse(x)
    out[0] = theano._asarray(x[ind1, ind2], x.dtype)
......@@ -1413,11 +1439,15 @@ class Transpose(gof.op.Op):
format=self.format_map[x.type.format]
).make_variable()])
def perform(self, node, inputs, outputs):
    """Transpose the sparse input."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (out,) = outputs
    assert _is_sparse(x)
    out[0] = x.transpose()
def grad(self, inputs, gout):
    """Gradient of transpose is the transpose of the output gradient."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (gz,) = gout
    assert _is_sparse_variable(x) and _is_sparse_variable(gz)
    return transpose(gz),
......@@ -1454,11 +1484,15 @@ class Neg(gof.op.Op):
assert x.format in ["csr", "csc"]
return gof.Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
    """Elementwise negation of the sparse input."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (out,) = outputs
    assert _is_sparse(x)
    out[0] = -x
def grad(self, inputs, gout):
    """Gradient of negation is the negated output gradient."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (gz,) = gout
    assert _is_sparse_variable(x) and _is_sparse_variable(gz)
    return -gz,
......@@ -1500,7 +1534,9 @@ class ColScaleCSC(gof.op.Op):
raise ValueError('x was not a csc matrix')
return gof.Apply(self, [x, s], [x.type()])
def perform(self, node, (x, s), (z,)):
def perform(self, node, inputs, outputs):
(x, s) = inputs
(z,) = outputs
M, N = x.shape
assert x.format == 'csc'
assert s.shape == (N, )
......@@ -1512,7 +1548,9 @@ class ColScaleCSC(gof.op.Op):
z[0] = y
def grad(self, inputs, gout):
    """d/dx: scale gz's columns by s; d/ds: sum x*gz over rows (axis=0)."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, s) = inputs
    (gz,) = gout
    return [col_scale(gz, s), sp_sum(x * gz, axis=0)]
def infer_shape(self, node, ins_shapes):
......@@ -1549,10 +1587,12 @@ class RowScaleCSC(gof.op.Op):
assert x.format in ["csr", "csc"]
return gof.Apply(self, [x, s], [x.type()])
def perform(self, node, (x, s), (z,)):
def perform(self, node, inputs, outputs):
(x, s) = inputs
(z,) = outputs
M, N = x.shape
assert x.format == 'csc'
assert s.shape == (M, )
assert s.shape == (M,)
indices = x.indices
indptr = x.indptr
......@@ -1565,7 +1605,9 @@ class RowScaleCSC(gof.op.Op):
z[0] = scipy.sparse.csc_matrix((y_data, indices, indptr), (M, N))
def grad(self, inputs, gout):
    """d/dx: scale gz's rows by s; d/ds: sum x*gz over columns (axis=1)."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, s) = inputs
    (gz,) = gout
    return [row_scale(gz, s), sp_sum(x * gz, axis=1)]
def infer_shape(self, node, ins_shapes):
......@@ -1650,13 +1692,17 @@ class SpSum(gof.op.Op):
z = tensor.TensorType(broadcastable=b, dtype=x.dtype)()
return gof.Apply(self, [x], [z])
def perform(self, node, inputs, outputs):
    """Sum the sparse input, over all elements or along ``self.axis``.

    Axis-wise sums are flattened to 1-d via ``ravel`` because sparse
    ``sum`` returns a matrix.
    """
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (z,) = outputs
    if self.axis is None:
        z[0] = numpy.asarray(x.sum())
    else:
        z[0] = numpy.asarray(x.sum(self.axis)).ravel()
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.dtype not in continuous_dtypes:
return [x.zeros_like(dtype=theano.config.floatX)]
if self.structured:
......@@ -1738,13 +1784,17 @@ class Diag(gof.op.Op):
return gof.Apply(self, [x], [tensor.tensor(broadcastable=(False,),
dtype=x.dtype)])
def perform(self, node, inputs, outputs):
    """Extract the main diagonal of a square matrix as a 1-d array.

    Raises ValueError for non-square input.
    """
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (z,) = outputs
    N, M = x.shape
    if N != M:
        raise ValueError('Diag only apply on square matrix')
    z[0] = x.diagonal()
def grad(self, inputs, gout):
    """Gradient of diagonal extraction: embed gz back as a diagonal matrix."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (gz,) = gout
    return [square_diagonal(gz)]
def infer_shape(self, nodes, shapes):
......@@ -1782,7 +1832,8 @@ class SquareDiagonal(gof.op.Op):
return gof.Apply(self, [diag],
[SparseType(dtype=diag.dtype, format='csc')()])
def perform(self, node, inputs, (z,)):
def perform(self, node, inputs, outputs):
(z,) = outputs
diag, o_shape = inputs[0], inputs[0].shape * 2
N = len(diag)
......@@ -1793,7 +1844,8 @@ class SquareDiagonal(gof.op.Op):
z[0] = scipy.sparse.csc_matrix(tup, copy=True)
def grad(self, inputs, gout):
    """Gradient of building a diagonal matrix: take gz's diagonal."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (gz,) = gout
    return [diag(gz)]
def infer_shape(self, nodes, shapes):
......@@ -1831,7 +1883,9 @@ class EnsureSortedIndices(gof.op.Op):
assert x.format in ["csr", "csc"]
return gof.Apply(self, [x], [x.type()])
def perform(self, node, (x, ), (z, )):
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
if self.inplace:
z[0] = x.sort_indices()
else:
......@@ -1906,12 +1960,16 @@ class AddSS(gof.op.Op):
format=x.type.format
).make_variable()])
def perform(self, node, inputs, outputs):
    """Elementwise sparse + sparse addition; shapes must match."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (out,) = outputs
    assert _is_sparse(x) and _is_sparse(y)
    assert x.shape == y.shape
    out[0] = x + y
def grad(self, inputs, gout):
    """Addition distributes the output gradient to both operands."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (gz,) = gout
    assert _is_sparse_variable(x) and _is_sparse_variable(y)
    assert _is_sparse_variable(gz)
    return gz, gz
......@@ -1943,14 +2001,17 @@ class AddSSData(gof.op.Op):
[SparseType(dtype=x.type.dtype,
format=x.type.format).make_variable()])
def perform(self, node, inputs, outputs):
    """Add y's data vector to a copy of x; sparsity patterns must match."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (out,) = outputs
    assert _is_sparse(x) and _is_sparse(y)
    assert x.shape == y.shape
    assert x.data.shape == y.data.shape
    out[0] = x.copy()
    out[0].data += y.data
def grad(self, inputs, (gz, )):
def grad(self, inputs, gout):
(gz,) = gout
is_continuous = [(i.dtype in continuous_dtypes)
for i in inputs]
derivative = {True: gz, False: None}
......@@ -2006,14 +2067,18 @@ class AddSD(gof.op.Op):
broadcastable=y.type.broadcastable
).make_variable()])
def perform(self, node, inputs, outputs):
    """Sparse + dense addition; the result is dense."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (out,) = outputs
    assert _is_dense(y)
    # The asarray is needed as in some case, this return a
    # numpy.matrixlib.defmatrix.matrix object and not an ndarray.
    out[0] = theano._asarray(x + y, dtype=node.outputs[0].type.dtype)
def grad(self, inputs, gout):
    """Sparse input receives a structured gradient; dense input gets gz."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (gz,) = gout
    assert _is_sparse_variable(x) and _is_dense_variable(y)
    assert _is_dense_variable(gz)
    return sp_ones_like(x) * gz, gz
......@@ -2045,12 +2110,16 @@ class StructuredAddSV(gof.op.Op):
[SparseType(dtype=x.type.dtype,
format=x.type.format).make_variable()])
def perform(self, node, inputs, outputs):
    """Add vector y to x only at x's nonzero positions (structured add)."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (out,) = outputs
    assert _is_sparse(x) and not _is_sparse(y)
    assert x.shape[1] == y.shape[0]
    out[0] = x.__class__(x + (x.toarray() != 0) * y)
def grad(self, inputs, gout):
    """x receives gz structurally; y receives the column-sum of gz."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (gz,) = gout
    assert _is_sparse_variable(x) and not _is_sparse_variable(y)
    assert _is_sparse_variable(gz)
    return gz, sp_sum(gz, axis=0, sparse_grad=True)
......@@ -2156,7 +2225,9 @@ class MulSS(gof.op.Op):
format=x.type.format
)()])
def perform(self, node, (x, y), (out, )):
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x) and _is_sparse(y)
assert len(x.shape) == 2
assert y.shape == x.shape
......@@ -2164,7 +2235,9 @@ class MulSS(gof.op.Op):
# x * y calls dot...
out[0] = x.multiply(y)
def grad(self, inputs, gout):
    """Product rule for the elementwise sparse * sparse multiply."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (gz,) = gout
    return y * gz, x * gz
def infer_shape(self, node, shapes):
......@@ -2202,7 +2275,9 @@ class MulSD(gof.op.Op):
format=x.type.format)()
return gof.Apply(self, [x, y], [out])
def perform(self, node, (x, y), (out, )):
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x) and _is_dense(y)
if len(y.shape) == 0:
out_dtype = node.outputs[0].dtype
......@@ -2258,7 +2333,9 @@ class MulSD(gof.op.Op):
), x.format
out[0] = type(x)(x.toarray() * y)
def grad(self, inputs, gout):
    """Product rule; the dense operand's gradient is densified."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (gz,) = gout
    assert _is_sparse_variable(x) and _is_dense_variable(y)
    assert _is_sparse_variable(gz)
    return y * gz, dense_from_sparse(x * gz)
......@@ -2291,12 +2368,16 @@ class MulSV(gof.op.Op):
[SparseType(dtype=x.type.dtype,
format=x.type.format).make_variable()])
def perform(self, node, inputs, outputs):
    """Multiply the sparse matrix elementwise by the (broadcast) vector y."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (out,) = outputs
    assert _is_sparse(x) and not _is_sparse(y)
    assert x.shape[1] == y.shape[0]
    out[0] = x.__class__(x.toarray() * y)
def grad(self, (x, y), (gz,)):
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
assert _is_sparse_variable(x) and _is_dense_variable(y)
assert _is_sparse_variable(gz)
......@@ -2402,7 +2483,9 @@ class __ComparisonOpSS(gof.op.Op):
[SparseType(dtype='uint8',
format=x.type.format).make_variable()])
def perform(self, node, inputs, outputs):
    """Apply ``self.comparison`` elementwise; result stored as uint8 sparse."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x, y) = inputs
    (out,) = outputs
    assert _is_sparse(x) and _is_sparse(y)
    assert x.shape == y.shape
    out[0] = self.comparison(x, y).astype('uint8')
......@@ -2444,7 +2527,9 @@ class __ComparisonOpSD(gof.op.Op):
[SparseType(dtype='uint8',
format=x.type.format).make_variable()])
def perform(self, node, (x, y), (out, )):
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x)
assert x.shape == y.shape
assert _is_dense(y)
......@@ -2682,7 +2767,8 @@ class HStack(gof.op.Op):
self, var,
[SparseType(dtype=self.dtype, format=self.format).make_variable()])
def perform(self, node, block, (out, )):
def perform(self, node, block, outputs):
(out,) = outputs
for b in block:
assert _is_sparse(b)
out[0] = scipy.sparse.hstack(block, format=self.format,
......@@ -2692,7 +2778,8 @@ class HStack(gof.op.Op):
if out[0].dtype != self.dtype:
out[0] = out[0].astype(self.dtype)
def grad(self, inputs, (gz, )):
def grad(self, inputs, gout):
(gz,) = gout
is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes)
for i in range(len(inputs))]
......@@ -2749,7 +2836,8 @@ def hstack(blocks, format=None, dtype=None):
class VStack(HStack):
# See doc in instance of this Op or function after this class definition.
def perform(self, node, block, (out, )):
def perform(self, node, block, outputs):
(out,) = outputs
for b in block:
assert _is_sparse(b)
out[0] = scipy.sparse.vstack(block, format=self.format,
......@@ -2759,7 +2847,8 @@ class VStack(HStack):
if out[0].dtype != self.dtype:
out[0] = out[0].astype(self.dtype)
def grad(self, inputs, (gz, )):
def grad(self, inputs, gout):
(gz,) = gout
is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes)
for i in range(len(inputs))]
......@@ -2836,7 +2925,9 @@ class Remove0(gof.Op):
assert x.format in ["csr", "csc"]
return gof.Apply(self, [x], [x.type()])
def perform(self, node, (x,), (z,)):
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
if self.inplace:
c = x
else:
......@@ -2844,7 +2935,9 @@ class Remove0(gof.Op):
c.eliminate_zeros()
z[0] = c
def grad(self, inputs, gout):
    """Removing explicit zeros does not change values: pass gz through."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (x,) = inputs
    (gz,) = gout
    return [gz]
def infer_shape(self, node, i0_shapes):
......@@ -3157,7 +3250,9 @@ class TrueDot(gof.op.Op):
shape, copy=False)
out[0] = rval
def grad(self, (x, y), (gz, )):
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
assert _is_sparse_variable(gz)
assert _is_sparse_variable(x)
......@@ -3246,7 +3341,9 @@ class StructuredDot(gof.Op):
[tensor.tensor(dtype_out,
(False, b.type.broadcastable[1]))])
def perform(self, node, (a, b), (out,)):
def perform(self, node, inputs, outputs):
(a, b) = inputs
(out,) = outputs
if a.shape[1] != b.shape[0]:
raise ValueError('shape mismatch in StructuredDot.perform',
(a.shape, b.shape))
......@@ -3287,10 +3384,12 @@ class StructuredDot(gof.Op):
# theano._asarray function documentation.
out[0] = theano._asarray(variable, str(variable.dtype))
def grad(self, inputs, gout):
    """Gradients of structured dot.

    a is sparse, b is dense, g_out is dense:
        ga = g_out x b.T (structured), gb = a.T x g_out.
    """
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (a, b) = inputs
    (g_out,) = gout
    return [structured_dot_grad(a, b, g_out), structured_dot(a.T, g_out)]
def infer_shape(self, node, shapes):
......@@ -3367,7 +3466,9 @@ class StructuredDotGradCSC(gof.Op):
return gof.Apply(self, [a_indices, a_indptr, b, g_ab],
[tensor.tensor(g_ab.dtype, (False,))])
def perform(self, node, (a_indices, a_indptr, b, g_ab), (out,)):
def perform(self, node, inputs, outputs):
(a_indices, a_indptr, b, g_ab) = inputs
(out,) = outputs
g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
for j in xrange(len(a_indptr) - 1):
ind0 = a_indptr[j]
......@@ -3386,8 +3487,10 @@ class StructuredDotGradCSC(gof.Op):
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, (_indices, _indptr, _d, _g), (_zout, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(_indices, _indptr, _d, _g) = inputs
(_zout,) = outputs
if node.inputs[2].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
......@@ -3501,7 +3604,9 @@ class StructuredDotGradCSR(gof.Op):
return gof.Apply(self, [a_indices, a_indptr, b, g_ab],
[tensor.tensor(b.dtype, (False,))])
def perform(self, node, (a_indices, a_indptr, b, g_ab), (out,)):
def perform(self, node, inputs, outputs):
(a_indices, a_indptr, b, g_ab) = inputs
(out,) = outputs
g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
for i in xrange(len(a_indptr) - 1): # loop over rows
ind0 = a_indptr[i]
......@@ -3522,8 +3627,10 @@ class StructuredDotGradCSR(gof.Op):
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, (_indices, _indptr, _d, _g), (_zout, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(_indices, _indptr, _d, _g) = inputs
(_zout,) = outputs
if node.inputs[2].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
......@@ -3651,7 +3758,9 @@ class SamplingDot(gof.op.Op):
return gof.Apply(self, [x, y, p], [p.type()])
def perform(self, node, (x, y, p), (out,)):
def perform(self, node, inputs, outputs):
(x, y, p) = inputs
(out,) = outputs
if _is_sparse(x):
raise TypeError(x)
......@@ -3663,7 +3772,9 @@ class SamplingDot(gof.op.Op):
out[0] = p.__class__(p.multiply(numpy.dot(x, y.T)))
def grad(self, (x, y, p), (gz,)):
def grad(self, inputs, gout):
(x, y, p) = inputs
(gz,) = gout
rval = [
dot(p * gz, y),
dot((p * gz).T, x),
......@@ -3787,7 +3898,9 @@ class Dot(gof.op.Op):
out[0] = theano._asarray(rval, dtype=node.outputs[0].dtype)
def grad(self, (x, y), (gz,)):
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
assert _is_sparse_variable(x) or _is_sparse_variable(y)
rval = []
......@@ -3876,7 +3989,9 @@ class Usmm(gof.op.Op):
[tensor.tensor(dtype=dtype_out,
broadcastable=(False, False))])
def perform(self, node, (alpha, x, y, z), (out, )):
def perform(self, node, inputs, outputs):
(alpha, x, y, z) = inputs
(out,) = outputs
x_is_sparse = _is_sparse(x)
y_is_sparse = _is_sparse(y)
......
......@@ -105,7 +105,9 @@ class AddSD_ccode(gof.op.Op):
[data, indices, indptr, y],
[out])
def c_code(self, node, name, (_data, _indices, _indptr, y), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(_data, _indices, _indptr, y) = inputs
(z,) = outputs
inplace = int(self.inplace)
format = {'csc': 0, 'csr': 1}[self.format]
out_typenum = node.outputs[0].type.dtype_specs()[2]
......@@ -236,7 +238,9 @@ class StructuredDotCSC(gof.Op):
[tensor.tensor(dtype_out, (False, b.type.broadcastable[1]))])
return r
def perform(self, node, (a_val, a_ind, a_ptr, a_nrows, b), (out,)):
def perform(self, node, inputs, outputs):
(a_val, a_ind, a_ptr, a_nrows, b) = inputs
(out,) = outputs
a = scipy.sparse.csc_matrix((a_val, a_ind, a_ptr),
(a_nrows, b.shape[0]),
copy=False)
......@@ -244,7 +248,7 @@ class StructuredDotCSC(gof.Op):
out[0] = theano._asarray(a * b, dtype=node.outputs[0].type.dtype)
assert _is_dense(out[0]) # scipy 0.7 automatically converts to dense
def c_code(self, node, name, (a_val, a_ind, a_ptr, a_nrows, b), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
# C-implementation of the dot product of the sparse matrix A and matrix
# B.
# @param a_val: non-zero values of the sparse matrix
......@@ -257,6 +261,8 @@ class StructuredDotCSC(gof.Op):
# @param z: return value
# @param sub: TODO, not too sure, something to do with weave probably
(a_val, a_ind, a_ptr, a_nrows, b) = inputs
(z,) = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
if node.inputs[4].type.dtype in ('complex64', 'complex128'):
......@@ -426,7 +432,9 @@ class StructuredDotCSR(gof.Op):
b.type.broadcastable[1]))])
return r
def perform(self, node, (a_val, a_ind, a_ptr, b), (out,)):
def perform(self, node, inputs, outputs):
(a_val, a_ind, a_ptr, b) = inputs
(out,) = outputs
a = scipy.sparse.csr_matrix((a_val, a_ind, a_ptr),
(len(a_ptr) - 1, b.shape[0]),
copy=True) # use view_map before setting this to False
......@@ -435,7 +443,7 @@ class StructuredDotCSR(gof.Op):
# scipy 0.7 automatically converts to dense, but not .6 sometimes
assert _is_dense(out[0])
def c_code(self, node, name, (a_val, a_ind, a_ptr, b), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
"""
C-implementation of the dot product of the sparse matrix A and matrix
B.
......@@ -449,7 +457,8 @@ class StructuredDotCSR(gof.Op):
@param z: return value
@param sub: TODO, not too sure, something to do with weave probably
"""
# retrieve dtype number
(a_val, a_ind, a_ptr, b) = inputs
(z,) = outputs
typenum_z = tensor.TensorType(self.dtype_out, []).dtype_specs()[2]
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
......@@ -890,9 +899,11 @@ class CSMGradC(gof.Op):
return gof.Apply(self, [a_val, a_ind, a_ptr, a_dim,
b_val, b_ind, b_ptr, b_dim], [b_val.type()])
def c_code(self, node, name, (a_val, a_ind, a_ptr, a_dim,
b_val, b_ind, b_ptr, b_dim), (z,), sub):
def c_code(self, node, name, inputs, outputs, sub):
# retrieve dtype number
(a_val, a_ind, a_ptr, a_dim,
b_val, b_ind, b_ptr, b_dim) = inputs
(z,) = outputs
typenum_z = node.outputs[0].type.dtype_specs()[2]
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
......@@ -1047,9 +1058,10 @@ class MulSDCSC(gof.Op):
# def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):
# return NotImplementedError()
def c_code(self, node, name, (_data, _indices, _indptr, _b,),
(_zout, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(_data, _indices, _indptr, _b,) = inputs
(_zout,) = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
......@@ -1163,9 +1175,10 @@ class MulSDCSR(gof.Op):
# def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):
# return NotImplemented()
def c_code(self, node, name, (_data, _indices, _indptr, _b,),
(_zout, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
(_data, _indices, _indptr, _b,) = inputs
(_zout,) = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
......
......@@ -42,18 +42,20 @@ class ConvolutionIndices(Op):
"""
@staticmethod
def sparse_eval(inshp, kshp, nkern, strides=(1, 1), mode='valid'):
    """Sparse-filtering variant: delegate to ``evaluate`` with ws=False."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (dx, dy) = strides
    return convolution_indices.evaluate(inshp, kshp, (dx, dy),
                                        nkern, mode=mode, ws=False)
@staticmethod
def conv_eval(inshp, kshp, strides=(1, 1), mode='valid'):
    """Convolution variant: delegate to ``evaluate`` with ws=True."""
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (dx, dy) = strides
    return convolution_indices.evaluate(inshp, kshp, (dx, dy),
                                        mode=mode, ws=True)
# img_shape and ker_shape are (height,width)
@staticmethod
def evaluate(inshp, kshp, (dx, dy)=(1, 1), nkern=1, mode='valid', ws=True):
def evaluate(inshp, kshp, strides=(1, 1), nkern=1, mode='valid', ws=True):
"""Build a sparse matrix which can be used for performing...
* convolution: in this case, the dot product of this matrix
with the input images will generate a stack of images
......@@ -79,6 +81,7 @@ class ConvolutionIndices(Op):
:returns: the structure of a sparse matrix, and the logical dimensions
of the image which will be the result of filtering.
"""
(dx, dy) = strides
N = numpy
# inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
......@@ -251,8 +254,9 @@ class ConvolutionIndices(Op):
return rval
def perform(self, node, (inshp, kshp),\
(out_indices, out_indptr, spmat_shape)):
def perform(self, node, inputs, outputs):
(inshp, kshp) = inputs
(out_indices, out_indptr, spmat_shape) = outputs
indices, indptr, spmatshp, outshp = self.evaluate(inshp, kshp)
out_indices[0] = indices
out_indptr[0] = indptr
......
......@@ -71,7 +71,9 @@ class Poisson(gof.op.Op):
x = as_sparse_variable(x)
return gof.Apply(self, [x], [x.type()])
def perform(self, node, (x, ), (out, )):
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
assert _is_sparse(x)
assert x.format in ["csr", "csc"]
out[0] = x.copy()
......@@ -130,7 +132,9 @@ class Binomial(gof.op.Op):
[SparseType(dtype=self.dtype,
format=self.format).make_variable()])
def perform(self, node, inputs, outputs):
    """Draw binomial samples of the given shape and store them sparsely.

    NOTE(review): sampling uses numpy's global RNG state — presumably
    seeding is handled by the caller; confirm before relying on
    reproducibility.
    """
    # Stale pre-PEP3113 duplicate signature line removed (diff artifact).
    (n, p, shape) = inputs
    (out,) = outputs
    binomial = numpy.random.binomial(n, p, size=shape)
    csx_matrix = getattr(scipy.sparse, self.format + '_matrix')
    out[0] = csx_matrix(binomial, dtype=self.dtype)
......@@ -138,7 +142,9 @@ class Binomial(gof.op.Op):
def connection_pattern(self, node):
return [[True], [True], [False]]
def grad(self, (n, p, shape, ), (gz,)):
def grad(self, inputs, gout):
(n, p, shape) = inputs
(gz,) = gout
comment_n = "No gradient exists for the number of samples in class\
Binomial of theano/sparse/sandbox/sp2.py"
comment_p = "No gradient exists for the prob of success in class\
......@@ -196,7 +202,9 @@ class Multinomial(gof.op.Op):
return gof.Apply(self, [n, p], [p.type()])
def perform(self, node, (n, p), (out, )):
def perform(self, node, inputs, outputs):
(n, p) = inputs
(out,) = outputs
assert _is_sparse(p)
if p.format != 'csr':
......
......@@ -186,11 +186,15 @@ class T_verify_grad_sparse(unittest.TestCase):
x = as_sparse_variable(x)
return gof.Apply(self, [x], [x.type()])
def perform(self, node, (x, ), (out, )):
def perform(self, node, inputs, outputs):
    """Negate the sparse input and place the result in the output cell."""
    mat, = inputs
    out, = outputs
    assert _is_sparse(mat)
    out[0] = -mat
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
assert _is_sparse_variable(x) and _is_sparse_variable(gz)
if self.structured:
return sp_ones_like(x) * dense_from_sparse(gz),
......
......@@ -5159,10 +5159,14 @@ class Diagonal(Op):
return Apply(self, [x], [tensor(dtype=x.dtype,
broadcastable=[False] * (x.ndim - 1))])
def perform(self, node, (x,), (z,)):
def perform(self, node, inputs, outputs):
    """Extract the diagonal selected by ``self.offset`` / ``self.axis1`` /
    ``self.axis2`` from the single input array."""
    arr, = inputs
    out, = outputs
    out[0] = arr.diagonal(self.offset, self.axis1, self.axis2)
def grad(self, (x,), (gz,)):
def grad(self, inputs, gout):
    """No gradient is defined for this op; defer to grad_not_implemented."""
    x, = inputs
    gz, = gout  # output gradient is unused here
    return [grad_not_implemented(self, 0, x)]
def infer_shape(self, node, shapes):
......@@ -5207,10 +5211,12 @@ class Diag(Op):
return Apply(self, [diag], [matrix(dtype=diag.dtype)])
def perform(self, node, inputs, (z,)):
def perform(self, node, inputs, outputs):
    """Build a square matrix with ``inputs[0]`` along its main diagonal."""
    out, = outputs
    out[0] = numpy.diag(inputs[0])
def grad(self, inputs, (gz,)):
def grad(self, inputs, gout):
    """Gradient of diag: the diagonal of the output gradient matrix."""
    gz, = gout
    return [diagonal(gz)]
def infer_shape(self, nodes, shapes):
......@@ -5435,7 +5441,8 @@ class Choose(Op):
o = TensorType(choice.dtype, bcast)
return Apply(self, [a, choice], [o()])
def perform(self, node, inputs, (z, )):
def perform(self, node, inputs, outputs):
(z,) = outputs
a = inputs[0]
choice = inputs[1]
# TODO reuse out?
......
......@@ -593,7 +593,9 @@ class RepeatOp(theano.Op):
return [[True], [False]]
def grad(self, (x, repeats), (gz, )):
def grad(self, inputs, gout):
(x, repeats) = inputs
(gz,) = gout
if repeats.ndim == 0:
if self.axis is None:
axis = x.ndim
......
......@@ -42,7 +42,9 @@ class MatrixPinv(Op):
assert x.ndim == 2
return Apply(self, [x], [x.type()])
def perform(self, node, (x,), (z, )):
def perform(self, node, inputs, outputs):
    """Compute the Moore-Penrose pseudo-inverse of the input matrix."""
    mat, = inputs
    out, = outputs
    # numpy may upcast (e.g. float32 -> float64); cast back to the input
    # dtype so the output matches the declared type.
    out[0] = numpy.linalg.pinv(mat).astype(mat.dtype)
pinv = MatrixPinv()
......@@ -69,7 +71,9 @@ class MatrixInverse(Op):
assert x.ndim == 2
return Apply(self, [x], [x.type()])
def perform(self, node, (x,), (z, )):
def perform(self, node, inputs, outputs):
    """Compute the matrix inverse of the single input."""
    mat, = inputs
    out, = outputs
    # Cast back: numpy.linalg.inv may return a wider float dtype.
    out[0] = numpy.linalg.inv(mat).astype(mat.dtype)
def grad(self, inputs, g_outputs):
......@@ -149,7 +153,9 @@ class AllocDiag(Op):
def grad(self, inputs, g_outputs):
return [extract_diag(g_outputs[0])]
def perform(self, node, (x,), (z,)):
def perform(self, node, inputs, outputs):
    """Allocate a square matrix with the input vector on its diagonal.

    :raises TypeError: if the input is not one-dimensional.
    """
    x, = inputs
    z, = outputs
    if x.ndim != 1:
        # Previously this raised TypeError(x), which stringifies the whole
        # array as the message and hides the actual problem (wrong rank).
        raise TypeError('AllocDiag: expected a 1-dimensional input, got '
                        '%d dimensions' % x.ndim)
    z[0] = numpy.diag(x)
......@@ -264,7 +270,9 @@ class Det(Op):
o = theano.tensor.scalar(dtype=x.dtype)
return Apply(self, [x], [o])
def perform(self, node, (x,), (z, )):
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
try:
z[0] = numpy.asarray(numpy.linalg.det(x), dtype=x.dtype)
except Exception:
......@@ -298,7 +306,9 @@ class Eig(Op):
v = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [w, v])
def perform(self, node, (x,), (w, v)):
def perform(self, node, inputs, outputs):
    """Run ``self._numop`` (an eig-style routine returning a (w, v) pair)
    on the input and store both results cast to the input dtype."""
    mat, = inputs
    w, v = outputs
    eigvals, eigvecs = self._numop(mat)
    w[0] = eigvals.astype(mat.dtype)
    v[0] = eigvecs.astype(mat.dtype)
def infer_shape(self, node, shapes):
......@@ -333,7 +343,9 @@ class Eigh(Eig):
v = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [w, v])
def perform(self, node, (x,), (w, v)):
def perform(self, node, inputs, outputs):
    """Run ``self._numop(x, self.UPLO)`` (an eigh-style routine) and store
    the eigenvalues and eigenvectors in the two output cells."""
    mat, = inputs
    w, v = outputs
    eigvals, eigvecs = self._numop(mat, self.UPLO)
    w[0] = eigvals
    v[0] = eigvecs
def grad(self, inputs, g_outputs):
......@@ -466,7 +478,9 @@ class QRFull(Op):
return Apply(self, [x], [q, r])
def perform(self, node, (x,), (q, r)):
def perform(self, node, inputs, outputs):
    """Factor the input matrix with ``self._numop`` into the two outputs
    (Q and R storage cells), using mode ``self.mode``."""
    mat, = inputs
    q, r = outputs
    assert mat.ndim == 2, "The input of qr function should be a matrix."
    q[0], r[0] = self._numop(mat, self.mode)
......@@ -489,7 +503,9 @@ class QRIncomplete(Op):
q = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [q])
def perform(self, node, (x,), (q,)):
def perform(self, node, inputs, outputs):
    """Run the single-result QR variant ``self._numop(x, self.mode)`` and
    store its one return value in the single output cell."""
    mat, = inputs
    q, = outputs
    assert mat.ndim == 2, "The input of qr function should be a matrix."
    q[0] = self._numop(mat, self.mode)
......@@ -594,7 +610,9 @@ class SVD(Op):
v = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [w, u, v])
def perform(self, node, (x,), (w, u, v)):
def perform(self, node, inputs, outputs):
(x,) = inputs
(w, u, v) = outputs
assert x.ndim == 2, "The input of svd function should be a matrix."
w[0], u[0], v[0] = self._numop(x,
self.full_matrices,
......
......@@ -232,7 +232,8 @@ class Eigvalsh(Op):
w = theano.tensor.vector(dtype=out_dtype)
return Apply(self, [a, b], [w])
def perform(self, node, inputs, (w,)):
def perform(self, node, inputs, outputs):
(w,) = outputs
if len(inputs) == 2:
w[0] = scipy.linalg.eigvalsh(a=inputs[0], b=inputs[1], lower=self.lower)
else:
......@@ -288,7 +289,8 @@ class EigvalshGrad(Op):
out2 = theano.tensor.matrix(dtype=out_dtype)
return Apply(self, [a, b, gw], [out1, out2])
def perform(self, node, (a, b, gw), outputs):
def perform(self, node, inputs, outputs):
(a, b, gw) = inputs
w, v = scipy.linalg.eigh(a, b, lower=self.lower)
gA = v.dot(numpy.diag(gw).dot(v.T))
gB = - v.dot(numpy.diag(gw*w).dot(v.T))
......@@ -353,10 +355,14 @@ class Expm(Op):
expm = theano.tensor.matrix(dtype=A.dtype)
return Apply(self, [A, ], [expm, ])
def perform(self, node, (A,), (expm,)):
def perform(self, node, inputs, outputs):
    """Compute the matrix exponential of the single input matrix."""
    mat, = inputs
    out_store, = outputs
    out_store[0] = scipy.linalg.expm(mat)
def grad(self, (A,), (g_out,)):
def grad(self, inputs, outputs):
    """Route the gradient of expm through the ExpmGrad op.

    NOTE(review): the second parameter is named ``outputs`` but carries the
    single output gradient — name kept for signature compatibility.
    """
    mat, = inputs
    g_out, = outputs
    return [ExpmGrad()(mat, g_out)]
def infer_shape(self, node, shapes):
......@@ -378,10 +384,12 @@ class ExpmGrad(Op):
def infer_shape(self, node, shapes):
return [shapes[0]]
def perform(self, node, (A, gA), (out,)):
def perform(self, node, inputs, outputs):
# Kalbfleisch and Lawless, J. Am. Stat. Assoc. 80 (1985) Equation 3.4
# Kind of... You need to do some algebra from there to arrive at
# this expression.
(A, gA) = inputs
(out,) = outputs
w, V = scipy.linalg.eig(A, right=True)
U = scipy.linalg.inv(V).T
......
......@@ -1233,7 +1233,9 @@ def test_not_implemented_elemwise_grad():
def impl(self, n, x):
return x * n
def grad(self, (n, x), (gz,)):
def grad(self, inputs, gout):
    """Gradient of ``x * n``: undefined w.r.t. ``n``, equals ``n`` w.r.t. ``x``."""
    n, x = inputs
    gz, = gout
    # d(x * n)/dx = n; the derivative w.r.t. n is deliberately left
    # unimplemented (this op exists to test that code path).
    return [theano.gradient.grad_not_implemented(self, 0, n),
            gz * n]
......
......@@ -1421,7 +1421,9 @@ class TimesN(theano.scalar.basic.UnaryScalarOp):
float %(nodename)s_timesn(float x) { return x * %(n)s; }
""" % locals()
def c_code(self, node, name, (x, ), (z, ), sub):
def c_code(self, node, name, inputs, outputs, sub):
    """Emit C code applying the per-node ``<name>_timesn`` helper to the
    single input, assigning the result to the single output."""
    x, = inputs
    z, = outputs
    return "%(z)s = %(name)s_timesn(%(x)s);" % {'x': x, 'z': z, 'name': name}
......
......@@ -80,7 +80,9 @@ class GetItem(Op):
else:
raise TypeError('Expected scalar or slice as index.')
def perform(self, node, (x, index), (out, )):
def perform(self, node, inputs, outputs):
    """Index the typed list: a slice yields a sublist, anything else is
    coerced to ``int`` and yields a single element."""
    lst, index = inputs
    out, = outputs
    if isinstance(index, slice):
        out[0] = lst[index]
    else:
        # presumably the index can arrive as a numeric scalar (e.g. a 0-d
        # array), hence the int() coercion — matches the original code.
        out[0] = lst[int(index)]
......@@ -137,7 +139,9 @@ class Append(Op):
assert x.ttype == toAppend.type, (x.ttype, toAppend.type)
return Apply(self, [x, toAppend], [x.type()])
def perform(self, node, (x, toAppend), (out, )):
def perform(self, node, inputs, outputs):
(x, toAppend) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
......@@ -209,7 +213,9 @@ class Extend(Op):
assert x.type == toAppend.type
return Apply(self, [x, toAppend], [x.type()])
def perform(self, node, (x, toAppend), (out, )):
def perform(self, node, inputs, outputs):
(x, toAppend) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
......@@ -292,7 +298,9 @@ class Insert(Op):
assert isinstance(index, T.TensorVariable) and index.ndim == 0
return Apply(self, [x, index, toInsert], [x.type()])
def perform(self, node, (x, index, toInsert), (out, )):
def perform(self, node, inputs, outputs):
(x, index, toInsert) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
......@@ -360,8 +368,9 @@ class Remove(Op):
assert x.ttype == toRemove.type
return Apply(self, [x, toRemove], [x.type()])
def perform(self, node, (x, toRemove), (out, )):
def perform(self, node, inputs, outputs):
(x, toRemove) = inputs
(out,) = outputs
if not self.inplace:
out[0] = list(x)
else:
......@@ -413,8 +422,8 @@ class Reverse(Op):
assert isinstance(x.type, TypedListType)
return Apply(self, [x], [x.type()])
def perform(self, node, inp, (out, )):
def perform(self, node, inp, outputs):
(out,) = outputs
if not self.inplace:
out[0] = list(inp[0])
else:
......@@ -470,12 +479,14 @@ class Index(Op):
assert x.ttype == elem.type
return Apply(self, [x, elem], [T.scalar()])
def perform(self, node, (x, elem), (out, )):
def perform(self, node, inputs, outputs):
"""
inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list
"""
(x, elem) = inputs
(out,) = outputs
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
out[0] = numpy.asarray(y, dtype=theano.config.floatX)
......@@ -500,12 +511,14 @@ class Count(Op):
assert x.ttype == elem.type
return Apply(self, [x, elem], [T.scalar()])
def perform(self, node, (x, elem), (out, )):
def perform(self, node, inputs, outputs):
"""
inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list
"""
(x, elem) = inputs
(out,) = outputs
out[0] = 0
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
......@@ -543,7 +556,8 @@ class Length(Op):
assert isinstance(x.type, TypedListType)
return Apply(self, [x], [T.scalar(dtype='int64')])
def perform(self, node, x, (out, )):
def perform(self, node, x, outputs):
    """Store the length of the single input list as an int64 scalar."""
    out, = outputs
    out[0] = numpy.asarray(len(x[0]), 'int64')
def __str__(self):
......@@ -593,7 +607,8 @@ class MakeList(Op):
return Apply(self, a2, [tl])
def perform(self, node, inputs, (out, )):
def perform(self, node, inputs, outputs):
    """Pack every input value into a fresh Python list."""
    result_storage, = outputs
    result_storage[0] = list(inputs)
make_list = MakeList()
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论