Commit b8775273 authored by Iban Harlouchet

__props__ for theano/tensor/basic.py

Parent f4edcc59
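This commit replaces hand-written __eq__, __hash__, and __str__ methods on the Ops in theano/tensor/basic.py with declarative __props__ tuples: when an Op lists the attributes that determine its behaviour in __props__, Theano's Op base class derives __eq__ and __hash__ from them (plus a default __str__), so instances with equal props compare equal and can be merged during graph optimization. The following is a minimal standalone sketch of that mechanism, using a hypothetical PropsOp base class for illustration; it is not Theano's actual implementation.

# Minimal standalone sketch of the __props__ mechanism (assumption: the
# simplified PropsOp below stands in for Theano's Op base class; this is
# an illustration, not Theano's real code).


class PropsOp(object):
    # Subclasses list the attribute names that define the Op's behaviour.
    __props__ = ()

    def _props(self):
        # Values of the declared properties, in declaration order.
        return tuple(getattr(self, name) for name in self.__props__)

    def __eq__(self, other):
        # Same class and same property values -> interchangeable Ops.
        return (type(self) == type(other) and
                self._props() == other._props())

    def __hash__(self):
        # Kept consistent with __eq__.
        return hash((type(self), self._props()))

    def __str__(self):
        if self.__props__:
            props = ", ".join("%s=%r" % (name, getattr(self, name))
                              for name in self.__props__)
            return "%s{%s}" % (self.__class__.__name__, props)
        return self.__class__.__name__


class Reshape(PropsOp):
    __props__ = ("ndim",)

    def __init__(self, ndim):
        self.ndim = ndim


assert Reshape(2) == Reshape(2)    # equal props -> equal Ops
assert hash(Reshape(2)) == hash(Reshape(2))
assert Reshape(2) != Reshape(3)
print(Reshape(2))                  # prints: Reshape{ndim=2}

With this in place, an Op that previously spelled out type-and-attribute comparisons by hand only needs the one-line __props__ declaration, which is exactly the transformation the hunks below apply.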
@@ -1001,6 +1001,9 @@ _scal_elemwise = _scal_elemwise_with_nfunc(None, None, None)
 #########################
 class TensorFromScalar(Op):
+    __props__ = ()
+
     def make_node(self, s):
         assert isinstance(s.type, scal.Scalar)
         return Apply(self,
@@ -1032,18 +1035,12 @@ class TensorFromScalar(Op):
         raise NotImplementedError("grad not implemented for complex dtypes")

-    def __str__(self):
-        return self.__class__.__name__
-

 tensor_from_scalar = TensorFromScalar()


 class ScalarFromTensor(Op):
-    def __eq__(self, other):
-        return type(self) == type(other)
-
-    def __hash__(self):
-        return hash(type(self))
+    __props__ = ()

     def make_node(self, t):
         assert isinstance(t.type, TensorType)
@@ -1071,9 +1068,6 @@ class ScalarFromTensor(Op):
             return [None]
         return self.make_node(*eval_points).outputs

-    def __str__(self):
-        return self.__class__.__name__
-
     def c_code(self, node, name, inputs, outputs, sub):
         x, = inputs
         z, = outputs
@@ -1196,12 +1190,7 @@ class MaxAndArgmax(Op):
     nin = 2  # tensor, axis
     nout = 2  # max val, max idx
     E_axis = 'invalid axis'
-
-    def __eq__(self, other):
-        return type(self) == type(other)
-
-    def __hash__(self):
-        return hash(type(self))
+    __props__ = ()

     def make_node(self, x, axis=None):
         x = _as_tensor_variable(x)
@@ -1423,10 +1412,6 @@ class MaxAndArgmax(Op):
         g_x = eq(xmax_pad, x) * g_max_pad
         return g_x, axis_grad

-    def __str__(self):
-        return self.__class__.__name__
-

 _max_and_argmax = MaxAndArgmax()
@@ -2329,6 +2314,9 @@ def nonzero_values(a):
 class Tri(gof.Op):
+    __props__ = ("dtype",)
+
     def __init__(self, dtype=None):
         if dtype is None:
             dtype = config.floatX
@@ -2355,12 +2343,6 @@ class Tri(gof.Op):
     def grad(self, inp, grads):
         return [grad_undefined(self, i, inp[i]) for i in xrange(3)]

-    def __eq__(self, other):
-        return type(self) == type(other) and self.dtype == other.dtype
-
-    def __hash__(self):
-        return hash(self.dtype) ^ hash(type(self))
-

 def tri(N, M=None, k=0, dtype=None):
     """
@@ -2437,6 +2419,9 @@ def triu(m, k=0):
 class Eye(gof.Op):
+    __props__ = ("dtype", )
+
     def __init__(self, dtype=None):
         if dtype is None:
             dtype = config.floatX
@@ -2989,6 +2974,7 @@ class Default(gof.Op):
     have exactly the same type.
     """
     view_map = {0: [0]}
+    __props__ = ()

     def make_node(self, x, default):
         x, default = as_tensor_variable(x), as_tensor_variable(default)
@@ -3282,20 +3268,14 @@ class Split(Op):
     """A Split instance will have this many outputs, and require that
     the splits argument to `perform` have exactly this many elements.
     """
+    __props__ = ("len_splits",)

     def __init__(self, len_splits):
         self.len_splits = int(len_splits)

-    def __eq__(self, other):
-        return (type(self) == type(other) and
-                self.len_splits == other.len_splits)
-
-    def __str__(self):
-        return self.__class__.__name__ + "{%s}" % self.len_splits
-
-    def __hash__(self):
-        return hash(Split) ^ self.len_splits
-
     def make_node(self, x, axis, splits):
         """WRITEME"""
         x = as_tensor_variable(x)
@@ -3509,15 +3489,7 @@ class Join(Op):
         join(0, x, u)  # WRONG: joined tensors must have the same rank
     """
     check_input = False
-
-    def __eq__(self, other):
-        return type(self) == type(other)
-
-    def __hash__(self):
-        return hash(type(self))
-
-    def __str__(self):
-        return '%s' % (self.__class__.__name__)
+    __props__ = ()

     def make_node(self, *axis_and_tensors):
         """
......@@ -3971,19 +3943,13 @@ class Reshape(Op):
_f16_ok = True
check_input = False
__props__ = ("ndim",)
# name does not participate because it doesn't affect computations
def __init__(self, ndim, name=None):
self.ndim = ndim
self.name = name
def __eq__(self, other):
# .name does not participate because it doesn't affect computations
return (type(other) is type(self)) and (other.ndim == self.ndim)
def __hash__(self):
# .name does not participate because it doesn't affect computations
return hash(type(self)) ^ hash(self.ndim)
def __str__(self):
return '%s{%s}' % (self.__class__.__name__, self.ndim)
......@@ -4172,16 +4138,11 @@ class Flatten(Op):
view_map = {0: [0]}
check_input = False
__props__ = ("outdim",)
def __init__(self, outdim=1):
self.outdim = int(outdim)
def __eq__(self, other):
return type(self) == type(other) and self.outdim == other.outdim
def __hash__(self):
return hashtype(self) ^ hash(self.outdim)
def __str__(self):
return '%s{%s}' % (self.__class__.__name__, self.outdim)
......@@ -4356,15 +4317,11 @@ class Tile(Op):
:see: `numpy.tile
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_
"""
__props__ = ("ndim",)
def __init__(self, ndim):
self.ndim = ndim
def __eq__(self, other):
return (type(other) is Tile) and (other.ndim == self.ndim)
def __hash__(self):
return hash(Tile) ^ hash(self.ndim)
def __str__(self):
return self.__class__.__name__ + "{ndim=%d}" % self.ndim
......@@ -4465,19 +4422,11 @@ class ARange(Op):
Parameters and behaviour are the same as numpy.arange().
"""
__props__ = ("dtype",)
def __init__(self, dtype):
self.dtype = dtype
def __eq__(self, other):
return type(self) == type(other) and self.dtype == other.dtype
def __hash__(self):
return hash(self.dtype)
def __str__(self):
return self.__class__.__name__
def make_node(self, start, stop, step):
start, stop, step = map(as_tensor_variable, (start, stop, step))
assert start.ndim == 0
@@ -4633,6 +4582,8 @@ class _nd_grid(object):
     >>> b[1].eval()
     array([[0, 1, 2, 3]], dtype=int8)
     """
+    __props__ = ("sparse",)
+
     def __init__(self, sparse=False):
         self.sparse = sparse
@@ -4693,6 +4644,7 @@ class PermuteRowElements(Op):
     If the "inverse" argument is True, the Op will perform the inverse
     permutation instead.
     """
+    __props__ = ()

     def make_node(self, x, y, inverse):
         x = as_tensor_variable(x)
@@ -4900,12 +4852,7 @@ class Dot(Op):
     tensor.blas)
     """
-    def __eq__(self, other):
-        return type(self) == type(other)
-
-    def __hash__(self):
-        return hash(type(self))
+    __props__ = ()

     # the rationale for Dot22 is related to getting GEMM Ops into the
     # graph. See Dot22 in tensor.blas for details.
@@ -5076,9 +5023,6 @@ class Dot(Op):
             return [xshp[:-1] + yshp[-1:]]
         raise NotImplementedError()

-    def __str__(self):
-        return "dot"
-

 _dot = Dot()
 pprint.assign(_dot, printing.OperatorPrinter(printing.special['middle_dot'],
               -1, 'left'))
@@ -5369,6 +5313,7 @@ class Diagonal(Op):
     :return: A vector representing the diagonal elements.
     """
+    __props__ = ("offset", "axis1", "axis2")

     def __init__(self, offset=0, axis1=0, axis2=1):
         if numpy_diagonal_return_view:
@@ -5377,16 +5322,6 @@ class Diagonal(Op):
         self.axis1 = axis1
         self.axis2 = axis2

-    def __eq__(self, other):
-        return (type(self) == type(other) and
-                self.offset == other.offset and
-                self.axis1 == other.axis1 and
-                self.axis2 == other.axis2)
-
-    def __hash__(self):
-        return (hash(type(self)) ^ hash(self.offset) ^
-                hash(self.axis1) ^ hash(self.axis2))
-
     def make_node(self, x):
         x = as_tensor_variable(x)
         assert x.ndim >= 2
@@ -5420,9 +5355,6 @@ class Diagonal(Op):
             out_shape.append(diag_size)
         return [tuple(out_shape)]

-    def __str__(self):
-        return self.__class__.__name__
-

 def diagonal(a, offset=0, axis1=0, axis2=1):
     if (offset, axis1, axis2) == (0, 0, 1):
@@ -5432,11 +5364,7 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
 class Diag(Op):
-    def __eq__(self, other):
-        return type(self) == type(other)
-
-    def __hash__(self):
-        return hash(type(self))
+    __props__ = ()

     def make_node(self, diag):
         diag = as_tensor_variable(diag)
@@ -5456,9 +5384,6 @@ class Diag(Op):
     def infer_shape(self, nodes, shapes):
         return [(shapes[0][0],) * 2]

-    def __str__(self):
-        return self.__class__.__name__
-

 def diag(v, k=0):
     if v.ndim == 1: