提交 7ed9fb90 authored 作者: Pascal Lamblin's avatar Pascal Lamblin 提交者: GitHub

Merge pull request #6111 from jhelie/fix-alerts-found-with-lgtm

Fix alerts found with lgtm
......@@ -56,8 +56,8 @@ class Optimizer(object):
# in subclasses from other bases.
return id(self) == id(other)
def __ne__(self, other):
    """Return True when *other* is not the very same object as *self*.

    Added to override any ``__ne__`` implementation that may be inherited
    in subclasses from other bases; equality for this class is
    identity-based, so inequality must be its exact negation.
    (The misspelled ``__neq__`` duplicate, which Python never calls,
    is removed.)
    """
    return id(self) != id(other)
......@@ -492,8 +492,7 @@ class _metadict:
except Exception:
if item is item2:
return value
else:
return default
return default
def clear(self):
    """Discard every entry by rebinding the backing dict to an empty one."""
    self.d = dict()
......
......@@ -65,7 +65,7 @@ class BoundVariable(Variable):
"""
def __init__(self, name, value):
    """Record *value*; *name* is handled by the base ``Variable`` class.

    The leftover ``self.name = name`` assignment (superseded by the
    ``super().__init__`` call, which performs the base-class
    initialization) is removed so the attribute is set exactly once.
    """
    super(BoundVariable, self).__init__(name=name)
    self.value = value
......@@ -77,7 +77,7 @@ class OrVariable(Variable):
"""
def __init__(self, name, options):
    """Record the candidate *options*; *name* is handled by the base class.

    The leftover ``self.name = name`` assignment (superseded by the
    ``super().__init__`` call) is removed so the attribute is set
    exactly once, by the base ``Variable`` initializer.
    """
    super(OrVariable, self).__init__(name=name)
    self.options = options
......@@ -89,7 +89,7 @@ class NotVariable(Variable):
"""
def __init__(self, name, not_options):
    """Record the excluded *not_options*; *name* is handled by the base class.

    The leftover ``self.name = name`` assignment (superseded by the
    ``super().__init__`` call) is removed so the attribute is set
    exactly once, by the base ``Variable`` initializer.
    """
    super(NotVariable, self).__init__(name=name)
    self.not_options = not_options
......
......@@ -1315,7 +1315,6 @@ class GpuJoin(HideC, Join):
fail = sub['fail']
out = out_[0]
copy_inputs_to_list = '\n'.join(copy_to_list)
restype = restype
ctx = sub['params']
code = """
......
......@@ -771,8 +771,7 @@ class GpuAdvancedIncSubtensor1(Op):
else:
opname = 'increment'
raise TypeError(
'cannot %s x subtensor with ndim=%s'
' by y with ndim=%s to x subtensor with ndim=%s ' % (
'cannot %s x subtensor with ndim=%s by y with ndim=%s ' % (
opname, x_.type.ndim, y_.type.ndim))
return gof.Apply(self, [x_, y_, ilist_], [x_.type()])
......@@ -995,8 +994,7 @@ class GpuAdvancedIncSubtensor1_dev20(GpuKernelBase, HideC,
else:
opname = 'increment'
raise TypeError(
'cannot %s x subtensor with ndim=%s'
' by y with ndim=%s to x subtensor with ndim=%s ' % (
'cannot %s x subtensor with ndim=%s by y with ndim=%s ' % (
opname, x_.type.ndim, y_.type.ndim))
return gof.Apply(self, [x_, y_, ilist_], [x_.type()])
......
......@@ -56,7 +56,7 @@ def safe_new(x, tag='', dtype=None):
if dtype and x.dtype != dtype:
casted_x = x.astype(dtype)
nwx = x.__class__(casted_x.type, x.data, x.name)
nwx.tag = copy(x.tag)
nwx.tag = copy.copy(x.tag)
return nwx
else:
return x.clone()
......
......@@ -333,6 +333,9 @@ class SparseConstantSignature(tuple):
(b.shape == y.shape) and
(abs(b - y).sum() < 1e-6 * b.nnz))
def __ne__(self, other):
    # Define inequality as the exact negation of __eq__ so the two
    # comparisons can never disagree.
    return not (self == other)
def __hash__(self):
    # self is a tuple subclass instance holding two elements; unpack them.
    # NOTE(review): presumably `a` is a type-like tag and `b` the sparse
    # data — confirm against the constructor, which is not visible here.
    (a, b) = self
    # Combine the signature's own type, the first element's hash and the
    # *type* (not the contents) of the second element; hashing contents
    # would be expensive and equality already compares them tolerantly.
    return hash(type(self)) ^ hash(a) ^ hash(type(b))
......@@ -1330,7 +1333,7 @@ class GetItemScalar(gof.op.Op):
elif ind.ndim == 0:
input_op += [ind]
else:
raise NotImplemented()
raise NotImplementedError
return gof.Apply(self, input_op, [tensor.scalar(dtype=x.dtype)])
......
......@@ -1350,7 +1350,7 @@ def local_mul_s_d(node):
CSx = sparse.CSR
mul_s_d_csx = mul_s_d_csr
else:
raise NotImplemented()
raise NotImplementedError
if x.dtype != y.dtype:
# mul_s_d_csx don't support that case
return
......
......@@ -194,7 +194,7 @@ class Multinomial(gof.op.Op):
assert _is_sparse(p)
if p.format != 'csr':
raise NotImplemented()
raise NotImplementedError
out[0] = p.copy()
......
......@@ -8386,6 +8386,19 @@ class T_Choose(utt.InferShapeTester):
n_c = np.choose(A, B, mode=m)
assert np.allclose(t_c, n_c)
def test_method(self):
    """Check that the tensor `.choose` method matches numpy's choose."""
    idx = tensor.vector(dtype='int32')
    vals = tensor.matrix(dtype='float32')
    idx_data = np.random.randint(0, 4, 4).astype('int32')
    vals_data = np.asarray(np.random.rand(4, 4), dtype='float32')
    for mode in self.modes:
        fn = function([idx, vals], idx.choose(vals, mode=mode))
        theano_out = fn(idx_data, vals_data)
        numpy_out = idx_data.choose(vals_data, mode=mode)
        assert np.allclose(theano_out, numpy_out)
def test_broadcasted(self):
a = tensor.scalar(dtype='int32')
b = tensor.matrix(dtype='float32')
......
......@@ -762,14 +762,13 @@ class _tensor_py_operators(object):
"""Fill inputted tensor with the assigned value."""
return theano.tensor.basic.fill(self, value)
def choose(self, choices, out=None, mode='raise'):
    """
    Construct an array from an index array and a set of arrays to choose
    from.

    Parameters
    ----------
    choices : sequence of arrays
        The arrays that values are picked from, indexed by ``self``.
    out : optional
        Output container, forwarded to ``theano.tensor.basic.choose``.
    mode : {'raise', 'wrap', 'clip'}
        How out-of-bounds indices are treated, forwarded as well.
    """
    # Forward the caller's `out` and `mode` instead of hard-coding
    # `out=None, mode='raise'`; the previous call silently ignored
    # both arguments.
    return theano.tensor.basic.choose(self, choices, out=out, mode=mode)
def squeeze(self):
"""
......@@ -864,6 +863,9 @@ class TensorConstantSignature(tuple):
# here).
return (self.sum == other.sum) and np.all(d0 == d1)
def __ne__(self, other):
    # Inequality is the strict negation of __eq__, keeping the two
    # comparison operators consistent with each other.
    return not (self == other)
def __hash__(self):
    # self is a tuple subclass instance with two elements; unpack them.
    # NOTE(review): `t` looks like a type/dtype tag and `d` like ndarray
    # data — confirm against the constructor, which is not visible here.
    t, d = self
    # `hashtype` is a helper defined elsewhere in this file; mix it with
    # the tag, the data's shape and the cached `self.sum` so signatures
    # that compare equal hash equally.
    return hashtype(self) ^ hash(t) ^ hash(d.shape) ^ hash(self.sum)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论