Commit 008579b5 authored by Brandon T. Willard

Apply pyupgrade to theano.tensor

Parent 4e04febf
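pyupgrade mechanically rewrites Python-2-era idioms into their Python-3-only equivalents. As editorial context (not part of the commit), a minimal sketch of the two most common rewrites in this diff, with made-up values:

    # Before (Python 2/3 compatible, via the six shim):
    #   from six import integer_types
    #   isinstance(x, integer_types)
    #   "%s{%s}" % (name, msg)
    # After (Python 3 only):
    x = 3
    print(isinstance(x, int))                # builtin int replaces six.integer_types
    print("{}{{{}}}".format("Reshape", 2))   # str.format replaces %-formatting -> Reshape{2}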
......@@ -8,7 +8,6 @@ from collections.abc import Sequence
from functools import partial
import numpy as np
from six import integer_types
import theano
import theano.scalar.sharedvar
......@@ -56,8 +55,6 @@ uint_dtypes = list(map(str, scal.uint_types))
class ShapeError(Exception):
"""Raised when the shape cannot be computed."""
pass
def check_equal_numpy(x, y):
"""
......@@ -434,7 +431,7 @@ def get_scalar_constant_value(
# to depend on passing it None)
raise NotScalarConstantError()
if isinstance(v, (np.integer, integer_types, float)):
if isinstance(v, (np.integer, int, float)):
return np.asarray(v)
if isinstance(v, np.ndarray):
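The `np.integer` check is kept alongside the new `int` because NumPy scalar integers do not subclass the builtin `int` on Python 3. A quick standalone illustration (mine, not from the diff):

    import numpy as np

    v = np.int64(7)
    print(isinstance(v, int))                # False: np.int64 is not a builtin int
    print(isinstance(v, (np.integer, int)))  # True: both checks are still needed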
......@@ -678,7 +675,7 @@ def tensor(*args, **kwargs):
def _multi(*fns):
def f2(f, *names):
if names and isinstance(names[0], integer_types):
if names and isinstance(names[0], int):
if names == 1:
return f()
else:
......@@ -1054,7 +1051,7 @@ def _scal_elemwise_with_nfunc(nfunc, nin, nout):
else:
msg = "no_inplace"
n = "Elemwise{%s,%s}" % (symbolname, msg)
n = "Elemwise{{{},{}}}".format(symbolname, msg)
if inplace:
scalar_op = getattr(scal, symbolname[: -len("_inplace")])
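Note how literal braces survive the move to str.format: doubled braces (`{{` and `}}`) emit single `{` and `}`. A standalone check of the new Elemwise name string, with made-up arguments:

    symbolname, msg = "mul", "no_inplace"
    # "{{" -> "{", "{}" -> field, "}}" -> "}"
    print("Elemwise{{{},{}}}".format(symbolname, msg))   # Elemwise{mul,no_inplace}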
......@@ -1116,7 +1113,7 @@ def check_and_normalize_axes(x, axis):
x = as_tensor_variable(x)
if axis is None:
axis = []
elif isinstance(axis, (integer_types, np.integer)) or (
elif isinstance(axis, (int, np.integer)) or (
isinstance(axis, np.ndarray) and axis.ndim == 0
):
axis = [int(axis)]
......@@ -1129,7 +1126,7 @@ def check_and_normalize_axes(x, axis):
raise TypeError("Computation needs a constant axis. Got %s" % axis)
else:
assert axis.dtype in integer_dtypes
if isinstance(axis.data, (integer_types, np.integer)) or (
if isinstance(axis.data, (int, np.integer)) or (
isinstance(axis.data, np.ndarray) and axis.data.ndim == 0
):
axis = [int(axis.data)]
......@@ -1331,10 +1328,8 @@ def cast(x, dtype):
return _x
if _x.type.dtype.startswith("complex") and not dtype.startswith("complex"):
raise TypeError(
(
"Casting from complex to real is ambiguous: consider real(), "
"imag(), angle() or abs()"
)
"Casting from complex to real is ambiguous: consider real(), "
"imag(), angle() or abs()"
)
return _cast_mapping[dtype](x)
......@@ -1490,14 +1485,12 @@ class MaxAndArgmax(Op):
if eval_points[0] is None:
return [None, None]
if len(self.axis) != 1:
raise ValueError(("R_op supported for arg_max only for " "one axis!"))
raise ValueError("R_op supported for arg_max only for " "one axis!")
if self.axis[0] > 1:
raise ValueError(
("R_op supported for arg_max only when " " axis is 0 or 1")
)
raise ValueError("R_op supported for arg_max only when " " axis is 0 or 1")
if inputs[0].ndim != 2:
raise ValueError(
("R_op supported for arg_max only when " " input is a matrix")
"R_op supported for arg_max only when " " input is a matrix"
)
max_vals, max_pos = self.make_node(*inputs).outputs
if self.axis[0] == 0:
......@@ -1710,7 +1703,7 @@ def makeKeepDims(x, y, axis):
if axis is None:
axis = list(range(x.type.ndim))
elif isinstance(axis, (integer_types, np.integer)):
elif isinstance(axis, (int, np.integer)):
axis = [axis]
elif isinstance(axis, np.ndarray) and axis.ndim == 0:
axis = [int(axis)]
......@@ -1718,7 +1711,7 @@ def makeKeepDims(x, y, axis):
axis = [int(a) for a in axis]
newaxis = []
for a in axis:
if not isinstance(a, integer_types):
if not isinstance(a, int):
raise ValueError("keepdims option can be used only with constant axis")
if a < 0:
a += x.type.ndim
......@@ -3211,7 +3204,7 @@ class Alloc(gof.Op):
If you always want an Alloc node, call make_node.
"""
ret = super(Alloc, self).__call__(val, *shapes, **kwargs)
ret = super().__call__(val, *shapes, **kwargs)
try:
# It makes optimization difficult when useless allocs are thrown
# into the graph at every stage of optimization. This little logic
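The zero-argument `super()` used above is the Python 3 replacement for `super(Alloc, self)`: the compiler supplies the enclosing class and instance through the `__class__` cell. A toy sketch with made-up Parent/Child names, unrelated to the real Theano classes:

    class Parent:
        def __call__(self, val, *shapes, **kwargs):
            return ("Parent", val, shapes)

    class Child(Parent):
        def __call__(self, val, *shapes, **kwargs):
            # Zero-argument form; equivalent to super(Child, self)
            return super().__call__(val, *shapes, **kwargs)

    print(Child()(1, 2, 3))   # ('Parent', 1, (2, 3))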
......@@ -3294,7 +3287,7 @@ def transfer(var, target):
res = trans(var, target)
if res is not None:
return res
raise ValueError("Can't transfer to target %s" % (target,))
raise ValueError("Can't transfer to target {}".format(target))
transfer._others = []
......@@ -3503,7 +3496,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None)
if axis is None:
axis = list(range(input.ndim))
elif isinstance(axis, (integer_types, np.integer)):
elif isinstance(axis, (int, np.integer)):
axis = [axis]
elif isinstance(axis, np.ndarray) and axis.ndim == 0:
axis = [int(axis)]
......@@ -3564,7 +3557,7 @@ def var(input, axis=None, ddof=0, keepdims=False, corrected=False):
input_ndim = input.type.ndim
if axis is None:
axis = list(range(input_ndim))
elif isinstance(axis, (integer_types, np.integer)):
elif isinstance(axis, (int, np.integer)):
axis = [axis]
elif isinstance(axis, np.ndarray) and axis.ndim == 0:
axis = [int(axis)]
......@@ -4016,7 +4009,9 @@ class Split(Op):
if np.sum(splits) != len_along_axis:
raise ValueError(
"The splits sum to %s, expected %s" % (np.sum(splits), len_along_axis)
"The splits sum to {}, expected {}".format(
np.sum(splits), len_along_axis
)
)
if python_any([nb < 0 for nb in splits]):
raise ValueError(
......@@ -4351,9 +4346,11 @@ class Join(Op):
if self.view == -1:
return self.__class__.__name__
else:
return "%s{%s}" % (
return "{}{{{}}}".format(
self.__class__.__name__,
", ".join("%s=%r" % (p, getattr(self, p)) for p in self.__props__),
", ".join(
"{}={!r}".format(p, getattr(self, p)) for p in self.__props__
),
)
def __setstate__(self, d):
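The `{!r}` conversion applies `repr()` inside str.format, matching the old `%r`. A standalone check with a made-up props dict:

    props = {"view": -1}
    s = ", ".join("{}={!r}".format(k, v) for k, v in props.items())
    print("Join{{{}}}".format(s))   # Join{view=-1}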
......@@ -4412,7 +4409,7 @@ class Join(Op):
bcastable = [False] * len(as_tensor_variable_args[0].type.broadcastable)
ndim = len(bcastable)
# Axis can also be a constant
if not isinstance(axis, integer_types):
if not isinstance(axis, int):
try:
# Note : `get_scalar_constant_value` returns a ndarray not
# an int
......@@ -4420,7 +4417,7 @@ class Join(Op):
except NotScalarConstantError:
pass
if isinstance(axis, integer_types):
if isinstance(axis, int):
# Basically, broadcastable -> length 1, but the
# converse does not hold. So we permit e.g. T/F/T
# joins, and if they fail at runtime they fail, but if
......@@ -4886,7 +4883,7 @@ def stack(*tensors, **kwargs):
# See ticket #660
if np.all(
[ # in case there is direct int in tensors.
isinstance(t, (np.number, float, integer_types, python_complex))
isinstance(t, (np.number, float, int, python_complex))
or (
isinstance(t, Variable)
and isinstance(t.type, TensorType)
......@@ -5071,7 +5068,7 @@ class Reshape(Op):
assert name is None, "name attribute for Reshape has been deprecated"
def __str__(self):
return "%s{%s}" % (self.__class__.__name__, self.ndim)
return "{}{{{}}}".format(self.__class__.__name__, self.ndim)
def make_node(self, x, shp):
x = as_tensor_variable(x)
......@@ -5122,7 +5119,7 @@ class Reshape(Op):
out[0] = np.reshape(x, shp)
except Exception:
raise ValueError(
"Cannot reshape input of shape %s to shape %s" % (x.shape, shp)
"Cannot reshape input of shape {} to shape {}".format(x.shape, shp)
)
def connection_pattern(self, node):
......@@ -5301,7 +5298,7 @@ class Flatten(Op):
self.outdim = int(outdim)
def __str__(self):
return "%s{%s}" % (self.__class__.__name__, self.outdim)
return "{}{{{}}}".format(self.__class__.__name__, self.outdim)
def make_node(self, x):
t_x = as_tensor_variable(x)
......@@ -5667,7 +5664,7 @@ def tile(x, reps, ndim=None):
raise ValueError("len(reps) should be equal or less than ndim")
if not np.all(
[
isinstance(r, integer_types)
isinstance(r, int)
or (
isinstance(r, TensorVariable)
and r.dtype in theano.tensor.discrete_dtypes
......@@ -5861,7 +5858,7 @@ def arange(start, stop=None, step=1, dtype=None):
return _arange[dtype](start, stop, step)
class _nd_grid(object):
class _nd_grid:
"""Create a dense n-dimensional 'meshgrid' with equally spaced points.
Used to create the instance ``mgrid`` and ``ogrid`` which act similarly
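Dropping the explicit `(object)` base is safe because every Python 3 class is new-style. A sketch with a made-up class name:

    class _Demo:            # identical to: class _Demo(object):
        pass

    print(_Demo.__mro__[-1] is object)   # True: object is still the implicit base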
......@@ -6046,7 +6043,7 @@ class PermuteRowElements(Op):
for i in range(ys0):
self._rec_perform(node, x[0], y[i], inverse, out[i], curdim + 1)
else:
raise ValueError("Dimension mismatch: %s, %s" % (xs0, ys0))
raise ValueError("Dimension mismatch: {}, {}".format(xs0, ys0))
def perform(self, node, inp, out):
x, y, inverse = inp
......@@ -6065,7 +6062,7 @@ class PermuteRowElements(Op):
elif ydim == 1:
outdim = xdim
else:
raise ValueError("Dimension mismatch: %s, %s" % (xdim, ydim))
raise ValueError("Dimension mismatch: {}, {}".format(xdim, ydim))
out_s.append(outdim)
if outs[0] is None or outs[0].shape != out_s:
......
......@@ -477,7 +477,7 @@ def _ldflags(ldflags_str, libs, flags, libs_dir, include_dir):
assert t0 == "-"
except Exception:
raise ValueError(
'invalid token "%s" in ldflags_str: "%s"' % (t, ldflags_str)
'invalid token "{}" in ldflags_str: "{}"'.format(t, ldflags_str)
)
if libs_dir and t1 == "L":
rval.append(t[2:])
......@@ -800,7 +800,7 @@ class GemmRelated(Op):
def build_gemm_call(self):
if hasattr(self, "inplace"):
setup_z_Nz_Sz = "if(%%(params)s->inplace){%s}else{%s}" % (
setup_z_Nz_Sz = "if(%(params)s->inplace){{{}}}else{{{}}}".format(
self.setup_z_Nz_Sz_inplace,
self.setup_z_Nz_Sz_outplace,
)
......@@ -880,7 +880,7 @@ class Gemm(GemmRelated):
inplace_str = "inplace"
else:
inplace_str = "no_inplace"
return "%s{%s}" % (self.__class__.__name__, inplace_str)
return "{}{{{}}}".format(self.__class__.__name__, inplace_str)
def __setstate__(self, dct):
self.__dict__.update(dct)
......@@ -1671,7 +1671,7 @@ class Dot22(GemmRelated):
if node.inputs[0].type.dtype.startswith("complex"):
raise MethodNotDefined("%s.c_code" % self.__class__.__name__)
if len(self.c_libraries()) <= 0:
return super(Dot22, self).c_code(node, name, (_x, _y), (_zout,), sub)
return super().c_code(node, name, (_x, _y), (_zout,), sub)
full_code = self.build_gemm_call() % dict(locals(), **sub)
return full_code
......@@ -1941,7 +1941,7 @@ class Dot22Scalar(GemmRelated):
if node.inputs[0].type.dtype.startswith("complex"):
raise MethodNotDefined("%s.c_code" % self.__class__.__name__)
if len(self.c_libraries()) <= 0:
return super(Dot22Scalar, self).c_code(node, name, (_x, _y), (_zout,), sub)
return super().c_code(node, name, (_x, _y), (_zout,), sub)
full_code = self.build_gemm_call() % dict(locals(), **sub)
return full_code
......
......@@ -23,7 +23,7 @@ from theano.tensor.blas import (
from theano.tensor.opt import in2out
class BaseBLAS(object):
class BaseBLAS:
def c_libraries(self):
return ldflags()
......@@ -617,7 +617,7 @@ class CGemv(BaseBLAS, Gemv):
)
def __init__(self, inplace):
super(CGemv, self).__init__(inplace)
super().__init__(inplace)
def c_code(self, node, name, inp, out, sub):
y, alpha, A, x, beta = inp
......
......@@ -770,7 +770,7 @@ def blas_header_text():
"precision": "d",
}
if not common_code or not template_code:
raise IOError(
raise OSError(
"Unable to load NumPy implementation of BLAS functions from C source files."
)
blas_code += common_code
......
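`IOError` has been an alias of `OSError` since Python 3.3 (PEP 3151), so the rewrite above changes no behavior. A quick standalone check:

    print(IOError is OSError)   # True on Python 3.3+
    try:
        raise OSError("Unable to load NumPy implementation of BLAS functions.")
    except IOError as err:      # still caught: same class under two names
        print(type(err).__name__)   # OSError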
from copy import copy
import numpy as np
from six import integer_types
import theano
from theano import change_flags, gof, scalar
......@@ -175,7 +174,7 @@ class DimShuffle(COp):
# isinstance(x, integer_types) returning False for
# numpy integers. See
# <http://projects.scipy.org/numpy/ticket/2235>.
if not isinstance(j, (integer_types, np.integer)):
if not isinstance(j, (int, np.integer)):
raise TypeError(
"DimShuffle indices must be python ints. "
"Got: '%s' of type '%s'.",
......@@ -232,20 +231,16 @@ class DimShuffle(COp):
if not ib == self.input_broadcastable:
if len(ib) != len(self.input_broadcastable):
raise TypeError(
(
"The number of dimensions of the "
"input is incorrect for this op. Expected %s, got %s."
% (self.input_broadcastable, ib)
)
"The number of dimensions of the "
"input is incorrect for this op. Expected %s, got %s."
% (self.input_broadcastable, ib)
)
for expected, b in zip(self.input_broadcastable, ib):
if expected is True and b is False:
raise TypeError(
(
"The broadcastable pattern of the "
"input is incorrect for this op. Expected %s, got %s."
% (self.input_broadcastable, ib)
)
"The broadcastable pattern of the "
"input is incorrect for this op. Expected %s, got %s."
% (self.input_broadcastable, ib)
)
# else, expected == b or expected is False and b is True
# Both case are good.
......@@ -335,7 +330,7 @@ class DimShufflePrinter:
return pstate.pprinter.process(r)
if list(new_order) == list(reversed(range(r.type.ndim))):
return "%s.T" % pstate.pprinter.process(r)
return "DimShuffle{%s}(%s)" % (
return "DimShuffle{{{}}}({})".format(
", ".join(map(str, new_order)),
pstate.pprinter.process(r),
)
......@@ -417,13 +412,13 @@ second dimension
self.name = name
self.scalar_op = scalar_op
self.inplace_pattern = inplace_pattern
self.destroy_map = dict((o, [i]) for o, i in self.inplace_pattern.items())
self.destroy_map = {o: [i] for o, i in self.inplace_pattern.items()}
if nfunc_spec is None:
nfunc_spec = getattr(scalar_op, "nfunc_spec", None)
self.nfunc_spec = nfunc_spec
self.__setstate__(self.__dict__)
super(Elemwise, self).__init__(openmp=openmp)
super().__init__(openmp=openmp)
def __getstate__(self):
d = copy(self.__dict__)
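The `destroy_map` construction above is one of several `dict((k, v) for ...)` calls this commit turns into comprehensions; the result is identical, without the intermediate generator of tuples. A standalone run with a made-up pattern:

    inplace_pattern = {0: 1, 2: 3}
    destroy_map = {o: [i] for o, i in inplace_pattern.items()}
    print(destroy_map)   # {0: [1], 2: [3]}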
......@@ -433,7 +428,7 @@ second dimension
return d
def __setstate__(self, d):
super(Elemwise, self).__setstate__(d)
super().__setstate__(d)
self.ufunc = None
self.nfunc = None
self.inplace_pattern = frozendict(self.inplace_pattern)
......@@ -528,7 +523,7 @@ second dimension
if self.inplace_pattern:
items = list(self.inplace_pattern.items())
items.sort()
return "Elemwise{%s}%s" % (self.scalar_op, str(items))
return "Elemwise{{{}}}{}".format(self.scalar_op, str(items))
else:
return "Elemwise{%s}" % (self.scalar_op)
else:
......@@ -766,7 +761,7 @@ second dimension
# ValueError, if the number of inputs to a ufunc is 32 or more.
# In that case, the C version should be used, or Elemwise fusion
# should be disabled.
super(Elemwise, self).perform(node, inputs, output_storage)
super().perform(node, inputs, output_storage)
for dims in zip(
*[
......@@ -933,12 +928,9 @@ second dimension
# The destroy map is a map of output indices to input indices
# that overwrite them. We just convert them to the actual
# Variables.
dmap = dict(
[
(node.outputs[o], [node.inputs[i]])
for o, i in self.inplace_pattern.items()
]
)
dmap = {
node.outputs[o]: [node.inputs[i]] for o, i in self.inplace_pattern.items()
}
# dtypes of the inputs
idtypes = [input.type.dtype_specs()[1] for input in inputs]
......@@ -1091,7 +1083,7 @@ second dimension
# No loops
task_decl = "".join(
[
"%s& %s_i = *%s_iter;\n" % (dtype, name, name)
"{}& {}_i = *{}_iter;\n".format(dtype, name, name)
for name, dtype in zip(
inames + list(real_onames), idtypes + list(real_odtypes)
)
......@@ -1252,7 +1244,7 @@ second dimension
getattr(self.scalar_op, "inner_float16", False)
):
# Disable C code for float16 vars
super(Elemwise, self).c_code(node, nodename, inames, onames, sub)
super().c_code(node, nodename, inames, onames, sub)
code = "\n".join(self._c_all(node, nodename, inames, onames, sub))
return code
......@@ -1353,7 +1345,7 @@ class CAReduce(Op):
def __init__(self, scalar_op, axis=None):
if scalar_op.nin not in [-1, 2] or scalar_op.nout != 1:
raise NotImplementedError(
("CAReduce only supports binary functions with a single " "output.")
"CAReduce only supports binary functions with a single " "output."
)
self.scalar_op = scalar_op
......@@ -1362,12 +1354,12 @@ class CAReduce(Op):
# There is a bug in numpy that results in isinstance(x,
# integer_types) returning False for numpy integers. See
# <http://projects.scipy.org/numpy/ticket/2235>.
elif isinstance(axis, (integer_types, np.integer)):
elif isinstance(axis, (int, np.integer)):
self.axis = (axis,)
elif isinstance(axis, np.ndarray) and axis.ndim == 0:
self.axis = (int(axis),)
else:
self.axis = list(set(int(a) for a in axis))
self.axis = list({int(a) for a in axis})
self.axis.sort()
self.axis = tuple(self.axis)
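The set comprehension both coerces and de-duplicates the requested axes before sorting. A standalone run with made-up input:

    axis = (1, -1, 1)
    canon = sorted({int(a) for a in axis})   # set comp replaces set(int(a) for ...)
    print(tuple(canon))   # (-1, 1)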
......@@ -1408,10 +1400,8 @@ class CAReduce(Op):
axis < 0 and abs(axis) > input.type.ndim
):
raise ValueError(
(
"Not enough dimensions on %s to reduce on axis %s"
% (input, axis)
)
"Not enough dimensions on %s to reduce on axis %s"
% (input, axis)
)
input = as_tensor_variable(input)
axis = self.axis
......@@ -1452,7 +1442,7 @@ class CAReduce(Op):
def __str__(self):
if self.axis is not None:
return "Reduce{%s}{%s}" % (
return "Reduce{{{}}}{{{}}}".format(
self.scalar_op,
", ".join(str(x) for x in self.axis),
)
......@@ -1486,11 +1476,9 @@ class CAReduce(Op):
variable.fill(self.scalar_op.identity)
else:
raise ValueError(
(
"Input (%s) has zero-size on axis %s, but "
"self.scalar_op (%s) has no attribute 'identity'"
% (variable, dimension, self.scalar_op)
)
"Input (%s) has zero-size on axis %s, but "
"self.scalar_op (%s) has no attribute 'identity'"
% (variable, dimension, self.scalar_op)
)
else:
variable = self.ufunc.reduce(variable, dimension, dtype=acc_dtype)
......@@ -1775,7 +1763,7 @@ class All(CAReduce):
input = as_tensor_variable(input)
if input.dtype != "bool":
input = theano.tensor.neq(input, 0)
ret = super(All, self).make_node(input)
ret = super().make_node(input)
return ret
def grad(self, inp, grads):
......@@ -1808,7 +1796,7 @@ class Any(CAReduce):
input = as_tensor_variable(input)
if input.dtype != "bool":
input = theano.tensor.neq(input, 0)
ret = super(Any, self).make_node(input)
ret = super().make_node(input)
return ret
def grad(self, inp, grads):
......@@ -1877,7 +1865,7 @@ class CAReduceDtype(CAReduce):
self.acc_dtype = acc_dtype
def __setstate__(self, d):
super(CAReduceDtype, self).__setstate__(d)
super().__setstate__(d)
if not hasattr(self, "dtype"):
# This is needed as old pickled will crash otherwise.
# We need to keep the old dtype behavior as the op
......@@ -1985,7 +1973,7 @@ class CAReduceDtype(CAReduce):
if self.axis is not None:
axis = ", ".join(str(x) for x in self.axis)
axis = "axis=[%s], " % axis
return "%s{%sacc_dtype=%s}" % (name, axis, str(self.acc_dtype))
return "{}{{{}acc_dtype={}}}".format(name, axis, str(self.acc_dtype))
class Sum(CAReduceDtype):
......@@ -2038,7 +2026,7 @@ class Sum(CAReduceDtype):
if self.axis is not None:
axis = ", ".join(str(x) for x in self.axis)
axis = "axis=[%s], " % axis
return "%s{%sacc_dtype=%s}" % (name, axis, str(self.acc_dtype))
return "{}{{{}acc_dtype={}}}".format(name, axis, str(self.acc_dtype))
def L_op(self, inp, out, grads):
(x,) = inp
......@@ -2093,7 +2081,7 @@ class Prod(CAReduceDtype):
self.no_zeros_in_input = no_zeros_in_input
def __setstate__(self, dct):
super(Prod, self).__setstate__(dct)
super().__setstate__(dct)
# Add default value to be able to reload old pickled objects.
if "no_zeros_in_input" not in dct:
self.no_zeros_in_input = False
......
......@@ -77,7 +77,9 @@ def make_checks(loop_orders, dtypes, sub):
if index != "x":
# Initialize the variables associated to the jth loop
# jump = stride - adjust
jump = "(%s) - (%s)" % ("%(var)s_stride%(index)s" % locals(), adjust)
jump = "({}) - ({})".format(
"%(var)s_stride%(index)s" % locals(), adjust
)
init += (
"""
%(var)s_n%(index)s = PyArray_DIMS(%(var)s)[%(index)s];
......
......@@ -274,7 +274,7 @@ class CumOp(Op):
def __init__(self, axis=None, mode="add"):
if mode not in ("add", "mul"):
raise ValueError('%s: Unknown mode "%s"' % (type(self).__name__, mode))
raise ValueError('{}: Unknown mode "{}"'.format(type(self).__name__, mode))
self.axis = axis
self.mode = mode
......@@ -287,7 +287,7 @@ class CumOp(Op):
if self.axis is None:
out_type = theano.tensor.vector(dtype=x.dtype) # Flatten
elif self.axis >= x.ndim or self.axis < -x.ndim:
raise ValueError("axis(={0}) out of bounds".format(self.axis))
raise ValueError("axis(={}) out of bounds".format(self.axis))
return theano.Apply(self, [x], [out_type])
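Explicit field indices like `{0}` are redundant when arguments are consumed in order, so pyupgrade drops them. Standalone, with a made-up value:

    axis = 5
    print("axis(={}) out of bounds".format(axis))   # axis(=5) out of bounds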
......@@ -327,7 +327,9 @@ class CumOp(Op):
return [cumsum((fx * gi)[reverse_slicing], self.axis)[reverse_slicing] / x]
else:
raise NotImplementedError(
'%s: unknown gradient for mode "%s"' % (type(self).__name__, self.mode)
'{}: unknown gradient for mode "{}"'.format(
type(self).__name__, self.mode
)
)
def infer_shape(self, node, shapes):
......@@ -391,7 +393,7 @@ class CumOp(Op):
return (8,)
def __str__(self):
return "%s{%s, %s}" % (self.__class__.__name__, self.axis, self.mode)
return "{}{{{}, {}}}".format(self.__class__.__name__, self.axis, self.mode)
def cumsum(x, axis=None):
......
......@@ -58,7 +58,7 @@ class LoadFromDisk(Op):
out[0][0] = result
def __str__(self):
return "Load{dtype: %s, broadcastable: %s, mmep: %s}" % (
return "Load{{dtype: {}, broadcastable: {}, mmep: {}}}".format(
self.dtype,
self.broadcastable,
self.mmap_mode,
......
......@@ -6,8 +6,6 @@ Abstract conv interface
import logging
import sys
from six import integer_types, reraise
try:
from math import gcd
......@@ -635,12 +633,12 @@ def border_mode_to_pad(mode, convdim, kshp):
)
border = ()
for m in mode:
if isinstance(m, integer_types) and m >= 0:
if isinstance(m, int) and m >= 0:
border += ((m, m),)
elif (
isinstance(m, tuple)
and min(m) >= 0
and all(isinstance(b, integer_types) for b in m)
and all(isinstance(b, int) for b in m)
):
if len(m) != 2:
raise NotImplementedError(
......@@ -1205,13 +1203,13 @@ def conv2d_grad_wrt_inputs(
# checking the type of input_shape
for dim in [0, 1]:
if not isinstance(
input_shape[dim], (theano.tensor.TensorConstant, integer_types, type(None))
input_shape[dim], (theano.tensor.TensorConstant, int, type(None))
):
raise ValueError("input_shape[%d] must be a constant or None." % dim)
for dim in [2, 3]:
if not isinstance(
input_shape[dim],
(theano.tensor.TensorVariable, theano.tensor.TensorConstant, integer_types),
(theano.tensor.TensorVariable, theano.tensor.TensorConstant, int),
):
raise ValueError(
"input_shape[%d] must be a symbolic variable,"
......@@ -1234,7 +1232,7 @@ def conv2d_grad_wrt_inputs(
for dim in range(expected_dim):
if not isinstance(
filter_shape[dim],
(theano.tensor.TensorConstant, integer_types, type(None)),
(theano.tensor.TensorConstant, int, type(None)),
):
raise ValueError("filter_shape[%d] must be a constant or None" % dim)
......@@ -1371,12 +1369,12 @@ def conv3d_grad_wrt_inputs(
# checking the type of input_shape
for dim in [0, 1]:
assert isinstance(
input_shape[dim], (theano.tensor.TensorConstant, integer_types, type(None))
input_shape[dim], (theano.tensor.TensorConstant, int, type(None))
)
for dim in [2, 3, 4]:
assert isinstance(
input_shape[dim],
(theano.tensor.TensorVariable, theano.tensor.TensorConstant, integer_types),
(theano.tensor.TensorVariable, theano.tensor.TensorConstant, int),
)
# checking the type of filter_shape
......@@ -1384,7 +1382,7 @@ def conv3d_grad_wrt_inputs(
for dim in [0, 1, 2, 3, 4]:
assert isinstance(
filter_shape[dim],
(theano.tensor.TensorConstant, integer_types, type(None)),
(theano.tensor.TensorConstant, int, type(None)),
)
# setting the last three dimensions of input_shape to None, if
......@@ -1526,18 +1524,18 @@ def conv2d_grad_wrt_weights(
# checking the type of filter_shape
for dim in [0, 1]:
assert isinstance(
filter_shape[dim], (theano.tensor.TensorConstant, integer_types, type(None))
filter_shape[dim], (theano.tensor.TensorConstant, int, type(None))
)
if unshared:
for dim in [2, 3]:
assert isinstance(
filter_shape[dim],
(theano.tensor.TensorConstant, integer_types, type(None)),
(theano.tensor.TensorConstant, int, type(None)),
)
for dim in [-2, -1]:
assert isinstance(
filter_shape[dim],
(theano.tensor.TensorVariable, theano.tensor.TensorConstant, integer_types),
(theano.tensor.TensorVariable, theano.tensor.TensorConstant, int),
)
# checking the type of input_shape
......@@ -1545,7 +1543,7 @@ def conv2d_grad_wrt_weights(
for dim in [0, 1, 2, 3]:
assert isinstance(
input_shape[dim],
(theano.tensor.TensorConstant, integer_types, type(None)),
(theano.tensor.TensorConstant, int, type(None)),
)
# setting the last two dimensions of filter_shape to None, if
......@@ -1672,12 +1670,12 @@ def conv3d_grad_wrt_weights(
# checking the type of filter_shape
for dim in [0, 1]:
assert isinstance(
filter_shape[dim], (theano.tensor.TensorConstant, integer_types, type(None))
filter_shape[dim], (theano.tensor.TensorConstant, int, type(None))
)
for dim in [2, 3, 4]:
assert isinstance(
filter_shape[dim],
(theano.tensor.TensorVariable, theano.tensor.TensorConstant, integer_types),
(theano.tensor.TensorVariable, theano.tensor.TensorConstant, int),
)
# checking the type of input_shape
......@@ -1685,7 +1683,7 @@ def conv3d_grad_wrt_weights(
for dim in [0, 1, 2, 3, 4]:
assert isinstance(
input_shape[dim],
(theano.tensor.TensorConstant, integer_types, type(None)),
(theano.tensor.TensorConstant, int, type(None)),
)
# setting the last three dimensions of filter_shape to None, if
......@@ -2228,7 +2226,7 @@ class BaseAbstractConv(Op):
if filter_dilation is None:
filter_dilation = (1,) * convdim
if isinstance(border_mode, integer_types):
if isinstance(border_mode, int):
if border_mode < 0:
raise ValueError(
"invalid border_mode {}, which must be a "
......@@ -2244,12 +2242,12 @@ class BaseAbstractConv(Op):
new_border_mode = ()
for mode in border_mode:
if not (
(isinstance(mode, integer_types) and mode >= 0)
(isinstance(mode, int) and mode >= 0)
or (
isinstance(mode, tuple)
and len(mode) == 2
and min(mode) >= 0
and all(isinstance(m, integer_types) for m in mode)
and all(isinstance(m, int) for m in mode)
)
):
raise ValueError(
......@@ -2283,13 +2281,9 @@ class BaseAbstractConv(Op):
try:
get_scalar_constant_value(imshp_i, only_process_constants=True)
except NotScalarConstantError:
reraise(
ValueError,
ValueError(
"imshp should be None or a tuple of " "constant int values"
),
sys.exc_info()[2],
)
raise ValueError(
"imshp should be None or a tuple of " "constant int values"
).with_traceback(sys.exc_info()[2])
if kshp:
self.kshp = tuple(kshp)
else:
......@@ -2300,13 +2294,9 @@ class BaseAbstractConv(Op):
try:
get_scalar_constant_value(kshp_i, only_process_constants=True)
except NotScalarConstantError:
reraise(
ValueError,
ValueError(
"kshp should be None or a tuple of " "constant int values"
),
sys.exc_info()[2],
)
raise ValueError(
"kshp should be None or a tuple of " "constant int values"
).with_traceback(sys.exc_info()[2])
self.border_mode = border_mode
self.filter_flip = filter_flip
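`six.reraise(tp, value, tb)` becomes the native `raise value.with_traceback(tb)`, which re-raises the new exception with the original traceback attached. A self-contained sketch of the pattern, with a stand-in for the failing constant check:

    import sys

    try:
        try:
            int("not-a-constant")                  # stand-in for the failing check
        except ValueError:
            raise ValueError(
                "kshp should be None or a tuple of constant int values"
            ).with_traceback(sys.exc_info()[2])
    except ValueError as err:
        print(err)   # kshp should be None or a tuple of constant int values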
......@@ -2374,7 +2364,7 @@ class BaseAbstractConv(Op):
"invalid mode {}, which must be either "
'"valid" or "full"'.format(mode)
)
if isinstance(dilation, integer_types):
if isinstance(dilation, int):
dilation = (dilation,) * self.convdim
if len(dilation) != self.convdim:
raise ValueError(
......@@ -2534,7 +2524,7 @@ class AbstractConv(BaseAbstractConv):
num_groups=1,
unshared=False,
):
super(AbstractConv, self).__init__(
super().__init__(
convdim=convdim,
imshp=imshp,
kshp=kshp,
......@@ -2749,7 +2739,7 @@ class AbstractConv2d(AbstractConv):
num_groups=1,
unshared=False,
):
super(AbstractConv2d, self).__init__(
super().__init__(
convdim=2,
imshp=imshp,
kshp=kshp,
......@@ -2814,7 +2804,7 @@ class AbstractConv3d(AbstractConv):
filter_dilation=(1, 1, 1),
num_groups=1,
):
super(AbstractConv3d, self).__init__(
super().__init__(
convdim=3,
imshp=imshp,
kshp=kshp,
......@@ -2882,7 +2872,7 @@ class AbstractConv_gradWeights(BaseAbstractConv):
num_groups=1,
unshared=False,
):
super(AbstractConv_gradWeights, self).__init__(
super().__init__(
convdim=convdim,
imshp=imshp,
kshp=kshp,
......@@ -3109,7 +3099,7 @@ class AbstractConv2d_gradWeights(AbstractConv_gradWeights):
num_groups=1,
unshared=False,
):
super(AbstractConv2d_gradWeights, self).__init__(
super().__init__(
convdim=2,
imshp=imshp,
kshp=kshp,
......@@ -3179,7 +3169,7 @@ class AbstractConv3d_gradWeights(AbstractConv_gradWeights):
filter_dilation=(1, 1, 1),
num_groups=1,
):
super(AbstractConv3d_gradWeights, self).__init__(
super().__init__(
convdim=3,
imshp=imshp,
kshp=kshp,
......@@ -3248,7 +3238,7 @@ class AbstractConv_gradInputs(BaseAbstractConv):
num_groups=1,
unshared=False,
):
super(AbstractConv_gradInputs, self).__init__(
super().__init__(
convdim=convdim,
imshp=imshp,
kshp=kshp,
......@@ -3502,7 +3492,7 @@ class AbstractConv2d_gradInputs(AbstractConv_gradInputs):
num_groups=1,
unshared=False,
):
super(AbstractConv2d_gradInputs, self).__init__(
super().__init__(
convdim=2,
imshp=imshp,
kshp=kshp,
......@@ -3572,7 +3562,7 @@ class AbstractConv3d_gradInputs(AbstractConv_gradInputs):
filter_dilation=(1, 1, 1),
num_groups=1,
):
super(AbstractConv3d_gradInputs, self).__init__(
super().__init__(
convdim=3,
imshp=imshp,
kshp=kshp,
......
......@@ -28,7 +28,7 @@ class BNComposite(Composite):
o = add(mul(true_div(sub(x, mean), std), gamma), beta)
inputs = [x, mean, std, gamma, beta]
outputs = [o]
super(BNComposite, self).__init__(inputs, outputs)
super().__init__(inputs, outputs)
def grad(self, inps, grads):
x, mean, std, gamma, beta = inps
......
......@@ -488,7 +488,7 @@ class ConvOp(OpenMPOp):
)
# Init the openmp attribute
super(ConvOp, self).__init__(openmp=openmp)
super().__init__(openmp=openmp)
if not all_shape or self.openmp:
# Only this version is parallelized
unroll_patch = True
......@@ -687,7 +687,7 @@ class ConvOp(OpenMPOp):
return True
def __setstate__(self, d):
super(ConvOp, self).__setstate__(d)
super().__setstate__(d)
self.direction_hint = d.get("direction_hint", None)
self._rehash()
......@@ -1197,7 +1197,7 @@ using namespace std;
if theano.gof.cmodule.gcc_version() in ["4.3.0"] and self.kshp == (1, 1):
ret += ["-O2"]
# Add the -fopenmp flags
ret += super(ConvOp, self).c_compile_args()
ret += super().c_compile_args()
return ret
......
import logging
import os
from six import integer_types
import theano
from theano import gof
from theano.gof.graph import Apply
......@@ -86,8 +84,8 @@ class BaseCorrMM(gof.OpenMPOp):
unshared=False,
openmp=None,
):
super(BaseCorrMM, self).__init__(openmp=openmp)
if isinstance(border_mode, integer_types):
super().__init__(openmp=openmp)
if isinstance(border_mode, int):
if border_mode < 0:
raise ValueError(
"invalid border_mode {}, which must be a "
......@@ -175,7 +173,7 @@ class BaseCorrMM(gof.OpenMPOp):
padW_r = property(lambda self: self.pad[1][1])
def __str__(self):
return "%s{%s, %s, %s, %s %s}" % (
return "{}{{{}, {}, {}, {} {}}}".format(
self.__class__.__name__,
self.border_mode,
str(self.subsample),
......@@ -210,7 +208,7 @@ class BaseCorrMM(gof.OpenMPOp):
def c_compile_args(self):
compile_args = ldflags(libs=False, flags=True)
compile_args += super(BaseCorrMM, self).c_compile_args()
compile_args += super().c_compile_args()
return compile_args
def c_lib_dirs(self):
......@@ -221,7 +219,7 @@ class BaseCorrMM(gof.OpenMPOp):
def c_headers(self):
headers = ["<stdio.h>"]
headers += super(BaseCorrMM, self).c_headers()
headers += super().c_headers()
return headers
def c_code_cache_version(self):
......@@ -710,7 +708,7 @@ class CorrMM(BaseCorrMM):
def c_code(self, node, nodename, inp, out_, sub):
bottom, weights = inp
(top,) = out_
return super(CorrMM, self).c_code_helper(bottom, weights, top, sub)
return super().c_code_helper(bottom, weights, top, sub)
def grad(self, inp, grads):
bottom, weights = inp
......@@ -835,9 +833,7 @@ class CorrMM_gradWeights(BaseCorrMM):
bottom, top = inp[:2]
height, width = inp[2:] or (None, None)
(weights,) = out_
return super(CorrMM_gradWeights, self).c_code_helper(
bottom, weights, top, sub, height, width
)
return super().c_code_helper(bottom, weights, top, sub, height, width)
def grad(self, inp, grads):
bottom, top = inp[:2]
......@@ -969,9 +965,7 @@ class CorrMM_gradInputs(BaseCorrMM):
weights, top = inp[:2]
height, width = inp[2:] or (None, None)
(bottom,) = out_
return super(CorrMM_gradInputs, self).c_code_helper(
bottom, weights, top, sub, height, width
)
return super().c_code_helper(bottom, weights, top, sub, height, width)
def grad(self, inp, grads):
weights, top = inp[:2]
......
import logging
import os
from six import integer_types
import theano
from theano import gof
from theano.gof.graph import Apply
......@@ -77,8 +75,8 @@ class BaseCorr3dMM(gof.OpenMPOp):
openmp=None,
num_groups=1,
):
super(BaseCorr3dMM, self).__init__(openmp=openmp)
if isinstance(border_mode, integer_types):
super().__init__(openmp=openmp)
if isinstance(border_mode, int):
if border_mode < 0:
raise ValueError(
"invalid border_mode {}, which must be a "
......@@ -159,7 +157,7 @@ class BaseCorr3dMM(gof.OpenMPOp):
padD = property(lambda self: self.pad[2])
def __str__(self):
return "%s{%s, %s, %s, %s}" % (
return "{}{{{}, {}, {}, {}}}".format(
self.__class__.__name__,
self.border_mode,
str(self.subsample),
......@@ -193,7 +191,7 @@ class BaseCorr3dMM(gof.OpenMPOp):
def c_compile_args(self):
compile_args = ldflags(libs=False, flags=True)
compile_args += super(BaseCorr3dMM, self).c_compile_args()
compile_args += super().c_compile_args()
return compile_args
def c_lib_dirs(self):
......@@ -204,7 +202,7 @@ class BaseCorr3dMM(gof.OpenMPOp):
def c_headers(self):
headers = ["<stdio.h>"]
headers += super(BaseCorr3dMM, self).c_headers()
headers += super().c_headers()
return headers
def c_code_cache_version(self):
......@@ -650,7 +648,7 @@ class Corr3dMM(BaseCorr3dMM):
def c_code(self, node, nodename, inp, out_, sub):
bottom, weights = inp
(top,) = out_
return super(Corr3dMM, self).c_code_helper(bottom, weights, top, sub)
return super().c_code_helper(bottom, weights, top, sub)
def grad(self, inp, grads):
bottom, weights = inp
......@@ -764,9 +762,7 @@ class Corr3dMMGradWeights(BaseCorr3dMM):
bottom, top = inp[:2]
height, width, depth = inp[2:] or (None, None, None)
(weights,) = out_
return super(Corr3dMMGradWeights, self).c_code_helper(
bottom, weights, top, sub, height, width, depth
)
return super().c_code_helper(bottom, weights, top, sub, height, width, depth)
def grad(self, inp, grads):
bottom, top = inp[:2]
......@@ -900,9 +896,7 @@ class Corr3dMMGradInputs(BaseCorr3dMM):
weights, top = inp[:2]
height, width, depth = inp[2:] or (None, None, None)
(bottom,) = out_
return super(Corr3dMMGradInputs, self).c_code_helper(
bottom, weights, top, sub, height, width, depth
)
return super().c_code_helper(bottom, weights, top, sub, height, width, depth)
def grad(self, inp, grads):
weights, top = inp[:2]
......
......@@ -2206,7 +2206,7 @@ class Prepend_scalar_constant_to_each_row(Op):
self.val = val
def __str__(self):
return "%s{%s}" % (self.__class__.__name__, self.val)
return "{}{{{}}}".format(self.__class__.__name__, self.val)
def make_node(self, mat):
# check type of input
......
......@@ -91,7 +91,7 @@ class ScalarSigmoid(scalar.UnaryScalarOp):
raise NotImplementedError("only floatingpoint is implemented")
def c_code_cache_version(self):
v = super(ScalarSigmoid, self).c_code_cache_version()
v = super().c_code_cache_version()
if v:
return (2,) + v
else:
......@@ -404,7 +404,7 @@ class ScalarSoftplus(scalar.UnaryScalarOp):
raise NotImplementedError("only floatingpoint is implemented")
def c_code_cache_version(self):
v = super(ScalarSoftplus, self).c_code_cache_version()
v = super().c_code_cache_version()
if v:
return (2,) + v
else:
......
......@@ -13,7 +13,7 @@ from collections import defaultdict
from functools import reduce
import numpy as np
from six import StringIO, integer_types
from six import StringIO
import theano
import theano.scalar.basic as ts
......@@ -515,7 +515,8 @@ class InplaceElemwiseOptimizer(Optimizer):
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print(
"%s%s (%s)" % ((" " * level), self.__class__.__name__, self.op), file=stream
"{}{} ({})".format((" " * level), self.__class__.__name__, self.op),
file=stream,
)
return inplace_elemwise_optimizer
......@@ -996,7 +997,7 @@ class MakeVectorPrinter:
tt.pprint.assign(MakeVector, MakeVectorPrinter())
class ShapeFeature(object):
class ShapeFeature:
"""Graph optimizer for removing all calls to shape().
This optimizer replaces all Shapes and Subtensors of Shapes with
......@@ -1222,12 +1223,10 @@ class ShapeFeature(object):
# don't make the optimizer merge a zillion ones together
# by always returning the same object to represent 1
return self.lscalar_one
if type(s_i) is float and int(s_i) == s_i:
if isinstance(s_i, float) and int(s_i) == s_i:
s_i = int(s_i)
if (
type(s_i) in integer_types
or isinstance(s_i, np.integer)
or (isinstance(s_i, np.ndarray) and s_i.ndim == 0)
if isinstance(s_i, (np.integer, int)) or (
isinstance(s_i, np.ndarray) and s_i.ndim == 0
):
# this shape is a constant
if s_i < 0:
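Replacing `type(s_i) is float` and `type(s_i) in integer_types` with `isinstance` also accepts subclasses, which the exact `type()` comparisons did not. A standalone run:

    s_i = 4.0
    if isinstance(s_i, float) and int(s_i) == s_i:
        s_i = int(s_i)                   # canonicalize whole floats to int
    print(s_i, type(s_i).__name__)       # 4 int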
......@@ -1241,7 +1240,7 @@ class ShapeFeature(object):
# message.
raise AssertionError(msg)
return tt.constant(s_i, dtype="int64")
if type(s_i) in (tuple, list):
if isinstance(s_i, (tuple, list)):
# this dimension is the same as many of the inputs
# which tells us that if one of the inputs is known,
# the others all become known.
......@@ -1384,11 +1383,11 @@ class ShapeFeature(object):
# - Shape_i(i)(other_r);
# - Shape_i(i)(r).
merged_shape.append(r_shape[i])
elif isinstance(r_shape[i], (Constant, integer_types)):
elif isinstance(r_shape[i], (Constant, int)):
# We do this to call less often ancestors and make
# sure we have the simplest shape possible.
merged_shape.append(r_shape[i])
elif isinstance(other_shape[i], (Constant, integer_types)):
elif isinstance(other_shape[i], (Constant, int)):
# We do this to call less often ancestors and make
# sure we have the simplest shape possible.
merged_shape.append(other_shape[i])
......@@ -2225,7 +2224,7 @@ def local_subtensor_remove_broadcastable_index(node):
elif isinstance(elem, slice):
if elem != slice(None):
return
elif isinstance(elem, (integer_types, np.integer)):
elif isinstance(elem, (int, np.integer)):
if elem in [0, -1] and node.inputs[0].broadcastable[dim]:
remove_dim.append(dim)
else:
......@@ -2277,7 +2276,7 @@ def local_subtensor_make_vector(node):
else:
return
if isinstance(idx, (integer_types, np.integer)):
if isinstance(idx, (int, np.integer)):
# We don't need to copy over any stack traces here
return [x.owner.inputs[idx]]
elif isinstance(idx, Variable):
......@@ -3014,7 +3013,7 @@ def local_useless_subtensor(node):
length_pos = shape_of[node.inputs[0]][pos]
if isinstance(idx.stop, (integer_types, np.integer)):
if isinstance(idx.stop, (int, np.integer)):
length_pos_data = sys.maxsize
try:
length_pos_data = get_scalar_constant_value(
......@@ -3281,12 +3280,10 @@ def merge_two_slices(slice1, len1, slice2, len2):
n_val = sl1.stop - 1 - sl2 * sl1.step
if config.warn.subtensor_merge_bug:
warnings.warning(
(
"Your current code is fine, but Theano versions "
"prior to 0.5rc2 might have given an incorrect result. "
"To disable this warning, set the Theano flag "
"warn.subtensor_merge_bug to False."
)
"Your current code is fine, but Theano versions "
"prior to 0.5rc2 might have given an incorrect result. "
"To disable this warning, set the Theano flag "
"warn.subtensor_merge_bug to False."
)
# we need to pick either n_val or p_val and then follow same
# steps as above for covering the index error cases
......@@ -5467,7 +5464,7 @@ class Canonizer(LocalOptimizer):
return getattr(
self,
"name",
"Canonizer(%s, %s, %s)" % (self.main, self.inverse, self.reciprocal),
"Canonizer({}, {}, {})".format(self.main, self.inverse, self.reciprocal),
)
......@@ -6125,17 +6122,15 @@ def local_reduce_join(node):
if len(reduce_axis) != 1 or 0 not in reduce_axis:
if theano.config.warn.reduce_join:
warnings.warning(
(
"Your current code is fine, but Theano versions "
"prior to 0.7 (or this development version Sept 2014) "
"might have given an incorrect result for this code. "
"To disable this warning, set the Theano flag "
"warn.reduce_join to False. The problem was an "
"optimization, that modified the pattern "
'"Reduce{scalar.op}(Join(axis=0, a, b), axis=0)", '
"did not check the reduction axis. So if the "
"reduction axis was not 0, you got a wrong answer."
)
"Your current code is fine, but Theano versions "
"prior to 0.7 (or this development version Sept 2014) "
"might have given an incorrect result for this code. "
"To disable this warning, set the Theano flag "
"warn.reduce_join to False. The problem was an "
"optimization, that modified the pattern "
'"Reduce{scalar.op}(Join(axis=0, a, b), axis=0)", '
"did not check the reduction axis. So if the "
"reduction axis was not 0, you got a wrong answer."
)
return
......@@ -7049,8 +7044,8 @@ def constant_folding(node):
# The op asks not to be constant folded.
return False
storage_map = dict([(i, [i.data]) for i in node.inputs])
compute_map = dict([(i, [True]) for i in node.inputs])
storage_map = {i: [i.data] for i in node.inputs}
compute_map = {i: [True] for i in node.inputs}
for o in node.outputs:
storage_map[o] = [None]
compute_map[o] = [False]
......@@ -7578,7 +7573,7 @@ def local_elemwise_fusion_op(op_class, max_input_fct=lambda node: 32, maker=None
if (
i.owner
and isinstance(i.owner.op, op_class)
and len(set([n for n, idx in i.clients])) == 1
and len({n for n, idx in i.clients}) == 1
and
# Do not merge elemwise that don't have the same
# broadcastable pattern to don't redo duplicate
......@@ -7789,7 +7784,6 @@ class FusionOptimizer(Optimizer):
nb_replacement += 1
except InconsistencyError:
nb_inconsistency_replace += 1
pass
nb_iter += 1
if fgraph.profile:
......
......@@ -6,7 +6,6 @@ from copy import copy
from functools import reduce
import numpy as np
from six import string_types
import theano
from theano import gof, tensor
......@@ -149,7 +148,7 @@ class RandomFunction(gof.Op):
state = dct
fn, outtype, inplace, ndim_added = state
self.fn = fn
if isinstance(fn, string_types):
if isinstance(fn, str):
self.exec_fn = getattr(np.random.RandomState, fn)
else:
self.exec_fn = fn
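With a single text type on Python 3, `six.string_types` collapses to `str`; the `getattr` dispatch on `np.random.RandomState` is unchanged. Standalone:

    import numpy as np

    fn = "uniform"
    exec_fn = getattr(np.random.RandomState, fn) if isinstance(fn, str) else fn
    print(exec_fn.__name__)   # uniform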
......@@ -353,7 +352,7 @@ def _infer_ndim_bcast(ndim, shape, *args):
else:
if s >= 0:
pre_v_shape.append(tensor.as_tensor_variable(s))
bcast.append((s == 1))
bcast.append(s == 1)
elif s == -1:
n_a_i = 0
for a in args:
......@@ -370,11 +369,9 @@ def _infer_ndim_bcast(ndim, shape, *args):
else:
if n_a_i == 0:
raise ValueError(
(
"Auto-shape of -1 must overlap"
"with the shape of one of the broadcastable"
"inputs"
)
"Auto-shape of -1 must overlap"
"with the shape of one of the broadcastable"
"inputs"
)
else:
pre_v_shape.append(tensor.as_tensor_variable(1))
......@@ -393,7 +390,7 @@ def _infer_ndim_bcast(ndim, shape, *args):
# but we need to know ndim
if not args:
raise TypeError(
("_infer_ndim_bcast cannot infer shape without" " either shape or args")
"_infer_ndim_bcast cannot infer shape without" " either shape or args"
)
template = reduce(lambda a, b: a + b, args)
v_shape = template.shape
......@@ -957,7 +954,7 @@ optdb.register(
)
class RandomStreamsBase(object):
class RandomStreamsBase:
def binomial(self, size=None, n=1, p=0.5, ndim=None, dtype="int64", prob=None):
"""
Sample n times with probability of success p for each trial and
......
......@@ -59,7 +59,7 @@ class RandomStreams(raw_random.RandomStreamsBase):
return list(self.state_updates)
def __init__(self, seed=None):
super(RandomStreams, self).__init__()
super().__init__()
# A list of pairs of the form (input_r, output_r). This will be
# over-ridden by the module instance to contain stream generators.
self.state_updates = []
......
import traceback
import numpy as np
from six import integer_types
import theano.tensor.basic
from theano.compile import SharedVariable, shared_constructor
......@@ -95,7 +94,7 @@ def scalar_constructor(
if target != "cpu":
raise TypeError("not for cpu")
if not isinstance(value, (np.number, float, integer_types, complex)):
if not isinstance(value, (np.number, float, int, complex)):
raise TypeError()
try:
dtype = value.dtype
......
......@@ -470,7 +470,7 @@ class Pool(OpenMPOp):
return rval
def __init__(self, ignore_border=False, mode="max", ndim=2, openmp=None):
super(Pool, self).__init__(openmp=openmp)
super().__init__(openmp=openmp)
self.ndim = ndim
self.ignore_border = ignore_border
if mode == "max_deterministic":
......@@ -649,7 +649,7 @@ class Pool(OpenMPOp):
def c_headers(self):
headers = ["<algorithm>"]
headers += super(Pool, self).c_headers()
headers += super().c_headers()
return headers
def c_code(self, node, name, inp, out, sub):
......@@ -1121,7 +1121,7 @@ class PoolGrad(OpenMPOp):
" 'average_inc_pad' and 'average_exc_pad'. Got %s" % mode
)
self.mode = mode
super(PoolGrad, self).__init__(openmp=openmp)
super().__init__(openmp=openmp)
def prepare_node(self, node, storage_map, compute_map, impl):
if len(node.inputs) < 5: # 5 for AveragePoolGrad, 6 for MaxPoolGrad
......@@ -1836,7 +1836,7 @@ class DownsampleFactorMaxGradGrad(OpenMPOp):
self.ndim = ndim
self.ignore_border = ignore_border
self.mode = mode
super(DownsampleFactorMaxGradGrad, self).__init__(openmp=openmp)
super().__init__(openmp=openmp)
assert self.mode == "max"
def make_node(self, x, maxout, gz, ws, stride=None, pad=None):
......@@ -2172,7 +2172,7 @@ class MaxPoolRop(OpenMPOp):
)
def __init__(self, ignore_border=False, mode="max", ndim=2, openmp=None):
super(MaxPoolRop, self).__init__(openmp=openmp)
super().__init__(openmp=openmp)
self.ndim = ndim
self.ignore_border = ignore_border
self.mode = mode
......@@ -2276,7 +2276,7 @@ class MaxPoolRop(OpenMPOp):
def c_headers(self):
headers = ["<algorithm>"]
headers += super(MaxPoolRop, self).c_headers()
headers += super().c_headers()
return headers
def c_code(self, node, name, inp, out, sub):
......
......@@ -33,7 +33,7 @@ class SortOp(Op):
self.order = order
def __str__(self):
return self.__class__.__name__ + "{%s, %s}" % (self.kind, str(self.order))
return self.__class__.__name__ + "{{{}, {}}}".format(self.kind, str(self.order))
def make_node(self, input, axis=-1):
input = theano.tensor.as_tensor_variable(input)
......@@ -168,7 +168,7 @@ class ArgSortOp(Op):
self.order = order
def __str__(self):
return self.__class__.__name__ + "{%s, %s}" % (self.kind, str(self.order))
return self.__class__.__name__ + "{{{}, {}}}".format(self.kind, str(self.order))
def make_node(self, input, axis=-1):
input = theano.tensor.as_tensor_variable(input)
......
......@@ -5,7 +5,6 @@ from itertools import chain, groupby
from textwrap import dedent
import numpy as np
from six import integer_types
import theano
from theano import config, gof
......@@ -36,8 +35,6 @@ class AdvancedIndexingError(TypeError):
"""
pass
def as_index_constant(a):
"""Convert Python literals to Theano constants--when possible--in Subtensor arguments.
......@@ -52,7 +49,7 @@ def as_index_constant(a):
as_index_constant(a.stop),
as_index_constant(a.step),
)
elif isinstance(a, (integer_types, np.integer)):
elif isinstance(a, (int, np.integer)):
return scal.ScalarConstant(scal.int64, a)
elif not isinstance(a, theano.tensor.Variable):
return theano.tensor.as_tensor(a)
......@@ -537,7 +534,7 @@ class Subtensor(Op):
slice_c = None
return slice(slice_a, slice_b, slice_c)
elif isinstance(entry, (integer_types, np.integer)):
elif isinstance(entry, (int, np.integer)):
# Disallow the use of python scalars in idx_list
raise TypeError(
"Python scalar in idx_list." "Please report this error to theano-dev."
......@@ -662,7 +659,7 @@ class Subtensor(Op):
if start is None:
start = 0
if p.stop is None or (
isinstance(p.stop, (integer_types, np.integer, np.ndarray))
isinstance(p.stop, (int, np.integer, np.ndarray))
and p.stop > start
):
broadcastable.append(True)
......@@ -778,7 +775,7 @@ class Subtensor(Op):
indices.append(self.str_from_slice(entry))
else:
indices.append(str(entry))
return "%s{%s}" % (self.__class__.__name__, ", ".join(indices))
return "{}{{{}}}".format(self.__class__.__name__, ", ".join(indices))
@staticmethod
def default_helper_c_code_args():
......@@ -844,7 +841,7 @@ class Subtensor(Op):
return pos[1]
def init_entry(entry, depth=0):
if isinstance(entry, (np.integer, integer_types)):
if isinstance(entry, (np.integer, int)):
init_cmds.append("subtensor_spec[%i] = %i;" % (spec_pos(), entry))
inc_spec_pos(1)
if depth == 0:
......@@ -1144,7 +1141,7 @@ class SubtensorPrinter:
pstate.precedence = -1000
for entry in idxs:
if isinstance(entry, integer_types):
if isinstance(entry, int):
sidxs.append(str(entry))
elif isinstance(entry, scal.Scalar):
sidxs.append(pstate.pprinter.process(inputs.pop()))
......@@ -1164,7 +1161,7 @@ class SubtensorPrinter:
else:
msg3 = ":%s" % entry.step
sidxs.append("%s:%s%s" % (msg1, msg2, msg3))
sidxs.append("{}:{}{}".format(msg1, msg2, msg3))
finally:
pstate.precedence = old_precedence
......@@ -1173,7 +1170,7 @@ class SubtensorPrinter:
sub = pstate.pprinter.process(input, pstate)
finally:
pstate.precedence = old_precedence
return "%s[%s]" % (sub, ", ".join(sidxs))
return "{}[{}]".format(sub, ", ".join(sidxs))
else:
raise TypeError("Can only print Subtensor.")
......@@ -1464,7 +1461,7 @@ class IncSubtensor(Op):
msg += "Inc"
else:
msg += "Set"
return "%s{%s;%s}" % (self.__class__.__name__, msg, ", ".join(indices))
return "{}{{{};{}}}".format(self.__class__.__name__, msg, ", ".join(indices))
def make_node(self, x, y, *inputs):
"""
......@@ -2430,7 +2427,7 @@ class AdvancedIncSubtensor(Op):
raise NotImplementedError("In place computation is not" " implemented")
def __str__(self):
return "%s{%s, %s}" % (
return "{}{{{}, {}}}".format(
self.__class__.__name__,
"inplace=" + str(self.inplace),
" set_instead_of_inc=" + str(self.set_instead_of_inc),
......
......@@ -283,7 +283,9 @@ class TensorType(Type):
}[self.dtype]
except KeyError:
raise TypeError(
"Unsupported dtype for %s: %s" % (self.__class__.__name__, self.dtype)
"Unsupported dtype for {}: {}".format(
self.__class__.__name__, self.dtype
)
)
def to_scalar_type(self):
......@@ -391,7 +393,7 @@ class TensorType(Type):
bcast = str(b)
else:
bcast = "%iD" % len(b)
return "TensorType(%s, %s)" % (str(self.dtype), bcast)
return "TensorType({}, {})".format(self.dtype, bcast)
def __repr__(self):
return str(self)
......
......@@ -94,7 +94,7 @@ class SliceConstant(Constant):
return (SliceConstant, self.data.start, self.data.stop, self.data.step)
def __str__(self):
return "%s{%s, %s, %s}" % (
return "{}{{{}, {}, {}}}".format(
self.__class__.__name__,
self.data.start,
self.data.stop,
......
......@@ -4,7 +4,6 @@ import warnings
from collections.abc import Iterable
import numpy as np
from six import integer_types
import theano
from theano import config
......@@ -15,7 +14,7 @@ from theano.tensor.type import TensorType
from theano.tensor.utils import hash_from_ndarray
class _tensor_py_operators(object):
class _tensor_py_operators:
def __abs__(self):
return theano.tensor.basic.abs_(self)
......@@ -297,7 +296,7 @@ class _tensor_py_operators(object):
"""
if ndim is not None:
if not isinstance(ndim, integer_types):
if not isinstance(ndim, int):
raise ValueError(
"Expected ndim to be an integer, is " + str(type(ndim))
)
......@@ -640,11 +639,9 @@ class _tensor_py_operators(object):
except TypeError:
# This prevents accidental iteration via sum(self)
raise TypeError(
(
"TensorType does not support iteration. "
"Maybe you are using builtins.sum instead of "
"theano.tensor.sum? (Maybe .max?)"
)
"TensorType does not support iteration. "
"Maybe you are using builtins.sum instead of "
"theano.tensor.sum? (Maybe .max?)"
)
ndim = property(lambda self: self.type.ndim)
......@@ -839,7 +836,7 @@ class TensorVariable(_tensor_py_operators, Variable):
"""
def __init__(self, type, owner=None, index=None, name=None):
super(TensorVariable, self).__init__(type, owner=owner, index=index, name=name)
super().__init__(type, owner=owner, index=index, name=name)
if config.warn_float64 != "ignore" and type.dtype == "float64":
msg = (
"You are creating a TensorVariable "
......@@ -997,7 +994,7 @@ class TensorConstant(_tensor_py_operators, Constant):
def __str__(self):
if self.tag.unique_value is not None:
name = "%s of %s" % (str(self.data.shape), str(self.tag.unique_value))
name = "{} of {}".format(str(self.data.shape), str(self.tag.unique_value))
else:
name = "%s" % self.data
if len(name) > 20:
......