提交 27bd9aaf，作者：Ben Mares

Fix UP031: Use format specifiers instead of percent format

上级 75b57060
......@@ -75,9 +75,9 @@ def main():
if items:
_logger.warning(
"There remain elements in the cache dir that you may "
"need to erase manually. The cache dir is:\n %s\n"
f"need to erase manually. The cache dir is:\n {config.compiledir}\n"
'You can also call "pytensor-cache purge" to '
"remove everything from that directory." % config.compiledir
"remove everything from that directory."
)
_logger.debug(f"Remaining elements ({len(items)}): {', '.join(items)}")
elif sys.argv[1] == "list":
......
......@@ -105,7 +105,7 @@ class PdbBreakpoint(Op):
except Exception:
raise ValueError(
"Some of the inputs to the PdbBreakpoint op "
"'%s' could not be casted to NumPy arrays" % self.name
f"'{self.name}' could not be casted to NumPy arrays"
)
print("\n")
......
......@@ -709,7 +709,7 @@ class OpFromGraph(Op, HasInnerGraph):
if not isinstance(roverrides_l, list):
raise TypeError(
"Rop overriding function should return a list, "
'got "%s"' % type(roverrides_l)
f'got "{type(roverrides_l)}"'
)
all_rops_l, all_rops_ov_l = zip(
*[
......
......@@ -252,7 +252,7 @@ class FromFunctionOp(Op):
return hash(type(self)) ^ hash(self.__fn)
def __str__(self):
return "FromFunctionOp{%s}" % self.__fn.__name__
return f"FromFunctionOp{{{self.__fn.__name__}}}"
def perform(self, node, inputs, outputs):
outs = self.__fn(*inputs)
......
......@@ -262,8 +262,13 @@ class Params(dict):
self.__dict__.update(__params_type__=params_type, __signatures__=None)
def __repr__(self):
return "Params(%s)" % ", ".join(
[(f"{k}:{type(self[k]).__name__}:{self[k]}") for k in sorted(self.keys())]
return "Params({})".format(
", ".join(
[
(f"{k}:{type(self[k]).__name__}:{self[k]}")
for k in sorted(self.keys())
]
)
)
def __getattr__(self, key):
......@@ -346,13 +351,11 @@ class ParamsType(CType):
for attribute_name in kwargs:
if re.match("^[A-Za-z_][A-Za-z0-9_]*$", attribute_name) is None:
raise AttributeError(
'ParamsType: attribute "%s" should be a valid identifier.'
% attribute_name
f'ParamsType: attribute "{attribute_name}" should be a valid identifier.'
)
if attribute_name in c_cpp_keywords:
raise SyntaxError(
'ParamsType: "%s" is a potential C/C++ keyword and should not be used as attribute name.'
% attribute_name
f'ParamsType: "{attribute_name}" is a potential C/C++ keyword and should not be used as attribute name.'
)
type_instance = kwargs[attribute_name]
type_name = type_instance.__class__.__name__
......@@ -424,8 +427,10 @@ class ParamsType(CType):
return super().__getattr__(self, key)
def __repr__(self):
return "ParamsType<%s>" % ", ".join(
[(f"{self.fields[i]}:{self.types[i]}") for i in range(self.length)]
return "ParamsType<{}>".format(
", ".join(
[(f"{self.fields[i]}:{self.types[i]}") for i in range(self.length)]
)
)
def __eq__(self, other):
......@@ -733,18 +738,18 @@ class ParamsType(CType):
struct_cleanup = "\n".join(c_cleanup_list)
struct_extract = "\n\n".join(c_extract_list)
struct_extract_method = """
void extract(PyObject* object, int field_pos) {
switch(field_pos) {
void extract(PyObject* object, int field_pos) {{
switch(field_pos) {{
// Extraction cases.
%s
{}
// Default case.
default:
PyErr_Format(PyExc_TypeError, "ParamsType: no extraction defined for a field %%d.", field_pos);
PyErr_Format(PyExc_TypeError, "ParamsType: no extraction defined for a field %d.", field_pos);
this->setErrorOccurred();
break;
}
}
""" % (
}}
}}
""".format(
"\n".join(
[
("case %d: extract_%s(object); break;" % (i, self.fields[i]))
......@@ -866,7 +871,7 @@ class ParamsType(CType):
struct_name=self.name,
length=self.length,
fail=sub["fail"],
fields_list='"%s"' % '", "'.join(self.fields),
fields_list='"{}"'.format('", "'.join(self.fields)),
)
)
......
......@@ -355,8 +355,13 @@ def raise_with_op(
+ f"\nInputs values: {scalar_values}"
)
if verbosity == "high":
detailed_err_msg += "\nInputs type_num: %s" % str(
[getattr(getattr(i[0], "dtype", ""), "num", "") for i in thunk.inputs]
detailed_err_msg += "\nInputs type_num: {}".format(
str(
[
getattr(getattr(i[0], "dtype", ""), "num", "")
for i in thunk.inputs
]
)
)
detailed_err_msg += f"\nOutputs clients: {clients}\n"
......
......@@ -475,7 +475,7 @@ class ScalarType(CType, HasDataType, HasShape):
sub,
name=name,
dtype=specs[1],
pyarr_type="Py%sArrType_Type" % specs[2],
pyarr_type=f"Py{specs[2]}ArrType_Type",
)
)
else:
......
......@@ -180,9 +180,9 @@ class Gemv(Op):
def __str__(self):
if self.inplace:
return "%s{inplace}" % self.__class__.__name__
return f"{self.__class__.__name__}{{inplace}}"
else:
return "%s{no_inplace}" % self.__class__.__name__
return f"{self.__class__.__name__}{{no_inplace}}"
def make_node(self, y, alpha, A, x, beta):
y = ptb.as_tensor_variable(y)
......@@ -279,9 +279,9 @@ class Ger(Op):
def __str__(self):
if self.destructive:
return "%s{destructive}" % self.__class__.__name__
return f"{self.__class__.__name__}{{destructive}}"
else:
return "%s{non-destructive}" % self.__class__.__name__
return f"{self.__class__.__name__}{{non-destructive}}"
def make_node(self, A, alpha, x, y):
A = ptb.as_tensor_variable(A)
......@@ -1811,9 +1811,10 @@ class BatchedDot(COp):
f"{strides}[{i}] > 0 && {strides}[{i}] % type_size == 0"
for i in range(1, ndim)
),
"(%s)"
% " || ".join(
f"{strides}[{i}] == type_size" for i in range(1, ndim)
"({})".format(
" || ".join(
f"{strides}[{i}] == type_size" for i in range(1, ndim)
)
),
]
)
......
......@@ -1098,14 +1098,14 @@ class Elemwise(OpenMPOp):
all_broadcastable = all(s == 1 for s in var.type.shape)
cond1 = " && ".join(
[
"PyArray_ISCONTIGUOUS(%s)" % arr
f"PyArray_ISCONTIGUOUS({arr})"
for arr, var in z
if not all_broadcastable
]
)
cond2 = " && ".join(
[
"PyArray_ISFORTRAN(%s)" % arr
f"PyArray_ISFORTRAN({arr})"
for arr, var in z
if not all_broadcastable
]
......
......@@ -652,8 +652,8 @@ class Repeat(Op):
if repeats.dtype in numpy_unsupported_dtypes:
raise TypeError(
(
"dtypes %s are not supported by numpy.repeat "
"for the 'repeats' parameter, " % str(numpy_unsupported_dtypes)
f"dtypes {numpy_unsupported_dtypes!s} are not supported by numpy.repeat "
"for the 'repeats' parameter, "
),
repeats.dtype,
)
......@@ -882,8 +882,8 @@ class FillDiagonal(Op):
val = ptb.as_tensor_variable(val)
if a.ndim < 2:
raise TypeError(
"%s: first parameter must have at least"
" two dimensions" % self.__class__.__name__
f"{self.__class__.__name__}: first parameter must have at least"
" two dimensions"
)
elif val.ndim != 0:
raise TypeError(
......@@ -892,8 +892,8 @@ class FillDiagonal(Op):
val = ptb.cast(val, dtype=upcast(a.dtype, val.dtype))
if val.dtype != a.dtype:
raise TypeError(
"%s: type of second parameter must be the same as"
" the first's" % self.__class__.__name__
f"{self.__class__.__name__}: type of second parameter must be the same as"
" the first's"
)
return Apply(self, [a, val], [a.type()])
......@@ -926,8 +926,8 @@ class FillDiagonal(Op):
return [None, None]
elif a.ndim > 2:
raise NotImplementedError(
"%s: gradient is currently implemented"
" for matrices only" % self.__class__.__name__
f"{self.__class__.__name__}: gradient is currently implemented"
" for matrices only"
)
wr_a = fill_diagonal(grad, 0) # valid for any number of dimensions
# diag is only valid for matrices
......@@ -984,8 +984,8 @@ class FillDiagonalOffset(Op):
offset = ptb.as_tensor_variable(offset)
if a.ndim != 2:
raise TypeError(
"%s: first parameter must have exactly"
" two dimensions" % self.__class__.__name__
f"{self.__class__.__name__}: first parameter must have exactly"
" two dimensions"
)
elif val.ndim != 0:
raise TypeError(
......@@ -998,8 +998,8 @@ class FillDiagonalOffset(Op):
val = ptb.cast(val, dtype=upcast(a.dtype, val.dtype))
if val.dtype != a.dtype:
raise TypeError(
"%s: type of second parameter must be the same"
" as the first's" % self.__class__.__name__
f"{self.__class__.__name__}: type of second parameter must be the same"
" as the first's"
)
elif offset.dtype not in integer_dtypes:
raise TypeError(
......
......@@ -20,8 +20,7 @@ class RFFTOp(Op):
a = as_tensor_variable(a)
if a.ndim < 2:
raise TypeError(
"%s: input must have dimension > 2, with first dimension batches"
% self.__class__.__name__
f"{self.__class__.__name__}: input must have dimension > 2, with first dimension batches"
)
if s is None:
......@@ -31,8 +30,8 @@ class RFFTOp(Op):
s = as_tensor_variable(s)
if s.dtype not in integer_dtypes:
raise TypeError(
"%s: length of the transformed axis must be"
" of type integer" % self.__class__.__name__
f"{self.__class__.__name__}: length of the transformed axis must be"
" of type integer"
)
return Apply(self, [a, s], [self.output_type(a)()])
......@@ -92,8 +91,8 @@ class IRFFTOp(Op):
s = as_tensor_variable(s)
if s.dtype not in integer_dtypes:
raise TypeError(
"%s: length of the transformed axis must be"
" of type integer" % self.__class__.__name__
f"{self.__class__.__name__}: length of the transformed axis must be"
" of type integer"
)
return Apply(self, [a, s], [self.output_type(a)()])
......
......@@ -28,7 +28,7 @@ class LoadFromDisk(Op):
if mmap_mode not in (None, "c"):
raise ValueError(
"The only supported values for mmap_mode "
"are None and 'c', got %s" % mmap_mode
f"are None and 'c', got {mmap_mode}"
)
self.mmap_mode = mmap_mode
......
......@@ -1540,7 +1540,7 @@ class Mean(FixedOpCAReduce):
def __str__(self):
if self.axis is not None:
return "Mean{%s}" % (", ".join(str(x) for x in self.axis))
return "Mean{{{}}}".format(", ".join(str(x) for x in self.axis))
else:
return "Mean"
......
......@@ -2174,7 +2174,7 @@ class AdvancedIncSubtensor1(COp):
else:
msg += ",inc"
return self.__class__.__name__ + "{%s}" % msg
return self.__class__.__name__ + f"{{{msg}}}"
def make_node(self, x, y, ilist):
x_ = as_tensor_variable(x)
......
差异被折叠。
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论