提交 27bd9aaf authored 作者: Ben Mares

Fix UP031: Use format specifiers instead of percent format

上级 75b57060
...@@ -75,9 +75,9 @@ def main(): ...@@ -75,9 +75,9 @@ def main():
if items: if items:
_logger.warning( _logger.warning(
"There remain elements in the cache dir that you may " "There remain elements in the cache dir that you may "
"need to erase manually. The cache dir is:\n %s\n" f"need to erase manually. The cache dir is:\n {config.compiledir}\n"
'You can also call "pytensor-cache purge" to ' 'You can also call "pytensor-cache purge" to '
"remove everything from that directory." % config.compiledir "remove everything from that directory."
) )
_logger.debug(f"Remaining elements ({len(items)}): {', '.join(items)}") _logger.debug(f"Remaining elements ({len(items)}): {', '.join(items)}")
elif sys.argv[1] == "list": elif sys.argv[1] == "list":
......
...@@ -105,7 +105,7 @@ class PdbBreakpoint(Op): ...@@ -105,7 +105,7 @@ class PdbBreakpoint(Op):
except Exception: except Exception:
raise ValueError( raise ValueError(
"Some of the inputs to the PdbBreakpoint op " "Some of the inputs to the PdbBreakpoint op "
"'%s' could not be casted to NumPy arrays" % self.name f"'{self.name}' could not be casted to NumPy arrays"
) )
print("\n") print("\n")
......
...@@ -709,7 +709,7 @@ class OpFromGraph(Op, HasInnerGraph): ...@@ -709,7 +709,7 @@ class OpFromGraph(Op, HasInnerGraph):
if not isinstance(roverrides_l, list): if not isinstance(roverrides_l, list):
raise TypeError( raise TypeError(
"Rop overriding function should return a list, " "Rop overriding function should return a list, "
'got "%s"' % type(roverrides_l) f'got "{type(roverrides_l)}"'
) )
all_rops_l, all_rops_ov_l = zip( all_rops_l, all_rops_ov_l = zip(
*[ *[
......
...@@ -252,7 +252,7 @@ class FromFunctionOp(Op): ...@@ -252,7 +252,7 @@ class FromFunctionOp(Op):
return hash(type(self)) ^ hash(self.__fn) return hash(type(self)) ^ hash(self.__fn)
def __str__(self): def __str__(self):
return "FromFunctionOp{%s}" % self.__fn.__name__ return f"FromFunctionOp{{{self.__fn.__name__}}}"
def perform(self, node, inputs, outputs): def perform(self, node, inputs, outputs):
outs = self.__fn(*inputs) outs = self.__fn(*inputs)
......
...@@ -262,8 +262,13 @@ class Params(dict): ...@@ -262,8 +262,13 @@ class Params(dict):
self.__dict__.update(__params_type__=params_type, __signatures__=None) self.__dict__.update(__params_type__=params_type, __signatures__=None)
def __repr__(self): def __repr__(self):
return "Params(%s)" % ", ".join( return "Params({})".format(
[(f"{k}:{type(self[k]).__name__}:{self[k]}") for k in sorted(self.keys())] ", ".join(
[
(f"{k}:{type(self[k]).__name__}:{self[k]}")
for k in sorted(self.keys())
]
)
) )
def __getattr__(self, key): def __getattr__(self, key):
...@@ -346,13 +351,11 @@ class ParamsType(CType): ...@@ -346,13 +351,11 @@ class ParamsType(CType):
for attribute_name in kwargs: for attribute_name in kwargs:
if re.match("^[A-Za-z_][A-Za-z0-9_]*$", attribute_name) is None: if re.match("^[A-Za-z_][A-Za-z0-9_]*$", attribute_name) is None:
raise AttributeError( raise AttributeError(
'ParamsType: attribute "%s" should be a valid identifier.' f'ParamsType: attribute "{attribute_name}" should be a valid identifier.'
% attribute_name
) )
if attribute_name in c_cpp_keywords: if attribute_name in c_cpp_keywords:
raise SyntaxError( raise SyntaxError(
'ParamsType: "%s" is a potential C/C++ keyword and should not be used as attribute name.' f'ParamsType: "{attribute_name}" is a potential C/C++ keyword and should not be used as attribute name.'
% attribute_name
) )
type_instance = kwargs[attribute_name] type_instance = kwargs[attribute_name]
type_name = type_instance.__class__.__name__ type_name = type_instance.__class__.__name__
...@@ -424,8 +427,10 @@ class ParamsType(CType): ...@@ -424,8 +427,10 @@ class ParamsType(CType):
return super().__getattr__(self, key) return super().__getattr__(self, key)
def __repr__(self): def __repr__(self):
return "ParamsType<%s>" % ", ".join( return "ParamsType<{}>".format(
[(f"{self.fields[i]}:{self.types[i]}") for i in range(self.length)] ", ".join(
[(f"{self.fields[i]}:{self.types[i]}") for i in range(self.length)]
)
) )
def __eq__(self, other): def __eq__(self, other):
...@@ -733,18 +738,18 @@ class ParamsType(CType): ...@@ -733,18 +738,18 @@ class ParamsType(CType):
struct_cleanup = "\n".join(c_cleanup_list) struct_cleanup = "\n".join(c_cleanup_list)
struct_extract = "\n\n".join(c_extract_list) struct_extract = "\n\n".join(c_extract_list)
struct_extract_method = """ struct_extract_method = """
void extract(PyObject* object, int field_pos) { void extract(PyObject* object, int field_pos) {{
switch(field_pos) { switch(field_pos) {{
// Extraction cases. // Extraction cases.
%s {}
// Default case. // Default case.
default: default:
PyErr_Format(PyExc_TypeError, "ParamsType: no extraction defined for a field %%d.", field_pos); PyErr_Format(PyExc_TypeError, "ParamsType: no extraction defined for a field %d.", field_pos);
this->setErrorOccurred(); this->setErrorOccurred();
break; break;
} }}
} }}
""" % ( """.format(
"\n".join( "\n".join(
[ [
("case %d: extract_%s(object); break;" % (i, self.fields[i])) ("case %d: extract_%s(object); break;" % (i, self.fields[i]))
...@@ -866,7 +871,7 @@ class ParamsType(CType): ...@@ -866,7 +871,7 @@ class ParamsType(CType):
struct_name=self.name, struct_name=self.name,
length=self.length, length=self.length,
fail=sub["fail"], fail=sub["fail"],
fields_list='"%s"' % '", "'.join(self.fields), fields_list='"{}"'.format('", "'.join(self.fields)),
) )
) )
......
...@@ -355,8 +355,13 @@ def raise_with_op( ...@@ -355,8 +355,13 @@ def raise_with_op(
+ f"\nInputs values: {scalar_values}" + f"\nInputs values: {scalar_values}"
) )
if verbosity == "high": if verbosity == "high":
detailed_err_msg += "\nInputs type_num: %s" % str( detailed_err_msg += "\nInputs type_num: {}".format(
[getattr(getattr(i[0], "dtype", ""), "num", "") for i in thunk.inputs] str(
[
getattr(getattr(i[0], "dtype", ""), "num", "")
for i in thunk.inputs
]
)
) )
detailed_err_msg += f"\nOutputs clients: {clients}\n" detailed_err_msg += f"\nOutputs clients: {clients}\n"
......
...@@ -475,7 +475,7 @@ class ScalarType(CType, HasDataType, HasShape): ...@@ -475,7 +475,7 @@ class ScalarType(CType, HasDataType, HasShape):
sub, sub,
name=name, name=name,
dtype=specs[1], dtype=specs[1],
pyarr_type="Py%sArrType_Type" % specs[2], pyarr_type=f"Py{specs[2]}ArrType_Type",
) )
) )
else: else:
......
...@@ -180,9 +180,9 @@ class Gemv(Op): ...@@ -180,9 +180,9 @@ class Gemv(Op):
def __str__(self): def __str__(self):
if self.inplace: if self.inplace:
return "%s{inplace}" % self.__class__.__name__ return f"{self.__class__.__name__}{{inplace}}"
else: else:
return "%s{no_inplace}" % self.__class__.__name__ return f"{self.__class__.__name__}{{no_inplace}}"
def make_node(self, y, alpha, A, x, beta): def make_node(self, y, alpha, A, x, beta):
y = ptb.as_tensor_variable(y) y = ptb.as_tensor_variable(y)
...@@ -279,9 +279,9 @@ class Ger(Op): ...@@ -279,9 +279,9 @@ class Ger(Op):
def __str__(self): def __str__(self):
if self.destructive: if self.destructive:
return "%s{destructive}" % self.__class__.__name__ return f"{self.__class__.__name__}{{destructive}}"
else: else:
return "%s{non-destructive}" % self.__class__.__name__ return f"{self.__class__.__name__}{{non-destructive}}"
def make_node(self, A, alpha, x, y): def make_node(self, A, alpha, x, y):
A = ptb.as_tensor_variable(A) A = ptb.as_tensor_variable(A)
...@@ -1811,9 +1811,10 @@ class BatchedDot(COp): ...@@ -1811,9 +1811,10 @@ class BatchedDot(COp):
f"{strides}[{i}] > 0 && {strides}[{i}] % type_size == 0" f"{strides}[{i}] > 0 && {strides}[{i}] % type_size == 0"
for i in range(1, ndim) for i in range(1, ndim)
), ),
"(%s)" "({})".format(
% " || ".join( " || ".join(
f"{strides}[{i}] == type_size" for i in range(1, ndim) f"{strides}[{i}] == type_size" for i in range(1, ndim)
)
), ),
] ]
) )
......
...@@ -1098,14 +1098,14 @@ class Elemwise(OpenMPOp): ...@@ -1098,14 +1098,14 @@ class Elemwise(OpenMPOp):
all_broadcastable = all(s == 1 for s in var.type.shape) all_broadcastable = all(s == 1 for s in var.type.shape)
cond1 = " && ".join( cond1 = " && ".join(
[ [
"PyArray_ISCONTIGUOUS(%s)" % arr f"PyArray_ISCONTIGUOUS({arr})"
for arr, var in z for arr, var in z
if not all_broadcastable if not all_broadcastable
] ]
) )
cond2 = " && ".join( cond2 = " && ".join(
[ [
"PyArray_ISFORTRAN(%s)" % arr f"PyArray_ISFORTRAN({arr})"
for arr, var in z for arr, var in z
if not all_broadcastable if not all_broadcastable
] ]
......
...@@ -652,8 +652,8 @@ class Repeat(Op): ...@@ -652,8 +652,8 @@ class Repeat(Op):
if repeats.dtype in numpy_unsupported_dtypes: if repeats.dtype in numpy_unsupported_dtypes:
raise TypeError( raise TypeError(
( (
"dtypes %s are not supported by numpy.repeat " f"dtypes {numpy_unsupported_dtypes!s} are not supported by numpy.repeat "
"for the 'repeats' parameter, " % str(numpy_unsupported_dtypes) "for the 'repeats' parameter, "
), ),
repeats.dtype, repeats.dtype,
) )
...@@ -882,8 +882,8 @@ class FillDiagonal(Op): ...@@ -882,8 +882,8 @@ class FillDiagonal(Op):
val = ptb.as_tensor_variable(val) val = ptb.as_tensor_variable(val)
if a.ndim < 2: if a.ndim < 2:
raise TypeError( raise TypeError(
"%s: first parameter must have at least" f"{self.__class__.__name__}: first parameter must have at least"
" two dimensions" % self.__class__.__name__ " two dimensions"
) )
elif val.ndim != 0: elif val.ndim != 0:
raise TypeError( raise TypeError(
...@@ -892,8 +892,8 @@ class FillDiagonal(Op): ...@@ -892,8 +892,8 @@ class FillDiagonal(Op):
val = ptb.cast(val, dtype=upcast(a.dtype, val.dtype)) val = ptb.cast(val, dtype=upcast(a.dtype, val.dtype))
if val.dtype != a.dtype: if val.dtype != a.dtype:
raise TypeError( raise TypeError(
"%s: type of second parameter must be the same as" f"{self.__class__.__name__}: type of second parameter must be the same as"
" the first's" % self.__class__.__name__ " the first's"
) )
return Apply(self, [a, val], [a.type()]) return Apply(self, [a, val], [a.type()])
...@@ -926,8 +926,8 @@ class FillDiagonal(Op): ...@@ -926,8 +926,8 @@ class FillDiagonal(Op):
return [None, None] return [None, None]
elif a.ndim > 2: elif a.ndim > 2:
raise NotImplementedError( raise NotImplementedError(
"%s: gradient is currently implemented" f"{self.__class__.__name__}: gradient is currently implemented"
" for matrices only" % self.__class__.__name__ " for matrices only"
) )
wr_a = fill_diagonal(grad, 0) # valid for any number of dimensions wr_a = fill_diagonal(grad, 0) # valid for any number of dimensions
# diag is only valid for matrices # diag is only valid for matrices
...@@ -984,8 +984,8 @@ class FillDiagonalOffset(Op): ...@@ -984,8 +984,8 @@ class FillDiagonalOffset(Op):
offset = ptb.as_tensor_variable(offset) offset = ptb.as_tensor_variable(offset)
if a.ndim != 2: if a.ndim != 2:
raise TypeError( raise TypeError(
"%s: first parameter must have exactly" f"{self.__class__.__name__}: first parameter must have exactly"
" two dimensions" % self.__class__.__name__ " two dimensions"
) )
elif val.ndim != 0: elif val.ndim != 0:
raise TypeError( raise TypeError(
...@@ -998,8 +998,8 @@ class FillDiagonalOffset(Op): ...@@ -998,8 +998,8 @@ class FillDiagonalOffset(Op):
val = ptb.cast(val, dtype=upcast(a.dtype, val.dtype)) val = ptb.cast(val, dtype=upcast(a.dtype, val.dtype))
if val.dtype != a.dtype: if val.dtype != a.dtype:
raise TypeError( raise TypeError(
"%s: type of second parameter must be the same" f"{self.__class__.__name__}: type of second parameter must be the same"
" as the first's" % self.__class__.__name__ " as the first's"
) )
elif offset.dtype not in integer_dtypes: elif offset.dtype not in integer_dtypes:
raise TypeError( raise TypeError(
......
...@@ -20,8 +20,7 @@ class RFFTOp(Op): ...@@ -20,8 +20,7 @@ class RFFTOp(Op):
a = as_tensor_variable(a) a = as_tensor_variable(a)
if a.ndim < 2: if a.ndim < 2:
raise TypeError( raise TypeError(
"%s: input must have dimension > 2, with first dimension batches" f"{self.__class__.__name__}: input must have dimension > 2, with first dimension batches"
% self.__class__.__name__
) )
if s is None: if s is None:
...@@ -31,8 +30,8 @@ class RFFTOp(Op): ...@@ -31,8 +30,8 @@ class RFFTOp(Op):
s = as_tensor_variable(s) s = as_tensor_variable(s)
if s.dtype not in integer_dtypes: if s.dtype not in integer_dtypes:
raise TypeError( raise TypeError(
"%s: length of the transformed axis must be" f"{self.__class__.__name__}: length of the transformed axis must be"
" of type integer" % self.__class__.__name__ " of type integer"
) )
return Apply(self, [a, s], [self.output_type(a)()]) return Apply(self, [a, s], [self.output_type(a)()])
...@@ -92,8 +91,8 @@ class IRFFTOp(Op): ...@@ -92,8 +91,8 @@ class IRFFTOp(Op):
s = as_tensor_variable(s) s = as_tensor_variable(s)
if s.dtype not in integer_dtypes: if s.dtype not in integer_dtypes:
raise TypeError( raise TypeError(
"%s: length of the transformed axis must be" f"{self.__class__.__name__}: length of the transformed axis must be"
" of type integer" % self.__class__.__name__ " of type integer"
) )
return Apply(self, [a, s], [self.output_type(a)()]) return Apply(self, [a, s], [self.output_type(a)()])
......
...@@ -28,7 +28,7 @@ class LoadFromDisk(Op): ...@@ -28,7 +28,7 @@ class LoadFromDisk(Op):
if mmap_mode not in (None, "c"): if mmap_mode not in (None, "c"):
raise ValueError( raise ValueError(
"The only supported values for mmap_mode " "The only supported values for mmap_mode "
"are None and 'c', got %s" % mmap_mode f"are None and 'c', got {mmap_mode}"
) )
self.mmap_mode = mmap_mode self.mmap_mode = mmap_mode
......
...@@ -1540,7 +1540,7 @@ class Mean(FixedOpCAReduce): ...@@ -1540,7 +1540,7 @@ class Mean(FixedOpCAReduce):
def __str__(self): def __str__(self):
if self.axis is not None: if self.axis is not None:
return "Mean{%s}" % (", ".join(str(x) for x in self.axis)) return "Mean{{{}}}".format(", ".join(str(x) for x in self.axis))
else: else:
return "Mean" return "Mean"
......
...@@ -2174,7 +2174,7 @@ class AdvancedIncSubtensor1(COp): ...@@ -2174,7 +2174,7 @@ class AdvancedIncSubtensor1(COp):
else: else:
msg += ",inc" msg += ",inc"
return self.__class__.__name__ + "{%s}" % msg return self.__class__.__name__ + f"{{{msg}}}"
def make_node(self, x, y, ilist): def make_node(self, x, y, ilist):
x_ = as_tensor_variable(x) x_ = as_tensor_variable(x)
......
差异被折叠。
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论