提交 93825dcc authored 作者: Virgile Andreani's avatar Virgile Andreani 提交者: Virgile Andreani

Replace % formatting with f-strings

上级 55b2f4fa
......@@ -31,7 +31,7 @@ for letter in letters:
if len(shape[1]) < 6 or len(set(shape[1])) > 1:
broadcastable_str = str(shape[1])
else:
broadcastable_str = '(%s,) * %d' % (str(shape[1][0]), len(shape[1]))
broadcastable_str = f'({shape[1][0]},) * {len(shape[1])}'
print('%s%-10s %-10s %-4s %-15s %-20s' %(
letter[0], shape[0], letter[1], len(shape[1]), s, broadcastable_str
))
......
......@@ -110,6 +110,6 @@ def detect_nan(fgraph, i, node, fn):
):
print("*** NaN detected ***")
debugprint(node)
print("Inputs : %s" % [input[0] for input in fn.inputs])
print("Outputs: %s" % [output[0] for output in fn.outputs])
print(f"Inputs : {[input[0] for input in fn.inputs]}")
print(f"Outputs: {[output[0] for output in fn.outputs]}")
break
......@@ -753,14 +753,11 @@ class ProfileStats:
)
# Same as before, this I've sacrificed some information making
# the output more readable
percent = sum(f for f, t, a, nd_id, nb_call in atimes[N:])
duration = sum(t for f, t, a, nd_id, nb_call in atimes[N:])
print(
" ... (remaining %i Apply instances account for "
"%.2f%%(%.2fs) of the runtime)"
% (
max(0, len(atimes) - N),
sum(f for f, t, a, nd_id, nb_call in atimes[N:]),
sum(t for f, t, a, nd_id, nb_call in atimes[N:]),
),
f" ... (remaining {max(0, len(atimes) - N)} Apply instances account for "
f"{percent:.2f}%%({duration:.2f}s) of the runtime)",
file=file,
)
print("", file=file)
......
......@@ -87,9 +87,8 @@ class CodeBlock:
# for that...)
# we need the label even if cleanup is empty because the
# behavior block jumps there on failure
self.cleanup = (
"__label_%(id)i:\n" % sub + cleanup + "\ndouble __DUMMY_%(id)i;\n" % sub
) # % sub
id = sub["id"]
self.cleanup = f"__label_{id}:\n{cleanup}\ndouble __DUMMY_{id};\n"
def failure_code(sub, use_goto=True):
......@@ -114,14 +113,16 @@ def failure_code(sub, use_goto=True):
goto_statement = "goto __label_%(id)i;" % sub
else:
goto_statement = ""
return """{
%(failure_var)s = %(id)i;
if (!PyErr_Occurred()) {
id = sub["id"]
failure_var = sub["failure_var"]
return f"""{{
{failure_var} = {id};
if (!PyErr_Occurred()) {{
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
%(goto_statement)s}""" % dict(sub, goto_statement=goto_statement)
}}
{goto_statement}}}"""
def failure_code_init(sub):
......@@ -135,17 +136,15 @@ def failure_code_init(sub):
* failure_var -> must contain a variable name to use for
the failure code.
"""
return (
"""{
if (!PyErr_Occurred()) {
id = sub["id"]
return f"""{{
if (!PyErr_Occurred()) {{
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
return %(id)d;
}"""
% sub
)
}}
return {id};
}}"""
def code_gen(blocks):
......@@ -1657,10 +1656,9 @@ class CLinker(Linker):
file=code,
)
print(" assert(PyTuple_Check(argtuple));", file=code)
print(" if (%(n_args)i != PyTuple_Size(argtuple)){ " % locals(), file=code)
print(f" if ({n_args} != PyTuple_Size(argtuple)){{ ", file=code)
print(
' PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected %(n_args)i, got %%i", (int)PyTuple_Size(argtuple));'
% locals(),
f'        PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected {n_args}, got %i", (int)PyTuple_Size(argtuple));',
file=code,
)
print(" return NULL;", file=code)
......
......@@ -513,8 +513,6 @@ class ExternalCOp(COp):
self, node: Apply, name: str, check_input: bool | None = None
) -> tuple[str, str]:
"Construct a pair of C ``#define`` and ``#undef`` code strings."
define_template = "#define %s %s"
undef_template = "#undef %s"
define_macros = []
undef_macros = []
......@@ -535,28 +533,23 @@ class ExternalCOp(COp):
vname = variable_names[i]
macro_items = (f"DTYPE_{vname}", f"npy_{v.type.dtype}")
define_macros.append(define_template % macro_items)
undef_macros.append(undef_template % macro_items[0])
define_macros.append(f"#define DTYPE_{vname} npy_{v.type.dtype}")
undef_macros.append(f"#undef DTYPE_{vname}")
d = np.dtype(v.type.dtype)
macro_items_2 = (f"TYPENUM_{vname}", d.num)
define_macros.append(define_template % macro_items_2)
undef_macros.append(undef_template % macro_items_2[0])
define_macros.append(f"#define TYPENUM_{vname} {d.num}")
undef_macros.append(f"#undef TYPENUM_{vname}")
macro_items_3 = (f"ITEMSIZE_{vname}", d.itemsize)
define_macros.append(define_template % macro_items_3)
undef_macros.append(undef_template % macro_items_3[0])
define_macros.append(f"#define ITEMSIZE_{vname} {d.itemsize}")
undef_macros.append(f"#undef ITEMSIZE_{vname}")
# Generate a macro to mark code as being apply-specific
define_macros.append(define_template % ("APPLY_SPECIFIC(str)", f"str##_{name}"))
undef_macros.append(undef_template % "APPLY_SPECIFIC")
define_macros.append(f"#define APPLY_SPECIFIC(str) str##_{name}")
undef_macros.append("#undef APPLY_SPECIFIC")
define_macros.extend(
define_template % (n, v) for n, v in self.__get_op_params()
)
undef_macros.extend(undef_template % (n,) for n, _ in self.__get_op_params())
define_macros.extend(f"#define {n} {v}" for n, v in self.__get_op_params())
undef_macros.extend(f"#undef {n}" for n, _ in self.__get_op_params())
return "\n".join(define_macros), "\n".join(undef_macros)
......
......@@ -208,29 +208,29 @@ class CDataType(CType[D]):
freefunc = self.freefunc
if freefunc is None:
freefunc = "NULL"
s = """
Py_XDECREF(py_%(name)s);
if (%(name)s == NULL) {
py_%(name)s = Py_None;
Py_INCREF(py_%(name)s);
} else {
py_%(name)s = PyCapsule_New((void *)%(name)s, NULL,
s = f"""
Py_XDECREF(py_{name});
if ({name} == NULL) {{
py_{name} = Py_None;
Py_INCREF(py_{name});
}} else {{
py_{name} = PyCapsule_New((void *){name}, NULL,
_capsule_destructor);
if (py_%(name)s != NULL) {
if (PyCapsule_SetContext(py_%(name)s, (void *)%(freefunc)s) != 0) {
if (py_{name} != NULL) {{
if (PyCapsule_SetContext(py_{name}, (void *){freefunc}) != 0) {{
/* This won't trigger a call to freefunc since it could not be
set. The error case below will do it. */
Py_DECREF(py_%(name)s);
Py_DECREF(py_{name});
/* Signal the error */
py_%(name)s = NULL;
}
}
}"""
py_{name} = NULL;
}}
}}
}}"""
if self.freefunc is not None:
s += """
if (py_%(name)s == NULL) { %(freefunc)s(%(name)s); }
s += f"""
if (py_{name} == NULL) {{ {freefunc}({name}); }}
"""
return s % dict(name=name, freefunc=freefunc)
return s
def c_cleanup(self, name, sub):
# No need to do anything here since the CObject/Capsule will
......
......@@ -36,7 +36,7 @@ for dir in dirs:
if DISPLAY_DUPLICATE_KEYS:
for k, v in keys.items():
if v > 1:
print("Duplicate key (%i copies): %s" % (v, pickle.loads(k)))
print(f"Duplicate key ({v} copies): {pickle.loads(k)}")
# nb seen -> how many keys
nbs_keys = Counter(val for val in keys.values())
......
......@@ -1120,7 +1120,7 @@ class PPrinter(Printer):
i += 1
if output.name is not None or output in outputs:
if output.name is None:
name = "out[%i]" % outputs.index(output)
name = f"out[{outputs.index(output)}]"
else:
name = output.name
# backport
......
......@@ -1827,7 +1827,7 @@ class BatchedDot(COp):
]
z_shape_correct = " && ".join(
"PyArray_DIMS(%s)[%i] == %s" % (_z, i, dim) for i, dim in enumerate(z_dims)
f"PyArray_DIMS({_z})[{i}] == {dim}" for i, dim in enumerate(z_dims)
)
z_shape = ", ".join(z_dims)
z_contiguous = contiguous(_z, z_ndim)
......
......@@ -223,32 +223,31 @@ class Argmax(COp):
{fail}
}}
"""
ret = """
return f"""
int axis;
Py_CLEAR(%(argmax)s);//todo pass them as out parameter.
%(axis_code)s
%(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);
if(%(argmax)s == NULL){
%(fail)s;
}
if(!PyArray_CheckExact(%(argmax)s)){
%(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
if(%(argmax)s == NULL){
%(fail)s;
}
}
if(PyArray_TYPE(%(argmax)s) != NPY_INT64){
PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);
if (NULL == tmp){
%(fail)s;
}
Py_DECREF(%(argmax)s);
%(argmax)s = (PyArrayObject*)tmp;
}
Py_CLEAR({argmax});//todo pass them as out parameter.
{axis_code}
{argmax} = (PyArrayObject*)PyArray_ArgMax({x}, axis, NULL);
if({argmax} == NULL){{
{fail};
}}
if(!PyArray_CheckExact({argmax})){{
{argmax} = (PyArrayObject*)PyArray_FromAny((PyObject*){argmax}, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
if({argmax} == NULL){{
{fail};
}}
}}
if(PyArray_TYPE({argmax}) != NPY_INT64){{
PyObject * tmp = PyArray_Cast({argmax}, NPY_INT64);
if (NULL == tmp){{
{fail};
}}
Py_DECREF({argmax});
{argmax} = (PyArrayObject*)tmp;
}}
"""
return ret % locals()
def c_code_cache_version(self):
return (2,)
......@@ -2602,10 +2601,7 @@ class MulWithoutZeros(BinaryScalarOp):
def c_code(self, node, name, inp, out, sub):
x, y = inp
(z,) = out
return (
"%(z)s = ((%(x)s == 0) ? (%(y)s) : "
+ "((%(y)s == 0) ? (%(x)s) : ((%(y)s)*(%(x)s))) );"
) % locals()
return f"{z} = (({x} == 0) ? ({y}) : (({y} == 0) ? ({x}) : (({y})*({x}))) );"
def c_code_cache_version(self):
return (1,)
......
......@@ -3254,7 +3254,7 @@ def simplify_mul(tree):
rval = [neg, s_inputs]
else:
rval = tree
# print 'simplify_mul: %s -> %s' % (tree, rval)
# print(f"simplify_mul: {tree} -> {rval}")
return rval
......
......@@ -243,7 +243,7 @@ class Shape_i(COp):
return ParamsType(i=pytensor.scalar.basic.int64)
def __str__(self):
return "%s{%i}" % (self.__class__.__name__, self.i)
return f"{self.__class__.__name__}{{{self.i}}}"
def make_node(self, x):
if not (isinstance(x, Variable) and hasattr(x.type, "ndim")):
......
......@@ -1073,20 +1073,20 @@ class Subtensor(COp):
def init_entry(entry, depth=0):
if isinstance(entry, np.integer | int):
init_cmds.append("subtensor_spec[%i] = %i;" % (spec_pos(), entry))
init_cmds.append(f"subtensor_spec[{spec_pos()}] = {entry};")
inc_spec_pos(1)
if depth == 0:
is_slice.append(0)
elif isinstance(entry, Type):
init_cmds.append(
"subtensor_spec[%i] = %s;" % (spec_pos(), inputs[input_pos()])
f"subtensor_spec[{spec_pos()}] = {inputs[input_pos()]};"
)
inc_spec_pos(1)
inc_input_pos(1)
if depth == 0:
is_slice.append(0)
elif entry is None:
init_cmds.append("subtensor_spec[%i] = %i;" % (spec_pos(), NONE_CODE))
init_cmds.append(f"subtensor_spec[{spec_pos()}] = {NONE_CODE};")
inc_spec_pos(1)
if depth == 0:
is_slice.append(0)
......
......@@ -902,8 +902,8 @@ class TestPicklefunction:
assert f._check_for_aliased_inputs is g._check_for_aliased_inputs
assert f.name == g.name
assert f.maker.fgraph.name == g.maker.fgraph.name
# print 'f.defaults = %s' % (f.defaults, )
# print 'g.defaults = %s' % (g.defaults, )
# print(f"{f.defaults = }")
# print(f"{g.defaults = }")
for (f_req, f_feed, f_val), (g_req, g_feed, g_val) in zip(
f.defaults, g.defaults
):
......
......@@ -152,28 +152,28 @@ class WeirdBrokenOp(COp):
(a,) = inp
(z,) = out
if "inplace" in self.behaviour:
z_code = """
{Py_XDECREF(%(z)s);}
Py_INCREF(%(a)s);
%(z)s = %(a)s;
z_code = f"""
{{Py_XDECREF({z});}}
Py_INCREF({a});
{z} = {a};
"""
else:
z_code = """
{Py_XDECREF(%(z)s);}
%(z)s = (PyArrayObject*) PyArray_SimpleNew(1, PyArray_DIMS(%(a)s), PyArray_DESCR(%(a)s)->type_num);
z_code = f"""
{{Py_XDECREF({z});}}
{z} = (PyArrayObject*) PyArray_SimpleNew(1, PyArray_DIMS({a}), PyArray_DESCR({a})->type_num);
"""
prep_vars = """
prep_vars = f"""
//the output array has size M x N
npy_intp M = PyArray_DIMS(%(a)s)[0];
npy_intp Sa = PyArray_STRIDES(%(a)s)[0] / PyArray_DESCR(%(a)s)->elsize;
npy_intp Sz = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;
npy_intp M = PyArray_DIMS({a})[0];
npy_intp Sa = PyArray_STRIDES({a})[0] / PyArray_DESCR({a})->elsize;
npy_intp Sz = PyArray_STRIDES({z})[0] / PyArray_DESCR({z})->elsize;
npy_double * Da = (npy_double*)PyArray_BYTES(%(a)s);
npy_double * Dz = (npy_double*)PyArray_BYTES(%(z)s);
npy_double * Da = (npy_double*)PyArray_BYTES({a});
npy_double * Dz = (npy_double*)PyArray_BYTES({z});
//clear the output array
for (npy_intp m = 0; m < M; ++m)
{
{{
"""
if self.behaviour == "times2":
......@@ -196,7 +196,7 @@ class WeirdBrokenOp(COp):
}
"""
total = (z_code + prep_vars + behaviour + prep_vars2) % dict(locals(), **sub)
total = z_code + prep_vars + behaviour + prep_vars2
return total
......
......@@ -65,7 +65,7 @@ def test_nnet():
output, cost = nnet.sgd_step(input, target)
mean_cost += cost
mean_cost /= float(len(data))
# print 'Mean cost at epoch %s: %s' % (epoch, mean_cost)
# print(f"Mean cost at epoch {epoch}: {mean_cost}")
# Seed based test
assert abs(mean_cost - 0.2301901) < 1e-6
# Just call functions to make sure they do not crash.
......
......@@ -20,8 +20,8 @@ def test_detect_nan():
if np.isnan(output[0]).any():
print("*** NaN detected ***")
debugprint(node)
print("Inputs : %s" % [input[0] for input in fn.inputs])
print("Outputs: %s" % [output[0] for output in fn.outputs])
print(f"Inputs : {[input[0] for input in fn.inputs]}")
print(f"Outputs: {[output[0] for output in fn.outputs]}")
nan_detected[0] = True
break
......@@ -50,8 +50,8 @@ def test_optimizer():
if np.isnan(output[0]).any():
print("*** NaN detected ***")
debugprint(node)
print("Inputs : %s" % [input[0] for input in fn.inputs])
print("Outputs: %s" % [output[0] for output in fn.outputs])
print(f"Inputs : {[input[0] for input in fn.inputs]}")
print(f"Outputs: {[output[0] for output in fn.outputs]}")
nan_detected[0] = True
break
......@@ -82,8 +82,8 @@ def test_not_inplace():
if np.isnan(output[0]).any():
print("*** NaN detected ***")
debugprint(node)
print("Inputs : %s" % [input[0] for input in fn.inputs])
print("Outputs: %s" % [output[0] for output in fn.outputs])
print(f"Inputs : {[input[0] for input in fn.inputs]}")
print(f"Outputs: {[output[0] for output in fn.outputs]}")
nan_detected[0] = True
break
......
......@@ -25,7 +25,7 @@ class TestProfiling:
config.profile_memory = True
config.profiling__min_peak_memory = True
x = [fvector("val%i" % i) for i in range(3)]
x = [fvector(f"val{i}") for i in range(3)]
z = []
z += [pt.outer(x[i], x[i + 1]).sum(axis=1) for i in range(len(x) - 1)]
......
......@@ -74,7 +74,7 @@ class TestNodeFinder:
assert hasattr(g, "get_nodes")
for type, num in ((add, 3), (sigmoid, 3), (dot, 2)):
if len(list(g.get_nodes(type))) != num:
raise Exception("Expected: %i times %s" % (num, type))
raise Exception(f"Expected: {num} times {type}")
new_e0 = add(y, z)
assert e0.owner in g.get_nodes(dot)
assert new_e0.owner not in g.get_nodes(add)
......@@ -83,7 +83,7 @@ class TestNodeFinder:
assert new_e0.owner in g.get_nodes(add)
for type, num in ((add, 4), (sigmoid, 3), (dot, 1)):
if len(list(g.get_nodes(type))) != num:
raise Exception("Expected: %i times %s" % (num, type))
raise Exception(f"Expected: {num} times {type}")
class TestReplaceValidate:
......
......@@ -177,8 +177,7 @@ class multiple_outputs_numeric_grad:
for i, (a, b) in enumerate(zip(g_pt, self.gx)):
if a.shape != b.shape:
raise ValueError(
"argument element %i has wrong shape %s"
% (i, str((a.shape, b.shape)))
f"argument element {i} has wrong shape {(a.shape, b.shape)}"
)
errs.append(np.max(multiple_outputs_numeric_grad.abs_rel_err(a, b)))
if np.all(np.isfinite(errs)):
......
......@@ -672,10 +672,9 @@ def just_gemm(i, o, ishapes=None, max_graphlen=0, expected_nb_gemm=1):
assert node.op != gemm_inplace, "gemm_inplace in original graph"
graphlen = len(f.maker.fgraph.toposort())
assert not (max_graphlen and (graphlen <= max_graphlen)), "graphlen=%i>%i" % (
graphlen,
max_graphlen,
)
assert not (
max_graphlen and (graphlen <= max_graphlen)
), f"graphlen={graphlen}>{max_graphlen}"
rng = np.random.default_rng(unittest_tools.fetch_seed(234))
r0 = f(*[np.asarray(rng.standard_normal(sh), config.floatX) for sh in ishapes])
......
......@@ -1851,8 +1851,8 @@ class TestBitwise:
class TestAdd:
def test_complex_all_ops(self):
for nbits in (64, 128):
a = shared(np.ones(3, dtype="complex%i" % nbits) + 0.5j)
b = shared(np.ones(3, dtype="complex%i" % nbits) + 1.5j)
a = shared(np.ones(3, dtype=f"complex{nbits}") + 0.5j)
b = shared(np.ones(3, dtype=f"complex{nbits}") + 1.5j)
tests = (
("+", lambda x, y: x + y),
("-", lambda x, y: x - y),
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论