Commit f2bf051c authored by Virgile Andreani, committed by Ricardo Vieira

Fix PERF401: list comprehensions when appropriate

Parent commit: 3647c98b
...@@ -756,10 +756,7 @@ def _get_preallocated_maps( ...@@ -756,10 +756,7 @@ def _get_preallocated_maps(
# TODO: Sparse? Scalar does not really make sense. # TODO: Sparse? Scalar does not really make sense.
# Do not preallocate memory for outputs that actually work inplace # Do not preallocate memory for outputs that actually work inplace
considered_outputs = [] considered_outputs = [r for r in node.outputs if r not in inplace_outs]
for r in node.outputs:
if r not in inplace_outs:
considered_outputs.append(r)
# Output storage that was initially present in the storage_map # Output storage that was initially present in the storage_map
if "initial" in prealloc_modes or "ALL" in prealloc_modes: if "initial" in prealloc_modes or "ALL" in prealloc_modes:
......
...@@ -1450,12 +1450,16 @@ class CLinker(Linker): ...@@ -1450,12 +1450,16 @@ class CLinker(Linker):
if props: if props:
version.append(props) version.append(props)
for i in node.inputs: version.extend(
if isinstance(i.type, CLinkerObject): i.type.c_code_cache_version()
version.append(i.type.c_code_cache_version()) for i in node.inputs
for o in node.outputs: if isinstance(i.type, CLinkerObject)
if isinstance(o.type, CLinkerObject): )
version.append(o.type.c_code_cache_version()) version.extend(
o.type.c_code_cache_version()
for o in node.outputs
if isinstance(o.type, CLinkerObject)
)
# add the signature for this node # add the signature for this node
sig.append( sig.append(
......
...@@ -2131,9 +2131,11 @@ class GCC_compiler(Compiler): ...@@ -2131,9 +2131,11 @@ class GCC_compiler(Compiler):
or "-march=native" in line or "-march=native" in line
): ):
continue continue
for reg in ("-march=", "-mtune=", "-target-cpu", "-mabi="): selected_lines.extend(
if reg in line: line.strip()
selected_lines.append(line.strip()) for reg in ("-march=", "-mtune=", "-target-cpu", "-mabi=")
if reg in line
)
lines = list(set(selected_lines)) # to remove duplicate lines = list(set(selected_lines)) # to remove duplicate
return lines return lines
......
...@@ -1270,15 +1270,16 @@ class VMLinker(LocalLinker): ...@@ -1270,15 +1270,16 @@ class VMLinker(LocalLinker):
if self.allow_gc: if self.allow_gc:
post_thunk_clear = [] post_thunk_clear = []
for node in order: for node in order:
clear_after_this_thunk = [] clear_after_this_thunk = [
for input in node.inputs: storage_map[input]
for input in node.inputs
if ( if (
input in computed input in computed
and input not in fgraph.outputs and input not in fgraph.outputs
and node == last_user[input] and node == last_user[input]
and input not in reallocated_vars and input not in reallocated_vars
): )
clear_after_this_thunk.append(storage_map[input]) ]
post_thunk_clear.append(clear_after_this_thunk) post_thunk_clear.append(clear_after_this_thunk)
else: else:
post_thunk_clear = None post_thunk_clear = None
......
...@@ -585,10 +585,11 @@ def assert_shape(x, expected_shape, msg="Unexpected shape."): ...@@ -585,10 +585,11 @@ def assert_shape(x, expected_shape, msg="Unexpected shape."):
if expected_shape is None or not config.conv__assert_shape: if expected_shape is None or not config.conv__assert_shape:
return x return x
shape = x.shape shape = x.shape
tests = [] tests = [
for i in range(x.ndim): pt.eq(shape[i], expected_shape[i])
if expected_shape[i] is not None: for i in range(x.ndim)
tests.append(pt.eq(shape[i], expected_shape[i])) if expected_shape[i] is not None
]
if tests: if tests:
return Assert(msg)(x, *tests) return Assert(msg)(x, *tests)
else: else:
......
...@@ -2107,10 +2107,7 @@ class AdvancedSubtensor1(COp): ...@@ -2107,10 +2107,7 @@ class AdvancedSubtensor1(COp):
out[0] = x.take(i, axis=0, out=o) out[0] = x.take(i, axis=0, out=o)
def connection_pattern(self, node): def connection_pattern(self, node):
rval = [[True]] rval = [[True], *([False] for _ in node.inputs[1:])]
for ipt in node.inputs[1:]:
rval.append([False])
return rval return rval
......
...@@ -1784,13 +1784,14 @@ class TestUsmm: ...@@ -1784,13 +1784,14 @@ class TestUsmm:
) )
== len(topo) - 5 == len(topo) - 5
) )
new_topo = [] new_topo = [
for node in topo: node
for node in topo
if not ( if not (
isinstance(node.op, Elemwise) isinstance(node.op, Elemwise)
and isinstance(node.op.scalar_op, pytensor.scalar.basic.Cast) and isinstance(node.op.scalar_op, pytensor.scalar.basic.Cast)
): )
new_topo.append(node) ]
topo = new_topo topo = new_topo
assert len(topo) == 5, topo assert len(topo) == 5, topo
# Usmm is tested at the same time in debugmode # Usmm is tested at the same time in debugmode
......
...@@ -3477,8 +3477,9 @@ def test_grad_useless_sum(): ...@@ -3477,8 +3477,9 @@ def test_grad_useless_sum():
old_values_eq_approx = staticmethod(TensorType.values_eq_approx) old_values_eq_approx = staticmethod(TensorType.values_eq_approx)
TensorType.values_eq_approx = staticmethod(values_eq_approx_remove_nan) TensorType.values_eq_approx = staticmethod(values_eq_approx_remove_nan)
try: try:
for test_value in test_values: outputs.extend(
outputs.append(f(np.array([test_value]).astype("float32"))) f(np.array([test_value]).astype("float32")) for test_value in test_values
)
finally: finally:
TensorType.values_eq_approx = old_values_eq_approx TensorType.values_eq_approx = old_values_eq_approx
......
...@@ -1143,9 +1143,11 @@ class TestSubtensor(utt.OptimizationTestMixin): ...@@ -1143,9 +1143,11 @@ class TestSubtensor(utt.OptimizationTestMixin):
data = random(4) data = random(4)
data = np.asarray(data, dtype=self.dtype) data = np.asarray(data, dtype=self.dtype)
idxs = [[i] for i in range(data.shape[0])] idxs = [[i] for i in range(data.shape[0])]
for i in range(data.shape[0]): idxs.extend(
for j in range(0, data.shape[0], 2): [i, j, (i + 1) % data.shape[0]]
idxs.append([i, j, (i + 1) % data.shape[0]]) for i in range(data.shape[0])
for j in range(0, data.shape[0], 2)
)
self.grad_list_(idxs, data) self.grad_list_(idxs, data)
data = random(4, 3) data = random(4, 3)
......
...@@ -78,9 +78,7 @@ class TestTypedListType: ...@@ -78,9 +78,7 @@ class TestTypedListType:
myType = TypedListType(TensorType(pytensor.config.floatX, shape=(None, None))) myType = TypedListType(TensorType(pytensor.config.floatX, shape=(None, None)))
x = random_ranged(-1000, 1000, [10, 10]) x = random_ranged(-1000, 1000, [10, 10])
testList = [] testList = [x for _ in range(10000)]
for i in range(10000):
testList.append(x)
assert np.array_equal(myType.filter(testList), testList) assert np.array_equal(myType.filter(testList), testList)
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment