提交 265c0d94 authored 作者: Brandon T. Willard's avatar Brandon T. Willard 提交者: Brandon T. Willard

Fix deprecated and unnecessary uses of np.sum

上级 458312ee
......@@ -527,7 +527,7 @@ class ProfileStats:
hs += ["<#apply>"]
es += [" %4d "]
upto_length = np.sum([len(x) for x in hs]) + len(hs)
upto_length = sum(len(x) for x in hs) + len(hs)
maxlen = max(self.line_width - upto_length, 0)
hs += ["<Class name>"]
es += ["%s"]
......@@ -624,7 +624,7 @@ class ProfileStats:
hs += ["<#apply>"]
es += [" %4d "]
upto_length = np.sum([len(x) for x in hs]) + len(hs)
upto_length = sum(len(x) for x in hs) + len(hs)
maxlen = max(self.line_width - upto_length, 0)
hs += ["<Op name>"]
es += ["%s"]
......@@ -703,7 +703,7 @@ class ProfileStats:
if self.variable_shape:
hs += ["<Mflops>", "<Gflops/s>"]
upto_length = np.sum([len(x) for x in hs]) + len(hs)
upto_length = sum(len(x) for x in hs) + len(hs)
maxlen = max(self.line_width - upto_length, 0)
hs += ["<Apply name>"]
es += ["%s"]
......
......@@ -3086,7 +3086,7 @@ class Scan(Op, ScanMethodsMixin, HasInnerGraph):
b = e
e = e + self.n_mit_mot
ib = ie
ie = ie + int(np.sum([len(x) for x in self.tap_array[: self.n_mit_mot]]))
ie = ie + int(sum(len(x) for x in self.tap_array[: self.n_mit_mot]))
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
......@@ -3102,12 +3102,10 @@ class Scan(Op, ScanMethodsMixin, HasInnerGraph):
e = e + self.n_mit_sot
ib = ie
ie = ie + int(
np.sum(
[
len(x)
for x in self.tap_array[
self.n_mit_mot : self.n_mit_mot + self.n_mit_sot
]
sum(
len(x)
for x in self.tap_array[
self.n_mit_mot : self.n_mit_mot + self.n_mit_sot
]
)
)
......@@ -3161,7 +3159,7 @@ class Scan(Op, ScanMethodsMixin, HasInnerGraph):
inner_other = self_inputs[ie:] + inner_eval_points[ib:]
# Outputs
n_mit_mot_outs = int(np.sum([len(x) for x in self.mit_mot_out_slices]))
n_mit_mot_outs = int(sum(len(x) for x in self.mit_mot_out_slices))
b = 0
e = n_mit_mot_outs
......
......@@ -1598,8 +1598,8 @@ class IncSubtensor(COp):
else:
op_is_set = 0
fail = sub["fail"]
view_ndim = node.inputs[0].ndim - np.sum(
[not isinstance(idx, slice) for idx in self.idx_list]
view_ndim = node.inputs[0].ndim - sum(
not isinstance(idx, slice) for idx in self.idx_list
)
copy_of_x = self.copy_of_x(x)
......
......@@ -729,12 +729,11 @@ class TestAlloc:
fgrad = aesara.function([some_vector], grad_derp, mode=self.mode)
topo_obj = fobj.maker.fgraph.toposort()
assert np.sum([isinstance(node.op, type(alloc_)) for node in topo_obj]) == 0
assert sum(isinstance(node.op, type(alloc_)) for node in topo_obj) == 0
topo_grad = fgrad.maker.fgraph.toposort()
assert (
np.sum([isinstance(node.op, type(alloc_)) for node in topo_grad])
== n_alloc
sum(isinstance(node.op, type(alloc_)) for node in topo_grad) == n_alloc
), (alloc_, subtensor, n_alloc, topo_grad)
fobj(test_params)
fgrad(test_params)
......@@ -748,7 +747,7 @@ class TestAlloc:
f = aesara.function([], out, mode=self.mode)
topo = f.maker.fgraph.toposort()
assert np.sum([isinstance(node.op, type(alloc_)) for node in topo]) == 1
assert sum(isinstance(node.op, type(alloc_)) for node in topo) == 1
assert not isinstance(topo[0].op, DeepCopyOp)
def test_ones(self):
......@@ -3610,7 +3609,7 @@ class TestDiag:
f = aesara.function([x], g.shape)
topo = f.maker.fgraph.toposort()
if config.mode != "FAST_COMPILE":
assert np.sum([isinstance(node.op, AllocDiag) for node in topo]) == 0
assert sum(isinstance(node.op, AllocDiag) for node in topo) == 0
for shp in [5, 0, 1]:
m = rng.random(shp).astype(self.floatX)
assert (f(m) == np.diag(m).shape).all()
......@@ -3620,7 +3619,7 @@ class TestDiag:
f = aesara.function([x], g.shape)
topo = f.maker.fgraph.toposort()
if config.mode != "FAST_COMPILE":
assert np.sum([isinstance(node.op, ExtractDiag) for node in topo]) == 0
assert sum(isinstance(node.op, ExtractDiag) for node in topo) == 0
for shp in [(5, 3), (3, 5), (5, 1), (1, 5), (5, 0), (0, 5), (1, 0), (0, 1)]:
m = rng.random(shp).astype(self.floatX)
assert (f(m) == np.diag(m).shape).all()
......
......@@ -1027,8 +1027,8 @@ class TestFusion:
# check that the number of input to the Composite
# Elemwise is ok
if len(set(g.owner.inputs)) == len(g.owner.inputs):
expected_len_sym_inputs = np.sum(
[not isinstance(x, Constant) for x in topo_[0].inputs]
expected_len_sym_inputs = sum(
not isinstance(x, Constant) for x in topo_[0].inputs
)
assert expected_len_sym_inputs == len(sym_inputs)
......
......@@ -862,7 +862,7 @@ def test_upcasting_scalar_nogemm():
f = function([w, v, t, alpha], rval)
t = f.maker.fgraph.toposort()
assert np.sum([isinstance(n.op, Gemm) for n in t]) == 0
assert sum(isinstance(n.op, Gemm) for n in t) == 0
# aesara.printing.debugprint(f, print_type=True)
v = fmatrix("v")
......@@ -875,7 +875,7 @@ def test_upcasting_scalar_nogemm():
f = function([w, v, t, alpha], rval)
t = f.maker.fgraph.toposort()
assert np.sum([isinstance(n.op, Gemm) for n in t]) == 0
assert sum(isinstance(n.op, Gemm) for n in t) == 0
# aesara.printing.debugprint(f, print_type=True)
......
......@@ -1771,8 +1771,8 @@ class TestFusion:
# check that the number of input to the Composite
# Elemwise is ok
if len(set(g.owner.inputs)) == len(g.owner.inputs):
expected_len_sym_inputs = np.sum(
[not isinstance(x, Constant) for x in topo_[0].inputs]
expected_len_sym_inputs = sum(
not isinstance(x, Constant) for x in topo_[0].inputs
)
assert expected_len_sym_inputs == len(sym_inputs)
......
......@@ -788,8 +788,8 @@ class TestSubtensor(utt.OptimizationTestMixin):
topo_ = [node for node in topo if not isinstance(node.op, DeepCopyOp)]
if not self.fast_compile:
assert len(topo_) == 6
assert np.sum([isinstance(node.op, IncSubtensor) for node in topo_]) == 1
assert np.sum([isinstance(node.op, Subtensor) for node in topo_]) == 1
assert sum(isinstance(node.op, IncSubtensor) for node in topo_) == 1
assert sum(isinstance(node.op, Subtensor) for node in topo_) == 1
gval = f()
good = np.zeros_like(data)
......
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论