提交 47fc8d35 authored 作者: Brandon T. Willard's avatar Brandon T. Willard 提交者: Brandon T. Willard

Replace `not (... == ...)` with `... != ...`

上级 fbc28965
......@@ -1128,7 +1128,7 @@ def _pickle_Function(f):
# HACK to detect aliased storage.
# This is here because aliased relationships are not [currently]
# preserved across the pickle operation
if not (f.pickle_aliased_memory_strategy == "ignore"):
if f.pickle_aliased_memory_strategy != "ignore":
all_data = input_storage + inputs_data
for i, d_i in enumerate(all_data):
for j, d_j in enumerate(all_data):
......
......@@ -79,7 +79,7 @@ class SymbolicInput:
raise TypeError(f"name must be a string! (got: {self.name})")
self.update = update
if update is not None:
if not variable.type == update.type:
if variable.type != update.type:
raise TypeError(
f"Variable '{variable}' has type {variable.type} but an update of "
f"type {update.type}. The type of the update should be "
......
......@@ -1240,7 +1240,7 @@ def add_multiprocessing_configvars():
f"The environment variable OMP_NUM_THREADS should be a number, got '{var}'."
)
else:
default_openmp = not int(var) == 1
default_openmp = int(var) != 1
else:
# Check the number of cores availables.
count = os.cpu_count()
......
......@@ -82,7 +82,7 @@ def d3viz(fct, outfile, copy_deps=True, *args, **kwargs):
# Create output directory if not existing
outdir = os.path.dirname(outfile)
if not outdir == "" and not os.path.exists(outdir):
if outdir != "" and not os.path.exists(outdir):
os.makedirs(outdir)
# Read template HTML file
......
......@@ -248,7 +248,7 @@ def dnn_available(context_name):
ctx = get_context(context_name)
if not ctx.kind == b"cuda":
if ctx.kind != b"cuda":
dnn_available.msg = "Not on a CUDA device."
return False
......@@ -719,7 +719,7 @@ def ensure_dt(val, default, name, dtype):
val = as_scalar(val)
if not isinstance(val.type, aesara.scalar.Scalar):
raise TypeError(f"{name}: expected a scalar value")
if not val.type.dtype == dtype:
if val.type.dtype != dtype:
val = val.astype(dtype)
return val
......
......@@ -258,7 +258,7 @@ class GpuAveragePoolGrad(CGpuKernelBase):
if pad is None:
pad = (0,) * nd
elif isinstance(pad, (tuple, list)):
if max(pad) != 0 and not self.mode == "average_exc_pad":
if max(pad) != 0 and self.mode != "average_exc_pad":
raise ValueError("Padding must be zero for average_exc_pad")
ws = as_tensor_variable(ws)
stride = as_tensor_variable(stride)
......
......@@ -249,7 +249,7 @@ class Apply(Node):
remake_node = False
new_inputs = inputs[:]
for i, (curr, new) in enumerate(zip(self.inputs, new_inputs)):
if not curr.type == new.type:
if curr.type != new.type:
if strict:
# If compatible, casts new into curr.type
new_inputs[i] = curr.type.filter_variable(new)
......
......@@ -450,7 +450,7 @@ class FunctionGraph(MetaObject):
# TODO: ERROR HANDLING FOR LISTENERS (should it complete the change or revert it?)
if node == "output":
r = self.outputs[i]
if not r.type == new_var.type:
if r.type != new_var.type:
raise TypeError(
f"The type of the replacement ({new_var.type}) must be the"
f" same as the type of the original Variable ({r.type})."
......@@ -458,7 +458,7 @@ class FunctionGraph(MetaObject):
self.outputs[i] = new_var
else:
r = node.inputs[i]
if not r.type == new_var.type:
if r.type != new_var.type:
raise TypeError(
f"The type of the replacement ({new_var.type}) must be the"
f" same as the type of the original Variable ({r.type})."
......
......@@ -2832,7 +2832,7 @@ def _check_chain(r, chain):
elif r.owner is None:
return False
elif isinstance(elem, Op):
if not r.owner.op == elem:
if r.owner.op != elem:
return False
else:
try:
......
......@@ -565,7 +565,7 @@ class WrapLinker(Linker):
order_list0 = order_lists[0]
for order_list in order_lists[1:]:
if not order_list0 == order_list:
if order_list0 != order_list:
raise Exception(
"All linkers to WrapLinker should execute operations in the same order."
)
......
......@@ -761,7 +761,7 @@ def scan(
if isinstance(inner_out.type, TensorType) and return_steps.get(pos, 0) != 1:
outputs[pos] = at.unbroadcast(shape_padleft(inner_out), 0)
if return_list is not True and len(outputs) == 1:
if not return_list and len(outputs) == 1:
outputs = outputs[0]
return (outputs, updates)
......@@ -1136,7 +1136,7 @@ def scan(
# refers to update rule of index -1 - `pos`.
update_map[sit_sot_shared[abs(pos) - 1]] = _scan_out_list[idx][-1]
scan_out_list = [x for x in scan_out_list if x is not None]
if return_list is not True and len(scan_out_list) == 1:
if not return_list and len(scan_out_list) == 1:
scan_out_list = scan_out_list[0]
elif len(scan_out_list) == 0:
scan_out_list = None
......
......@@ -1390,7 +1390,7 @@ class ShapeFeature(features.Feature):
return False
opx = dx.owner.op
opy = dy.owner.op
if not (opx.i == opy.i):
if opx.i != opy.i:
return False
# FB I'm not sure if this handle correctly constants.
if dx.owner.inputs[0] == dy.owner.inputs[0]:
......@@ -1633,7 +1633,7 @@ def local_fill_sink(fgraph, node):
if (
hasattr(client, "op")
and isinstance(client.op, Elemwise)
and not client.op == fill
and client.op != fill
):
client_inputs = client.inputs[:]
client_inputs[cl_idx] = c
......
......@@ -187,7 +187,7 @@ class DimShuffle(ExternalCOp):
def make_node(self, _input):
input = as_tensor_variable(_input)
ib = tuple(input.type.broadcastable)
if not ib == self.input_broadcastable:
if ib != self.input_broadcastable:
if len(ib) != len(self.input_broadcastable):
raise TypeError(
"The number of dimensions of the "
......
......@@ -1021,7 +1021,7 @@ class AlgebraicCanonizer(LocalOptimizer):
assert (new.type == out.type) == (not (new.type != out.type))
if not (new.type == out.type):
if new.type != out.type:
new = fill_chain(new, node.inputs)[0]
if new.type == out.type:
......@@ -1138,7 +1138,7 @@ def local_sum_prod_mul_by_scalar(fgraph, node):
new_op_input_nb_elements = new_op_input.size
new_op_output = node.op(new_op_input)
if not len(non_scalars) == 0:
if len(non_scalars) != 0:
# Copy over stacktrace from previous output to new mul op,
# for same reason as above.
copy_stack_trace(node.outputs, new_op_output)
......@@ -2519,7 +2519,7 @@ def local_greedy_distributor(fgraph, node):
rval = local_mul_canonizer.merge_num_denum(new_num, new_denum)
if not (rval.type == out.type):
if rval.type != out.type:
# WHY DOES THIS HAPPEN?
return False
......
......@@ -3300,7 +3300,7 @@ class AbstractConv_gradInputs(BaseAbstractConv):
expected_topgrad_shape = get_conv_output_shape(
imshp, kern.shape, self.border_mode, self.subsample, self.filter_dilation
)
if not tuple(expected_topgrad_shape) == tuple(topgrad.shape):
if tuple(expected_topgrad_shape) != tuple(topgrad.shape):
raise ValueError(
"invalid input_shape for gradInputs: the given input_shape "
"would produce an output of shape {}, but the given topgrad "
......
......@@ -2527,7 +2527,7 @@ class Prepend_scalar_constant_to_each_row(Op):
def make_node(self, mat):
# check type of input
x = at.as_tensor_variable(mat)
if not mat.type.broadcastable == (False, False):
if mat.type.broadcastable != (False, False):
raise TypeError("Expected a matrix as input")
y = at.as_tensor_variable(self.val)
assert y.ndim == 0
......@@ -2574,7 +2574,7 @@ class Prepend_scalar_to_each_row(Op):
x = at.as_tensor_variable(mat)
if isinstance(val, float):
val = aes.constant(val)
if not mat.type.broadcastable == (False, False):
if mat.type.broadcastable != (False, False):
raise TypeError("Expected a matrix as input")
y = at.as_tensor_variable(val)
assert y.ndim == 0
......
......@@ -1085,7 +1085,7 @@ class ConvOp(OpenMPOp):
# Determine gradient on inputs ########
mode = "valid"
if not self.out_mode == "full":
if self.out_mode != "full":
mode = "full"
filters = kerns.dimshuffle((1, 0, 2, 3))
......@@ -1416,7 +1416,7 @@ if(%(value)s != %(expected)s){
d["self_imshp_logical_stride_c"] = int(
np.ceil(self.imshp_logical[2] / float(self.imshp[2]))
)
if not self.imshp[0] == 1:
if self.imshp[0] != 1:
d["affectation"] = "+="
d["all_shape"] = "1"
d["dim_zz_const"] = "const"
......@@ -1473,7 +1473,7 @@ if(kerns_dim[1] != img2d_dim[1]){
f"Type {node.inputs[0].type.dtype} not implemented"
)
d["gemm"] = "dgemm_"
if not d["type"] == "double":
if d["type"] != "double":
d["gemm"] = "sgemm_"
if self.imshp != self.imshp_logical or self.kshp != self.kshp_logical:
......
......@@ -448,7 +448,7 @@ def local_conv2d_gradinputs_cpu(fgraph, node):
return None
mode = "valid"
if not node.op.border_mode == "full":
if node.op.border_mode != "full":
mode = "full"
filters = kern.dimshuffle((1, 0, 2, 3))
filters = filters[:, :, ::-1, ::-1]
......
......@@ -413,7 +413,7 @@ class SpecifyShape(COp):
raise AssertionError(
f"SpecifyShape: Got {x.ndim} dimensions (shape {x.shape}), expected {ndim} dimensions with shape {tuple(shape)}."
)
if not np.all(x.shape == shape):
if x.shape != tuple(shape):
raise AssertionError(
f"SpecifyShape: Got shape {x.shape}, expected {tuple(shape)}."
)
......
......@@ -75,7 +75,7 @@ class TypedListType(CType):
return 0
def values_eq(self, a, b):
if not len(a) == len(b):
if len(a) != len(b):
return False
for x in range(len(a)):
......
......@@ -74,7 +74,7 @@ class TestNodeFinder:
assert hasattr(g, "get_nodes")
for type, num in ((add, 3), (sigmoid, 3), (dot, 2)):
if not len([t for t in g.get_nodes(type)]) == num:
if len([t for t in g.get_nodes(type)]) != num:
raise Exception("Expected: %i times %s" % (num, type))
new_e0 = add(y, z)
assert e0.owner in g.get_nodes(dot)
......@@ -83,7 +83,7 @@ class TestNodeFinder:
assert e0.owner not in g.get_nodes(dot)
assert new_e0.owner in g.get_nodes(add)
for type, num in ((add, 4), (sigmoid, 3), (dot, 1)):
if not len([t for t in g.get_nodes(type)]) == num:
if len([t for t in g.get_nodes(type)]) != num:
raise Exception("Expected: %i times %s" % (num, type))
......
......@@ -1740,7 +1740,7 @@ class TestFusion:
topo = f.maker.fgraph.toposort()
topo_ = [n for n in topo if not isinstance(n.op, self.topo_exclude)]
if assert_len_topo:
if not len(topo_) == nb_elemwise:
if len(topo_) != nb_elemwise:
fail3.append((id, topo_, nb_elemwise))
if nb_elemwise == 1:
# if no variable appears multiple times in the
......@@ -1753,7 +1753,7 @@ class TestFusion:
)
assert expected_len_sym_inputs == len(sym_inputs)
if not out_dtype == out.dtype:
if out_dtype != out.dtype:
fail4.append((id, out_dtype, out.dtype))
assert len(fail1 + fail2 + fail3 + fail4) == 0
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论