Commit 24264dd7, authored by nouiz

Merge pull request #799 from dwf/check_none_with_is

Check 'None' with 'is' or 'is not'
......@@ -589,7 +589,7 @@ def debugprint(r, prefix='', depth=-1, done=None, print_type=False,
r_name = getattr(r, 'name', '')
# normally if the name isn't set, it'll be None, so
# r_name == None here
# r_name is None here
if r_name is None:
r_name = ''
......
......@@ -1112,10 +1112,13 @@ class FunctionMaker(object):
self.accept_inplace = accept_inplace
self.function_builder = function_builder
self.required = [(i.value == None) for i in self.inputs]
self.required = [(i.value is None) for i in self.inputs]
self.refeed = [
(i.value != None and not isinstance(i.value, gof.Container) and i.update == None)
for i in self.inputs]
(i.value is not None and
not isinstance(i.value, gof.Container) and
i.update is None)
for i in self.inputs
]
def _check_unused_inputs(self, inputs, outputs, on_unused_input):
if on_unused_input is None:
......
......@@ -421,7 +421,7 @@ class T_module(unittest.TestCase):
m = M.make()
m.y = 77
assert m.f(23) == 100
assert m.x == None
assert m.x is None
m.x = 1000
assert m.g(23) == 977
assert m.y == 77
......
......@@ -249,7 +249,8 @@ class Variable(utils.object2):
- `RandomVariable`
A Variable which is the output of a symbolic computation will have an owner != None.
A Variable which is the output of a symbolic computation will have an owner
not equal to None.
Using the Variables' owner field and the Apply nodes' inputs fields, one can navigate a graph
from an output all the way to the inputs. The opposite direction is not possible until an
......
......@@ -379,7 +379,7 @@ def computeR(W,b,d,H,Rshape = None):
videoWidth = (outputWidth-1) * dc + filterWidth
videoDur = (outputDur-1) * dt + filterDur
if Rshape != None and Rshape[0] != -1:
if Rshape is not None and Rshape[0] != -1:
if Rshape[0] < videoHeight:
print (Rshape[0], videoHeight)
assert False
......
......@@ -59,7 +59,7 @@ def get_mode(use_gpu, check_isfinite=True):
def print_mode(mode):
if mode != None and isinstance(mode, (theano.compile.ProfileMode,)):
if mode is not None and isinstance(mode, (theano.compile.ProfileMode,)):
mode.print_summary()
......
......@@ -62,7 +62,7 @@ class CudaNdarrayType(Type):
"""
def __init__(self, broadcastable, name=None, dtype=None):
if dtype != None and dtype != 'float32':
if dtype is not None and dtype != 'float32':
raise TypeError('%s only supports dtype float32 for now. Tried '
'using dtype %s for variable %s' %
(self.__class__.__name__, dtype, name))
......
......@@ -1293,7 +1293,7 @@ class Scan(PureOp):
## 8. Mask the outputs that are not differentiable
# backwards pass
for i in xrange(len(inner_gfn_outs)):
if inner_gfn_outs[i] == None:
if inner_gfn_outs[i] is None:
inner_gfn_outs[i] = tensor.zeros_like(diff_inputs[i])
## 9. Mask the g_outs that are Nones :
......
......@@ -501,7 +501,7 @@ class ScanSaveMem(gof.Optimizer):
# 2.3.1 extract idx list of subtensor
this_slice = tensor.basic.get_idx_list(cl.inputs,
cl.op.idx_list)
if this_slice == None:
if this_slice is None:
# if unable to extract idx_list
#=> outputs needs all its intermediate values
global_nsteps = None
......@@ -601,7 +601,7 @@ class ScanSaveMem(gof.Optimizer):
else:
this_slice = tensor.basic.get_idx_list(cl.inputs,
cl.op.idx_list)
if this_slice == None:
if this_slice is None:
store_steps[i] = 0
break
......
......@@ -3175,10 +3175,10 @@ class Mean(elemwise.CAReduce):
output[0] = numpy.mean(input, axis=self.axis)
def c_code(self, node, name, inames, onames, sub):
if self.axis != None:
if self.axis is not None:
return super(Op, self).c_code(node, name, inames, onames, sub)
ret = elemwise.CAReduce.c_code(self, node, name, inames, onames, sub)
#TODO: c_code perform support only axis==None
#TODO: c_code perform support only axis is None
return ret + """
*((double *)PyArray_DATA(%s)) /= PyArray_SIZE(%s);
""" % (onames[0], inames[0])
......
......@@ -1312,7 +1312,7 @@ class CAReduce(Op):
fail = sub["fail"]
pattern = [0] * len(node.inputs[0].broadcastable)
axis = self.axis
if axis == None:
if axis is None:
axis = range(len(pattern))
for i in axis:
pattern[i] = 1
......
......@@ -163,7 +163,7 @@ class BinCountOp(theano.Op):
def infer_shape(self, node, ins_shapes):
x = node.inputs[0]
m = basic.max(x) + 1
if self.minlength != None:
if self.minlength is not None:
m = basic.maximum(m, self.minlength)
return [[m]]
......@@ -286,7 +286,7 @@ class RepeatOp(theano.Op):
def make_node(self, x, repeats):
x = basic.as_tensor_variable(x)
repeats = basic.as_tensor_variable(repeats)
if self.axis == None:
if self.axis is None:
out_type = theano.tensor.TensorType(dtype=x.dtype,
broadcastable=[False])
else:
......@@ -326,7 +326,7 @@ class RepeatOp(theano.Op):
repeats = node.inputs[1]
out_shape = list(i0_shapes)
if self.axis == None:
if self.axis is None:
if repeats.ndim == 0:
if len(i0_shapes) == 0:
out_shape = [repeats]
......
......@@ -361,7 +361,7 @@ class ConvOp(Op):
self.dy=dy
self.verbose=verbose
self.version=version
if openmp == None:
if openmp is None:
openmp = config.openmp
self.openmp = openmp
......
......@@ -1648,7 +1648,7 @@ class Prepend_scalar_constant_to_each_row(gof.Op):
mat, = inp
output, = out
new_shape = (mat.shape[0], mat.shape[1] + 1)
if output[0] == None:
if output[0] is None:
output[0] = numpy.empty(new_shape, dtype=mat.dtype)
out = output[0]
else:
......@@ -1665,7 +1665,7 @@ class Prepend_scalar_constant_to_each_row(gof.Op):
def infer_shape(self, node, in_shapes):
shp = (in_shapes[0][0], in_shapes[0][1] + 1)
return [shp]
def grad(self, inp, grads):
mat, = inp
......@@ -1703,7 +1703,7 @@ class Prepend_scalar_to_each_row(gof.Op):
val, mat = inp
output, = out
new_shape = (mat.shape[0], mat.shape[1] + 1)
if output[0] == None:
if output[0] is None:
output[0] = numpy.empty(new_shape, dtype=mat.dtype)
out = output[0]
else:
......
......@@ -454,7 +454,7 @@ def normal(random_state, size=None, avg=0.0, std=1.0, ndim=None, dtype=None):
"""
avg = tensor.as_tensor_variable(avg)
std = tensor.as_tensor_variable(std)
if dtype == None:
if dtype is None:
dtype = tensor.scal.upcast(theano.config.floatX, avg.dtype, std.dtype)
ndim, size, bcast = _infer_ndim_bcast(ndim, size, avg, std)
op = RandomFunction('normal',
......
......@@ -5888,7 +5888,7 @@ def test_transpose():
assert tensor.transpose(x1).name == 'x1.T'
assert tensor.transpose(x2).name == 'x2.T'
assert tensor.transpose(x3).name == 'x3.T'
assert tensor.transpose(tensor.dmatrix()).name == None
assert tensor.transpose(tensor.dmatrix()).name is None
if __name__ == '__main__':
......
......@@ -65,8 +65,8 @@ class TestCGer(TestCase, TestOptimizationMixin):
self.assert_(CGer(False) != Ger(False))
# assert that eq works for non-CGer instances
self.assert_(CGer(False) != None)
self.assert_(CGer(True) != None)
self.assert_(CGer(False) is not None)
self.assert_(CGer(True) is not None)
def test_hash(self):
self.assert_(hash(CGer(True)) == hash(CGer(True)))
......
......@@ -909,7 +909,7 @@ class test_fusion(unittest.TestCase):
continue
print "new cases", id
if shared_fn == None:
if shared_fn is None:
assert gpu==False
f = compile.function(list(sym_inputs), g,mode=mode)
for x in range(nb_repeat):
......@@ -2398,7 +2398,7 @@ class test_assert(utt.InferShapeTester):
def setUp(self):
super(test_assert, self).setUp()
def test0(self):
x=T.scalar()
y=T.scalar()
......@@ -2471,7 +2471,7 @@ class test_assert(utt.InferShapeTester):
self._compile_and_check([admat, adscal, bdscal], [out],
[admat_val, adscal_val, bdscal_val], Assert)
def test_local_mul_specialize():
mode = theano.config.mode
if mode == 'FAST_COMPILE':
......
Markdown formatting is supported.
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment.