Commit b69d2aae authored by Frederic Bastien

white space fix.

Parent 49e67887
@@ -155,7 +155,7 @@ class T_AddMul(unittest.TestCase):
        elif op is mul:
            self.failUnless(_is_sparse_variable(apb))
            self.failUnless(numpy.all(val.todense() == (b.multiply(a))))
            self.failUnless(numpy.all(val.todense() == numpy.array([[1, 0],
                [9, 0], [0, 36]])))

    def _testDS(self, op, array1 = numpy.array([[1., 0], [3, 0], [0, 6]]),
@@ -187,7 +187,7 @@ class T_AddMul(unittest.TestCase):
        elif op is mul:
            self.failUnless(_is_sparse_variable(apb))
            self.failUnless(numpy.all(val.todense() == (a.multiply(b))))
            self.failUnless(numpy.all(val.todense() == numpy.array([[1, 0],
                [9, 0], [0, 36]])))
@@ -244,7 +244,7 @@ class test_structureddot(unittest.TestCase):
            assert rval.type.dtype == 'float32'
            return rval
        utt.verify_grad(buildgraphCSC,
                [spmat.data, mat])

    def test_structureddot_csr_grad(self):
@@ -264,7 +264,7 @@ class test_structureddot(unittest.TestCase):
            assert rval.type.dtype == 'float64'
            return rval
        utt.verify_grad(buildgraph,
                [spmat.data, mat])

    def test_upcast(self):
@@ -307,7 +307,7 @@ class test_structureddot(unittest.TestCase):
        # Test that a graph involving structured_dot(assembled_csc_matrix) is optimized to be
        # just a structured_dot_csc Op and no assembly of a csc_matrix.
        #
        # The optimization from structured_dot -> structured_dot_csc is currently disabled,
        # So this test is not expected to pass
        return
@@ -320,7 +320,7 @@ class test_structureddot(unittest.TestCase):
            y = numpy.floor(numpy.random.rand()*spmat.shape[1])
            spmat[x,y] = numpy.random.rand()*10
        spmat = sp.csc_matrix(spmat)

        images = tensor.Tensor(dtype='float32', broadcastable=[False, False])('images')

        cscmat = CSC(kerns, spmat.indices[:spmat.size], spmat.indptr, spmat.shape)
@@ -364,7 +364,7 @@ class test_structureddot(unittest.TestCase):
        #print f.maker.env.toposort()

        for M,N,K,nnz in [(4,3,2,3),
                (40,30,20,3),
                (40,30,20,30),
                (400,3000,200,6000),
@@ -417,7 +417,7 @@ class test_structureddot(unittest.TestCase):
        print f.maker.env.toposort()

        for M,N,K,nnz in [(4,3,2,3),
                (40,30,20,3),
                (40,30,20,30),
                (400,3000,200,6000),
@@ -398,7 +398,7 @@ class TensorType(Type):
        """
        self.dtype = str(dtype)
        if self.dtype=='floatX':
            self.dtype=config.floatX
        ### broadcastable is immutable, and all elements are either True or False
        self.broadcastable = tuple(bool(b) for b in broadcastable)
        self.dtype_specs() # error checking is done there
@@ -676,7 +676,7 @@ class TensorType(Type):
        if any(b):
            bcast = str(b)
        else:
            bcast = '%iD' % len(b)
        return "TensorType(%s, %s)" % (str(self.dtype), bcast)

    def __repr__(self):
@@ -1291,9 +1291,9 @@ def _scal_elemwise(symbol):
    symbolname = symbol.__name__
    inplace = symbolname.endswith('_inplace')
    if inplace:
        msg = "inplace"
    else:
        msg = "no_inplace"
    n="Elemwise{%s,%s}"%(symbolname,msg)

    if inplace:
@@ -1507,7 +1507,7 @@ class MaxAndArgmax(Op):
        for id,a in enumerate(axis):
            if not isinstance(a, TensorVariable) and a<0:
                if -a>x.type.ndim:
                    raise ValueError('axis out of range')
                axis[id]=x.type.ndim+a
        axis = _as_tensor_variable(axis)
        inputs = [x, axis]
@@ -1540,18 +1540,18 @@ class MaxAndArgmax(Op):
        if not ( axis.data == 0 or axis.data == x.ndim-1):
            raise NotImplementedError('MaxAndArgmax gradient with axis corresponding to internal dimension')
        if axis.data==0:
            g_max_pad = shape_padleft(g_max)
        else:
            g_max_pad = shape_padright(g_max)
        xmax = max(x, axis)
        if axis.data==0:
            xmax_pad = shape_padleft(xmax)
        else:
            xmax_pad = shape_padright(xmax)
        g_x = eq(xmax_pad, x) * g_max_pad
        return g_x, None

    def __str__(self):
        return self.__class__.__name__

_max_and_argmax = MaxAndArgmax()

@_redefine_asRoutine(_max_and_argmax)
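Aside on the hunk above (not part of the commit): the gradient routes `g_max` only to the entries that attain the maximum, via the `eq(xmax_pad, x)` mask. A minimal numpy-only sketch of the same idea, with made-up values:

```python
import numpy

# Gradient of max along axis 0, routed to the argmax positions (ties would share it).
x = numpy.array([[1., 5., 2.],
                 [4., 3., 0.]])
g_max = numpy.array([10., 20., 30.])     # one incoming gradient per column

xmax_pad = x.max(axis=0, keepdims=True)  # plays the role of shape_padleft(max(x, axis))
g_max_pad = g_max[None, :]               # plays the role of shape_padleft(g_max)
g_x = (x == xmax_pad) * g_max_pad        # eq(xmax_pad, x) * g_max_pad

print(g_x)
# [[ 0. 20. 30.]
#  [10.  0.  0.]]
```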
@@ -1579,12 +1579,12 @@ def max(x, axis='DEFAULT'):
        axis = x.type.ndim - 1
        warnings.warn("The behavior of max when axis==None will change! Now we return the max over the last dimensions. It will change to the max over all dimensions as numpy. To hide this warning and be compatible with the future behavior, set axis to -1 to have the current behavior. To have the futur behavior set axis to range(nb dim), but this don't support the grad. To have the grad, you must flatten the tensor before calling max().")
    if isinstance(axis,(list,tuple)) and len(axis)>1:
        return CAReduce(scal.maximum,axis)(x)
    try:
        const = get_constant_value(axis)
        return CAReduce(scal.maximum,list(const))(x)
    except:
        return max_and_argmax(x,axis)[0]

@constructor
def argmax(x, axis='DEFAULT'):
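The warning in the hunk above describes a planned behaviour change for `max` when no axis is given: currently it reduces over the last dimension only, later it will reduce over all dimensions as numpy does. A small numpy illustration of the two conventions (illustrative, not Theano code):

```python
import numpy

a = numpy.arange(6).reshape(2, 3)   # [[0 1 2], [3 4 5]]

print(a.max(axis=-1))    # current Theano-style default: max over the last axis -> [2 5]
print(a.max(axis=None))  # numpy-style default: max over every axis            -> 5
```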
@@ -2086,16 +2086,16 @@ class Mean(elemwise.CAReduce):
        return 'float64'

    def perform(self, node, (input, ), (output, )):
        output[0]=numpy.mean(input,axis=self.axis)

    def c_code(self, node, name, inames, onames, sub):
        if self.axis!=None:
            return super(Op, self).c_code(node, name, inames, onames, sub)
        ret = elemwise.CAReduce.c_code(self, node, name, inames, onames, sub)
        #TODO: c_code perform support only axis==None
        return ret + """
  *((double *)PyArray_DATA(%s)) /= PyArray_SIZE(%s);
  """%(onames[0],inames[0])

    #TODO: implement the grad. When done and tested, you can make this the default version.
    # def grad(self, (x,), (gout,)):
@@ -2114,11 +2114,11 @@ def mean(input, axis = None, op = False):
        mean, everything will be done on the gpu.
    """
    if op:
        return Mean(axis)(input)

    if str(input.dtype).startswith('int'):
        # we need to cast eventually anyway, and this helps
        # to prevents overflow
        input = cast(input, 'float64')
    s = sum(input, axis)
    shp = shape(input)
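The `int` branch above exists because summing integer data to take a mean can overflow the integer dtype, so the input is promoted to float64 before the sum. A rough numpy analogue of that recipe (the helper name is made up for illustration):

```python
import numpy

def mean_like(values, axis=None):
    # Cast integer input to float64 first, as the hunk above does, then
    # sum and divide by the number of reduced elements.
    if values.dtype.kind in 'iu':
        values = values.astype('float64')
    s = values.sum(axis=axis)
    n = values.size if axis is None else values.shape[axis]
    return s / n

x = numpy.array([[1, 2, 3], [4, 5, 6]], dtype='int8')
print(mean_like(x))          # 3.5
print(mean_like(x, axis=0))  # [ 2.5  3.5  4.5]
```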
@@ -2183,10 +2183,10 @@ if 0:
        assert axis.type == iscalar
        broadcastable = []
        for i,x in enumerate(input.broadcastable):
            if i==axis:
                broadcastable += [False]
            else:
                broadcastable += [x]
        type = TensorType(dtype = input.type.dtype, broadcastable = \
            broadcastable)
@@ -2360,46 +2360,46 @@ class Subtensor(Op):
    @staticmethod
    def convert(entry, slice_ok=True):
        scal_types = [scal.int64, scal.int32, scal.int16, scal.int8]
        tensor_types = [bscalar, iscalar, lscalar]
        if isinstance(entry, gof.Variable) and entry.type in scal_types:
            return entry.type
        elif isinstance(entry, gof.Type) and entry in scal_types:
            return entry
        if isinstance(entry, gof.Variable) and entry.type in tensor_types and numpy.all(entry.type.broadcastable):
            return scal.Scalar(entry.type.dtype)
        elif isinstance(entry, gof.Type) and entry in tensor_types and numpy.all(entry.broadcastable):
            return scal.Scalar(entry.dtype)
        elif slice_ok and isinstance(entry, slice):
            a = entry.start
            b = entry.stop
            c = entry.step

            if a is not None:
                slice_a = Subtensor.convert(a, False)
            else:
                slice_a = None

            if b is not None:
                slice_b = Subtensor.convert(b, False)
            else:
                slice_b = None

            if c is not None:
                slice_c = Subtensor.convert(c, False)
            else:
                slice_c = None

            return slice(slice_a,slice_b,slice_c)
            #backport
            #return slice(Subtensor.convert(a, False) if a is not None else None,
            #             Subtensor.convert(b, False) if b is not None else None,
            #             Subtensor.convert(c, False) if c is not None else None)
        elif isinstance(entry, int):
            return entry
        else:
            raise TypeError(Subtensor.e_indextype, entry)

    def __init__(self, idx_list):
        self.idx_list = map(self.convert, idx_list)
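The `#backport` comments in this hunk (and several later ones) keep the Python 2.5+ conditional-expression form around while the live code spells it as an explicit if/else, presumably so the module still runs on Python 2.4, which predates that syntax. A tiny stand-alone illustration of the equivalence (`convert` here is a dummy, not `Subtensor.convert`):

```python
def convert(value, slice_ok=True):
    # Dummy stand-in for Subtensor.convert, purely for illustration.
    return int(value)

a = None

# Python 2.5+ conditional expression (the commented-out "#backport" form):
slice_a = convert(a, False) if a is not None else None

# Python 2.4-compatible spelling, matching the structure used in the diff:
if a is not None:
    slice_a = convert(a, False)
else:
    slice_a = None

print(slice_a)   # None either way
```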
@@ -2493,7 +2493,7 @@ class Subtensor(Op):
                if (idx.start is None or idx.start == 0)\
                        and (idx.stop is None or idx.stop == sys.maxint)\
                        and (idx.step is None or idx.step == 1):
                    outshp.append(xl)
                else:
                    # Not implemented yet
                    outshp.append(shape_i(i)(node.outputs[0]))
@@ -2517,10 +2517,10 @@ class Subtensor(Op):
        #TODO: optimize by cache this hash value
        msg = []
        for entry in self.idx_list:
            if isinstance(entry, slice):
                msg += [(entry.start, entry.stop, entry.step)]
            else:
                msg += [entry]

        idx_list = tuple(msg)
        #backport
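`__hash__` above stores `(start, stop, step)` tuples instead of the slice entries themselves because slice objects are not hashable on the Python versions this code targeted (they only became hashable in Python 3.12), while plain tuples are. A quick demonstration:

```python
s = slice(0, 10, 2)

try:
    hash(s)
except TypeError:
    print("slice objects are not hashable here")

# The tuple of its fields is hashable, which is what __hash__ keeps instead:
print(hash((s.start, s.stop, s.step)))
```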
@@ -2568,19 +2568,19 @@ class SubtensorPrinter:
                sidxs.append(inbrack_pstate.pprinter.process(inputs.pop()))
            elif isinstance(entry, slice):
                if entry.start is None or entry.start==0:
                    msg1 = ""
                else:
                    msg1 = entry.start

                if entry.stop is None or entry.stop == sys.maxint:
                    msg2 = ""
                else:
                    msg2 = entry.stop

                if entry.step is None:
                    msg3 = ""
                else:
                    msg3 = ":%s" % entry.step
                sidxs.append("%s:%s%s" % (msg1, msg2, msg3))
                #backport
@@ -2666,10 +2666,10 @@ class IncSubtensor(Op):
    def __hash__(self):
        msg = []
        for entry in self.idx_list:
            if isinstance(entry, slice):
                msg += [(entry.start, entry.stop, entry.step)]
            else:
                msg += [entry]

        idx_list = tuple(msg)
        #backport
@@ -2848,7 +2848,7 @@ class Split(Op):
        """WRITEME"""
        #in python 2.4, x.shape[numpy.asarray(1)] don't work.
        if sys.version_info[0:2]==(2, 4) and axis.size==1:
            axis=int(axis)

        try:
            len_along_axis = x.shape[axis]
@@ -3032,8 +3032,8 @@ class Join(Op):
        # for the output.
        for x in as_tensor_variable_args:
            for current_axis, bflag in enumerate(x.type.broadcastable):
                # Not sure if this Op supports/supported/will support
                # negative indices, but just to be sure...
                if current_axis == axis % ndim:
                    continue
                if bflag:
@@ -3103,9 +3103,9 @@ class Join(Op):
        if node.ndim != 1:
            raise TypeError('argument must be symbolic vector')
        if node.owner.tag.shape_zero is None:
            raise ValueError("could not determine vector length")
        else:
            return node.owner.tag.shape_zero

@_redefine_asRoutine(Join())
def join(axis, *tensors):
@@ -3420,7 +3420,7 @@ def tile(x, reps, ndim=None):
    if not hasattr(tile, 'op'):
        tile.op = {}

    if ndim is None:
        ndim = len(reps)
    #backport
    #ndim = len(reps) if ndim is None else ndim #not sure if len(shp) is going to work.
@@ -4404,9 +4404,9 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None, abs_tol=None, rel_tol=No
    o_fn_out = o_fn(*[p.copy() for p in pt])

    if isinstance(o_fn_out, tuple) or isinstance(o_fn_out, list):
        raise TypeError('It seems like you are trying to use verify_grad '
                'on an op or a function which outputs a list: there should'
                ' be a single (array-like) output instead')

    # random_projection should not have elements too small,
    # otherwise too much precision is lost in numerical gradient
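The `random_projection` comment hints at what `verify_grad` is doing: it compares the symbolic gradient of a scalar projection of the output against a finite-difference estimate, and that estimate loses precision when the projection (or the step size) is too small. A library-free sketch of such a finite-difference check, not Theano's actual implementation:

```python
import numpy

def numeric_grad(f, x, eps=1e-4):
    # Central finite differences, one coordinate at a time.
    g = numpy.zeros_like(x)
    for i in range(x.size):
        step = numpy.zeros_like(x)
        step.flat[i] = eps
        g.flat[i] = (f(x + step) - f(x - step)) / (2 * eps)
    return g

f = lambda x: (x ** 2).sum()            # toy scalar-valued function
x = numpy.array([1.0, -2.0, 3.0])

approx = numeric_grad(f, x)
exact = 2 * x                           # analytic gradient of sum(x**2)
print(numpy.allclose(approx, exact, atol=1e-5))   # True
```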