Commit 2c9fa876 authored by lamblin

Merge pull request #573 from delallea/minor

Minor stuff (PEP8 and typos mostly)
@@ -27,9 +27,9 @@ Interface changes
   instance, function([x, y], [y]). You can use the kwarg
   ``on_unused_input={'raise', 'warn', 'ignore'}`` to control this.
   (Pascal L.)
-* tensor.alloc() now raise an error during graph build time
-  when we try to create less dimensions then the number of dimensions
-  the provieded value have. In the past, the error was at run time.
+* tensor.alloc() now raises an error during graph build time
+  when we try to create fewer dimensions than the number of dimensions
+  the provided value has. In the past, the error was at run time.
   (Frederic B.)
 New Features
@@ -48,7 +48,7 @@ New Features
   contains dimensions with bad value like 0. (Frédéric B. reported by Ian G.)
 Sparse
-* Implement theano.sparse.mul(sparse1, sparse2) when both input don't
+* Implement theano.sparse.mul(sparse1, sparse2) when both inputs don't
   have the same sparsity pattern. (Frederic B.)
 Sparse Sandbox graduate
...
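For context on the first entry above, a minimal sketch of the new ``on_unused_input`` kwarg (hedged: the variable names and the doubling of y are illustrative, not from the commit):

    import theano
    import theano.tensor as T

    x = T.scalar('x')
    y = T.scalar('y')

    # x does not feed the output; passing it used to raise an error
    # unconditionally, and is now controlled by on_unused_input.
    f = theano.function([x, y], y * 2, on_unused_input='warn')
    print(f(1.0, 3.0))  # prints 6.0, after a warning about the unused x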
@@ -519,8 +519,8 @@ import theano and print the config variable, as in:
     Bool value, default: False
-    Should all SeqOptimizer object print the time taked by each of its
-    optimizer. Each SeqOptimizer print something like this:
+    Should each SeqOptimizer object print the time taken by each of its
+    optimizers. Each SeqOptimizer prints something like this:
     SeqOptimizer gpu_opt time 0.014s for 8/9 nodes before/after optimization
       [(0.0004410743713378906, ('InputToGpuOptimizer',
@@ -529,11 +529,11 @@ import theano and print the config variable, as in:
       (0.012573957443237305, ('gpu_local_optimizations',
                               'EquilibriumOptimizer'))]
-    This print the name of the SeqOptimizer (gpu_opt), the number of
-    Apply node in the graph before (8) and after (9)
-    optimizations. Then a list of tuple with 1 tuple by optimization
+    This prints the name of the SeqOptimizer (gpu_opt), the number of
+    Apply nodes in the graph before (8) and after (9)
+    optimizations. Then a list of tuples with 1 tuple per optimization
     in this SeqOptimizer. The first element of the tuple is the time
-    by this optimization and then it is a tuple with the name of the
+    taken by this optimization and then it is a tuple with the name of the
     optimization and this class. This list is sorted from the sub
-    optimization that take the most time to the optimization that take
-    the less time.
+    optimization that takes the most time to the optimization that takes
+    the least time.
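As the section heading says, any such flag can be checked by importing theano and printing the config variable; this hunk does not show the flag's name, so the sketch below only demonstrates the generic inspection pattern:

    import theano

    # Printing the config object dumps every config variable and its
    # current value, including the SeqOptimizer timing flag documented above.
    print(theano.config)

    # Individual variables are plain attributes, e.g.:
    print(theano.config.floatX)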
@@ -1064,7 +1064,7 @@ def _get_preallocated_maps(node, thunk, prealloc_modes, def_val,
     del f_cont_outputs
     # We assume that the different outputs of a same Op will behave
-    # independantly, and there is no need to test over all combinations
+    # independently, and there is no need to test over all combinations
     # of outputs (the time taken is prohibitive).
     max_ndim = 0
     for r in node.outputs:
...
@@ -1431,7 +1431,7 @@ class GCC_compiler(object):
             preargs.append('-fPIC')
         if sys.platform == 'win32' and local_bitwidth() == 64:
-            # Under 64-bits windows installation, sys.platform is 'win32'.
+            # Under 64-bit Windows installation, sys.platform is 'win32'.
             # We need to define MS_WIN64 for the preprocessor to be able to
             # link with libpython.
             preargs.append('-DMS_WIN64')
...
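A hedged sketch of the check above: local_bitwidth is Theano's own helper, re-derived here from the standard library under the assumption that it reports the interpreter's pointer size:

    import struct
    import sys

    def local_bitwidth():
        # Size in bits of a pointer ('P') in the running interpreter.
        return struct.calcsize('P') * 8

    # Even on 64-bit Windows, sys.platform is still 'win32', hence the
    # explicit bitwidth test before defining MS_WIN64.
    if sys.platform == 'win32' and local_bitwidth() == 64:
        preargs = ['-DMS_WIN64']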
@@ -95,7 +95,7 @@ class Unification:
     def __init__(self, inplace = False):
         """
         If inplace is False, the merge method will return a new Unification
-        that is independant from the previous one (which allows backtracking).
+        that is independent from the previous one (which allows backtracking).
         """
         self.unif = {}
         self.inplace = inplace
...
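A minimal sketch of the contract that docstring describes (a hypothetical simplified class, not Theano's actual implementation):

    class Unif:
        def __init__(self, inplace=False):
            self.unif = {}
            self.inplace = inplace

        def merge(self, var, value):
            # inplace=True mutates self; otherwise return an independent
            # copy so the caller can backtrack to the pre-merge state.
            target = self if self.inplace else Unif()
            if target is not self:
                target.unif = dict(self.unif)
            target.unif[var] = value
            return target

With inplace=False, a failed unification branch is abandoned by simply discarding the returned object and retrying from the original.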
@@ -147,14 +147,17 @@ def verify_grad_sparse(op, pt, structured=False, *args, **kwargs):
         Converts sparse variables back and forth.
         """
         conv_none = lambda x: x
+
         def conv_csr(ind, indptr, shp):
             def f(spdata):
                 return CSR(spdata, ind, indptr, shp)
             return f
+
         def conv_csc(ind, indptr, shp):
             def f(spdata):
                 return CSC(spdata, ind, indptr, shp)
             return f
+
         iconv = []
         dpt = []
@@ -189,10 +192,12 @@ def verify_grad_sparse(op, pt, structured=False, *args, **kwargs):
             oconv = DenseFromSparse(structured=structured)
         else:
             oconv = conv_none
+
         def conv_op(*inputs):
             ipt = [conv(i) for i, conv in zip(inputs, iconv)]
             out = op(*ipt)
             return oconv(out)
+
         return utt.verify_grad(conv_op, dpt, *args, **kwargs)
 verify_grad_sparse.E_grad = utt.verify_grad.E_grad
@@ -746,7 +751,7 @@ class DenseFromSparse(gof.op.Op):
                 (self.sparse_grad == other.sparse_grad)
     def __hash__(self):
-        return hash(type(self))^hash(self.sparse_grad)
+        return hash(type(self)) ^ hash(self.sparse_grad)
     def __str__(self):
         return "%s{structured_grad=%s}" % (
@@ -1180,8 +1185,8 @@ class MulSS(gof.op.Op):
         assert _is_sparse(x) and _is_sparse(y)
         assert len(x.shape) == 2
         assert y.shape == x.shape
-        # This call the element-wise multiple
-        # x * y call dot...
+        # This calls the element-wise multiply;
+        # x * y calls dot...
         out[0] = x.multiply(y)
     def grad(self, (x, y), (gz,)):
...
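The comment fixed above records a real scipy.sparse pitfall: on two sparse matrices, * performs matrix (dot) multiplication, so element-wise products must go through .multiply(). A quick demonstration:

    import numpy as np
    import scipy.sparse as sp

    x = sp.csr_matrix(np.array([[1., 0.], [0., 2.]]))
    y = sp.csr_matrix(np.array([[3., 0.], [4., 0.]]))

    print((x * y).toarray())        # matrix product (dot)
    print(x.multiply(y).toarray())  # element-wise product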
@@ -515,8 +515,8 @@ def get_constant_value(v):
         # TODO: implement the case where we take a scalar in a matrix
         assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim
-        #Needed to make better graph in this test.
-        #theano/tensor/tests/test_sharedvar.py:test_shared_options.test_specify_shape_partial
+        # Needed to make better graph in this test in theano/tensor/tests:
+        # test_sharedvar.py:test_shared_options.test_specify_shape_partial
         if (v.owner.inputs[0].owner and
             isinstance(v.owner.inputs[0].owner.op, Join) and
             # Ensure the Join is joining only scalar variables (so that
@@ -956,9 +956,9 @@ class TensorType(Type):
         return """
         %(name)s = NULL;
         if (py_%(name)s == Py_None) {
-            // We can either fail here or set %(name)s to NULL and rely on Ops using
-            // tensors to handle the NULL case, but if they fail to do so they'll end up
-            // with nasty segfaults, so this is public service.
+            // We can either fail here or set %(name)s to NULL and rely on Ops
+            // using tensors to handle the NULL case, but if they fail to do so
+            // they'll end up with nasty segfaults, so this is public service.
             PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
             %(fail)s
         }
@@ -966,15 +966,19 @@ class TensorType(Type):
             PyErr_SetString(PyExc_ValueError, "expected an ndarray");
             %(fail)s
         }
-        type_num_%(name)s = ((PyArrayObject*)py_%(name)s)->descr->type_num; //we expect %(type_num)s
+        // We expect %(type_num)s
+        type_num_%(name)s = ((PyArrayObject*)py_%(name)s)->descr->type_num;
         if (!PyArray_ISALIGNED(py_%(name)s)) {
             PyErr_Format(PyExc_NotImplementedError,
-                         "expected an aligned array of type %%d (%(type_num)s), got non-aligned array of type %%d",
+                         "expected an aligned array of type %%d "
+                         "(%(type_num)s), got non-aligned array of type %%d",
                          %(type_num)s, type_num_%(name)s);
             %(fail)s
         }
         if (type_num_%(name)s != %(type_num)s) {
-            PyErr_Format(PyExc_ValueError, "expected type_num %%d (%(type_num)s) got %%d", %(type_num)s, type_num_%(name)s);
+            PyErr_Format(PyExc_ValueError,
+                         "expected type_num %%d (%(type_num)s) got %%d",
+                         %(type_num)s, type_num_%(name)s);
             %(fail)s
         }
         %(name)s = (PyArrayObject*)(py_%(name)s);
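The type_num compared in the C snippet above is numpy's integer code for a dtype; from Python it is visible as dtype.num, and the alignment requirement as a flag:

    import numpy as np

    a = np.zeros(3, dtype='float64')
    print(a.dtype.num)      # 12: the type_num of float64 on standard builds
    print(a.flags.aligned)  # the property the PyArray_ISALIGNED check tests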
@@ -2713,12 +2717,12 @@ if 0:
     ## TODO (DOCUMENT AND WRITE TESTS) OR DELETE
     class Filler(gof.Op):
         """WRITEME"""
-        def __init__(self, value, ndim, dtype = 'float64'):
+        def __init__(self, value, ndim, dtype='float64'):
             self.value = value
             self.ndim = ndim
             self.dtype = dtype
-            self.type = TensorType(dtype = dtype,
-                                   broadcastable = (False,)*ndim)
+            self.type = TensorType(dtype=dtype,
+                                   broadcastable=(False,) * ndim)
         def make_node(self, dims):
             dims = as_tensor_variable(dims)
@@ -2728,21 +2732,22 @@ if 0:
             dims, = inp
             out, = out_
             if out[0] is not None:
-                out[0].resize(dims, refcheck = 0)
+                out[0].resize(dims, refcheck=0)
                 out[0].fill(self.value)
             else:
                 if self.value == 0:
-                    out[0] = numpy.zeros(dims, dtype = self.dtype)
+                    out[0] = numpy.zeros(dims, dtype=self.dtype)
                 elif self.value == 1:
-                    out[0] = numpy.ones(dims, dtype = self.dtype)
+                    out[0] = numpy.ones(dims, dtype=self.dtype)
                 else:
-                    out[0] = numpy.ones(dims, dtype = self.dtype) * self.value
+                    out[0] = numpy.ones(dims, dtype=self.dtype) * self.value
         def grad(self, inp, grads):
             return None,
         def __eq__(self, other):
-            return type(self) == type(other) and self.ndim == other.ndim and self.dtype == other.dtype
+            return (type(self) == type(other) and self.ndim == other.ndim and
+                    self.dtype == other.dtype)
         def __hash__(self):
             return hash(self.ndim) ^ hash(self.dtype)
@@ -2765,8 +2770,14 @@ if 0:
         """WRITEME"""
         return Ones(0)([])
-    pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Filler) and r.owner.op.value == 0, printing.FunctionPrinter('zeros'))
-    pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Filler) and r.owner.op.value == 1, printing.FunctionPrinter('ones'))
+    pprint.assign(lambda pstate, r: r.owner and
+                  isinstance(r.owner.op, Filler) and
+                  r.owner.op.value == 0,
+                  printing.FunctionPrinter('zeros'))
+    pprint.assign(lambda pstate, r: r.owner and
+                  isinstance(r.owner.op, Filler) and
+                  r.owner.op.value == 1,
+                  printing.FunctionPrinter('ones'))
 class Alloc(gof.Op):
@@ -2802,8 +2813,8 @@ class Alloc(gof.Op):
         sh = [as_tensor_variable(s) for s in shape]
         bcast = []
         if v.ndim > len(sh):
-            raise TypeError("Alloc value to use have more dimensions"
-                            " then the specified dimensions",
+            raise TypeError("The Alloc value to use has more dimensions"
+                            " than the specified dimensions",
                             v.ndim, len(sh))
         for i, s in enumerate(sh):
             if s.type.dtype[:3] not in ('int', 'uin'):
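Per the release-note entry at the top of this commit, the TypeError reworded above is now raised while the graph is built. A minimal sketch of both paths (assuming the usual alloc(value, *shape) call form):

    import theano.tensor as T

    v = T.matrix('v')         # a 2-dimensional value
    good = T.alloc(v, 3, 4)   # 2 target dimensions: accepted
    bad = T.alloc(v, 12)      # 1 target dimension < v.ndim: TypeError at build time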
@@ -3106,17 +3117,19 @@ if 0:
         assert repeats.type == iscalar
         assert axis.type == iscalar
         broadcastable = []
-        for i,x in enumerate(input.broadcastable):
-            if i==axis:
+        for i, x in enumerate(input.broadcastable):
+            if i == axis:
                 broadcastable += [False]
             else:
                 broadcastable += [x]
-        type = TensorType(dtype = input.type.dtype, broadcastable = \
-                          broadcastable)
+        type = TensorType(dtype=input.type.dtype,
+                          broadcastable=broadcastable)
         #backport
-        #type = TensorType(dtype = input.type.dtype,
-        #                  broadcastable = [False if i==axis else x for i, x in enumerate(input.broadcastable)])
+        #type = TensorType(dtype=input.type.dtype,
+        #                  broadcastable=[
+        #                      False if i==axis else x
+        #                      for i, x in enumerate(input.broadcastable)])
         return gof.Apply(self, [inputs, repeats, axis], [type()])
     def perform(self, node, inp, out_):
@@ -3807,7 +3820,8 @@ class Subtensor(Op):
                 if (!step)
                 {
                     Py_DECREF(xview);
-                    PyErr_Format(PyExc_ValueError, "slice step cannot be zero");
+                    PyErr_Format(PyExc_ValueError,
+                                 "slice step cannot be zero");
                     %(fail)s;
                 }
@@ -4209,7 +4223,8 @@ class IncSubtensor(Op):
             else
             {
                 if (%(z)s) Py_DECREF(%(z)s);
-                %(z)s = (PyArrayObject*)PyArray_FromAny(py_%(x)s, NULL, 0, 0, NPY_ENSURECOPY, NULL);
+                %(z)s = (PyArrayObject*)PyArray_FromAny(py_%(x)s, NULL, 0, 0,
+                                                        NPY_ENSURECOPY, NULL);
             }
         """ % locals()
@@ -5532,8 +5547,8 @@ def inverse_permutation(perm):
 # Advanced indexing
 #########################
 #
-# Should reproduce numpy's behaviour:
-# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
+# Should reproduce numpy's behaviour, see url:
+# docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
 class AdvancedSubtensor1(Op):
...
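The numpy behaviour that comment points to, for the integer-vector indexing AdvancedSubtensor1 implements:

    import numpy as np

    x = np.arange(12).reshape(3, 4)
    print(x[[0, 2]])          # rows 0 and 2: the AdvancedSubtensor1 case
    print(x[[0, 2], [1, 3]])  # elements (0, 1) and (2, 3): general advanced indexing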