提交 52afebdf authored 作者: Pascal Lamblin

Merge pull request #2159 from nouiz/permutation

Allow permutation to return just one permutation.
......@@ -11,6 +11,10 @@
:synopsis: Linear Algebra Ops Using Numpy
.. moduleauthor:: LISA
.. note::
This module is not imported by default. You need to import it to use it.
API
===
......
......@@ -11,6 +11,10 @@
:synopsis: Linear Algebra Ops Using Scipy
.. moduleauthor:: LISA
.. note::
This module is not imported by default. You need to import it to use it.
API
===
......
......@@ -4,6 +4,7 @@
import logging
import os
import shlex
import sys
import warnings
......@@ -39,7 +40,10 @@ def parse_config_string(config_string, issue_warnings=True):
Parses a config string (comma-separated key=value components) into a dict.
"""
config_dict = {}
for kv_pair in config_string.split(','):
my_splitter = shlex.shlex(config_string, posix=True)
my_splitter.whitespace = ','
my_splitter.whitespace_split = True
for kv_pair in my_splitter:
kv_pair = kv_pair.strip()
if not kv_pair:
continue
......
......@@ -558,7 +558,7 @@ if (%(name)s == NULL) {
py_%(name)s = PyCapsule_New((void *)%(name)s, NULL,
_py3_destructor);
if (py_%(name)s != NULL) {
if (PyCaspule_SetContext(py_%(name)s, (void *)%(freefunc)s) != 0) {
if (PyCapsule_SetContext(py_%(name)s, (void *)%(freefunc)s) != 0) {
/* This won't trigger a call to freefunc since it could not be
set. The error case below will do it. */
Py_DECREF(py_%(name)s);
......
......@@ -649,6 +649,10 @@ def local_gpu_careduce(node):
if isinstance(node.op.scalar_op, (scal.Add, scal.Mul,
scal.Maximum, scal.Minimum)):
x, = node.inputs
# Otherwise, is some corner case, we will try to move it
# to the GPU later and this cause not wanted user warning.
if x.dtype != 'float32':
return
replace = False
if x.owner and isinstance(x.owner.op, HostFromGpu):
replace = True
......@@ -666,7 +670,7 @@ def local_gpu_careduce(node):
# don't introduce a bigger transfer. It is hard to
# know if after all optimization we will do the bigger
# transfer or not. I'm guessing an heuristic to find
# that. I suppose that if the input of the recution is
# that. I suppose that if the input of the reduction is
# generated by an op that we can in some cases move to
# the GPU, that we will move it. If some CPU ops are
# supported only in some cases on the GPU, this will
......
......@@ -632,22 +632,38 @@ def get_scalar_constant_value(orig_v, elemwise=True):
# test_sharedvar.py:test_shared_options.test_specify_shape_partial
if (v.owner.inputs[0].owner and
isinstance(v.owner.inputs[0].owner.op, Join) and
len(v.owner.op.idx_list) == 1):
# Ensure the Join is joining only scalar variables (so that
# the constant value can be found at the same index as the one
# used in the sub-tensor).
python_all(var.ndim == 0 for var in
v.owner.inputs[0].owner.inputs) and
len(v.owner.op.idx_list) == 1):
idx = v.owner.op.idx_list[0]
if isinstance(idx, gof.Type):
idx = get_scalar_constant_value(v.owner.inputs[1])
# Note the '+ 1' is because the first argument to Join is the
# axis.
ret = v.owner.inputs[0].owner.inputs[idx + 1]
ret = get_scalar_constant_value(ret)
# join can cast implicitly its input in some case.
return theano._asarray(ret, dtype=v.type.dtype)
if python_all(var.ndim == 0 for var in
v.owner.inputs[0].owner.inputs[1:]):
idx = v.owner.op.idx_list[0]
if isinstance(idx, gof.Type):
idx = get_scalar_constant_value(v.owner.inputs[1])
# Note the '+ 1' is because the first argument to Join is the
# axis.
ret = v.owner.inputs[0].owner.inputs[idx + 1]
ret = get_scalar_constant_value(ret)
# join can cast implicitly its input in some case.
return theano._asarray(ret, dtype=v.type.dtype)
if python_all(var.ndim == 1 for var in
v.owner.inputs[0].owner.inputs[1:]):
idx = v.owner.op.idx_list[0]
if isinstance(idx, gof.Type):
idx = get_scalar_constant_value(v.owner.inputs[1])
try:
#TODO: assert joined axis is 0.
length = 0
for joined in v.owner.inputs[0].owner.inputs[1:]:
ll = get_vector_length(joined)
if idx < length + ll:
return get_scalar_constant_value(joined[idx-length])
length += ll
except TypeError:
pass
except ValueError:
pass
elif (v.owner.inputs[0].owner and
isinstance(v.owner.inputs[0].owner.op,
......@@ -2035,6 +2051,8 @@ class Nonzero(gof.Op):
flattened input array.
"""
__props__ = ()
def make_node(self, a):
a = as_tensor_variable(a)
if a.ndim == 0:
......@@ -3663,6 +3681,15 @@ def get_vector_length(v):
return len(v.owner.inputs)
if v.owner and isinstance(v.owner.op, Shape):
return v.owner.inputs[0].type.ndim
# If we take this slice: var[:0], we know it will have 0 elements.
if (v.owner and
isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
isinstance(v.owner.op.idx_list[0], slice) and
v.owner.op.idx_list[0].start in [None, 0]):
stop = theano.tensor.subtensor.get_idx_list(
v.owner.inputs, v.owner.op.idx_list)[0].stop
if extract_constant(stop) == 0:
return 0
raise ValueError("length not known")
......
......@@ -58,7 +58,8 @@ def conv2d(input, filters, image_shape=None, filter_shape=None,
of shape: image_shape + filter_shape - 1
:type subsample: tuple of len 2
:param subsample: factor by which to subsample the output
:param subsample: factor by which to subsample the output.
Also called strides elsewhere.
:type image_shape: None, tuple/list of len 4 of int or Constant variable
:param image_shape: The shape of the input parameter.
......
......@@ -183,7 +183,11 @@ class RandomFunction(gof.Op):
draw.
"""
shape = tensor.as_tensor_variable(shape, ndim=1)
shape_ = tensor.as_tensor_variable(shape, ndim=1)
if shape == ():
shape = shape_.astype('int32')
else:
shape = shape_
assert shape.type.ndim == 1
assert (shape.type.dtype == 'int64') or (shape.type.dtype == 'int32')
if not isinstance(r.type, RandomStateType):
......@@ -700,7 +704,15 @@ def permutation(random_state, size=None, n=1, ndim=None, dtype='int64'):
:note:
Note that the output will then be of dimension ndim+1.
"""
ndim, size, bcast = _infer_ndim_bcast(ndim, size)
if size is None or size == ():
if not(ndim is None or ndim == 1):
raise TypeError(
"You asked for just one permutation but asked for more then 1 dimensions.")
ndim = 1
size = ()
bcast = ()
else:
ndim, size, bcast = _infer_ndim_bcast(ndim, size)
#print "NDIM", ndim, size
op = RandomFunction(permutation_helper,
tensor.TensorType(dtype=dtype, broadcastable=bcast + (False,)),
......
......@@ -2097,7 +2097,12 @@ def take(a, indices, axis=None, mode='raise'):
shape = indices.shape
ndim = indices.ndim
else:
shape = theano.tensor.concatenate(
[a.shape[:axis], indices.shape, a.shape[axis + 1:]])
# If axis is 0, don't generate a useless concatenation.
if axis == 0:
shape = theano.tensor.concatenate(
[indices.shape, a.shape[axis + 1:]])
else:
shape = theano.tensor.concatenate(
[a.shape[:axis], indices.shape, a.shape[axis + 1:]])
ndim = a.ndim + indices.ndim - 1
return take(a, indices.flatten(), axis, mode).reshape(shape, ndim)
......@@ -3889,7 +3889,8 @@ class T_local_reduce(unittest.TestCase):
assert isinstance(topo[-1].op, T.Elemwise), out
# Test different axis for the join and the reduction
A = theano.shared(numpy.array([1, 2, 3, 4, 5]))
# We must force the dtype, of otherwise, this tests will fail in 32 bit system
A = theano.shared(numpy.array([1, 2, 3, 4, 5], dtype='int64'))
f = theano.function([], T.sum(T.stack(A, A), axis=0), mode=self.mode)
assert numpy.allclose(f(), [2, 4, 6, 8, 10])
......
......@@ -230,7 +230,7 @@ class T_random_function(utt.InferShapeTester):
rng_R = random_state_type()
# No shape, no args -> TypeError
self.assertRaises(TypeError, permutation, rng_R, size=None, ndim=2)
self.assertRaises(TypeError, poisson, rng_R, size=None, ndim=2)
def test_random_function_ndim_added(self):
"""Test that random_function helper function accepts ndim_added as
......@@ -561,6 +561,19 @@ class T_random_function(utt.InferShapeTester):
self.assertTrue(numpy.all(val0 == numpy_val0))
self.assertTrue(numpy.all(val1 == numpy_val1))
# Test that we can generate a list: have size=None or ().
for ndim in [1, None]:
post_r, out = permutation(rng_R, n=10, size=None, ndim=ndim)
inp = compile.In(rng_R,
value=numpy.random.RandomState(utt.fetch_seed()),
update=post_r, mutable=True)
f = theano.function([inp], out)
o = f()
assert o.shape == (10,)
assert (numpy.sort(o) == numpy.arange(10)).all()
# Wrong number of dimensions asked
self.assertRaises(TypeError, permutation, rng_R, size=None, ndim=2)
def test_multinomial(self):
"""Test that raw_random.multinomial generates the same
results as numpy."""
......
......@@ -27,7 +27,7 @@ from theano.tensor.subtensor import (inc_subtensor, set_subtensor,
from theano.tensor import (as_tensor_variable, _shared,
NotScalarConstantError,
fscalar, iscalar, dscalar, cscalar,
vector, dvector, fvector, lvector,
vector, dvector, fvector, lvector, lrow,
fmatrix, dmatrix, lmatrix, matrix,
ctensor3, dtensor4)
from theano.tensor.tests.test_basic import rand, randint_ranged, inplace_func
......@@ -1140,6 +1140,7 @@ class TestAdvancedSubtensor(unittest.TestCase):
self.ix1 = lvector() # advanced 1d query
self.ix12 = lvector()
self.ix2 = lmatrix()
self.ixr = lrow()
def eval_output_and_check(self, t):
f = inplace_func([], t, mode=self.mode)
......@@ -1164,6 +1165,11 @@ class TestAdvancedSubtensor(unittest.TestCase):
assert a.broadcastable == self.ix2.broadcastable, (
a.broadcastable, self.ix2.broadcastable)
def test_index_into_mat_w_row(self):
a = self.m[self.ixr]
assert a.dtype == self.m.dtype, (a.dtype, self.m.dtype)
assert a.broadcastable == (True, False, False)
def test_index_w_int_and_vec(self):
# like test_ok_list, but with a single index on the first one
# data has to have at least 2 dimensions
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论