Commit 458a5ccd authored by Christos Tsirigotis

Add `side` of searchsorted as dynamic param

- Fix tests and grad
- Add more documentation
- Fix doc: Raises ValueError in `as_tensor_variable`
- Remove 'DebugMode' from tests
上级 02835cce
......@@ -138,17 +138,15 @@ def as_tensor_variable(x, name=None, ndim=None):
If a new `Variable` instance is created, it will be named with this
string.
ndim : None or integer
Return a Variable with this many dimensions. Raise TypeError if it's
not possible.
Return a Variable with this many dimensions.
Raises
------
ValueError
If an `Apply` with more than one output is fetched.
If an `Apply` with more than one output is fetched or
if `x` cannot be made into a Variable with `ndim` dimensions.
AsTensorError
If `x` cannot be converted to a TensorType Variable.
TypeError
If `x` cannot be made into a Variable with `ndim` dimensions.
"""
if hasattr(x, '_as_TensorVariable'):
......
......@@ -8,7 +8,9 @@ import theano
from theano.tensor import basic
from theano.tensor import nlinalg # noqa
from theano import gof, scalar
from theano.gradient import DisconnectedType
from theano.gof import Generic
from theano import gradient
from theano.gradient import DisconnectedType, disconnected_type
tensor = basic
......@@ -79,11 +81,15 @@ class SearchsortedOp(theano.Op):
"""
params_type = Generic()
__props__ = ("side", )
def __init__(self, side='left'):
self.side = side
def get_params(self, node):
return self.side
def make_node(self, x, v, sorter=None):
x = basic.as_tensor(x, ndim=1)
v = basic.as_tensor(v)
......@@ -100,7 +106,7 @@ class SearchsortedOp(theano.Op):
def infer_shape(self, node, shapes):
return [shapes[1]]
def perform(self, node, inputs, output_storage):
def perform(self, node, inputs, output_storage, params):
x = inputs[0]
v = inputs[1]
if len(node.inputs) == 3:
......@@ -109,7 +115,23 @@ class SearchsortedOp(theano.Op):
sorter = None
z = output_storage[0]
z[0] = np.searchsorted(x, v, side=self.side, sorter=sorter)
z[0] = np.searchsorted(x, v, side=params, sorter=sorter)
def c_support_code_struct(self, node, name):
return """
int right_%(name)s;
""" % locals()
def c_init_code_struct(self, node, name, sub):
side = sub['params']
fail = sub['fail']
return """
PyObject* tmp_%(name)s = PyUnicode_FromString("right");
if (tmp_%(name)s == NULL)
%(fail)s;
right_%(name)s = PyUnicode_Compare(%(side)s, tmp_%(name)s);
Py_DECREF(tmp_%(name)s);
""" % locals()
def c_code(self, node, name, inames, onames, sub):
sorter = None
......@@ -120,19 +142,18 @@ class SearchsortedOp(theano.Op):
if not sorter:
sorter = "NULL"
z, = onames
side = "NPY_SEARCHRIGHT" if self.side == 'right' else "NPY_SEARCHLEFT"
fail = sub['fail']
return """
Py_XDECREF(%(z)s);
%(z)s = (PyArrayObject*) PyArray_SearchSorted(%(x)s, (PyObject*) %(v)s,
%(side)s, (PyObject*) %(sorter)s);
right_%(name)s ? NPY_SEARCHLEFT : NPY_SEARCHRIGHT, (PyObject*) %(sorter)s);
if (!%(z)s)
%(fail)s;
""" % locals()
def c_code_cache_version(self):
return (0, 1, 2)
return (1,)
def grad(self, inputs, output_gradients):
num_ins = len(inputs)
......@@ -141,20 +162,10 @@ class SearchsortedOp(theano.Op):
else:
x, v = inputs
x_grad = x.zeros_like()
if v.ndim == 1:
v_grad = v.zeros_like()
else:
v_grad = theano.gradient.grad_not_implemented(
self, 1, v, "Grad is not implemented for inputs with "
"number of dimension other than 1.")
x_grad = gradient._float_zeros_like(x)
v_grad = gradient._float_zeros_like(v)
if num_ins == 3:
sorter_grad = theano.gradient.grad_undefined(
self, 2, sorter,
"searchsorted is not defined for non-integer sorter so "
"searchsorted(x, nb, sorter+eps), for eps > 0, "
"is undefined")
return [x_grad, v_grad, sorter_grad]
return [x_grad, v_grad, disconnected_type()]
else:
return [x_grad, v_grad]
......@@ -162,7 +173,7 @@ class SearchsortedOp(theano.Op):
def searchsorted(x, v, side='left', sorter=None):
"""Find indices where elements should be inserted to maintain order.
Wraping of numpy.searchsorted. Find the indices into a sorted array
Wrapping of numpy.searchsorted. Find the indices into a sorted array
`x` such that, if the corresponding elements in `v` were inserted
before the indices, the order of `x` would be preserved.
......@@ -171,7 +182,7 @@ def searchsorted(x, v, side='left', sorter=None):
x: 1-D tensor (array-like)
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
which sorts it.
v: tensor (array-like)
Contains the values to be inserted into `x`.
side: {'left', 'right'}, optional.
......@@ -183,13 +194,36 @@ def searchsorted(x, v, side='left', sorter=None):
Contains indices that sort array `x` into ascending order.
They are typically the result of argsort.
.. versionadded:: 0.8.2
Returns
-------
indices : tensor of integers (int64)
Array of insertion points with the same shape as `v`.
See Also
--------
`numpy.searchsorted <https://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.searchsorted.html>`_
Notes
-----
* Binary search is used to find the required insertion points.
* This Op is working **only on CPU** currently.
Examples
--------
>>> from theano import tensor
>>> x = tensor.dvector()
>>> idx = x.searchsorted(3)
>>> idx.eval({x: [1,2,3,4,5]})
array(2)
>>> tensor.extra_ops.searchsorted([1,2,3,4,5], 3).eval()
array(2)
>>> tensor.extra_ops.searchsorted([1,2,3,4,5], 3, side='right').eval()
array(3)
>>> tensor.extra_ops.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]).eval()
array([0, 5, 1, 2])
.. versionadded:: 0.9
"""
return SearchsortedOp(side=side)(x, v, sorter)
......
......@@ -61,24 +61,11 @@ class TestSearchsortedOp(utt.InferShapeTester):
self.idx_sorted = None
def test_searchsortedOp_on_sorted_input(self):
f = theano.function([self.x, self.v], searchsorted(self.x, self.v),
mode="DebugMode")
f = theano.function([self.x, self.v], searchsorted(self.x, self.v))
assert np.allclose(
np.searchsorted(self.a[self.idx_sorted], self.b),
f(self.a[self.idx_sorted], self.b))
def test_searchsortedOp_on_none_sorter(self):
# Current implementation of numpy.searchsorted
# does not raise an error if `x` is not sorted and sorter is None.
sorter = T.vector('sorter', dtype="int64")
f = theano.function([self.x, self.v, sorter],
searchsorted(self.x, self.v, sorter=sorter))
# assert np.allclose(
# np.searchsorted(self.a, self.b, sorter=None),
# f(self.a, self.b, sorter=None))
self.assertRaises(ValueError, f,
self.a[self.idx_sorted], self.b, None)
def test_searchsortedOp_on_float_sorter(self):
sorter = T.vector('sorter', dtype="float32")
self.assertRaises(TypeError, searchsorted,
......@@ -91,33 +78,18 @@ class TestSearchsortedOp(utt.InferShapeTester):
sorter = T.vector('sorter', dtype=dtype)
f = theano.function([self.x, self.v, sorter],
searchsorted(self.x, self.v, sorter=sorter),
mode="DebugMode", allow_input_downcast=True)
allow_input_downcast=True)
assert np.allclose(
np.searchsorted(self.a, self.b, sorter=self.idx_sorted),
f(self.a, self.b, self.idx_sorted))
def test_searchsortedOp_on_right_side(self):
f = theano.function([self.x, self.v],
searchsorted(self.x, self.v, side='right'),
mode="DebugMode")
searchsorted(self.x, self.v, side='right'))
assert np.allclose(
np.searchsorted(self.a, self.b, side='right'),
f(self.a, self.b))
def test_use_c_code(self):
f = theano.function([self.x, self.v], searchsorted(self.x, self.v),
mode="FAST_RUN")
assert np.allclose(
np.searchsorted(self.a[self.idx_sorted], self.b),
f(self.a[self.idx_sorted], self.b))
f = theano.function([self.x, self.v], searchsorted(self.x, self.v),
mode=theano.compile.Mode(linker="c",
optimizer='fast_run'))
assert np.allclose(
np.searchsorted(self.a[self.idx_sorted], self.b),
f(self.a[self.idx_sorted], self.b))
def test_infer_shape(self):
# Test using default parameters' value
self._compile_and_check([self.x, self.v],
......@@ -133,26 +105,14 @@ class TestSearchsortedOp(utt.InferShapeTester):
self.op_class)
# Test parameter ``side``
self.a = np.ones(10).astype(config.floatX)
self.b = np.ones(shape=(1, 2, 3)).astype(config.floatX)
la = np.ones(10).astype(config.floatX)
lb = np.ones(shape=(1, 2, 3)).astype(config.floatX)
self._compile_and_check([self.x, self.v],
[searchsorted(self.x, self.v, side='right')],
[self.a, self.b],
[la, lb],
self.op_class)
def test_grad(self):
self.a = np.random.random(100).astype(config.floatX)
self.b = np.random.random((1, 2, 5)).astype(config.floatX)
self.idx_sorted = np.argsort(self.a)
self.assertRaises(theano.gradient.NullTypeGradError,
utt.verify_grad, self.op,
[self.a[self.idx_sorted], self.b])
self.a = np.random.random(100).astype(config.floatX)
self.b = np.random.random(10).astype(config.floatX)
self.idx_sorted = np.argsort(self.a)
utt.verify_grad(self.op, [self.a[self.idx_sorted], self.b])
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论