提交 a0bf02ba authored 作者: Seon-Wook Park's avatar Seon-Wook Park

Add numpy.compress to theano.tensor

上级 e9384d66
......@@ -511,6 +511,75 @@ def squeeze(x):
return view
class CompressOp(theano.Op):
    # See the `compress` function below for the user-facing docstring.

    def __init__(self, axis=None):
        # axis: int or None. None means the input is flattened before
        # the selection, as with numpy.compress.
        self.axis = axis

    def __eq__(self, other):
        return (type(self) == type(other) and
                self.axis == other.axis)

    def __hash__(self):
        return hash(type(self)) ^ hash(self.axis)

    def make_node(self, condition, x):
        x = basic.as_tensor_variable(x)
        condition = basic.as_tensor_variable(condition)
        if condition.ndim != 1:
            raise TypeError("condition must be a 1-dimensional tensor.")
        if self.axis is None:
            # numpy.compress flattens its input when axis is None, so
            # the output is always a vector in that case.
            out_var = basic.TensorType(dtype=x.dtype,
                                       broadcastable=(False,))()
        else:
            out_var = x.type()
        return theano.Apply(self, [condition, x], [out_var])

    def perform(self, node, inputs, output_storage):
        condition, x = inputs
        z = output_storage[0]
        # Cast to bool so nonzero entries of a numeric condition select,
        # matching numpy.compress semantics.
        z[0] = np.compress(condition.astype(bool), x, axis=self.axis)

    def infer_shape(self, node, ins_shapes):
        # The length of the compressed axis is the number of nonzero
        # entries of the condition; it is only known symbolically.
        n = basic.neq(node.inputs[0], 0).sum()
        if self.axis is None:
            return [(n,)]
        out_shape = list(ins_shapes[1])
        out_shape[self.axis] = n
        return [tuple(out_shape)]

    def __str__(self):
        return self.__class__.__name__
def compress(condition, x, axis=None, out=None):
    """Return selected slices of ``x`` along the given axis.

    Wrapping of numpy.compress.

    :param condition: 1-dimensional tensor of booleans (or of values
        interpreted as booleans) selecting which slices to keep.
    :param x: tensor whose slices are selected.
    :param axis: axis of ``x`` along which slices are selected.
        ``None`` (default) selects over the flattened tensor.
    :param out: unsupported; present only to keep the same signature
        as numpy.compress. Must be ``None``.

    :return: tensor containing the selected slices of ``x``.

    .. versionadded:: 0.7
    """
    # `out` is kept only for NumPy signature compatibility; an explicit
    # raise (rather than assert) survives `python -O`.
    if out is not None:
        raise NotImplementedError("compress() does not support the"
                                  " `out` argument.")
    return CompressOp(axis=axis)(condition, x)
class RepeatOp(theano.Op):
# See the repeat function for docstring
......
......@@ -700,3 +700,35 @@ def norm(x, ord):
raise ValueError(0)
elif ndim > 2:
raise NotImplementedError("We don't support norm witn ndim > 2")
class TensorSolve(Op):
    """Computes the Moore-Penrose pseudo-inverse of a matrix :math:`A`.

    The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
    defined as: "the matrix that 'solves' [the least-squares problem]
    :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
    :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.

    Note that :math:`Ax = AA^+b`, so :math:`AA^+` is close to the
    identity matrix.

    This method is not faster than `matrix_inverse`. Its strength comes
    from the fact that it works for non-square matrices.
    If you have a square matrix though, `matrix_inverse` can be both
    more exact and faster to compute. Also this op does not get
    optimized into a solve op.

    NOTE(review): the class name suggests ``numpy.linalg.tensorsolve``,
    but the implementation calls ``numpy.linalg.pinv`` -- confirm
    whether the name or the implementation is the intended one.
    """
    __props__ = ()

    def __init__(self):
        pass

    def make_node(self, x):
        x = as_tensor_variable(x)
        # Only matrices are supported.
        assert x.ndim == 2
        return Apply(self, [x], [x.type()])

    def perform(self, node, inputs, outputs):
        # Unpack in the body instead of the signature: tuple parameters
        # in a `def` are Python-2-only syntax.
        (x,) = inputs
        (z,) = outputs
        # pinv may upcast; cast back to preserve the declared dtype.
        z[0] = numpy.linalg.pinv(x).astype(x.dtype)


tensorsolve = TensorSolve()
......@@ -7,8 +7,8 @@ from theano.tests import unittest_tools as utt
from theano.tensor.extra_ops import (CumsumOp, cumsum, CumprodOp, cumprod,
BinCountOp, bincount, DiffOp, diff,
squeeze, RepeatOp, repeat,
Bartlett, bartlett,
squeeze, CompressOp, compress,
RepeatOp, repeat, Bartlett, bartlett,
FillDiagonal, fill_diagonal,
FillDiagonalOffset, fill_diagonal_offset,
to_one_hot)
......@@ -344,6 +344,45 @@ class SqueezeTester(utt.InferShapeTester):
assert numpy.allclose(tested, expected)
class TestCompressOp(utt.InferShapeTester):
    """Checks CompressOp against numpy.compress and its shape inference."""

    def setUp(self):
        super(TestCompressOp, self).setUp()
        self.op_class = CompressOp
        self.op = CompressOp()

    def test_compressOp(self):
        mat = T.dmatrix()
        vec = T.dvector()
        mask = np.array([1, 0, 1, 0], dtype=bool)
        data = np.random.random((3, 4)).astype(config.floatX)
        # Flattened (default axis=None) case first, then each axis.
        flat_fn = theano.function([vec, mat], compress(vec, mat))
        assert np.allclose(np.compress(mask, data), flat_fn(mask, data))
        for ax in range(data.ndim):
            axis_fn = theano.function([vec, mat],
                                      compress(vec, mat, axis=ax))
            assert np.allclose(np.compress(mask, data, axis=ax),
                               axis_fn(mask, data))

    def test_infer_shape(self):
        mat = T.dmatrix()
        vec = T.dvector()
        mask = np.array([1, 0, 1, 0], dtype=bool)
        data = np.random.random((3, 4)).astype(config.floatX)
        # axis=None reproduces the original no-axis call exactly.
        for ax in [None] + list(range(data.ndim)):
            self._compile_and_check([vec, mat],
                                    [compress(vec, mat, axis=ax)],
                                    [mask, data],
                                    self.op_class)
class TestRepeatOp(utt.InferShapeTester):
def _possible_axis(self, ndim):
return [None] + range(ndim) + [-i for i in range(ndim)]
......
......@@ -596,6 +596,11 @@ class _tensor_py_operators:
"""
return theano.tensor.extra_ops.squeeze(self)
def compress(self, a, axis=None, out=None):
    """Return selected slices of this tensor, as numpy.ndarray.compress.

    :param a: 1-dimensional tensor of booleans selecting which slices
        of ``self`` to keep (named ``a`` only to mirror the NumPy
        method signature; it is the condition).
    :param axis: axis along which slices are selected; ``None``
        (default) selects over the flattened tensor.
    :param out: unsupported; kept for NumPy signature compatibility.

    .. note:: mirrors ``numpy.ndarray.compress(condition, ...)``:
       ``self`` is the data and the argument is the condition, so the
       arguments to ``extra_ops.compress(condition, x)`` are swapped
       here. ``out`` is forwarded instead of being silently dropped.
    """
    return theano.tensor.extra_ops.compress(a, self, axis=axis, out=out)
class TensorVariable(_tensor_py_operators, Variable):
"""Subclass to add the tensor operators to the basic `Variable` class."""
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论