Commit e17f9005 authored by nouiz

Merge pull request #1058 from abalkin/eig

Issue #1057: Implemented linalg.eig
from ops import (cholesky, matrix_inverse, solve, from ops import (cholesky, matrix_inverse, solve,
diag, extract_diag, alloc_diag, diag, extract_diag, alloc_diag,
det, psd, det, psd, eig,
trace, spectral_radius_bound) trace, spectral_radius_bound)
...@@ -192,7 +192,7 @@ theano.compile.mode.optdb.register('HintsOpt', ...@@ -192,7 +192,7 @@ theano.compile.mode.optdb.register('HintsOpt',
def psd(v): def psd(v):
""" """
Apply a hint that the variable `v` is positive semi-definite, i.e. Apply a hint that the variable `v` is positive semi-definite, i.e.
it is a symmetric matrix and x^T A x >= for any vector x. it is a symmetric matrix and :math:`x^T A x \ge 0` for any vector x.
""" """
return Hint(psd=True, symmetric=True)(v) return Hint(psd=True, symmetric=True)(v)
...@@ -567,16 +567,16 @@ class MatrixInverse(Op): ...@@ -567,16 +567,16 @@ class MatrixInverse(Op):
raise raise
def grad(self, inputs, g_outputs): def grad(self, inputs, g_outputs):
"""The gradient function should return: r"""The gradient function should return
:math:`V\\frac{\partial X^{-1}}{\partial X}` .. math:: V\frac{\partial X^{-1}}{\partial X},
where :math:`V` corresponds to ``g_outputs`` and :math:`X` to where :math:`V` corresponds to ``g_outputs`` and :math:`X` to
``inputs``. Using the matrix cookbook ``inputs``. Using the `matrix cookbook
``http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274``, <http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274>`_,
once can deduce that the relation corresponds to : once can deduce that the relation corresponds to
:math:`(X^{-1} \cdot V^{T} \cdot X^{-1})^T` .. math:: (X^{-1} \cdot V^{T} \cdot X^{-1})^T.
""" """
x, = inputs x, = inputs
...@@ -586,16 +586,16 @@ class MatrixInverse(Op): ...@@ -586,16 +586,16 @@ class MatrixInverse(Op):
return [-matrix_dot(xi, gz.T, xi).T] return [-matrix_dot(xi, gz.T, xi).T]
def R_op(self, inputs, eval_points): def R_op(self, inputs, eval_points):
"""The gradient function should return: r"""The gradient function should return
:math:`\\frac{\partial X^{-1}}{\partial X}V` .. math:: \frac{\partial X^{-1}}{\partial X}V,
where :math:`V` corresponds to ``g_outputs`` and :math:`X` to where :math:`V` corresponds to ``g_outputs`` and :math:`X` to
``inputs``. Using the matrix cookbook ``inputs``. Using the `matrix cookbook
``http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274``, <http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274>`_,
once can deduce that the relation corresponds to : once can deduce that the relation corresponds to
:math:`X^{-1} \cdot V \cdot X^{-1}` .. math:: X^{-1} \cdot V \cdot X^{-1}.
""" """
x, = inputs x, = inputs
...@@ -875,3 +875,47 @@ class A_Xinv_b(Op): ...@@ -875,3 +875,47 @@ class A_Xinv_b(Op):
gX = -matrix_dot(iX.T, a, gz, b.T, iX.T) gX = -matrix_dot(iX.T, a, gz, b.T, iX.T)
gb = matrix_dot(ix.T, a.T, gz) gb = matrix_dot(ix.T, a.T, gz)
return [ga, gX, gb] return [ga, gX, gb]
class Eig(Op):
    """Compute the eigenvalues and right eigenvectors of a square array.

    Wraps ``numpy.linalg.eig``: for a square matrix ``x`` this op returns
    a vector ``w`` of eigenvalues and a matrix ``v`` whose columns are the
    corresponding right eigenvectors.
    """
    def __init__(self):
        # The op has no configurable state; nothing to initialize.
        pass
    def props(self):
        """Return the tuple of properties identifying this op instance.

        ``Eig`` has no properties, so the tuple is empty and all
        instances compare (and hash) equal.
        """
        return ()
    def __hash__(self):
        # Hash on (type, props) so instances that compare equal via
        # __eq__ also hash equal.
        return hash((type(self), self.props()))
    def __eq__(self, other):
        return (type(self) == type(other) and self.props() == other.props())
    def make_node(self, x):
        x = as_tensor_variable(x)
        # Two outputs, same dtype as the input: eigenvalues (vector)
        # and right eigenvectors (matrix).
        w = theano.tensor.vector(dtype=x.dtype)
        v = theano.tensor.matrix(dtype=x.dtype)
        return Apply(self, [x], [w, v])
    def perform(self, node, inputs, outputs):
        """Compute eigenvalues/eigenvectors into the output storage.

        NOTE: the original signature used Python 2 tuple-parameter
        unpacking (``def perform(self, node, (x,), (w, v))``), which is
        a syntax error in Python 3 (PEP 3113); Theano calls ``perform``
        positionally, so unpacking in the body is call-compatible.
        """
        (x,) = inputs
        (w, v) = outputs
        try:
            # Cast back to the input dtype: numpy.linalg.eig may return
            # complex results even for a real input matrix.
            w[0], v[0] = [z.astype(x.dtype) for z in numpy.linalg.eig(x)]
        except numpy.linalg.LinAlgError:
            # Lazy %-args: the message is only formatted if DEBUG
            # logging is actually enabled.
            logger.debug('Failed to find eig of %s', node.inputs[0])
            raise
    def infer_shape(self, node, shapes):
        # For an (n, n) input: eigenvalues are (n,), eigenvectors (n, n).
        n = shapes[0][0]
        return [(n,), (n, n)]
    def __str__(self):
        return "Eig"
eig = Eig()
import numpy import numpy
import numpy.linalg import numpy.linalg
from numpy.testing import assert_array_almost_equal
import theano import theano
from theano import tensor, function from theano import tensor, function
...@@ -26,6 +27,7 @@ from theano.sandbox.linalg.ops import (cholesky, ...@@ -26,6 +27,7 @@ from theano.sandbox.linalg.ops import (cholesky,
matrix_dot, matrix_dot,
spectral_radius_bound, spectral_radius_bound,
imported_scipy, imported_scipy,
Eig,
) )
from nose.plugins.skip import SkipTest from nose.plugins.skip import SkipTest
...@@ -467,3 +469,31 @@ class test_Solve(utt.InferShapeTester): ...@@ -467,3 +469,31 @@ class test_Solve(utt.InferShapeTester):
numpy.asarray(rng.rand(5), numpy.asarray(rng.rand(5),
dtype=config.floatX)], dtype=config.floatX)],
self.op_class) self.op_class)
class test_Eig(utt.InferShapeTester):
    """Unit tests for the ``Eig`` op (shape inference and evaluation)."""
    def setUp(self):
        super(test_Eig, self).setUp()
        self.op_class = Eig
        self.op = Eig()
    def test_infer_shape(self):
        """Check infer_shape against an actual run on a square matrix."""
        rng = numpy.random.RandomState(utt.fetch_seed())
        A = theano.tensor.matrix()
        X = numpy.asarray(rng.rand(5, 5),
                          dtype=config.floatX)
        self._compile_and_check([A],          # theano.function inputs
                                self.op(A),   # theano.function outputs
                                # A must be square
                                [X.dot(X.T)],
                                self.op_class)
    def test_eval(self):
        """Evaluate the op on small matrices with known eigensystems."""
        import math
        A = theano.tensor.matrix()
        # 1x1 identity: single eigenvalue 1.0, eigenvector [1.0].
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual([e.eval({A: [[1]]}) for e in self.op(A)],
                         [[1.0], [[1.0]]])
        # The symmetric permutation matrix [[0, 1], [1, 0]] has
        # eigenvalues +1 and -1.
        w, v = [e.eval({A: [[0, 1], [1, 0]]})
                for e in self.op(A)]
        assert_array_almost_equal(w, [1, -1])
        # Normalized eigenvectors: columns (x, x) and (-x, x), x = 1/sqrt(2).
        x = math.sqrt(2) / 2
        assert_array_almost_equal(v, [[x, -x], [x, x]])
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment