提交 bb7f0aae authored 作者: Tanjay94's avatar Tanjay94

Moved kron into slinalg, Fixed nlinalg and slinalg code and doc.

上级 f0bfae92
......@@ -25,3 +25,5 @@ They are grouped into the following sections:
utils
extra_ops
io
slinalg
nlinalg
.. ../../../../theano/sandbox/linalg/ops.py
.. ../../../../theano/sandbox/nlinalg.py
.. _libdoc_linalg:
===================================================================
:mod:`tensor.slinalg` -- Linear Algebra Ops Using Scipy
:mod:`tensor.nlinalg` -- Linear Algebra Ops Using Numpy
===================================================================
.. module:: tensor.nlinalg
......@@ -14,7 +14,5 @@
API
===
.. automodule:: theano.tensor.nlinalg.ops
:members:
.. automodule:: theano.tensor.nlinalg.kron
.. automodule:: theano.tensor.nlinalg
:members:
.. ../../../../theano/sandbox/linalg/ops.py
.. ../../../../theano/sandbox/slinalg.py
.. _libdoc_linalg:
......@@ -14,7 +14,5 @@
API
===
.. automodule:: theano.tensor.slinalg.ops
:members:
.. automodule:: theano.tensor.slinalg.kron
.. automodule:: theano.tensor.slinalg
:members:
......@@ -54,11 +54,7 @@ class MatrixPinv(Op):
return Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
    """Compute the Moore-Penrose pseudo-inverse of the input matrix.

    :param node: the Apply node (unused here).
    :param inputs: one-element sequence holding the input ndarray ``x``.
    :param outputs: one-element sequence of output storage cells; the
        result is written into ``outputs[0][0]``.

    Any ``numpy.linalg.LinAlgError`` raised by ``pinv`` propagates to
    the caller unchanged (the old debug-log-and-reraise wrapper was
    removed in this commit).
    """
    # Explicit unpacking replaces the Python-2-only tuple parameters
    # ``(x,)``/``(z,)`` (removed in Python 3 by PEP 3113).
    (x,) = inputs
    (z,) = outputs
    # Cast back to the input dtype: numpy.linalg.pinv may upcast.
    z[0] = numpy.linalg.pinv(x).astype(x.dtype)
def __str__(self):
return "MatrixPseudoInverse"
......@@ -100,11 +96,7 @@ class MatrixInverse(Op):
return Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
    """Compute the inverse of the input matrix.

    :param node: the Apply node (unused here).
    :param inputs: one-element sequence holding the input ndarray ``x``.
    :param outputs: one-element sequence of output storage cells; the
        result is written into ``outputs[0][0]``.

    ``numpy.linalg.LinAlgError`` (singular matrix) propagates to the
    caller unchanged (the old debug-log-and-reraise wrapper was
    removed in this commit).
    """
    # Explicit unpacking replaces the Python-2-only tuple parameters
    # ``(x,)``/``(z,)`` (removed in Python 3 by PEP 3113).
    (x,) = inputs
    (z,) = outputs
    # Cast back to the input dtype: numpy.linalg.inv may upcast.
    z[0] = numpy.linalg.inv(x).astype(x.dtype)
def grad(self, inputs, g_outputs):
r"""The gradient function should return
......@@ -346,12 +338,7 @@ class Eig(Op):
return Apply(self, [x], [w, v])
def perform(self, node, inputs, outputs):
    """Compute eigenvalues and eigenvectors of ``x`` via ``self._numop``.

    :param node: the Apply node (unused here).
    :param inputs: one-element sequence holding the input ndarray ``x``.
    :param outputs: two output storage cells ``(w, v)``; eigenvalues go
        into ``w[0]``, eigenvectors into ``v[0]``.

    ``self._numop`` is the underlying numpy routine (numpy.linalg.eig
    for this class). ``numpy.linalg.LinAlgError`` propagates to the
    caller unchanged (the old debug-log-and-reraise wrapper was
    removed in this commit).
    """
    # Explicit unpacking replaces the Python-2-only tuple parameters
    # (removed in Python 3 by PEP 3113).
    (x,) = inputs
    (w, v) = outputs
    # Cast both outputs back to the input dtype; eig may upcast.
    w[0], v[0] = [z.astype(x.dtype) for z in self._numop(x)]
def infer_shape(self, node, shapes):
n = shapes[0][0]
......@@ -394,12 +381,7 @@ class Eigh(Eig):
return Apply(self, [x], [w, v])
def perform(self, node, inputs, outputs):
    """Compute eigenvalues/eigenvectors of the Hermitian matrix ``x``.

    :param node: the Apply node (unused here).
    :param inputs: one-element sequence holding the input ndarray ``x``.
    :param outputs: two output storage cells ``(w, v)``; eigenvalues go
        into ``w[0]``, eigenvectors into ``v[0]``.

    ``self._numop`` is the underlying numpy routine (numpy.linalg.eigh
    for this class) and ``self.UPLO`` selects which triangle of ``x``
    it reads. ``numpy.linalg.LinAlgError`` propagates to the caller
    unchanged (the old debug-log-and-reraise wrapper was removed in
    this commit).
    """
    # Explicit unpacking replaces the Python-2-only tuple parameters
    # (removed in Python 3 by PEP 3113).
    (x,) = inputs
    (w, v) = outputs
    # Unlike Eig.perform, no astype here: eigh's output dtypes are kept.
    w[0], v[0] = self._numop(x, self.UPLO)
def grad(self, inputs, g_outputs):
r"""The gradient function should return
......
......@@ -355,3 +355,36 @@ class EigvalshGrad(Op):
def eigvalsh(a, b, lower=True):
    """Generalized eigenvalues of the pair ``(a, b)``.

    Thin wrapper: builds an ``Eigvalsh`` op configured with *lower*
    and applies it to ``(a, b)``.

    :param a: first input to the Eigvalsh op.
    :param b: second input to the Eigvalsh op.
    :param lower: forwarded to the ``Eigvalsh`` constructor.
    :return: the symbolic output of ``Eigvalsh(lower)(a, b)``.
    """
    op = Eigvalsh(lower)
    return op(a, b)
def kron(a, b):
    """ Kronecker product

    Same as scipy.linalg.kron(a, b).

    :note: numpy.kron(a, b) != scipy.linalg.kron(a, b)!
        They don't have the same shape and order when
        a.ndim != b.ndim != 2.

    :param a: array_like
    :param b: array_like
    :return: array_like with a.ndim + b.ndim - 2 dimensions.

    :raises TypeError: if a.ndim + b.ndim < 3 (at least one input
        must be 2-d or higher).
    """
    a = tensor.as_tensor_variable(a)
    b = tensor.as_tensor_variable(b)
    if (a.ndim + b.ndim <= 2):
        raise TypeError('kron: inputs dimensions must sum to 3 or more. '
                        'You passed %d and %d.' % (a.ndim, b.ndim))
    # Outer product of the flattened inputs, reshaped so that a's axes
    # come first and b's axes follow: result shape is a.shape + b.shape.
    o = tensor.outer(a, b)
    o = o.reshape(tensor.concatenate((a.shape, b.shape)),
                  a.ndim + b.ndim)
    # Interleave a's second axis with b's first axis so that adjacent
    # axis pairs can later be merged into single output axes.
    shf = o.dimshuffle(0, 2, 1, * range(3, o.ndim))
    if shf.ndim == 3:
        # NOTE(review): the 3-d branch discards the dimshuffle above and
        # applies a different permutation before flattening — presumably
        # to match scipy.linalg.kron's layout for 1-d/2-d mixed inputs;
        # confirm against scipy.
        shf = o.dimshuffle(1, 0, 2)
        o = shf.flatten()
    else:
        # Merge each interleaved axis pair (0,2) and (1,3) into one
        # output axis; any remaining trailing axes are kept as-is.
        o = shf.reshape((o.shape[0] * o.shape[2],
                         o.shape[1] * o.shape[3]) +
                        tuple([o.shape[i] for i in range(4, o.ndim)]))
    return o
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论