Commit 4deacacd authored by Sudarsan Mansingh, committed by Ricardo Vieira

Removed CholeskyGrad Op

Parent ab13fe09
@@ -129,73 +129,6 @@ class Cholesky(Op):
 cholesky = Cholesky()
 
 
-class CholeskyGrad(Op):
-    """"""
-
-    __props__ = ("lower", "destructive")
-
-    def __init__(self, lower=True):
-        self.lower = lower
-        self.destructive = False
-
-    def make_node(self, x, l, dz):
-        x = as_tensor_variable(x)
-        l = as_tensor_variable(l)
-        dz = as_tensor_variable(dz)
-        assert x.ndim == 2
-        assert l.ndim == 2
-        assert dz.ndim == 2
-        assert (
-            l.owner.op.lower == self.lower
-        ), "lower/upper mismatch between Cholesky op and CholeskyGrad op"
-        return Apply(self, [x, l, dz], [x.type()])
-
-    def perform(self, node, inputs, outputs):
-        """
-        Implements the "reverse-mode" gradient [#]_ for the
-        Cholesky factorization of a positive-definite matrix.
-
-        References
-        ----------
-        .. [#] S. P. Smith. "Differentiation of the Cholesky Algorithm".
-           Journal of Computational and Graphical Statistics,
-           Vol. 4, No. 2 (Jun., 1995), pp. 134-147
-           http://www.jstor.org/stable/1390762
-
-        """
-        x = inputs[0]
-        L = inputs[1]
-        dz = inputs[2]
-        dx = outputs[0]
-        N = x.shape[0]
-        if self.lower:
-            F = np.tril(dz)
-            for k in range(N - 1, -1, -1):
-                for j in range(k + 1, N):
-                    for i in range(j, N):
-                        F[i, k] -= F[i, j] * L[j, k]
-                        F[j, k] -= F[i, j] * L[i, k]
-                for j in range(k + 1, N):
-                    F[j, k] /= L[k, k]
-                    F[k, k] -= L[j, k] * F[j, k]
-                F[k, k] /= 2 * L[k, k]
-        else:
-            F = np.triu(dz)
-            for k in range(N - 1, -1, -1):
-                for j in range(k + 1, N):
-                    for i in range(j, N):
-                        F[k, i] -= F[j, i] * L[k, j]
-                        F[k, j] -= F[j, i] * L[k, i]
-                for j in range(k + 1, N):
-                    F[k, j] /= L[k, k]
-                    F[k, k] -= L[k, j] * F[k, j]
-                F[k, k] /= 2 * L[k, k]
-        dx[0] = F
-
-    def infer_shape(self, fgraph, node, shapes):
-        return [shapes[0]]
-
-
 class CholeskySolve(Op):
     __props__ = ("lower", "check_finite")
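For reference, the algorithm the removed `perform` method implemented is Smith's (1995) reverse-mode gradient of the Cholesky factorization. The sketch below replays that loop as a standalone NumPy function and checks it against central finite differences; it is written for illustration only, `cholesky_grad_numpy` is a hypothetical helper name rather than a PyTensor API, and the check relies on `np.linalg.cholesky` reading only the lower triangle of its input.

```python
import numpy as np


def cholesky_grad_numpy(L, dz):
    # Hypothetical helper (not a PyTensor API): Smith-style reverse-mode
    # Cholesky gradient. L is the lower Cholesky factor of the input and
    # dz the upstream gradient w.r.t. L; the result is the gradient w.r.t.
    # the input, stored in the lower triangle.
    N = L.shape[0]
    F = np.tril(dz).astype(float)
    for k in range(N - 1, -1, -1):
        for j in range(k + 1, N):
            for i in range(j, N):
                F[i, k] -= F[i, j] * L[j, k]
                F[j, k] -= F[i, j] * L[i, k]
        for j in range(k + 1, N):
            F[j, k] /= L[k, k]
            F[k, k] -= L[j, k] * F[j, k]
        F[k, k] /= 2 * L[k, k]
    return F


rng = np.random.default_rng(0)
A = np.cov(rng.standard_normal((4, 14)))   # positive-definite test matrix
dz = np.tril(rng.standard_normal((4, 4)))  # upstream gradient w.r.t. L
L = np.linalg.cholesky(A)
analytic = cholesky_grad_numpy(L, dz)

# Central finite differences of f(A) = sum(dz * cholesky(A)); only the lower
# triangle of A is read by np.linalg.cholesky, so perturb those entries.
eps = 1e-6
numeric = np.zeros_like(A)
for i in range(4):
    for j in range(i + 1):
        E = np.zeros_like(A)
        E[i, j] = eps
        numeric[i, j] = (
            np.sum(dz * np.linalg.cholesky(A + E))
            - np.sum(dz * np.linalg.cholesky(A - E))
        ) / (2 * eps)

np.testing.assert_allclose(analytic, numeric, rtol=1e-4, atol=1e-6)
```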
@@ -11,7 +11,6 @@ from pytensor import tensor as pt
 from pytensor.configdefaults import config
 from pytensor.tensor.slinalg import (
     Cholesky,
-    CholeskyGrad,
     CholeskySolve,
     Solve,
     SolveBase,
@@ -122,22 +121,17 @@ def test_cholesky_grad_indef():
 
 
 @pytest.mark.slow
-def test_cholesky_and_cholesky_grad_shape():
+def test_cholesky_shape():
     rng = np.random.default_rng(utt.fetch_seed())
     x = matrix()
     for l in (cholesky(x), Cholesky(lower=True)(x), Cholesky(lower=False)(x)):
         f_chol = pytensor.function([x], l.shape)
-        g = pytensor.gradient.grad(l.sum(), x)
-        f_cholgrad = pytensor.function([x], g.shape)
         topo_chol = f_chol.maker.fgraph.toposort()
-        topo_cholgrad = f_cholgrad.maker.fgraph.toposort()
         if config.mode != "FAST_COMPILE":
             assert sum(node.op.__class__ == Cholesky for node in topo_chol) == 0
-            assert sum(node.op.__class__ == CholeskyGrad for node in topo_cholgrad) == 0
         for shp in [2, 3, 5]:
             m = np.cov(rng.standard_normal((shp, shp + 10))).astype(config.floatX)
             np.testing.assert_equal(f_chol(m), (shp, shp))
-            np.testing.assert_equal(f_cholgrad(m), (shp, shp))
 
 
 def test_eigvalsh():
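Not part of the commit: a minimal sketch of how the same shape checks could still be written after the removal, using only the graph gradient (`pytensor.gradient.grad`) that the deleted test lines already relied on. The variable names, seed, and matrix size are arbitrary choices for illustration.

```python
import numpy as np
import pytensor
from pytensor import tensor as pt
from pytensor.configdefaults import config
from pytensor.tensor.slinalg import cholesky

x = pt.matrix("x")
l = cholesky(x)
g = pytensor.gradient.grad(l.sum(), x)  # graph gradient of the Cholesky factor

# Compile shape functions, as the old test did for the factor itself.
f_chol = pytensor.function([x], l.shape)
f_cholgrad = pytensor.function([x], g.shape)

rng = np.random.default_rng(0)
m = np.cov(rng.standard_normal((3, 13))).astype(config.floatX)
np.testing.assert_equal(f_chol(m), (3, 3))
np.testing.assert_equal(f_cholgrad(m), (3, 3))
```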