提交 1d825dd6 authored 作者: Rob Zinkov's avatar Rob Zinkov 提交者: Ricardo Vieira

Add logsumexp to xtensor

上级 4312d8c6
......@@ -512,6 +512,11 @@ def softmax(x, dim=None):
return exp_x / exp_x.sum(dim=dim)
def logsumexp(x, dim=None):
    """Compute the log of the sum of exponentials of an XTensorVariable.

    Parameters
    ----------
    x : XTensorVariable
        Input tensor.
    dim : str | Sequence[str] | None, optional
        Dimension name(s) to reduce over. ``None`` reduces over all
        dimensions, mirroring the ``sum(dim=...)`` convention used by
        ``softmax`` above.

    Returns
    -------
    XTensorVariable
        ``log(sum(exp(x)))`` reduced along ``dim``.

    Notes
    -----
    Computed in a numerically stable way: the naive
    ``log(exp(x).sum(dim))`` overflows to ``inf`` once any element
    exceeds ~709 in float64. Subtracting the per-slice maximum first
    keeps the largest exponentiated value at ``exp(0) == 1``.
    """
    # NOTE(review): relies on xarray-style dim-aligned broadcasting of
    # ``x - max_x`` (the reduced dims re-broadcast against x), the same
    # mechanism the softmax above uses for its division.
    max_x = x.max(dim=dim)
    return log(exp(x - max_x).sum(dim=dim)) + max_x
class Dot(XOp):
"""Matrix multiplication between two XTensorVariables.
......
......@@ -7,6 +7,7 @@ pytest.importorskip("xarray")
import inspect
import numpy as np
from scipy.special import logsumexp as scipy_logsumexp
from xarray import DataArray
import pytensor.scalar as ps
......@@ -14,7 +15,7 @@ import pytensor.xtensor.math as pxm
from pytensor import function
from pytensor.scalar import ScalarOp
from pytensor.xtensor.basic import rename
from pytensor.xtensor.math import add, exp
from pytensor.xtensor.math import add, exp, logsumexp
from pytensor.xtensor.type import xtensor
from tests.xtensor.util import xr_arange_like, xr_assert_allclose, xr_function
......@@ -152,6 +153,28 @@ def test_cast():
yc64.astype("float64")
@pytest.mark.parametrize(
    ["shape", "dims", "axis"],
    [
        ((3, 4), ("a", "b"), None),
        ((3, 4), "a", 0),
        ((3, 4), "b", 1),
    ],
)
def test_logsumexp(shape, dims, axis):
    """Check xtensor ``logsumexp`` against ``scipy.special.logsumexp``.

    ``dims`` (xarray-style names) and ``axis`` (positional) must refer to
    the same reduction for each parametrized case.
    """
    # Use non-constant data: with an all-zeros input every element is
    # identical, so a reduction over the wrong axis could still produce
    # values that match the reference. A seeded generator keeps the test
    # deterministic.
    rng = np.random.default_rng(seed=438)
    scipy_inp = rng.normal(size=shape)
    scipy_out = scipy_logsumexp(scipy_inp, axis=axis)

    pytensor_inp = DataArray(scipy_inp, dims=("a", "b"))
    f = function([], logsumexp(pytensor_inp, dim=dims))
    pytensor_out = f()

    # assert_allclose is the modern replacement for the legacy
    # assert_array_almost_equal (relative tolerance instead of decimals).
    np.testing.assert_allclose(pytensor_out, scipy_out)
def test_dot():
"""Test basic dot product operations."""
# Test matrix-vector dot product (with multiple-letter dim names)
......
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论