Commit 51b3885f authored by Ricardo Vieira, committed by Ricardo Vieira

Numba sparse: Remove codebase xfails

Parent 1977c2c0
......@@ -3,14 +3,13 @@ import pytest
import scipy as sp
import pytensor.tensor as pt
from pytensor.compile import UnusedInputError, get_default_mode, get_mode
from pytensor.compile import UnusedInputError, get_mode
from pytensor.compile.function import function, pfunc
from pytensor.compile.function.pfunc import rebuild_collect_shared
from pytensor.compile.io import In
from pytensor.compile.sharedvalue import shared
from pytensor.configdefaults import config
from pytensor.graph.utils import MissingInputError
from pytensor.link.numba import NumbaLinker
from pytensor.sparse import SparseTensorType
from pytensor.tensor.math import sum as pt_sum
from pytensor.tensor.type import (
......@@ -766,10 +765,6 @@ class TestAliasingRules:
# rule #2 reading back from pytensor-managed memory
assert not np.may_share_memory(A.get_value(borrow=False), data_of(A))
@pytest.mark.xfail(
condition=isinstance(get_default_mode().linker, NumbaLinker),
reason="Numba does not support Sparse Ops yet",
)
def test_sparse_input_aliasing_affecting_inplace_operations(self):
# Note: to trigger this bug with pytensor rev 4586:2bc6fc7f218b,
# you need to make in inputs mutable (so that inplace
......
import pytest

from pytensor.compile import get_default_mode
from pytensor.link.numba import NumbaLinker

# The Numba backend has no implementations for Sparse Ops, so nothing in
# this module can run under it.  Skip the whole module up front instead of
# marking every test individually.
default_linker = get_default_mode().linker
if isinstance(default_linker, NumbaLinker):
    pytest.skip(
        reason="Numba does not support Sparse Ops yet",
        allow_module_level=True,
    )
......@@ -8,7 +8,9 @@ import scipy.sparse as scipy_sparse
import pytensor
import pytensor.sparse.math as psm
import pytensor.tensor as pt
from pytensor.compile import get_default_mode
from pytensor.configdefaults import config
from pytensor.link.numba import NumbaLinker
from pytensor.scalar import upcast
from pytensor.sparse.basic import (
CSR,
......@@ -427,33 +429,54 @@ class TestStructuredDot:
)
f(kernvals, imvals)
@pytest.mark.parametrize(
    "sparse_format_a",
    (
        "csc",
        "csr",
        pytest.param(
            "bsr",
            marks=pytest.mark.xfail(
                isinstance(get_default_mode().linker, NumbaLinker),
                reason="Numba does not support bsr",
            ),
        ),
    ),
)
@pytest.mark.parametrize(
    "sparse_format_b",
    (
        "csc",
        "csr",
        pytest.param(
            "bsr",
            marks=pytest.mark.xfail(
                isinstance(get_default_mode().linker, NumbaLinker),
                reason="Numba does not support bsr",
            ),
        ),
    ),
)
def test_dot_sparse_sparse(self, sparse_format_a, sparse_format_b):
    """Compile and run ``pt.dot`` on two symbolic sparse inputs.

    Covers every pairing of ``csc``/``csr``/``bsr`` input formats via the
    two ``parametrize`` decorators; the ``bsr`` cases are marked xfail
    under the Numba linker, which does not support that format.
    """
    sparse_dtype = "float64"
    # NOTE(review): "bsr" inputs are fed concrete CSR matrices — presumably
    # only the symbolic SparseTensorType format matters here; confirm.
    sp_mat = {
        "csc": scipy_sparse.csc_matrix,
        "csr": scipy_sparse.csr_matrix,
        "bsr": scipy_sparse.csr_matrix,
    }
    a = SparseTensorType(sparse_format_a, dtype=sparse_dtype)()
    b = SparseTensorType(sparse_format_b, dtype=sparse_dtype)()
    d = pt.dot(a, b)
    f = pytensor.function([a, b], d)
    # Exercise a range of shapes and sparsity levels, from tiny to a
    # 400x3000 @ 3000x200 product with 6000 non-zeros per operand.
    for M, N, K, nnz in [
        (4, 3, 2, 3),
        (40, 30, 20, 3),
        (40, 30, 20, 30),
        (400, 3000, 200, 6000),
    ]:
        a_val = sp_mat[sparse_format_a](random_lil((M, N), sparse_dtype, nnz))
        b_val = sp_mat[sparse_format_b](random_lil((N, K), sparse_dtype, nnz))
        f(a_val, b_val)  # TODO: Test something
def test_tensor_dot_types(self):
x = csc_matrix("x")
......@@ -775,7 +798,7 @@ class TestUsmm:
f_b_out = f_b(z_data, a_data, x_data, y_data)
# To make it easier to check the toposort
mode = pytensor.compile.mode.get_default_mode().excluding("fusion")
mode = get_default_mode().excluding("fusion")
if inplace:
updates = [(z, z - a * psm.dot(x, y))]
......@@ -815,8 +838,7 @@ class TestUsmm:
y.type.dtype == up
and format1 == "csc"
and format2 == "dense"
and not fast_compile
and pytensor.config.cxx
and "cxx_only" not in f_a.maker.linker.incompatible_rewrites
and up in ("float32", "float64")
):
# The op UsmmCscDense should be inserted
......
......@@ -7,6 +7,7 @@ import pytensor.sparse.math as smath
from pytensor import sparse
from pytensor.compile.mode import Mode, get_default_mode
from pytensor.configdefaults import config
from pytensor.link.numba import NumbaLinker
from pytensor.sparse.rewriting import SamplingDotCSR, sd_csc
from pytensor.tensor.basic import as_tensor_variable
from pytensor.tensor.math import sum as pt_sum
......@@ -68,6 +69,10 @@ def test_local_csm_grad_c():
@pytest.mark.skipif(
not pytensor.config.cxx, reason="G++ not available, so we need to skip this test."
)
@pytest.mark.skipif(
isinstance(get_default_mode().linker, NumbaLinker),
reason="This is a C-specific test",
)
def test_local_mul_s_d():
for sp_format in sparse.sparse_formats:
inputs = [getattr(pytensor.sparse, sp_format + "_matrix")(), matrix()]
......@@ -83,6 +88,10 @@ def test_local_mul_s_d():
@pytest.mark.skipif(
not pytensor.config.cxx, reason="G++ not available, so we need to skip this test."
)
@pytest.mark.skipif(
isinstance(get_default_mode().linker, NumbaLinker),
reason="This is a C-specific test",
)
def test_local_mul_s_v():
mode = get_default_mode()
mode = mode.including("specialize", "local_mul_s_v")
......@@ -101,6 +110,10 @@ def test_local_mul_s_v():
@pytest.mark.skipif(
not pytensor.config.cxx, reason="G++ not available, so we need to skip this test."
)
@pytest.mark.skipif(
isinstance(get_default_mode().linker, NumbaLinker),
reason="This is a C-specific test",
)
def test_local_structured_add_s_v():
for sp_format in ["csr"]: # Not implemented for other format
inputs = [getattr(pytensor.sparse, sp_format + "_matrix")(), vector()]
......@@ -116,6 +129,10 @@ def test_local_structured_add_s_v():
@pytest.mark.skipif(
not pytensor.config.cxx, reason="G++ not available, so we need to skip this test."
)
@pytest.mark.skipif(
isinstance(get_default_mode().linker, NumbaLinker),
reason="This is a C-specific test",
)
def test_local_sampling_dot_csr():
for sp_format in ["csr"]: # Not implemented for other format
inputs = [
......
......@@ -4,10 +4,9 @@ import scipy.sparse
import pytensor
import pytensor.tensor as pt
from pytensor.compile.mode import OPT_FAST_RUN, Mode, get_default_mode
from pytensor.compile.mode import OPT_FAST_RUN, Mode
from pytensor.graph import vectorize_graph
from pytensor.graph.basic import Constant, equal_computations
from pytensor.link.numba import NumbaLinker
from pytensor.raise_op import Assert, CheckAndRaise, assert_op
from pytensor.scalar.basic import ScalarType, float64
from pytensor.sparse import as_sparse_variable
......@@ -182,10 +181,6 @@ class TestCheckAndRaiseInferShape(utt.InferShapeTester):
)
@pytest.mark.xfail(
condition=isinstance(get_default_mode().linker, NumbaLinker),
reason="Numba does not support Sparse Ops yet",
)
def test_CheckAndRaise_sparse_variable():
check_and_raise = CheckAndRaise(ValueError, "sparse_check")
......
......@@ -7,8 +7,6 @@ import scipy
import pytensor
import pytensor.typed_list
from pytensor import sparse
from pytensor.compile import get_default_mode
from pytensor.link.numba import NumbaLinker
from pytensor.tensor.type import (
TensorType,
integer_dtypes,
......@@ -454,10 +452,6 @@ class TestIndex:
assert f([[x, y], [x, y, y]], [x, y]) == 0
@pytest.mark.xfail(
condition=isinstance(get_default_mode().linker, NumbaLinker),
reason="Numba does not support Sparse Ops yet",
)
def test_sparse(self):
mySymbolicSparseList = TypedListType(
sparse.SparseTensorType("csr", pytensor.config.floatX)
......@@ -525,10 +519,6 @@ class TestCount:
assert f([[x, y], [x, y, y]], [x, y]) == 1
@pytest.mark.xfail(
condition=isinstance(get_default_mode().linker, NumbaLinker),
reason="Numba does not support Sparse Ops yet",
)
def test_sparse(self):
mySymbolicSparseList = TypedListType(
sparse.SparseTensorType("csr", pytensor.config.floatX)
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment