Commit a602a8ec authored by ricardoV94, committed by Ricardo Vieira

XFAIL/SKIP Sparse tests

Parent 0775880c
import numpy as np import numpy as np
import pytest import pytest
import scipy as sp
import pytensor.tensor as pt import pytensor.tensor as pt
from pytensor.compile import UnusedInputError, get_mode from pytensor.compile import UnusedInputError, get_default_mode, get_mode
from pytensor.compile.function import function, pfunc from pytensor.compile.function import function, pfunc
from pytensor.compile.function.pfunc import rebuild_collect_shared from pytensor.compile.function.pfunc import rebuild_collect_shared
from pytensor.compile.io import In from pytensor.compile.io import In
from pytensor.compile.sharedvalue import shared from pytensor.compile.sharedvalue import shared
from pytensor.configdefaults import config from pytensor.configdefaults import config
from pytensor.graph.utils import MissingInputError from pytensor.graph.utils import MissingInputError
from pytensor.link.numba import NumbaLinker
from pytensor.sparse import SparseTensorType
from pytensor.tensor.math import sum as pt_sum from pytensor.tensor.math import sum as pt_sum
from pytensor.tensor.type import ( from pytensor.tensor.type import (
bscalar, bscalar,
...@@ -763,18 +766,18 @@ class TestAliasingRules: ...@@ -763,18 +766,18 @@ class TestAliasingRules:
# rule #2 reading back from pytensor-managed memory # rule #2 reading back from pytensor-managed memory
assert not np.may_share_memory(A.get_value(borrow=False), data_of(A)) assert not np.may_share_memory(A.get_value(borrow=False), data_of(A))
@pytest.mark.xfail(
condition=isinstance(get_default_mode().linker, NumbaLinker),
reason="Numba does not support Sparse Ops yet",
)
def test_sparse_input_aliasing_affecting_inplace_operations(self): def test_sparse_input_aliasing_affecting_inplace_operations(self):
sp = pytest.importorskip("scipy", minversion="0.7.0")
from pytensor import sparse
# Note: to trigger this bug with pytensor rev 4586:2bc6fc7f218b, # Note: to trigger this bug with pytensor rev 4586:2bc6fc7f218b,
# you need to make in inputs mutable (so that inplace # you need to make in inputs mutable (so that inplace
# operations are used) and to break the elemwise composition # operations are used) and to break the elemwise composition
# with some non-elemwise op (here dot) # with some non-elemwise op (here dot)
x = sparse.SparseTensorType("csc", dtype="float64")() x = SparseTensorType("csc", dtype="float64")()
y = sparse.SparseTensorType("csc", dtype="float64")() y = SparseTensorType("csc", dtype="float64")()
f = function([In(x, mutable=True), In(y, mutable=True)], (x + y) + (x + y)) f = function([In(x, mutable=True), In(y, mutable=True)], (x + y) + (x + y))
# Test 1. If the same variable is given twice # Test 1. If the same variable is given twice
......
import pytest
from pytensor.compile import get_default_mode
from pytensor.link.numba import NumbaLinker
# Skip this entire test module when the default mode compiles through the
# Numba backend: Numba has no implementations for Sparse Ops yet, so every
# test below would fail for reasons unrelated to the code under test.
if isinstance(get_default_mode().linker, NumbaLinker):
    pytest.skip(
        reason="Numba does not support Sparse Ops yet",
        allow_module_level=True,
    )
...@@ -8,7 +8,6 @@ import pytensor ...@@ -8,7 +8,6 @@ import pytensor
import pytensor.sparse.math import pytensor.sparse.math
import pytensor.tensor as pt import pytensor.tensor as pt
from pytensor import sparse from pytensor import sparse
from pytensor.compile.function import function
from pytensor.compile.io import In from pytensor.compile.io import In
from pytensor.configdefaults import config from pytensor.configdefaults import config
from pytensor.gradient import GradientError from pytensor.gradient import GradientError
...@@ -87,19 +86,6 @@ def as_sparse_format(data, format): ...@@ -87,19 +86,6 @@ def as_sparse_format(data, format):
raise NotImplementedError() raise NotImplementedError()
def eval_outputs(outputs):
    """Compile *outputs* into a zero-input PyTensor function, run it once,
    and return the first computed value."""
    compiled = function([], outputs)
    results = compiled()
    return results[0]
# scipy 0.17 will return sparse values in all cases while previous
# version sometimes wouldn't. This will make everything dense so that
# we can use assert_allclose.
def as_ndarray(val):
    """Return a dense version of *val*.

    Anything exposing a ``toarray`` method (scipy sparse matrices) is
    densified; every other value is passed through unchanged.
    """
    densify = getattr(val, "toarray", None)
    return val if densify is None else densify()
def random_lil(shape, dtype, nnz): def random_lil(shape, dtype, nnz):
rval = scipy_sparse.lil_matrix(shape, dtype=dtype) rval = scipy_sparse.lil_matrix(shape, dtype=dtype)
huge = 2**30 huge = 2**30
...@@ -355,7 +341,7 @@ class TestTranspose: ...@@ -355,7 +341,7 @@ class TestTranspose:
assert ta.type.dtype == "float64", ta.type.dtype assert ta.type.dtype == "float64", ta.type.dtype
assert ta.type.format == "csr", ta.type.format assert ta.type.format == "csr", ta.type.format
vta = eval_outputs([ta]) vta = ta.eval()
assert vta.shape == (3, 5) assert vta.shape == (3, 5)
def test_transpose_csr(self): def test_transpose_csr(self):
...@@ -367,7 +353,7 @@ class TestTranspose: ...@@ -367,7 +353,7 @@ class TestTranspose:
assert ta.type.dtype == "float64", ta.type.dtype assert ta.type.dtype == "float64", ta.type.dtype
assert ta.type.format == "csc", ta.type.format assert ta.type.format == "csc", ta.type.format
vta = eval_outputs([ta]) vta = ta.eval()
assert vta.shape == (3, 5) assert vta.shape == (3, 5)
...@@ -544,13 +530,13 @@ class TestConversion: ...@@ -544,13 +530,13 @@ class TestConversion:
test_val = np.random.random((5,)).astype(config.floatX) test_val = np.random.random((5,)).astype(config.floatX)
a = pt.as_tensor_variable(test_val) a = pt.as_tensor_variable(test_val)
s = csc_from_dense(a) s = csc_from_dense(a)
val = eval_outputs([s]) val = s.eval()
assert str(val.dtype) == config.floatX assert str(val.dtype) == config.floatX
assert val.format == "csc" assert val.format == "csc"
a = pt.as_tensor_variable(test_val) a = pt.as_tensor_variable(test_val)
s = csr_from_dense(a) s = csr_from_dense(a)
val = eval_outputs([s]) val = s.eval()
assert str(val.dtype) == config.floatX assert str(val.dtype) == config.floatX
assert val.format == "csr" assert val.format == "csr"
...@@ -573,7 +559,7 @@ class TestConversion: ...@@ -573,7 +559,7 @@ class TestConversion:
s = t(scipy_sparse.identity(5)) s = t(scipy_sparse.identity(5))
s = as_sparse_variable(s) s = as_sparse_variable(s)
d = dense_from_sparse(s) d = dense_from_sparse(s)
val = eval_outputs([d]) val = d.eval()
assert str(val.dtype) == s.dtype assert str(val.dtype) == s.dtype
assert np.all(val[0] == [1, 0, 0, 0, 0]) assert np.all(val[0] == [1, 0, 0, 0, 0])
...@@ -583,7 +569,7 @@ class TestConversion: ...@@ -583,7 +569,7 @@ class TestConversion:
s = t(scipy_sparse.identity(5)) s = t(scipy_sparse.identity(5))
s = as_sparse_variable(s) s = as_sparse_variable(s)
d = s.toarray() d = s.toarray()
val = eval_outputs([d]) val = d.eval()
assert str(val.dtype) == s.dtype assert str(val.dtype) == s.dtype
assert np.all(val[0] == [1, 0, 0, 0, 0]) assert np.all(val[0] == [1, 0, 0, 0, 0])
......
...@@ -54,7 +54,6 @@ from pytensor.tensor.type import ( ...@@ -54,7 +54,6 @@ from pytensor.tensor.type import (
) )
from tests import unittest_tools as utt from tests import unittest_tools as utt
from tests.sparse.test_basic import ( from tests.sparse.test_basic import (
as_ndarray,
as_sparse_format, as_sparse_format,
random_lil, random_lil,
sparse_random_inputs, sparse_random_inputs,
...@@ -1020,7 +1019,7 @@ class TestSamplingDot(utt.InferShapeTester): ...@@ -1020,7 +1019,7 @@ class TestSamplingDot(utt.InferShapeTester):
tested = f(*self.a) tested = f(*self.a)
x, y, p = self.a x, y, p = self.a
expected = p.multiply(np.dot(x, y.T)) expected = p.multiply(np.dot(x, y.T))
utt.assert_allclose(as_ndarray(expected), tested.toarray()) utt.assert_allclose(expected.toarray(), tested.toarray())
assert tested.format == "csr" assert tested.format == "csr"
assert tested.dtype == expected.dtype assert tested.dtype == expected.dtype
...@@ -1030,7 +1029,7 @@ class TestSamplingDot(utt.InferShapeTester): ...@@ -1030,7 +1029,7 @@ class TestSamplingDot(utt.InferShapeTester):
tested = f(*a2) tested = f(*a2)
x, y, p = a2 x, y, p = a2
expected = p.multiply(np.dot(x, y.T)) expected = p.multiply(np.dot(x, y.T))
utt.assert_allclose(as_ndarray(expected), tested.toarray()) utt.assert_allclose(expected.toarray(), tested.toarray())
assert tested.format == "csr" assert tested.format == "csr"
assert tested.dtype == expected.dtype assert tested.dtype == expected.dtype
...@@ -1098,7 +1097,7 @@ class TestStructuredAddSV: ...@@ -1098,7 +1097,7 @@ class TestStructuredAddSV:
out = f(spmat, mat) out = f(spmat, mat)
utt.assert_allclose( utt.assert_allclose(
as_ndarray(spones.multiply(spmat + mat)), out.toarray() spones.multiply(spmat + mat).toarray(), out.toarray()
) )
......
...@@ -4,9 +4,10 @@ import scipy.sparse ...@@ -4,9 +4,10 @@ import scipy.sparse
import pytensor import pytensor
import pytensor.tensor as pt import pytensor.tensor as pt
from pytensor.compile.mode import OPT_FAST_RUN, Mode from pytensor.compile.mode import OPT_FAST_RUN, Mode, get_default_mode
from pytensor.graph import vectorize_graph from pytensor.graph import vectorize_graph
from pytensor.graph.basic import Constant, equal_computations from pytensor.graph.basic import Constant, equal_computations
from pytensor.link.numba import NumbaLinker
from pytensor.raise_op import Assert, CheckAndRaise, assert_op from pytensor.raise_op import Assert, CheckAndRaise, assert_op
from pytensor.scalar.basic import ScalarType, float64 from pytensor.scalar.basic import ScalarType, float64
from pytensor.sparse import as_sparse_variable from pytensor.sparse import as_sparse_variable
...@@ -181,6 +182,10 @@ class TestCheckAndRaiseInferShape(utt.InferShapeTester): ...@@ -181,6 +182,10 @@ class TestCheckAndRaiseInferShape(utt.InferShapeTester):
) )
@pytest.mark.xfail(
condition=isinstance(get_default_mode().linker, NumbaLinker),
reason="Numba does not support Sparse Ops yet",
)
def test_CheckAndRaise_sparse_variable(): def test_CheckAndRaise_sparse_variable():
check_and_raise = CheckAndRaise(ValueError, "sparse_check") check_and_raise = CheckAndRaise(ValueError, "sparse_check")
......
...@@ -7,6 +7,8 @@ import scipy ...@@ -7,6 +7,8 @@ import scipy
import pytensor import pytensor
import pytensor.typed_list import pytensor.typed_list
from pytensor import sparse from pytensor import sparse
from pytensor.compile import get_default_mode
from pytensor.link.numba import NumbaLinker
from pytensor.tensor.type import ( from pytensor.tensor.type import (
TensorType, TensorType,
integer_dtypes, integer_dtypes,
...@@ -452,6 +454,10 @@ class TestIndex: ...@@ -452,6 +454,10 @@ class TestIndex:
assert f([[x, y], [x, y, y]], [x, y]) == 0 assert f([[x, y], [x, y, y]], [x, y]) == 0
@pytest.mark.xfail(
condition=isinstance(get_default_mode().linker, NumbaLinker),
reason="Numba does not support Sparse Ops yet",
)
def test_sparse(self): def test_sparse(self):
mySymbolicSparseList = TypedListType( mySymbolicSparseList = TypedListType(
sparse.SparseTensorType("csr", pytensor.config.floatX) sparse.SparseTensorType("csr", pytensor.config.floatX)
...@@ -519,6 +525,10 @@ class TestCount: ...@@ -519,6 +525,10 @@ class TestCount:
assert f([[x, y], [x, y, y]], [x, y]) == 1 assert f([[x, y], [x, y, y]], [x, y]) == 1
@pytest.mark.xfail(
condition=isinstance(get_default_mode().linker, NumbaLinker),
reason="Numba does not support Sparse Ops yet",
)
def test_sparse(self): def test_sparse(self):
mySymbolicSparseList = TypedListType( mySymbolicSparseList = TypedListType(
sparse.SparseTensorType("csr", pytensor.config.floatX) sparse.SparseTensorType("csr", pytensor.config.floatX)
......
Markdown format
0%
You added 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment