提交 40313aac authored 作者: Brandon T. Willard 提交者: Brandon T. Willard

Remove SciPy conditional logic

上级 2c91b5a3
......@@ -17,7 +17,7 @@ from aesara.tensor.math import Dot, Prod, dot, log
from aesara.tensor.math import pow as aet_pow
from aesara.tensor.math import prod
from aesara.tensor.nlinalg import MatrixInverse, det, matrix_inverse, trace
from aesara.tensor.slinalg import Cholesky, Solve, cholesky, imported_scipy, solve
from aesara.tensor.slinalg import Cholesky, Solve, cholesky, solve
logger = logging.getLogger(__name__)
......@@ -229,8 +229,6 @@ def transinv_to_invtrans(fgraph, node):
@register_stabilize
@local_optimizer([Dot, Dot22])
def inv_as_solve(fgraph, node):
if not imported_scipy:
return False
if isinstance(node.op, (Dot, Dot22)):
l, r = node.inputs
if l.owner and l.owner.op == matrix_inverse:
......
......@@ -7,6 +7,8 @@ As SciPy is not always available, we treat them separately.
import os
import numpy as np
import scipy.special
import scipy.stats
from aesara.configdefaults import config
from aesara.gradient import grad_not_implemented
......@@ -25,26 +27,11 @@ from aesara.scalar.basic import (
)
imported_scipy_special = False
try:
import scipy.special
import scipy.stats
imported_scipy_special = True
# Importing scipy.special may raise ValueError.
# See http://projects.scipy.org/scipy/ticket/1739
except (ImportError, ValueError):
pass
class Erf(UnaryScalarOp):
nfunc_spec = ("scipy.special.erf", 1, 1)
def impl(self, x):
if imported_scipy_special:
return scipy.special.erf(x)
else:
super().impl(x)
return scipy.special.erf(x)
def L_op(self, inputs, outputs, grads):
(x,) = inputs
......@@ -78,10 +65,7 @@ class Erfc(UnaryScalarOp):
nfunc_spec = ("scipy.special.erfc", 1, 1)
def impl(self, x):
if imported_scipy_special:
return scipy.special.erfc(x)
else:
super().impl(x)
return scipy.special.erfc(x)
def L_op(self, inputs, outputs, grads):
(x,) = inputs
......@@ -130,10 +114,7 @@ class Erfcx(UnaryScalarOp):
nfunc_spec = ("scipy.special.erfcx", 1, 1)
def impl(self, x):
if imported_scipy_special:
return scipy.special.erfcx(x)
else:
super().impl(x)
return scipy.special.erfcx(x)
def L_op(self, inputs, outputs, grads):
(x,) = inputs
......@@ -195,10 +176,7 @@ class Erfinv(UnaryScalarOp):
nfunc_spec = ("scipy.special.erfinv", 1, 1)
def impl(self, x):
if imported_scipy_special:
return scipy.special.erfinv(x)
else:
super().impl(x)
return scipy.special.erfinv(x)
def L_op(self, inputs, outputs, grads):
(x,) = inputs
......@@ -232,10 +210,7 @@ class Erfcinv(UnaryScalarOp):
nfunc_spec = ("scipy.special.erfcinv", 1, 1)
def impl(self, x):
if imported_scipy_special:
return scipy.special.erfcinv(x)
else:
super().impl(x)
return scipy.special.erfcinv(x)
def L_op(self, inputs, outputs, grads):
(x,) = inputs
......@@ -273,10 +248,7 @@ class Gamma(UnaryScalarOp):
return scipy.special.gamma(x)
def impl(self, x):
if imported_scipy_special:
return Gamma.st_impl(x)
else:
super().impl(x)
return Gamma.st_impl(x)
def L_op(self, inputs, outputs, gout):
(x,) = inputs
......@@ -315,10 +287,7 @@ class GammaLn(UnaryScalarOp):
return scipy.special.gammaln(x)
def impl(self, x):
if imported_scipy_special:
return GammaLn.st_impl(x)
else:
super().impl(x)
return GammaLn.st_impl(x)
def L_op(self, inputs, outputs, grads):
(x,) = inputs
......@@ -362,10 +331,7 @@ class Psi(UnaryScalarOp):
return scipy.special.psi(x)
def impl(self, x):
if imported_scipy_special:
return Psi.st_impl(x)
else:
super().impl(x)
return Psi.st_impl(x)
def L_op(self, inputs, outputs, grads):
(x,) = inputs
......@@ -456,10 +422,7 @@ class TriGamma(UnaryScalarOp):
return scipy.special.polygamma(1, x)
def impl(self, x):
if imported_scipy_special:
return TriGamma.st_impl(x)
else:
super().impl(x)
return TriGamma.st_impl(x)
def grad(self, inputs, outputs_gradients):
raise NotImplementedError()
......@@ -545,10 +508,7 @@ class Chi2SF(BinaryScalarOp):
return scipy.stats.chi2.sf(x, k)
def impl(self, x, k):
if imported_scipy_special:
return Chi2SF.st_impl(x, k)
else:
super().impl(x, k)
return Chi2SF.st_impl(x, k)
def c_support_code(self, **kwargs):
with open(os.path.join(os.path.dirname(__file__), "c_code", "gamma.c")) as f:
......@@ -589,10 +549,7 @@ class GammaInc(BinaryScalarOp):
return scipy.special.gammainc(k, x)
def impl(self, k, x):
if imported_scipy_special:
return GammaInc.st_impl(k, x)
else:
super().impl(k, x)
return GammaInc.st_impl(k, x)
def c_support_code(self, **kwargs):
with open(os.path.join(os.path.dirname(__file__), "c_code", "gamma.c")) as f:
......@@ -633,10 +590,7 @@ class GammaIncC(BinaryScalarOp):
return scipy.special.gammaincc(x, k)
def impl(self, k, x):
if imported_scipy_special:
return GammaIncC.st_impl(k, x)
else:
super().impl(k, x)
return GammaIncC.st_impl(k, x)
def c_support_code(self, **kwargs):
with open(os.path.join(os.path.dirname(__file__), "c_code", "gamma.c")) as f:
......@@ -677,10 +631,7 @@ class GammaU(BinaryScalarOp):
return scipy.special.gammaincc(k, x) * scipy.special.gamma(k)
def impl(self, k, x):
if imported_scipy_special:
return GammaU.st_impl(k, x)
else:
super().impl(k, x)
return GammaU.st_impl(k, x)
def c_support_code(self, **kwargs):
with open(os.path.join(os.path.dirname(__file__), "c_code", "gamma.c")) as f:
......@@ -721,10 +672,7 @@ class GammaL(BinaryScalarOp):
return scipy.special.gammainc(k, x) * scipy.special.gamma(k)
def impl(self, k, x):
if imported_scipy_special:
return GammaL.st_impl(k, x)
else:
super().impl(k, x)
return GammaL.st_impl(k, x)
def c_support_code(self, **kwargs):
with open(os.path.join(os.path.dirname(__file__), "c_code", "gamma.c")) as f:
......@@ -765,10 +713,7 @@ class Jv(BinaryScalarOp):
return scipy.special.jv(v, x)
def impl(self, v, x):
if imported_scipy_special:
return self.st_impl(v, x)
else:
super().impl(v, x)
return self.st_impl(v, x)
def grad(self, inputs, grads):
v, x = inputs
......@@ -794,10 +739,7 @@ class J1(UnaryScalarOp):
return scipy.special.j1(x)
def impl(self, x):
if imported_scipy_special:
return self.st_impl(x)
else:
super().impl(x)
return self.st_impl(x)
def grad(self, inputs, grads):
(x,) = inputs
......@@ -828,10 +770,7 @@ class J0(UnaryScalarOp):
return scipy.special.j0(x)
def impl(self, x):
if imported_scipy_special:
return self.st_impl(x)
else:
super().impl(x)
return self.st_impl(x)
def grad(self, inp, grads):
(x,) = inp
......@@ -862,10 +801,7 @@ class Iv(BinaryScalarOp):
return scipy.special.iv(v, x)
def impl(self, v, x):
if imported_scipy_special:
return self.st_impl(v, x)
else:
super().impl(v, x)
return self.st_impl(v, x)
def grad(self, inputs, grads):
v, x = inputs
......@@ -891,10 +827,7 @@ class I1(UnaryScalarOp):
return scipy.special.i1(x)
def impl(self, x):
if imported_scipy_special:
return self.st_impl(x)
else:
super().impl(x)
return self.st_impl(x)
def grad(self, inputs, grads):
(x,) = inputs
......@@ -917,10 +850,7 @@ class I0(UnaryScalarOp):
return scipy.special.i0(x)
def impl(self, x):
if imported_scipy_special:
return self.st_impl(x)
else:
super().impl(x)
return self.st_impl(x)
def grad(self, inp, grads):
(x,) = inp
......@@ -939,10 +869,7 @@ class Sigmoid(UnaryScalarOp):
nfunc_spec = ("scipy.special.expit", 1, 1)
def impl(self, x):
if imported_scipy_special:
return scipy.special.expit(x)
else:
super().impl(x)
return scipy.special.expit(x)
def grad(self, inp, grads):
(x,) = inp
......
import numpy as np
try:
import scipy.sparse
imported_scipy = True
except ImportError:
imported_scipy = False
import scipy.sparse
import aesara
from aesara.graph.type import Type
......@@ -54,12 +46,11 @@ class SparseType(Type):
"""
if imported_scipy:
format_cls = {
"csr": scipy.sparse.csr_matrix,
"csc": scipy.sparse.csc_matrix,
"bsr": scipy.sparse.bsr_matrix,
}
format_cls = {
"csr": scipy.sparse.csr_matrix,
"csc": scipy.sparse.csc_matrix,
"bsr": scipy.sparse.bsr_matrix,
}
dtype_set = {
"int8",
"int16",
......@@ -81,10 +72,6 @@ class SparseType(Type):
Constant = None
def __init__(self, format, dtype):
if not imported_scipy:
raise Exception(
"You can't make SparseType object as SciPy" " is not available."
)
dtype = str(dtype)
if dtype in self.dtype_set:
self.dtype = dtype
......
......@@ -15,6 +15,8 @@ except ImportError:
import warnings
import numpy as np
from scipy.signal.signaltools import _bvalfromboundary, _valfrommode, convolve
from scipy.signal.sigtools import _convolve2d
import aesara
from aesara import tensor as aet
......@@ -31,15 +33,6 @@ from aesara.tensor.exceptions import NotScalarConstantError
from aesara.tensor.var import TensorConstant, TensorVariable
try:
from scipy.signal.signaltools import _bvalfromboundary, _valfrommode, convolve
from scipy.signal.sigtools import _convolve2d
imported_scipy_signal = True
except ImportError:
imported_scipy_signal = False
__docformat__ = "restructuredtext en"
_logger = logging.getLogger("aesara.tensor.nnet.abstract_conv")
......@@ -2342,11 +2335,6 @@ class BaseAbstractConv(Op):
"""
Basic slow Python 2D or 3D convolution for DebugMode
"""
if not imported_scipy_signal:
raise NotImplementedError(
"AbstractConv perform requires the python package"
" for scipy.signal to be installed."
)
if not (mode in ("valid", "full")):
raise ValueError(
"invalid mode {}, which must be either "
......
......@@ -13,6 +13,8 @@ import logging
import warnings
import numpy as np
from scipy.signal.signaltools import _bvalfromboundary, _valfrommode
from scipy.signal.sigtools import _convolve2d
import aesara
from aesara.graph.basic import Apply
......@@ -28,16 +30,6 @@ from aesara.tensor.nnet.abstract_conv import get_conv_output_shape, get_conv_sha
from aesara.tensor.type import discrete_dtypes, tensor
try:
# TODO: move these back out to global scope when they no longer
# cause an atexit error
from scipy.signal.signaltools import _bvalfromboundary, _valfrommode
from scipy.signal.sigtools import _convolve2d
imported_scipy_signal = True
except ImportError:
imported_scipy_signal = False
__docformat__ = "restructuredtext en"
_logger = logging.getLogger("aesara.tensor.nnet.conv")
......@@ -808,15 +800,6 @@ class ConvOp(OpenMPOp):
"""
img2d, filtersflipped = inp
(z,) = out
if not imported_scipy_signal:
raise aesara.graph.utils.MethodNotDefined(
"c_headers",
type(self),
self.__class__.__name__,
"Need the python package for scipy.signal to be installed "
"for the python implementation. You can use the C"
" implementation instead.",
)
# TODO: move these back out to global scope when they no longer
# cause an atexit error
......
......@@ -2,15 +2,7 @@ import logging
import warnings
import numpy as np
try:
import scipy.linalg
imported_scipy = True
except ImportError:
# some ops (e.g. Cholesky, Solve, A_Xinv_b) won't work
imported_scipy = False
import scipy.linalg
import aesara.tensor
import aesara.tensor.basic as aet
......@@ -69,9 +61,6 @@ class Cholesky(Op):
return [shapes[0]]
def make_node(self, x):
assert (
imported_scipy
), "Scipy not available. Scipy is needed for the Cholesky op"
x = as_tensor_variable(x)
assert x.ndim == 2
return Apply(self, [x], [x.type()])
......@@ -233,7 +222,6 @@ class Solve(Op):
return "Solve{%s}" % str(self._props())
def make_node(self, A, b):
assert imported_scipy, "Scipy not available. Scipy is needed for the Solve op"
A = as_tensor_variable(A)
b = as_tensor_variable(b)
assert A.ndim == 2
......@@ -346,10 +334,6 @@ class Eigvalsh(Op):
self.lower = lower
def make_node(self, a, b):
assert (
imported_scipy
), "Scipy not available. Scipy is needed for the Eigvalsh op"
if b == aesara.tensor.type_other.NoneConst:
a = as_tensor_variable(a)
assert a.ndim == 2
......@@ -412,9 +396,6 @@ class EigvalshGrad(Op):
self.tri1 = lambda a: np.tril(a, -1)
def make_node(self, a, b, gw):
assert (
imported_scipy
), "Scipy not available. Scipy is needed for the GEigvalsh op"
a = as_tensor_variable(a)
b = as_tensor_variable(b)
gw = as_tensor_variable(gw)
......@@ -498,8 +479,6 @@ class Expm(Op):
__props__ = ()
def make_node(self, A):
assert imported_scipy, "Scipy not available. Scipy is needed for the Expm op"
A = as_tensor_variable(A)
assert A.ndim == 2
expm = matrix(dtype=A.dtype)
......@@ -536,7 +515,6 @@ class ExpmGrad(Op):
__props__ = ()
def make_node(self, A, gw):
assert imported_scipy, "Scipy not available. Scipy is needed for the Expm op"
A = as_tensor_variable(A)
assert A.ndim == 2
out = matrix(dtype=A.dtype)
......
......@@ -31,38 +31,6 @@ particular, see the following fixes:
and `impl() methods related to SciPy
<https://github.com/Theano/Theano/commit/08d16c0aa6681fc53d8d0f40342551eb47ff536e>`_.
.. _scipy_ops:
SciPy Ops
=========
We can wrap SciPy functions in Aesara. But SciPy is an optional dependency.
Here is some code that allows the Op to be optional:
.. code-block:: python
try:
import scipy.linalg
imported_scipy = True
except ImportError:
# some ops (e.g. Cholesky, Solve, A_Xinv_b) won't work
imported_scipy = False
class SomeOp(Op):
...
def make_node(self, x):
assert imported_scipy, (
"SciPy not available. SciPy is needed for the SomeOp op.")
...
class TestSomeOp(utt.InferShapeTester):
...
@pytest.mark.skipif(not imported_scipy, reason="SciPy needed for the SomeOp op.")
def test_infer_shape(self):
...
.. _sparse_ops:
Sparse Ops
......
......@@ -23,7 +23,7 @@ from aesara.gpuarray.linalg import (
gpu_svd,
)
from aesara.tensor.nlinalg import SVD, MatrixInverse, QRFull, eigh, matrix_inverse, qr
from aesara.tensor.slinalg import Cholesky, cholesky, imported_scipy
from aesara.tensor.slinalg import Cholesky, cholesky
from aesara.tensor.type import fmatrix, matrix, tensor3, vector
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu
......@@ -183,9 +183,6 @@ class TestGpuCholesky:
chol_A_res = np.array(res)
utt.assert_allclose(chol_A_res, chol_A_val)
@pytest.mark.skipif(
not imported_scipy, reason="SciPy is not enabled, skipping test"
)
def test_gpu_cholesky_opt(self):
A = matrix("A", dtype="float32")
fn = aesara.function([A], cholesky(A), mode=mode_with_gpu)
......@@ -281,9 +278,6 @@ class TestGpuCholesky64:
chol_A_res = np.array(res)
utt.assert_allclose(chol_A_res, chol_A_val)
@pytest.mark.skipif(
not imported_scipy, reason="SciPy is not enabled, skipping test"
)
def test_gpu_cholesky_opt(self):
A = matrix("A", dtype="float64")
fn = aesara.function([A], cholesky(A), mode=mode_with_gpu)
......
......@@ -700,9 +700,7 @@ def test_no_complex():
@utt.assertFailure_fast
@pytest.mark.skipif(
not cusolver_available or not slinalg.imported_scipy, reason="No cuSolver or SciPy"
)
@pytest.mark.skipif(not cusolver_available, reason="No cuSolver or SciPy")
def test_local_lift_solve():
A = fmatrix()
b = fmatrix()
......@@ -722,9 +720,7 @@ def test_local_lift_solve():
assert _check_stack_trace(f_gpu)
@pytest.mark.skipif(
not cusolver_available or not slinalg.imported_scipy, reason="No cuSolver or SciPy"
)
@pytest.mark.skipif(not cusolver_available, reason="No cuSolver or SciPy")
def test_gpu_solve_not_inplace():
A = fmatrix()
b = fmatrix()
......@@ -746,9 +742,7 @@ def test_gpu_solve_not_inplace():
@utt.assertFailure_fast
@pytest.mark.skipif(
not cusolver_available or not slinalg.imported_scipy, reason="No cuSolver or SciPy"
)
@pytest.mark.skipif(not cusolver_available, reason="No cuSolver or SciPy")
def test_local_lift_cholesky():
A = fmatrix()
o = slinalg.cholesky(A)
......@@ -768,9 +762,7 @@ def test_local_lift_cholesky():
utt.assert_allclose(f_cpu(A_val), f_gpu(A_val))
@pytest.mark.skipif(
not cusolver_available or not slinalg.imported_scipy, reason="No cuSolver or SciPy"
)
@pytest.mark.skipif(not cusolver_available, reason="No cuSolver or SciPy")
def test_gpu_cholesky_not_inplace():
A = fmatrix()
A_squared = A ** 2
......
import numpy as np
import numpy.linalg
import pytest
import aesara
from aesara import function
......@@ -11,7 +10,6 @@ from aesara.configdefaults import config
from aesara.sandbox.linalg.ops import Cholesky # PSD_hint,; op class
from aesara.sandbox.linalg.ops import (
Solve,
imported_scipy,
inv_as_solve,
matrix_inverse,
solve,
......@@ -134,8 +132,6 @@ def test_transinv_to_invtrans():
def test_tag_solve_triangular():
if not imported_scipy:
pytest.skip("Scipy needed for the Cholesky op.")
cholesky_lower = Cholesky(lower=True)
cholesky_upper = Cholesky(lower=False)
A = matrix("A")
......@@ -157,8 +153,6 @@ def test_tag_solve_triangular():
def test_matrix_inverse_solve():
if not imported_scipy:
pytest.skip("Scipy needed for the Solve op.")
A = dmatrix("A")
b = dmatrix("b")
node = matrix_inverse(A).dot(b).owner
......
......@@ -1030,7 +1030,7 @@ class TestCorrConv2d(BaseTestConv2d):
@pytest.mark.skipif(
config.cxx == "" or not aesara.tensor.nnet.abstract_conv.imported_scipy_signal,
config.cxx == "",
reason="SciPy and cxx needed",
)
class TestAbstractConvNoOptim(BaseTestConv2d):
......@@ -2050,7 +2050,7 @@ class TestConv2dGrads:
@pytest.mark.skipif(
config.cxx == "" or not aesara.tensor.nnet.abstract_conv.imported_scipy_signal,
config.cxx == "",
reason="SciPy and cxx needed",
)
class TestGroupedConvNoOptim:
......@@ -2260,7 +2260,7 @@ class TestGroupedConvNoOptim:
@pytest.mark.skipif(
config.cxx == "" or not aesara.tensor.nnet.abstract_conv.imported_scipy_signal,
config.cxx == "",
reason="SciPy and cxx needed",
)
class TestGroupedConv3dNoOptim(TestGroupedConvNoOptim):
......@@ -2499,7 +2499,7 @@ class TestSeparableConv:
@pytest.mark.skipif(
config.cxx == "" or not aesara.tensor.nnet.abstract_conv.imported_scipy_signal,
config.cxx == "",
reason="SciPy and cxx needed",
)
class TestUnsharedConv:
......@@ -2750,7 +2750,7 @@ class TestAsymmetricPadding:
border_mode = [((1, 2), (2, 1)), ((1, 1), (0, 3)), ((2, 1), (0, 0))]
@pytest.mark.skipif(
config.cxx == "" or not aesara.tensor.nnet.abstract_conv.imported_scipy_signal,
config.cxx == "",
reason="SciPy and cxx needed",
)
def test_fwd(self):
......@@ -2800,7 +2800,7 @@ class TestAsymmetricPadding:
utt.verify_grad(asymmetric_conv_op, [img, kern], mode=self.mode, eps=1)
@pytest.mark.skipif(
config.cxx == "" or not aesara.tensor.nnet.abstract_conv.imported_scipy_signal,
config.cxx == "",
reason="SciPy and cxx needed",
)
def test_gradweight(self):
......@@ -2857,7 +2857,7 @@ class TestAsymmetricPadding:
utt.verify_grad(conv_gradweight, [img, top], mode=self.mode, eps=1)
@pytest.mark.skipif(
config.cxx == "" or not aesara.tensor.nnet.abstract_conv.imported_scipy_signal,
config.cxx == "",
reason="SciPy and cxx needed",
)
def test_gradinput(self):
......@@ -2934,7 +2934,7 @@ class TestCausalConv:
).astype(config.floatX)
@pytest.mark.skipif(
config.cxx == "" or not aesara.tensor.nnet.abstract_conv.imported_scipy_signal,
config.cxx == "",
reason="SciPy and cxx needed",
)
def test_interface(self):
......
......@@ -14,7 +14,7 @@ from tests import unittest_tools as utt
@pytest.mark.skipif(
not conv.imported_scipy_signal and aesara.config.cxx == "",
aesara.config.cxx == "",
reason="conv2d tests need SciPy or a c++ compiler",
)
class TestConv2D(utt.InferShapeTester):
......
......@@ -3,7 +3,7 @@ import pytest
import aesara
import aesara.tensor as aet
from aesara.tensor.nnet import conv, corr
from aesara.tensor.nnet import corr
from aesara.tensor.type import dmatrix, dtensor3, dtensor4, dvector, tensor4
from tests import unittest_tools as utt
from tests.tensor.nnet.test_abstract_conv import (
......@@ -15,7 +15,7 @@ from tests.tensor.nnet.test_abstract_conv import (
@pytest.mark.skipif(
aesara.config.cxx == "" or not conv.imported_scipy_signal,
aesara.config.cxx == "",
reason="SciPy and cxx needed",
)
class TestCorr2D(utt.InferShapeTester):
......@@ -323,7 +323,7 @@ class TestCorr2D(utt.InferShapeTester):
@pytest.mark.slow
@pytest.mark.skipif(
aesara.config.cxx == "" or not conv.imported_scipy_signal,
aesara.config.cxx == "",
reason="SciPy and cxx needed",
)
def test_infer_shape_forward(self):
......@@ -371,9 +371,7 @@ class TestCorr2D(utt.InferShapeTester):
@pytest.mark.slow
@pytest.mark.skipif(
aesara.config.mode == "FAST_COMPILE"
or aesara.config.cxx == ""
or not conv.imported_scipy_signal,
aesara.config.mode == "FAST_COMPILE" or aesara.config.cxx == "",
reason="SciPy and cxx needed",
)
def test_infer_shape_gradW(self):
......
......@@ -3,14 +3,14 @@ import pytest
import aesara
import aesara.tensor as aet
from aesara.tensor.nnet import conv, corr3d
from aesara.tensor.nnet import corr3d
from aesara.tensor.type import dmatrix, dtensor3, dtensor4, dtensor5, tensor5, vector
from tests import unittest_tools as utt
from tests.tensor.nnet.test_abstract_conv import TestGroupedConv3dNoOptim
@pytest.mark.skipif(
aesara.config.cxx == "" or not conv.imported_scipy_signal,
aesara.config.cxx == "",
reason="SciPy and cxx needed",
)
class TestCorr3D(utt.InferShapeTester):
......
......@@ -2671,7 +2671,7 @@ class TestLocalSwitchSink:
@pytest.mark.skipif(
config.cxx == "" and not aes.math.imported_scipy_special,
config.cxx == "",
reason="erf need a c++ compiler or scipy",
)
class TestLocalErf:
......@@ -2763,7 +2763,7 @@ class TestLocalErf:
@pytest.mark.skipif(
config.cxx == "" and not aes.math.imported_scipy_special,
config.cxx == "",
reason="erf need a c++ compiler or scipy",
)
class TestLocalErfc:
......
......@@ -6,6 +6,9 @@ scipy = pytest.importorskip("scipy")
from functools import partial
import scipy.special
import scipy.stats
from aesara import tensor as aet
from aesara.compile.mode import get_default_mode
from aesara.configdefaults import config
......@@ -31,16 +34,7 @@ from tests.tensor.utils import (
)
imported_scipy_special = False
mode_no_scipy = get_default_mode()
try:
import scipy.special
import scipy.stats
imported_scipy_special = True
except ImportError:
if config.mode == "FAST_COMPILE":
mode_no_scipy = "FAST_RUN"
def scipy_special_gammau(k, x):
......@@ -51,60 +45,30 @@ def scipy_special_gammal(k, x):
return scipy.special.gammainc(k, x) * scipy.special.gamma(k)
# We can't test it if scipy is not installed!
# Precomputing the result is brittle(it have been broken!)
# As if we do any modification to random number here,
# The input random number will change and the output!
if imported_scipy_special:
expected_erf = scipy.special.erf
expected_erfc = scipy.special.erfc
expected_erfinv = scipy.special.erfinv
expected_erfcinv = scipy.special.erfcinv
expected_gamma = scipy.special.gamma
expected_gammaln = scipy.special.gammaln
expected_psi = scipy.special.psi
expected_tri_gamma = partial(scipy.special.polygamma, 1)
expected_chi2sf = scipy.stats.chi2.sf
expected_gammainc = scipy.special.gammainc
expected_gammaincc = scipy.special.gammaincc
expected_gammau = scipy_special_gammau
expected_gammal = scipy_special_gammal
expected_j0 = scipy.special.j0
expected_j1 = scipy.special.j1
expected_jv = scipy.special.jv
expected_i0 = scipy.special.i0
expected_i1 = scipy.special.i1
expected_iv = scipy.special.iv
expected_erfcx = scipy.special.erfcx
expected_sigmoid = scipy.special.expit
skip_scipy = False
else:
expected_erf = []
expected_erfc = []
expected_erfcx = []
expected_erfinv = []
expected_erfcinv = []
expected_gamma = []
expected_gammaln = []
expected_psi = []
expected_tri_gamma = []
expected_chi2sf = []
expected_gammainc = []
expected_gammaincc = []
expected_gammau = []
expected_gammal = []
expected_j0 = []
expected_j1 = []
expected_jv = []
expected_i0 = []
expected_i1 = []
expected_iv = []
expected_sigmoid = (
upcast_int8_nfunc(
lambda inputs: check_floatX(inputs, np.log1p(np.exp(inputs)))
),
)
skip_scipy = "scipy is not present"
expected_erf = scipy.special.erf
expected_erfc = scipy.special.erfc
expected_erfinv = scipy.special.erfinv
expected_erfcinv = scipy.special.erfcinv
expected_gamma = scipy.special.gamma
expected_gammaln = scipy.special.gammaln
expected_psi = scipy.special.psi
expected_tri_gamma = partial(scipy.special.polygamma, 1)
expected_chi2sf = scipy.stats.chi2.sf
expected_gammainc = scipy.special.gammainc
expected_gammaincc = scipy.special.gammaincc
expected_gammau = scipy_special_gammau
expected_gammal = scipy_special_gammal
expected_j0 = scipy.special.j0
expected_j1 = scipy.special.j1
expected_jv = scipy.special.jv
expected_i0 = scipy.special.i0
expected_i1 = scipy.special.i1
expected_iv = scipy.special.iv
expected_erfcx = scipy.special.erfcx
expected_sigmoid = scipy.special.expit
TestErfBroadcast = makeBroadcastTester(
op=aet.erf,
......@@ -113,7 +77,6 @@ TestErfBroadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_normal,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfInplaceBroadcast = makeBroadcastTester(
op=inplace.erf_inplace,
......@@ -122,7 +85,6 @@ TestErfInplaceBroadcast = makeBroadcastTester(
mode=mode_no_scipy,
eps=2e-10,
inplace=True,
skip=skip_scipy,
)
TestErfcBroadcast = makeBroadcastTester(
......@@ -132,7 +94,6 @@ TestErfcBroadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_normal,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfcInplaceBroadcast = makeBroadcastTester(
op=inplace.erfc_inplace,
......@@ -141,7 +102,6 @@ TestErfcInplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestErfcxBroadcast = makeBroadcastTester(
......@@ -151,7 +111,6 @@ TestErfcxBroadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_normal_small_neg_range,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfcxInplaceBroadcast = makeBroadcastTester(
op=inplace.erfcx_inplace,
......@@ -160,7 +119,6 @@ TestErfcxInplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestErfinvBroadcast = makeBroadcastTester(
......@@ -173,7 +131,6 @@ TestErfinvBroadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_abs1_no_complex,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfcinvBroadcast = makeBroadcastTester(
......@@ -186,7 +143,6 @@ TestErfcinvBroadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_0_2_no_complex,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
_good_broadcast_unary_gammaln = dict(
......@@ -209,7 +165,6 @@ TestGammaBroadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_gammaln,
mode=mode_no_scipy,
eps=1e-5,
skip=skip_scipy,
)
TestGammaInplaceBroadcast = makeBroadcastTester(
op=inplace.gamma_inplace,
......@@ -218,7 +173,6 @@ TestGammaInplaceBroadcast = makeBroadcastTester(
mode=mode_no_scipy,
eps=1e-5,
inplace=True,
skip=skip_scipy,
)
TestGammalnBroadcast = makeBroadcastTester(
......@@ -228,7 +182,6 @@ TestGammalnBroadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_gammaln,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestGammalnInplaceBroadcast = makeBroadcastTester(
op=inplace.gammaln_inplace,
......@@ -237,7 +190,6 @@ TestGammalnInplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
_good_broadcast_unary_psi = dict(
......@@ -254,7 +206,6 @@ TestPsiBroadcast = makeBroadcastTester(
good=_good_broadcast_unary_psi,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestPsiInplaceBroadcast = makeBroadcastTester(
op=inplace.psi_inplace,
......@@ -263,7 +214,6 @@ TestPsiInplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
_good_broadcast_unary_tri_gamma = _good_broadcast_unary_psi
......@@ -274,7 +224,6 @@ TestTriGammaBroadcast = makeBroadcastTester(
good=_good_broadcast_unary_psi,
eps=2e-8,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestTriGammaInplaceBroadcast = makeBroadcastTester(
op=inplace.tri_gamma_inplace,
......@@ -283,7 +232,6 @@ TestTriGammaInplaceBroadcast = makeBroadcastTester(
eps=2e-8,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestChi2SFBroadcast = makeBroadcastTester(
......@@ -292,7 +240,6 @@ TestChi2SFBroadcast = makeBroadcastTester(
good=_good_broadcast_unary_chi2sf,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
name="Chi2SF",
)
......@@ -303,7 +250,6 @@ TestChi2SFInplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
name="Chi2SF",
)
......@@ -331,7 +277,6 @@ TestGammaIncBroadcast = makeBroadcastTester(
good=_good_broadcast_binary_gamma,
eps=2e-8,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestGammaIncInplaceBroadcast = makeBroadcastTester(
......@@ -341,7 +286,6 @@ TestGammaIncInplaceBroadcast = makeBroadcastTester(
eps=2e-8,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestGammaInccBroadcast = makeBroadcastTester(
......@@ -350,7 +294,6 @@ TestGammaInccBroadcast = makeBroadcastTester(
good=_good_broadcast_binary_gamma,
eps=2e-8,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestGammaInccInplaceBroadcast = makeBroadcastTester(
......@@ -360,7 +303,6 @@ TestGammaInccInplaceBroadcast = makeBroadcastTester(
eps=2e-8,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestGammaUBroadcast = makeBroadcastTester(
......@@ -369,7 +311,6 @@ TestGammaUBroadcast = makeBroadcastTester(
good=_good_broadcast_binary_gamma,
eps=2e-8,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestGammaUInplaceBroadcast = makeBroadcastTester(
......@@ -379,7 +320,6 @@ TestGammaUInplaceBroadcast = makeBroadcastTester(
eps=2e-8,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestGammaLBroadcast = makeBroadcastTester(
......@@ -388,7 +328,6 @@ TestGammaLBroadcast = makeBroadcastTester(
good=_good_broadcast_binary_gamma,
eps=2e-8,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestGammaLInplaceBroadcast = makeBroadcastTester(
......@@ -398,7 +337,6 @@ TestGammaLInplaceBroadcast = makeBroadcastTester(
eps=2e-8,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
_good_broadcast_unary_bessel = dict(
......@@ -438,7 +376,6 @@ TestJ0Broadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestJ0InplaceBroadcast = makeBroadcastTester(
......@@ -448,7 +385,6 @@ TestJ0InplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestJ1Broadcast = makeBroadcastTester(
......@@ -458,7 +394,6 @@ TestJ1Broadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestJ1InplaceBroadcast = makeBroadcastTester(
......@@ -468,7 +403,6 @@ TestJ1InplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestJvBroadcast = makeBroadcastTester(
......@@ -477,7 +411,6 @@ TestJvBroadcast = makeBroadcastTester(
good=_good_broadcast_binary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestJvInplaceBroadcast = makeBroadcastTester(
......@@ -487,7 +420,6 @@ TestJvInplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
......@@ -510,7 +442,6 @@ TestI0Broadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestI0InplaceBroadcast = makeBroadcastTester(
......@@ -520,7 +451,6 @@ TestI0InplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestI1Broadcast = makeBroadcastTester(
......@@ -530,7 +460,6 @@ TestI1Broadcast = makeBroadcastTester(
grad=_grad_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestI1InplaceBroadcast = makeBroadcastTester(
......@@ -540,7 +469,6 @@ TestI1InplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestIvBroadcast = makeBroadcastTester(
......@@ -549,7 +477,6 @@ TestIvBroadcast = makeBroadcastTester(
good=_good_broadcast_binary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestIvInplaceBroadcast = makeBroadcastTester(
......@@ -559,7 +486,6 @@ TestIvInplaceBroadcast = makeBroadcastTester(
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
......
......@@ -3,6 +3,7 @@ import itertools
import numpy as np
import numpy.linalg
import pytest
import scipy
import aesara
from aesara import function, grad
......@@ -39,7 +40,6 @@ def check_upper_triangular(pd, ch_f):
def test_cholesky():
pytest.importorskip("scipy")
rng = np.random.default_rng(utt.fetch_seed())
r = rng.standard_normal((5, 5)).astype(config.floatX)
pd = np.dot(r, r.T)
......@@ -62,7 +62,6 @@ def test_cholesky():
def test_cholesky_indef():
scipy = pytest.importorskip("scipy")
x = matrix()
mat = np.array([[1, 0.2], [0.2, -2]]).astype(config.floatX)
cholesky = Cholesky(lower=True, on_error="raise")
......@@ -75,8 +74,6 @@ def test_cholesky_indef():
def test_cholesky_grad():
pytest.importorskip("scipy")
rng = np.random.default_rng(utt.fetch_seed())
r = rng.standard_normal((5, 5)).astype(config.floatX)
......@@ -106,7 +103,6 @@ def test_cholesky_grad():
def test_cholesky_grad_indef():
scipy = pytest.importorskip("scipy")
x = matrix()
mat = np.array([[1, 0.2], [0.2, -2]]).astype(config.floatX)
cholesky = Cholesky(lower=True, on_error="raise")
......@@ -120,8 +116,6 @@ def test_cholesky_grad_indef():
@pytest.mark.slow
def test_cholesky_and_cholesky_grad_shape():
pytest.importorskip("scipy")
rng = np.random.default_rng(utt.fetch_seed())
x = matrix()
for l in (cholesky(x), Cholesky(lower=True)(x), Cholesky(lower=False)(x)):
......@@ -142,8 +136,6 @@ def test_cholesky_and_cholesky_grad_shape():
def test_eigvalsh():
scipy = pytest.importorskip("scipy")
A = dmatrix("a")
B = dmatrix("b")
f = function([A, B], eigvalsh(A, B))
......@@ -167,8 +159,6 @@ def test_eigvalsh():
def test_eigvalsh_grad():
pytest.importorskip("scipy")
rng = np.random.default_rng(utt.fetch_seed())
a = rng.standard_normal((5, 5))
a = a + a.T
......@@ -185,7 +175,6 @@ class TestSolve(utt.InferShapeTester):
super().setup_method()
def test_infer_shape(self):
pytest.importorskip("scipy")
rng = np.random.default_rng(utt.fetch_seed())
A = matrix()
b = matrix()
......@@ -216,7 +205,6 @@ class TestSolve(utt.InferShapeTester):
)
def test_solve_correctness(self):
scipy = pytest.importorskip("scipy")
rng = np.random.default_rng(utt.fetch_seed())
A = matrix()
b = matrix()
......@@ -258,8 +246,6 @@ class TestSolve(utt.InferShapeTester):
)
def test_solve_dtype(self):
pytest.importorskip("scipy")
dtypes = [
"uint8",
"uint16",
......@@ -305,8 +291,6 @@ class TestSolve(utt.InferShapeTester):
solve_op = Solve(A_structure=A_structure, lower=lower)
utt.verify_grad(solve_op, [A_val, b_val], 3, rng, eps=eps)
def test_solve_grad(self):
pytest.importorskip("scipy")
rng = np.random.default_rng(utt.fetch_seed())
structures = ["general", "lower_triangular", "upper_triangular"]
for A_structure in structures:
......@@ -336,7 +320,6 @@ def test_expm():
def test_expm_grad_1():
# with symmetric matrix (real eigenvectors)
pytest.importorskip("scipy")
rng = np.random.default_rng(utt.fetch_seed())
# Always test in float64 for better numerical stability.
A = rng.standard_normal((5, 5))
......@@ -347,7 +330,6 @@ def test_expm_grad_1():
def test_expm_grad_2():
# with non-symmetric matrix with real eigenspecta
pytest.importorskip("scipy")
rng = np.random.default_rng(utt.fetch_seed())
# Always test in float64 for better numerical stability.
A = rng.standard_normal((5, 5))
......@@ -360,7 +342,6 @@ def test_expm_grad_2():
def test_expm_grad_3():
# with non-symmetric matrix (complex eigenvectors)
pytest.importorskip("scipy")
rng = np.random.default_rng(utt.fetch_seed())
# Always test in float64 for better numerical stability.
A = rng.standard_normal((5, 5))
......@@ -377,8 +358,6 @@ class TestKron(utt.InferShapeTester):
super().setup_method()
def test_perform(self):
scipy = pytest.importorskip("scipy")
for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
x = tensor(dtype="floatX", broadcastable=(False,) * len(shp0))
a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论