Commit 856b81b9 — authored by ricardoV94, committed by Ricardo Vieira

Tweak test errors and requirements

Parent commit: b887bd78
......@@ -3,6 +3,7 @@ import pickle
import numpy as np
import pytest
from numba import TypingError
import pytensor.tensor as pt
from pytensor.compile import shared
......@@ -1001,10 +1002,14 @@ class TestPicklefunction:
raise
assert f.trust_input is g.trust_input
f(np.asarray(2.0))
with pytest.raises((ValueError, AttributeError, InvalidValueError)):
with pytest.raises(
(ValueError, AttributeError, InvalidValueError, TypingError)
):
f(2.0)
g(np.asarray(2.0))
with pytest.raises((ValueError, AttributeError, InvalidValueError)):
with pytest.raises(
(ValueError, AttributeError, InvalidValueError, TypingError)
):
g(2.0)
def test_output_keys(self):
......
......@@ -4,6 +4,7 @@ import pytest
import pytensor
import pytensor.tensor as pt
from pytensor import function, scan, shared
from pytensor.compile import Function
from pytensor.compile.builders import OpFromGraph
from pytensor.compile.io import In
from pytensor.compile.mode import get_default_mode, get_mode
......@@ -13,6 +14,7 @@ from pytensor.graph.basic import Constant, equal_computations
from pytensor.graph.fg import FunctionGraph
from pytensor.graph.replace import clone_replace
from pytensor.graph.traversal import ancestors
from pytensor.link.basic import JITLinker
from pytensor.scan.op import Scan
from pytensor.scan.rewriting import ScanInplaceOptimizer, ScanMerge
from pytensor.scan.utils import until
......@@ -860,7 +862,10 @@ class TestScanMerge:
@staticmethod
def count_scans(fn):
nodes = fn.maker.fgraph.apply_nodes
if isinstance(fn, Function):
nodes = fn.maker.fgraph.apply_nodes
else:
nodes = fn.apply_nodes
scans = [node for node in nodes if isinstance(node.op, Scan)]
return len(scans)
......@@ -1068,7 +1073,7 @@ class TestScanMerge:
[scan_node] = [
node for node in f.maker.fgraph.apply_nodes if isinstance(node.op, Scan)
]
inner_f = scan_node.op.fn
inner_f = scan_node.op.fgraph
assert self.count_scans(inner_f) == 1
......@@ -1529,13 +1534,15 @@ class TestSaveMem:
on_unused_input="ignore",
allow_input_downcast=True,
)(v_u, [0, 0], 0, [0, 0], 0)
# ScanSaveMem keeps +1 entries to handle taps with preallocated outputs
# ScanSaveMem keeps +1 entries to handle taps with preallocated outputs, unless we are using a JITLinker
maybe_one = 0 if isinstance(f.maker.linker, JITLinker) else 1
assert [int(i) for i in buffer_lengths] == [
7, # entry -7 of a map variable is kept, we need at least that many
3, # entries [-3, -2] of a map variable are kept, we need at least 3
6, # last six entries of a map variable are kept
2 + 1, # last entry of a double tap variable is kept
1 + 1, # last entry of a single tap variable is kept
2 + maybe_one, # last entry of a double tap variable is kept
1 + maybe_one, # last entry of a single tap variable is kept
]
def test_savemem_does_not_duplicate_number_of_scan_nodes(self):
......
......@@ -4,6 +4,7 @@ import pytest
import pytensor.tensor as pt
from pytensor import config, function, grad, shared
from pytensor.compile.mode import FAST_RUN
from pytensor.link.basic import JITLinker
from pytensor.scan.views import filter as pt_filter
from pytensor.scan.views import foldl, foldr
from pytensor.scan.views import map as pt_map
......@@ -79,7 +80,8 @@ def test_reduce_memory_consumption():
# 1) provided to the inner function. Now, because of the memory-reuse
# feature in Scan it can be 2 because SaveMem needs to keep a
# larger buffer to avoid aliasing between the inputs and the outputs.
if config.scan__allow_output_prealloc:
# JIT linkers don't do this optimization so it's still 1
if not isinstance(mode.linker, JITLinker) and config.scan__allow_output_prealloc:
assert f1().shape[0] == 2
else:
assert f1().shape[0] == 1
......@@ -119,7 +121,8 @@ def test_foldl_memory_consumption(return_updates):
# 1) provided to the inner function. Now, because of the memory-reuse
# feature in Scan it can be 2 because SaveMem needs to keep a
# larger buffer to avoid aliasing between the inputs and the outputs.
if config.scan__allow_output_prealloc:
# JIT linkers don't do this optimization so it's still 1
if not isinstance(mode.linker, JITLinker) and config.scan__allow_output_prealloc:
assert f1().shape[0] == 2
else:
assert f1().shape[0] == 1
......@@ -159,7 +162,8 @@ def test_foldr_memory_consumption(return_updates):
# 1) provided to the inner function. Now, because of the memory-reuse
# feature in Scan it can be 2 because SaveMem needs to keep a
# larger buffer to avoid aliasing between the inputs and the outputs.
if config.scan__allow_output_prealloc:
# JIT linkers don't do this optimization so it's still 1
if not isinstance(mode.linker, JITLinker) and config.scan__allow_output_prealloc:
assert f1().shape[0] == 2
else:
assert f1().shape[0] == 1
......
......@@ -248,7 +248,11 @@ def test_decomposition_reused_preserves_check_finite(assume_a, counter):
assert fn_opt(
A_valid, b1_valid, b2_valid * np.nan
) # Should not raise (also fine on most LAPACK implementations?)
with pytest.raises(ValueError, match="array must not contain infs or NaNs"):
err_msg = (
"(array must not contain infs or NaNs"
r"|Non-numeric values \(nan or inf\))"
)
with pytest.raises((ValueError, np.linalg.LinAlgError), match=err_msg):
assert fn_opt(A_valid, b1_valid * np.nan, b2_valid)
with pytest.raises(ValueError, match="array must not contain infs or NaNs"):
with pytest.raises((ValueError, np.linalg.LinAlgError), match=err_msg):
assert fn_opt(A_valid * np.nan, b1_valid, b2_valid)
......@@ -306,7 +306,10 @@ class TestLocalCanonicalizeAlloc:
# Error raised by Alloc Op
with pytest.raises(
ValueError,
match=r"could not broadcast input array from shape \(3,7\) into shape \(6,7\)",
match=(
r"(could not broadcast input array from shape \(3,7\) into shape \(6,7\)"
r"|cannot assign slice of shape \(3, 7\) from input of shape \(6, 7\))"
),
):
f()
......
......@@ -43,6 +43,7 @@ from pytensor.tensor.blas import Dot22, Gemv
from pytensor.tensor.blas_c import CGemv
from pytensor.tensor.blockwise import Blockwise
from pytensor.tensor.elemwise import DimShuffle, Elemwise
from pytensor.tensor.math import Dot
from pytensor.tensor.math import sum as pt_sum
from pytensor.tensor.rewriting.subtensor_lift import (
local_subtensor_make_vector,
......@@ -241,7 +242,7 @@ def test_local_subtensor_of_dot():
f = function([m1, m2], pt.dot(m1, m2)[1:2], mode=mode)
topo = f.maker.fgraph.toposort()
assert test_equality(f(d1, d2), np.dot(d1, d2)[1:2])
assert isinstance(topo[-1].op, Dot22)
assert isinstance(topo[-1].op, Dot | Dot22)
m1 = tensor3()
m2 = tensor3()
......
......@@ -12,7 +12,7 @@ import pytensor.tensor.math as ptm
from pytensor import compile, config, function, shared
from pytensor.compile import SharedVariable
from pytensor.compile.io import In, Out
from pytensor.compile.mode import Mode, get_default_mode
from pytensor.compile.mode import Mode, get_default_mode, get_mode
from pytensor.compile.ops import DeepCopyOp
from pytensor.gradient import grad, hessian
from pytensor.graph.basic import Apply, equal_computations
......@@ -731,7 +731,7 @@ def check_alloc_runtime_broadcast(mode):
class TestAlloc:
dtype = config.floatX
mode = mode_opt
mode = get_mode(mode_opt)
shared = staticmethod(pytensor.shared)
allocs = [Alloc()] * 3
......@@ -745,41 +745,47 @@ class TestAlloc:
def setup_method(self):
self.rng = np.random.default_rng(seed=utt.fetch_seed())
def test_alloc_constant_folding(self):
@pytest.mark.parametrize(
"subtensor_fn, expected_grad_n_alloc",
[
# IncSubtensor1
(lambda x: x[:60], 1),
# AdvancedIncSubtensor1
(lambda x: x[np.arange(60)], 1),
# AdvancedIncSubtensor
(lambda x: x[np.arange(50), np.arange(50)], 1),
],
)
def test_alloc_constant_folding(self, subtensor_fn, expected_grad_n_alloc):
test_params = np.asarray(self.rng.standard_normal(50 * 60), self.dtype)
some_vector = vector("some_vector", dtype=self.dtype)
some_matrix = some_vector.reshape((60, 50))
variables = self.shared(np.ones((50,), dtype=self.dtype))
idx = constant(np.arange(50))
for alloc_, (subtensor, n_alloc) in zip(
self.allocs,
[
# IncSubtensor1
(some_matrix[:60], 2),
# AdvancedIncSubtensor1
(some_matrix[arange(60)], 2),
# AdvancedIncSubtensor
(some_matrix[idx, idx], 1),
],
strict=True,
):
derp = pt_sum(dense_dot(subtensor, variables))
fobj = pytensor.function([some_vector], derp, mode=self.mode)
grad_derp = pytensor.grad(derp, some_vector)
fgrad = pytensor.function([some_vector], grad_derp, mode=self.mode)
subtensor = subtensor_fn(some_matrix)
topo_obj = fobj.maker.fgraph.toposort()
assert sum(isinstance(node.op, type(alloc_)) for node in topo_obj) == 0
derp = pt_sum(dense_dot(subtensor, variables))
fobj = pytensor.function(
[some_vector], derp, mode=get_mode(self.mode).excluding("BlasOpt")
)
assert (
sum(isinstance(node.op, Alloc) for node in fobj.maker.fgraph.apply_nodes)
== 0
)
# TODO: Assert something about the value if we bothered to call it?
fobj(test_params)
topo_grad = fgrad.maker.fgraph.toposort()
assert (
sum(isinstance(node.op, type(alloc_)) for node in topo_grad) == n_alloc
), (alloc_, subtensor, n_alloc, topo_grad)
fobj(test_params)
fgrad(test_params)
grad_derp = pytensor.grad(derp, some_vector)
fgrad = pytensor.function(
[some_vector], grad_derp, mode=self.mode.excluding("BlasOpt")
)
assert (
sum(isinstance(node.op, Alloc) for node in fgrad.maker.fgraph.apply_nodes)
== expected_grad_n_alloc
)
# TODO: Assert something about the value if we bothered to call it?
fgrad(test_params)
def test_alloc_output(self):
val = constant(self.rng.standard_normal((1, 1)), dtype=self.dtype)
......
......@@ -137,6 +137,12 @@ def check_blockwise_runtime_broadcasting(mode):
fn(*valid_test_values), np.full((batch_dim, 3, 3), 5.0)
)
possible_err_messages = [
"Runtime broadcasting not allowed",
"has an incompatible shape in axis",
"Incompatible vectorized shapes",
]
err_msg = f"({'|'.join(possible_err_messages)})"
for invalid_test_values in [
(
np.ones((1, 3, 5)).astype(config.floatX),
......@@ -147,7 +153,7 @@ def check_blockwise_runtime_broadcasting(mode):
np.ones((1, 5, 3)).astype(config.floatX),
),
]:
with pytest.raises(ValueError, match="Runtime broadcasting not allowed"):
with pytest.raises(ValueError, match=err_msg):
fn(*invalid_test_values)
invalid_test_values = (
......
......@@ -185,7 +185,7 @@ class TestDimShuffle(unittest_tools.InferShapeTester):
x = self.type(self.dtype, shape=())()
y = x.dimshuffle(("x",) * (numpy_maxdims + 1))
with pytest.raises(ValueError):
with pytest.raises((ValueError, SystemError)):
y.eval({x: 0})
def test_c_views(self):
......
......@@ -1437,7 +1437,8 @@ class TestMinMax:
) # It's not failing in all the CIs but we have XPASS(strict) enabled
@pytest.mark.xfail(
condition=config.mode != "FAST_COMPILE", reason="Fails due to #770"
condition=getattr(get_default_mode().linker, "c_thunks", False),
reason="Fails due to #770",
)
def test_uint64_special_value(self):
"""Example from issue #770"""
......
......@@ -335,6 +335,7 @@ def test_dot_errors():
y_test = DataArray(np.ones((4, 5)), dims=("b", "c"))
# Doesn't fail until the rewrite
with pytest.raises(
ValueError, match="Input operand 1 has a mismatch in its core dimension 0"
ValueError,
match=r"(Input operand 1 has a mismatch in its core dimension 0|incompatible array sizes for np.dot)",
):
fn(x_test, y_test)
Markdown formatting is supported.
Upload progress: 0%
You are adding 0 people to this discussion. Please proceed with care.
Please finish editing this comment first.
Register or sign in to post a comment.