提交 18e6c04d authored 作者: Brendan Murphy's avatar Brendan Murphy 提交者: Ricardo Vieira

Replace use of `np.MAXDIMS`

`np.MAXDIMS` was removed from the public API and no replacement is given in the migration docs. In numpy <= 1.26, the value of `np.MAXDIMS` was 32. This was often used as a flag to mean `axis=None`. In numpy >= 2.0, the maximum number of dims of an array has been increased to 64; simultaneously, a constant `NPY_RAVEL_AXIS` was added to the C-API to indicate that `axis=None`. In most cases, the use of `np.MAXDIMS` to check for `axis=None` can be replaced by the new constant `NPY_RAVEL_AXIS`. To make this constant accessible when using numpy <= 1.26, I added a function to insert `npy_2_compat.h` into the support code for the affected ops.
上级 46a235ab
......@@ -46,10 +46,21 @@ else:
ndarray_c_version = np.core._multiarray_umath._get_ndarray_c_version() # type: ignore[attr-defined]
# used in tests: the type of error thrown if a value is too large for the specified
# numpy data type is different in numpy 2.x
UintOverflowError = OverflowError if using_numpy_2 else TypeError
# to patch up some of the C code, we need to use these special values...
if using_numpy_2:
UintOverflowError = OverflowError
numpy_axis_is_none_flag = np.iinfo(np.int32).min # the value of "NPY_RAVEL_AXIS"
else:
UintOverflowError = TypeError
# 32 is the value used to mark axis = None in Numpy C-API prior to version 2.0
numpy_axis_is_none_flag = 32
# max number of dims is 64 in numpy 2.x; 32 in older versions
numpy_maxdims = 64 if using_numpy_2 else 32
def npy_2_compat_header() -> str:
......
......@@ -2,7 +2,6 @@ import warnings
from collections.abc import Collection, Iterable
import numpy as np
from numpy.exceptions import AxisError
import pytensor
import pytensor.scalar.basic as ps
......@@ -19,10 +18,11 @@ from pytensor.link.c.params_type import ParamsType
from pytensor.link.c.type import EnumList, Generic
from pytensor.npy_2_compat import (
normalize_axis_index,
normalize_axis_tuple,
npy_2_compat_header,
numpy_axis_is_none_flag,
)
from pytensor.raise_op import Assert
from pytensor.scalar import int32 as int_t
from pytensor.scalar import int64 as int_t
from pytensor.scalar import upcast
from pytensor.tensor import TensorLike, as_tensor_variable
from pytensor.tensor import basic as ptb
......@@ -47,6 +47,7 @@ from pytensor.tensor.math import sum as pt_sum
from pytensor.tensor.shape import Shape_i
from pytensor.tensor.subtensor import advanced_inc_subtensor1, set_subtensor
from pytensor.tensor.type import TensorType, dvector, int_dtypes, integer_dtypes, vector
from pytensor.tensor.utils import normalize_reduce_axis
from pytensor.tensor.variable import TensorVariable
from pytensor.utils import LOCAL_BITWIDTH, PYTHON_INT_BITWIDTH
......@@ -302,7 +303,11 @@ class CumOp(COp):
self.axis = axis
self.mode = mode
c_axis = property(lambda self: np.MAXDIMS if self.axis is None else self.axis)
@property
def c_axis(self) -> int:
    """Integer axis flag passed to the C implementation.

    Returns
    -------
    int
        ``numpy_axis_is_none_flag`` (the value of ``NPY_RAVEL_AXIS``) when
        ``self.axis`` is None, signalling the C code to operate on the
        raveled input; otherwise the concrete axis index.
    """
    if self.axis is None:
        # Sentinel the C code interprets as axis=None (ravel the input).
        return numpy_axis_is_none_flag
    return self.axis
def make_node(self, x):
x = ptb.as_tensor_variable(x)
......@@ -359,24 +364,37 @@ class CumOp(COp):
return shapes
def c_support_code_apply(self, node: Apply, name: str) -> str:
    """Return the numpy-2 compatibility header as C support code.

    Needed so that ``NPY_RAVEL_AXIS`` is defined in the generated C code
    even when compiling against numpy < 2.0, where the constant does not
    exist in the C-API.
    """
    return npy_2_compat_header()
def c_code(self, node, name, inames, onames, sub):
(x,) = inames
(z,) = onames
fail = sub["fail"]
params = sub["params"]
code = f"""
int axis = {params}->c_axis;
if self.axis is None:
axis_code = "int axis = NPY_RAVEL_AXIS;\n"
else:
axis_code = f"int axis = {params}->c_axis;\n"
code = (
axis_code
+ f"""
#undef NPY_UF_DBG_TRACING
#define NPY_UF_DBG_TRACING 1
if (axis == 0 && PyArray_NDIM({x}) == 1)
axis = NPY_MAXDIMS;
axis = NPY_RAVEL_AXIS;
npy_intp shape[1] = {{ PyArray_SIZE({x}) }};
if(axis == NPY_MAXDIMS && !({z} && PyArray_DIMS({z})[0] == shape[0]))
if(axis == NPY_RAVEL_AXIS && !({z} && PyArray_DIMS({z})[0] == shape[0]))
{{
Py_XDECREF({z});
{z} = (PyArrayObject*) PyArray_SimpleNew(1, shape, PyArray_TYPE((PyArrayObject*) py_{x}));
{z} = (PyArrayObject*) PyArray_SimpleNew(1, shape, PyArray_TYPE({x}));
}}
else if(axis != NPY_MAXDIMS && !({z} && PyArray_CompareLists(PyArray_DIMS({z}), PyArray_DIMS({x}), PyArray_NDIM({x}))))
else if(axis != NPY_RAVEL_AXIS && !({z} && PyArray_CompareLists(PyArray_DIMS({z}), PyArray_DIMS({x}), PyArray_NDIM({x}))))
{{
Py_XDECREF({z});
{z} = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM({x}), PyArray_DIMS({x}), PyArray_TYPE({x}));
......@@ -403,11 +421,12 @@ class CumOp(COp):
Py_XDECREF(t);
}}
"""
)
return code
def c_code_cache_version(self):
return (8,)
return (9,)
def __str__(self):
return f"{self.__class__.__name__}{{{self.axis}, {self.mode}}}"
......@@ -598,11 +617,7 @@ def squeeze(x, axis=None):
elif not isinstance(axis, Collection):
axis = (axis,)
# scalar inputs are treated as 1D regarding axis in this `Op`
try:
axis = normalize_axis_tuple(axis, ndim=max(1, _x.ndim))
except AxisError:
raise AxisError(axis, ndim=_x.ndim)
axis = normalize_reduce_axis(axis, ndim=_x.ndim)
if not axis:
# Nothing to do
......
......@@ -13,7 +13,11 @@ from pytensor.graph.op import Op
from pytensor.graph.replace import _vectorize_node
from pytensor.link.c.op import COp
from pytensor.link.c.params_type import ParamsType
from pytensor.npy_2_compat import normalize_axis_tuple
from pytensor.npy_2_compat import (
normalize_axis_tuple,
npy_2_compat_header,
numpy_axis_is_none_flag,
)
from pytensor.printing import pprint
from pytensor.raise_op import Assert
from pytensor.scalar.basic import BinaryScalarOp
......@@ -160,7 +164,7 @@ class Argmax(COp):
c_axis = np.int64(self.axis[0])
else:
# The value here doesn't matter, it won't be used
c_axis = np.int64(-1)
c_axis = numpy_axis_is_none_flag
return self.params_type.get_params(c_axis=c_axis)
def make_node(self, x):
......@@ -203,13 +207,17 @@ class Argmax(COp):
max_idx[0] = np.asarray(np.argmax(reshaped_x, axis=-1), dtype="int64")
def c_support_code_apply(self, node: Apply, name: str) -> str:
    """Return the numpy-2 compatibility header as C support code.

    Needed so that ``NPY_RAVEL_AXIS`` is defined in the generated C code
    even when compiling against numpy < 2.0, where the constant does not
    exist in the C-API.
    """
    return npy_2_compat_header()
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(argmax,) = out
fail = sub["fail"]
params = sub["params"]
if self.axis is None:
axis_code = "axis = NPY_MAXDIMS;"
axis_code = "axis = NPY_RAVEL_AXIS;"
else:
if len(self.axis) != 1:
raise NotImplementedError()
......
......@@ -6,6 +6,7 @@ import scipy
from pytensor.graph.basic import Apply
from pytensor.graph.replace import _vectorize_node
from pytensor.link.c.op import COp
from pytensor.npy_2_compat import npy_2_compat_header
from pytensor.tensor.basic import as_tensor_variable
from pytensor.tensor.elemwise import get_normalized_batch_axes
from pytensor.tensor.math import gamma, gammaln, log, neg, sum
......@@ -60,12 +61,16 @@ class SoftmaxGrad(COp):
return [shape[1]]
def c_code_cache_version(self):
return (4,)
return (5,)
def c_support_code_apply(self, node: Apply, name: str) -> str:
    """Needed to define NPY_RAVEL_AXIS"""
    # Emit the numpy-2 compatibility header so NPY_RAVEL_AXIS is available
    # in the generated C code even on numpy < 2.0 (matches the sibling
    # Softmax/LogSoftmax/CumOp ops).
    return npy_2_compat_header()
def c_code(self, node, name, inp, out, sub):
dy, sm = inp
(dx,) = out
axis = self.axis if self.axis is not None else np.MAXDIMS
axis = self.axis if self.axis is not None else "NPY_RAVEL_AXIS"
fail = sub["fail"]
return dedent(
......@@ -79,7 +84,7 @@ class SoftmaxGrad(COp):
int sm_ndim = PyArray_NDIM({sm});
int axis = {axis};
int iterate_axis = !(axis == NPY_MAXDIMS || sm_ndim == 1);
int iterate_axis = !(axis == NPY_RAVEL_AXIS || sm_ndim == 1);
// Validate inputs
if ((PyArray_TYPE({dy}) != NPY_DOUBLE) &&
......@@ -95,13 +100,15 @@ class SoftmaxGrad(COp):
{fail};
}}
if (axis < 0) axis = sm_ndim + axis;
if ((axis < 0) || (iterate_axis && (axis > sm_ndim)))
if (iterate_axis)
{{
PyErr_SetString(PyExc_ValueError, "invalid axis in SoftmaxGrad");
{fail};
if (axis < 0) axis = sm_ndim + axis;
if ((axis < 0) || (iterate_axis && (axis > sm_ndim)))
{{
PyErr_SetString(PyExc_ValueError, "invalid axis in SoftmaxGrad");
{fail};
}}
}}
if (({dx} == NULL)
|| !(PyArray_CompareLists(PyArray_DIMS({dx}), PyArray_DIMS({sm}), sm_ndim)))
{{
......@@ -289,10 +296,14 @@ class Softmax(COp):
def c_headers(self, **kwargs):
return ["<iostream>", "<cmath>"]
def c_support_code_apply(self, node: Apply, name: str) -> str:
    """Return the numpy-2 compatibility header as C support code.

    Needed so that ``NPY_RAVEL_AXIS`` is defined in the generated C code
    even when compiling against numpy < 2.0, where the constant does not
    exist in the C-API.
    """
    return npy_2_compat_header()
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(sm,) = out
axis = self.axis if self.axis is not None else np.MAXDIMS
axis = self.axis if self.axis is not None else "NPY_RAVEL_AXIS"
fail = sub["fail"]
# dtype = node.inputs[0].type.dtype_specs()[1]
# TODO: put this into a templated function, in the support code
......@@ -309,7 +320,7 @@ class Softmax(COp):
int x_ndim = PyArray_NDIM({x});
int axis = {axis};
int iterate_axis = !(axis == NPY_MAXDIMS || x_ndim == 1);
int iterate_axis = !(axis == NPY_RAVEL_AXIS || x_ndim == 1);
// Validate inputs
if ((PyArray_TYPE({x}) != NPY_DOUBLE) &&
......@@ -319,11 +330,14 @@ class Softmax(COp):
{fail}
}}
if (axis < 0) axis = x_ndim + axis;
if ((axis < 0) || (iterate_axis && (axis > x_ndim)))
if (iterate_axis)
{{
PyErr_SetString(PyExc_ValueError, "invalid axis in Softmax");
{fail}
if (axis < 0) axis = x_ndim + axis;
if ((axis < 0) || (iterate_axis && (axis > x_ndim)))
{{
PyErr_SetString(PyExc_ValueError, "invalid axis in Softmax");
{fail}
}}
}}
// Allocate Output Array
......@@ -481,7 +495,7 @@ class Softmax(COp):
@staticmethod
def c_code_cache_version():
return (4,)
return (5,)
def softmax(c, axis=None):
......@@ -541,10 +555,14 @@ class LogSoftmax(COp):
def c_headers(self, **kwargs):
return ["<cmath>"]
def c_support_code_apply(self, node: Apply, name: str) -> str:
    """Return the numpy-2 compatibility header as C support code.

    Needed so that ``NPY_RAVEL_AXIS`` is defined in the generated C code
    even when compiling against numpy < 2.0, where the constant does not
    exist in the C-API.
    """
    return npy_2_compat_header()
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(sm,) = out
axis = self.axis if self.axis is not None else np.MAXDIMS
axis = self.axis if self.axis is not None else "NPY_RAVEL_AXIS"
fail = sub["fail"]
return dedent(
......@@ -558,7 +576,7 @@ class LogSoftmax(COp):
int x_ndim = PyArray_NDIM({x});
int axis = {axis};
int iterate_axis = !(axis == NPY_MAXDIMS || x_ndim == 1);
int iterate_axis = !(axis == NPY_RAVEL_AXIS || x_ndim == 1);
// Validate inputs
if ((PyArray_TYPE({x}) != NPY_DOUBLE) &&
......@@ -568,13 +586,15 @@ class LogSoftmax(COp):
{fail}
}}
if (axis < 0) axis = x_ndim + axis;
if ((axis < 0) || (iterate_axis && (axis > x_ndim)))
if (iterate_axis)
{{
PyErr_SetString(PyExc_ValueError, "invalid axis in LogSoftmax");
{fail}
if (axis < 0) axis = x_ndim + axis;
if ((axis < 0) || (iterate_axis && (axis > x_ndim)))
{{
PyErr_SetString(PyExc_ValueError, "invalid axis in LogSoftmax");
{fail}
}}
}}
// Allocate Output Array
if (({sm}) == NULL || !(PyArray_CompareLists(PyArray_DIMS({sm}), PyArray_DIMS({x}), x_ndim)))
{{
......@@ -730,7 +750,7 @@ class LogSoftmax(COp):
@staticmethod
def c_code_cache_version():
return (1,)
return (2,)
def log_softmax(c, axis=None):
......
......@@ -18,7 +18,7 @@ from pytensor.graph.type import Type
from pytensor.graph.utils import MethodNotDefined
from pytensor.link.c.op import COp
from pytensor.link.c.params_type import ParamsType
from pytensor.npy_2_compat import numpy_version, using_numpy_2
from pytensor.npy_2_compat import npy_2_compat_header, numpy_version, using_numpy_2
from pytensor.printing import Printer, pprint, set_precedence
from pytensor.scalar.basic import ScalarConstant, ScalarVariable
from pytensor.tensor import (
......@@ -2149,7 +2149,7 @@ class AdvancedSubtensor1(COp):
def c_support_code(self, **kwargs):
# In some versions of numpy, NPY_MIN_INTP is defined as MIN_LONG,
# which is not defined. It should be NPY_MIN_LONG instead in that case.
return dedent(
return npy_2_compat_header() + dedent(
"""\
#ifndef MIN_LONG
#define MIN_LONG NPY_MIN_LONG
......@@ -2174,7 +2174,7 @@ class AdvancedSubtensor1(COp):
if (!PyArray_CanCastSafely(i_type, NPY_INTP) &&
PyArray_SIZE({i_name}) > 0) {{
npy_int64 min_val, max_val;
PyObject* py_min_val = PyArray_Min({i_name}, NPY_MAXDIMS,
PyObject* py_min_val = PyArray_Min({i_name}, NPY_RAVEL_AXIS,
NULL);
if (py_min_val == NULL) {{
{fail};
......@@ -2184,7 +2184,7 @@ class AdvancedSubtensor1(COp):
if (min_val == -1 && PyErr_Occurred()) {{
{fail};
}}
PyObject* py_max_val = PyArray_Max({i_name}, NPY_MAXDIMS,
PyObject* py_max_val = PyArray_Max({i_name}, NPY_RAVEL_AXIS,
NULL);
if (py_max_val == NULL) {{
{fail};
......@@ -2243,7 +2243,7 @@ class AdvancedSubtensor1(COp):
"""
def c_code_cache_version(self):
return (0, 1, 2)
return (0, 1, 2, 3)
advanced_subtensor1 = AdvancedSubtensor1()
......
......@@ -18,6 +18,7 @@ from pytensor.graph.fg import FunctionGraph
from pytensor.graph.replace import vectorize_node
from pytensor.link.basic import PerformLinker
from pytensor.link.c.basic import CLinker, OpWiseCLinker
from pytensor.npy_2_compat import numpy_maxdims
from pytensor.tensor import as_tensor_variable
from pytensor.tensor.basic import get_scalar_constant_value, second
from pytensor.tensor.elemwise import CAReduce, DimShuffle, Elemwise
......@@ -121,7 +122,8 @@ class TestDimShuffle(unittest_tools.InferShapeTester):
def test_too_big_rank(self):
x = self.type(self.dtype, shape=())()
y = x.dimshuffle(("x",) * (np.MAXDIMS + 1))
y = x.dimshuffle(("x",) * (numpy_maxdims + 1))
with pytest.raises(ValueError):
y.eval({x: 0})
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论