Commit ca089790 — authored by Virgile Andreani, committed by Ricardo Vieira

Changes for numpy 2.0 deprecations

- Replace `np.cast` with `np.asarray`: in numpy 2.0, `np.cast[new_dtype](arr)` is deprecated. The literal replacement is `np.asarray(arr, dtype=new_dtype)`.
- Replace `np.sctype2char` and `np.obj2sctype`. Added try/except to handle the change in behavior of `np.dtype`.
- Replace `np.find_common_type` with `np.result_type`.

Further changes to `TensorType`: `TensorType.dtype` must be a string, so the code has been changed from `self.dtype = np.dtype(dtype).type`, where the right-hand side is of type `np.generic`, to `self.dtype = str(np.dtype(dtype))`, where the right-hand side is a string that satisfies `self.dtype == str(np.dtype(self.dtype))`. This doesn't change the behavior of `np.array(..., dtype=self.dtype)` etc.
Parent commit: e036caf9
...@@ -2966,7 +2966,7 @@ class Log2(UnaryScalarOp): ...@@ -2966,7 +2966,7 @@ class Log2(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (gz / (x * np.asarray(math.log(2.0)).astype(x.dtype)),) return (gz / (x * np.array(math.log(2.0), dtype=x.dtype)),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
...@@ -3009,7 +3009,7 @@ class Log10(UnaryScalarOp): ...@@ -3009,7 +3009,7 @@ class Log10(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (gz / (x * np.asarray(math.log(10.0)).astype(x.dtype)),) return (gz / (x * np.array(math.log(10.0), dtype=x.dtype)),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
...@@ -3124,7 +3124,7 @@ class Exp2(UnaryScalarOp): ...@@ -3124,7 +3124,7 @@ class Exp2(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (gz * exp2(x) * log(np.cast[x.type](2)),) return (gz * exp2(x) * log(np.array(2, dtype=x.type)),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
...@@ -3263,7 +3263,7 @@ class Deg2Rad(UnaryScalarOp): ...@@ -3263,7 +3263,7 @@ class Deg2Rad(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (gz * np.asarray(np.pi / 180, gz.type),) return (gz * np.array(np.pi / 180, dtype=gz.type),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
...@@ -3298,7 +3298,7 @@ class Rad2Deg(UnaryScalarOp): ...@@ -3298,7 +3298,7 @@ class Rad2Deg(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (gz * np.asarray(180.0 / np.pi, gz.type),) return (gz * np.array(180.0 / np.pi, dtype=gz.type),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
...@@ -3371,7 +3371,7 @@ class ArcCos(UnaryScalarOp): ...@@ -3371,7 +3371,7 @@ class ArcCos(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (-gz / sqrt(np.cast[x.type](1) - sqr(x)),) return (-gz / sqrt(np.array(1, dtype=x.type) - sqr(x)),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
...@@ -3445,7 +3445,7 @@ class ArcSin(UnaryScalarOp): ...@@ -3445,7 +3445,7 @@ class ArcSin(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (gz / sqrt(np.cast[x.type](1) - sqr(x)),) return (gz / sqrt(np.array(1, dtype=x.type) - sqr(x)),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
...@@ -3517,7 +3517,7 @@ class ArcTan(UnaryScalarOp): ...@@ -3517,7 +3517,7 @@ class ArcTan(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (gz / (np.cast[x.type](1) + sqr(x)),) return (gz / (np.array(1, dtype=x.type) + sqr(x)),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
...@@ -3640,7 +3640,7 @@ class ArcCosh(UnaryScalarOp): ...@@ -3640,7 +3640,7 @@ class ArcCosh(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (gz / sqrt(sqr(x) - np.cast[x.type](1)),) return (gz / sqrt(sqr(x) - np.array(1, dtype=x.type)),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
...@@ -3717,7 +3717,7 @@ class ArcSinh(UnaryScalarOp): ...@@ -3717,7 +3717,7 @@ class ArcSinh(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (gz / sqrt(sqr(x) + np.cast[x.type](1)),) return (gz / sqrt(sqr(x) + np.array(1, dtype=x.type)),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
...@@ -3795,7 +3795,7 @@ class ArcTanh(UnaryScalarOp): ...@@ -3795,7 +3795,7 @@ class ArcTanh(UnaryScalarOp):
else: else:
return [x.zeros_like()] return [x.zeros_like()]
return (gz / (np.cast[x.type](1) - sqr(x)),) return (gz / (np.array(1, dtype=x.type) - sqr(x)),)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
......
...@@ -668,7 +668,7 @@ class Elemwise(OpenMPOp): ...@@ -668,7 +668,7 @@ class Elemwise(OpenMPOp):
and isinstance(self.nfunc, np.ufunc) and isinstance(self.nfunc, np.ufunc)
and node.inputs[0].dtype in discrete_dtypes and node.inputs[0].dtype in discrete_dtypes
): ):
char = np.sctype2char(out_dtype) char = np.dtype(out_dtype).char
sig = char * node.nin + "->" + char * node.nout sig = char * node.nin + "->" + char * node.nout
node.tag.sig = sig node.tag.sig = sig
node.tag.fake_node = Apply( node.tag.fake_node = Apply(
......
...@@ -4,6 +4,7 @@ from collections.abc import Iterable ...@@ -4,6 +4,7 @@ from collections.abc import Iterable
from typing import TYPE_CHECKING, Literal, Optional from typing import TYPE_CHECKING, Literal, Optional
import numpy as np import numpy as np
import numpy.typing as npt
import pytensor import pytensor
from pytensor import scalar as ps from pytensor import scalar as ps
...@@ -69,7 +70,7 @@ class TensorType(CType[np.ndarray], HasDataType, HasShape): ...@@ -69,7 +70,7 @@ class TensorType(CType[np.ndarray], HasDataType, HasShape):
def __init__( def __init__(
self, self,
dtype: str | np.dtype, dtype: str | npt.DTypeLike,
shape: Iterable[bool | int | None] | None = None, shape: Iterable[bool | int | None] | None = None,
name: str | None = None, name: str | None = None,
broadcastable: Iterable[bool] | None = None, broadcastable: Iterable[bool] | None = None,
...@@ -101,11 +102,11 @@ class TensorType(CType[np.ndarray], HasDataType, HasShape): ...@@ -101,11 +102,11 @@ class TensorType(CType[np.ndarray], HasDataType, HasShape):
if str(dtype) == "floatX": if str(dtype) == "floatX":
self.dtype = config.floatX self.dtype = config.floatX
else: else:
if np.obj2sctype(dtype) is None: try:
self.dtype = str(np.dtype(dtype))
except TypeError:
raise TypeError(f"Invalid dtype: {dtype}") raise TypeError(f"Invalid dtype: {dtype}")
self.dtype = np.dtype(dtype).name
def parse_bcast_and_shape(s): def parse_bcast_and_shape(s):
if isinstance(s, bool | np.bool_): if isinstance(s, bool | np.bool_):
return 1 if s else None return 1 if s else None
...@@ -789,14 +790,16 @@ def tensor( ...@@ -789,14 +790,16 @@ def tensor(
**kwargs, **kwargs,
) -> "TensorVariable": ) -> "TensorVariable":
if name is not None: if name is not None:
# Help catching errors with the new tensor API try:
# Many single letter strings are valid sctypes # Help catching errors with the new tensor API
if str(name) == "floatX" or (len(str(name)) > 1 and np.obj2sctype(name)): # Many single letter strings are valid sctypes
np.obj2sctype(name) if str(name) == "floatX" or (len(str(name)) > 1 and np.dtype(name).type):
raise ValueError( raise ValueError(
f"The first and only positional argument of tensor is now `name`. Got {name}.\n" f"The first and only positional argument of tensor is now `name`. Got {name}.\n"
"This name looks like a dtype, which you should pass as a keyword argument only." "This name looks like a dtype, which you should pass as a keyword argument only."
) )
except TypeError:
pass
if dtype is None: if dtype is None:
dtype = config.floatX dtype = config.floatX
......
...@@ -673,7 +673,7 @@ class TestPushOutAddScan: ...@@ -673,7 +673,7 @@ class TestPushOutAddScan:
zi = tensor3("zi") zi = tensor3("zi")
zi_value = x_value zi_value = x_value
init = pt.alloc(np.cast[config.floatX](0), batch_size, dim) init = pt.alloc(np.asarray(0, dtype=config.floatX), batch_size, dim)
def rnn_step1( def rnn_step1(
# sequences # sequences
......
...@@ -708,7 +708,7 @@ class TestFillDiagonal(utt.InferShapeTester): ...@@ -708,7 +708,7 @@ class TestFillDiagonal(utt.InferShapeTester):
y = scalar() y = scalar()
f = function([x, y], fill_diagonal(x, y)) f = function([x, y], fill_diagonal(x, y))
a = rng.random(shp).astype(config.floatX) a = rng.random(shp).astype(config.floatX)
val = np.cast[config.floatX](rng.random()) val = rng.random(dtype=config.floatX)
out = f(a, val) out = f(a, val)
# We can't use np.fill_diagonal as it is bugged. # We can't use np.fill_diagonal as it is bugged.
assert np.allclose(np.diag(out), val) assert np.allclose(np.diag(out), val)
...@@ -720,7 +720,7 @@ class TestFillDiagonal(utt.InferShapeTester): ...@@ -720,7 +720,7 @@ class TestFillDiagonal(utt.InferShapeTester):
x = tensor3() x = tensor3()
y = scalar() y = scalar()
f = function([x, y], fill_diagonal(x, y)) f = function([x, y], fill_diagonal(x, y))
val = np.cast[config.floatX](rng.random() + 10) val = rng.random(dtype=config.floatX) + 10
out = f(a, val) out = f(a, val)
# We can't use np.fill_diagonal as it is bugged. # We can't use np.fill_diagonal as it is bugged.
assert out[0, 0, 0] == val assert out[0, 0, 0] == val
...@@ -782,7 +782,7 @@ class TestFillDiagonalOffset(utt.InferShapeTester): ...@@ -782,7 +782,7 @@ class TestFillDiagonalOffset(utt.InferShapeTester):
f = function([x, y, z], fill_diagonal_offset(x, y, z)) f = function([x, y, z], fill_diagonal_offset(x, y, z))
a = rng.random(shp).astype(config.floatX) a = rng.random(shp).astype(config.floatX)
val = np.cast[config.floatX](rng.random()) val = rng.random(dtype=config.floatX)
out = f(a, val, test_offset) out = f(a, val, test_offset)
# We can't use np.fill_diagonal as it is bugged. # We can't use np.fill_diagonal as it is bugged.
assert np.allclose(np.diag(out, test_offset), val) assert np.allclose(np.diag(out, test_offset), val)
......
...@@ -152,7 +152,7 @@ def upcast_float16_ufunc(fn): ...@@ -152,7 +152,7 @@ def upcast_float16_ufunc(fn):
""" """
def ret(*args, **kwargs): def ret(*args, **kwargs):
out_dtype = np.find_common_type([a.dtype for a in args], [np.float16]) out_dtype = np.result_type(np.float16, *args)
if out_dtype == "float16": if out_dtype == "float16":
# Force everything to float32 # Force everything to float32
sig = "f" * fn.nin + "->" + "f" * fn.nout sig = "f" * fn.nin + "->" + "f" * fn.nout
......
...@@ -481,12 +481,12 @@ class TestGrad: ...@@ -481,12 +481,12 @@ class TestGrad:
int_type = imatrix().dtype int_type = imatrix().dtype
float_type = "float64" float_type = "float64"
X = np.cast[int_type](rng.standard_normal((m, d)) * 127.0) X = np.asarray(rng.standard_normal((m, d)) * 127.0, dtype=int_type)
W = np.cast[W.dtype](rng.standard_normal((d, n))) W = rng.standard_normal((d, n), dtype=W.dtype)
b = np.cast[b.dtype](rng.standard_normal(n)) b = rng.standard_normal(n, dtype=b.dtype)
int_result = int_func(X, W, b) int_result = int_func(X, W, b)
float_result = float_func(np.cast[float_type](X), W, b) float_result = float_func(np.asarray(X, dtype=float_type), W, b)
assert np.allclose(int_result, float_result), (int_result, float_result) assert np.allclose(int_result, float_result), (int_result, float_result)
...@@ -508,7 +508,7 @@ class TestGrad: ...@@ -508,7 +508,7 @@ class TestGrad:
# the output # the output
f = pytensor.function([x], g) f = pytensor.function([x], g)
rng = np.random.default_rng([2012, 9, 5]) rng = np.random.default_rng([2012, 9, 5])
x = np.cast[x.dtype](rng.standard_normal(3)) x = rng.standard_normal(3, dtype=x.dtype)
g = f(x) g = f(x)
assert np.allclose(g, np.ones(x.shape, dtype=x.dtype)) assert np.allclose(g, np.ones(x.shape, dtype=x.dtype))
...@@ -631,7 +631,8 @@ def test_known_grads(): ...@@ -631,7 +631,8 @@ def test_known_grads():
rng = np.random.default_rng([2012, 11, 15]) rng = np.random.default_rng([2012, 11, 15])
values = [rng.standard_normal(10), rng.integers(10), rng.standard_normal()] values = [rng.standard_normal(10), rng.integers(10), rng.standard_normal()]
values = [ values = [
np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values, strict=True) np.asarray(value, dtype=ipt.dtype)
for ipt, value in zip(inputs, values, strict=True)
] ]
true_grads = grad(cost, inputs, disconnected_inputs="ignore") true_grads = grad(cost, inputs, disconnected_inputs="ignore")
...@@ -679,7 +680,7 @@ def test_known_grads_integers(): ...@@ -679,7 +680,7 @@ def test_known_grads_integers():
f = pytensor.function([g_expected], g_grad) f = pytensor.function([g_expected], g_grad)
x = -3 x = -3
gv = np.cast[config.floatX](0.6) gv = np.asarray(0.6, dtype=config.floatX)
g_actual = f(gv) g_actual = f(gv)
...@@ -746,7 +747,8 @@ def test_subgraph_grad(): ...@@ -746,7 +747,8 @@ def test_subgraph_grad():
rng = np.random.default_rng([2012, 11, 15]) rng = np.random.default_rng([2012, 11, 15])
values = [rng.standard_normal(2), rng.standard_normal(3)] values = [rng.standard_normal(2), rng.standard_normal(3)]
values = [ values = [
np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values, strict=True) np.asarray(value, dtype=ipt.dtype)
for ipt, value in zip(inputs, values, strict=True)
] ]
wrt = [w2, w1] wrt = [w2, w1]
...@@ -1031,21 +1033,21 @@ def test_jacobian_scalar(): ...@@ -1031,21 +1033,21 @@ def test_jacobian_scalar():
# test when the jacobian is called with a tensor as wrt # test when the jacobian is called with a tensor as wrt
Jx = jacobian(y, x) Jx = jacobian(y, x)
f = pytensor.function([x], Jx) f = pytensor.function([x], Jx)
vx = np.cast[pytensor.config.floatX](rng.uniform()) vx = np.asarray(rng.uniform(), dtype=pytensor.config.floatX)
assert np.allclose(f(vx), 2) assert np.allclose(f(vx), 2)
# test when the jacobian is called with a tuple as wrt # test when the jacobian is called with a tuple as wrt
Jx = jacobian(y, (x,)) Jx = jacobian(y, (x,))
assert isinstance(Jx, tuple) assert isinstance(Jx, tuple)
f = pytensor.function([x], Jx[0]) f = pytensor.function([x], Jx[0])
vx = np.cast[pytensor.config.floatX](rng.uniform()) vx = np.asarray(rng.uniform(), dtype=pytensor.config.floatX)
assert np.allclose(f(vx), 2) assert np.allclose(f(vx), 2)
# test when the jacobian is called with a list as wrt # test when the jacobian is called with a list as wrt
Jx = jacobian(y, [x]) Jx = jacobian(y, [x])
assert isinstance(Jx, list) assert isinstance(Jx, list)
f = pytensor.function([x], Jx[0]) f = pytensor.function([x], Jx[0])
vx = np.cast[pytensor.config.floatX](rng.uniform()) vx = np.asarray(rng.uniform(), dtype=pytensor.config.floatX)
assert np.allclose(f(vx), 2) assert np.allclose(f(vx), 2)
# test when the jacobian is called with a list of two elements # test when the jacobian is called with a list of two elements
...@@ -1053,8 +1055,8 @@ def test_jacobian_scalar(): ...@@ -1053,8 +1055,8 @@ def test_jacobian_scalar():
y = x * z y = x * z
Jx = jacobian(y, [x, z]) Jx = jacobian(y, [x, z])
f = pytensor.function([x, z], Jx) f = pytensor.function([x, z], Jx)
vx = np.cast[pytensor.config.floatX](rng.uniform()) vx = np.asarray(rng.uniform(), dtype=pytensor.config.floatX)
vz = np.cast[pytensor.config.floatX](rng.uniform()) vz = np.asarray(rng.uniform(), dtype=pytensor.config.floatX)
vJx = f(vx, vz) vJx = f(vx, vz)
assert np.allclose(vJx[0], vz) assert np.allclose(vJx[0], vz)
......
...@@ -577,10 +577,10 @@ class TestMakeList: ...@@ -577,10 +577,10 @@ class TestMakeList:
x = tensor3() x = tensor3()
y = tensor3() y = tensor3()
A = np.cast[pytensor.config.floatX](np.random.random((5, 3))) A = np.random.random((5, 3)).astype(pytensor.config.floatX)
B = np.cast[pytensor.config.floatX](np.random.random((7, 2))) B = np.random.random((7, 2)).astype(pytensor.config.floatX)
X = np.cast[pytensor.config.floatX](np.random.random((5, 6, 1))) X = np.random.random((5, 6, 1)).astype(pytensor.config.floatX)
Y = np.cast[pytensor.config.floatX](np.random.random((1, 9, 3))) Y = np.random.random((1, 9, 3)).astype(pytensor.config.floatX)
make_list((3.0, 4.0)) make_list((3.0, 4.0))
c = make_list((a, b)) c = make_list((a, b))
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论