提交 f62401a0 authored 作者: ferres's avatar ferres 提交者: Maxim Kochurov

maintenance: unpin scipy

fix: cast elemwise outputs to their respective dtypes; fix: relax scipy dependency, should work in both cases; style: black; wrap with asarray; fix: make elemwise test check against dtype in the graph; fix scalar issues; Update pytensor/scalar/basic.py Co-authored-by: Ricardo Vieira <28983449+ricardoV94@users.noreply.github.com>; fix test; add a clarifying comment to checking nan; fix: bool is deprecated in numpy; deps: bound scipy version; improve test
上级 4d0103bd
...@@ -10,7 +10,7 @@ dependencies: ...@@ -10,7 +10,7 @@ dependencies:
- python=>3.10 - python=>3.10
- compilers - compilers
- numpy>=1.17.0,<2 - numpy>=1.17.0,<2
- scipy>=0.14,<1.14.0 - scipy>=1,<2
- filelock>=3.15 - filelock>=3.15
- etuples - etuples
- logical-unification - logical-unification
......
...@@ -10,7 +10,7 @@ dependencies: ...@@ -10,7 +10,7 @@ dependencies:
- python>=3.10 - python>=3.10
- compilers - compilers
- numpy>=1.17.0,<2 - numpy>=1.17.0,<2
- scipy>=0.14,<1.14.0 - scipy>=1,<2
- filelock>=3.15 - filelock>=3.15
- etuples - etuples
- logical-unification - logical-unification
......
...@@ -47,7 +47,7 @@ keywords = [ ...@@ -47,7 +47,7 @@ keywords = [
] ]
dependencies = [ dependencies = [
"setuptools>=59.0.0", "setuptools>=59.0.0",
"scipy>=0.14,<1.14", "scipy>=1,<2",
"numpy>=1.17.0,<2", "numpy>=1.17.0,<2",
"filelock>=3.15", "filelock>=3.15",
"etuples", "etuples",
......
...@@ -1140,14 +1140,25 @@ class ScalarOp(COp): ...@@ -1140,14 +1140,25 @@ class ScalarOp(COp):
else: else:
raise NotImplementedError(f"Cannot calculate the output types for {self}") raise NotImplementedError(f"Cannot calculate the output types for {self}")
@staticmethod
def _cast_scalar(x, dtype):
if hasattr(x, "astype"):
return x.astype(dtype)
elif dtype == "bool":
return np.bool_(x)
else:
return getattr(np, dtype)(x)
def perform(self, node, inputs, output_storage): def perform(self, node, inputs, output_storage):
if self.nout == 1: if self.nout == 1:
output_storage[0][0] = self.impl(*inputs) dtype = node.outputs[0].dtype
output_storage[0][0] = self._cast_scalar(self.impl(*inputs), dtype)
else: else:
variables = from_return_values(self.impl(*inputs)) variables = from_return_values(self.impl(*inputs))
assert len(variables) == len(output_storage) assert len(variables) == len(output_storage)
for storage, variable in zip(output_storage, variables): for out, storage, variable in zip(node.outputs, output_storage, variables):
storage[0] = variable dtype = out.dtype
storage[0] = self._cast_scalar(variable, dtype)
def impl(self, *inputs): def impl(self, *inputs):
raise MethodNotDefined("impl", type(self), self.__class__.__name__) raise MethodNotDefined("impl", type(self), self.__class__.__name__)
......
...@@ -767,34 +767,16 @@ class Elemwise(OpenMPOp): ...@@ -767,34 +767,16 @@ class Elemwise(OpenMPOp):
for i, (variable, storage, nout) in enumerate( for i, (variable, storage, nout) in enumerate(
zip(variables, output_storage, node.outputs) zip(variables, output_storage, node.outputs)
): ):
if getattr(variable, "dtype", "") == "object": storage[0] = variable = np.asarray(variable, dtype=nout.dtype)
# Since numpy 1.6, function created with numpy.frompyfunc
# always return an ndarray with dtype object
variable = np.asarray(variable, dtype=nout.dtype)
if i in self.inplace_pattern: if i in self.inplace_pattern:
odat = inputs[self.inplace_pattern[i]] odat = inputs[self.inplace_pattern[i]]
odat[...] = variable odat[...] = variable
storage[0] = odat storage[0] = odat
# Sometimes NumPy return a Python type.
# Some PyTensor op return a different dtype like floor, ceil,
# trunc, eq, ...
elif not isinstance(variable, np.ndarray) or variable.dtype != nout.dtype:
variable = np.asarray(variable, nout.dtype)
# The next line is needed for numpy 1.9. Otherwise
# there are tests that fail in DebugMode.
# Normally we would call pytensor.misc._asarray, but it
# is faster to inline the code. We know that the dtype
# are the same string, just different typenum.
if np.dtype(nout.dtype).num != variable.dtype.num:
variable = variable.view(dtype=nout.dtype)
storage[0] = variable
# numpy.real return a view! # numpy.real return a view!
elif not variable.flags.owndata: if not variable.flags.owndata:
storage[0] = variable.copy() storage[0] = variable.copy()
else:
storage[0] = variable
@staticmethod @staticmethod
def _check_runtime_broadcast(node, inputs): def _check_runtime_broadcast(node, inputs):
......
...@@ -212,12 +212,17 @@ def test_inner_composite(mode): ...@@ -212,12 +212,17 @@ def test_inner_composite(mode):
y16 = op(n_steps, x16) y16 = op(n_steps, x16)
assert y16.type.dtype == "float16" assert y16.type.dtype == "float16"
fn32 = function([n_steps, x16], y16, mode=mode) fn16 = function([n_steps, x16], y16, mode=mode)
out16 = fn16(n_steps=3, x16=np.array(4.73, dtype="float16"))
np.testing.assert_allclose( np.testing.assert_allclose(
fn32(n_steps=9, x16=np.array(4.73, dtype="float16")), out16,
4.73 + 9, 4.73 + 3,
rtol=1e-3, rtol=1e-3,
) )
out16overflow = fn16(n_steps=9, x16=np.array(4.73, dtype="float16"))
assert out16overflow.dtype == "float16"
# with this dtype overflow happens
assert np.isnan(out16overflow)
@mode @mode
...@@ -243,8 +248,10 @@ def test_inner_loop(mode): ...@@ -243,8 +248,10 @@ def test_inner_loop(mode):
y16 = outer_loop_op(n_steps, x16, n_steps) y16 = outer_loop_op(n_steps, x16, n_steps)
assert y16.type.dtype == "float16" assert y16.type.dtype == "float16"
fn32 = function([n_steps, x16], y16, mode=mode) fn16 = function([n_steps, x16], y16, mode=mode)
out16 = fn16(n_steps=3, x16=np.array(2.5, dtype="float16"))
assert out16.dtype == "float16"
np.testing.assert_allclose( np.testing.assert_allclose(
fn32(n_steps=3, x16=np.array(2.5, dtype="float16")), out16,
3**2 + 2.5, 3**2 + 2.5,
) )
...@@ -508,15 +508,17 @@ def makeTester( ...@@ -508,15 +508,17 @@ def makeTester(
if not isinstance(expecteds, list | tuple): if not isinstance(expecteds, list | tuple):
expecteds = (expecteds,) expecteds = (expecteds,)
for i, (variable, expected) in enumerate(zip(variables, expecteds)): for i, (variable, expected, out_symbol) in enumerate(
zip(variables, expecteds, node.outputs)
):
condition = ( condition = (
variable.dtype != expected.dtype variable.dtype != out_symbol.type.dtype
or variable.shape != expected.shape or variable.shape != expected.shape
or not np.allclose(variable, expected, atol=eps, rtol=eps) or not np.allclose(variable, expected, atol=eps, rtol=eps)
) )
assert not condition, ( assert not condition, (
f"Test {self.op}::{testname}: Output {i} gave the wrong" f"Test {self.op}::{testname}: Output {i} gave the wrong"
f" value. With inputs {inputs}, expected {expected} (dtype {expected.dtype})," f" value. With inputs {inputs}, expected {expected} (dtype {out_symbol.type.dtype}),"
f" got {variable} (dtype {variable.dtype}). eps={eps:f}" f" got {variable} (dtype {variable.dtype}). eps={eps:f}"
f" np.allclose returns {np.allclose(variable, expected, atol=eps, rtol=eps)} {np.allclose(variable, expected)}" f" np.allclose returns {np.allclose(variable, expected, atol=eps, rtol=eps)} {np.allclose(variable, expected)}"
) )
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论