Commit 5bccb970 authored by Brandon T. Willard, committed by Brandon T. Willard

Move Numba elemwise tests to test_elemwise

Parent 5413462f
@@ -12,14 +12,12 @@ import aesara.scalar.basic as aesb
import aesara.scalar.math as aesm
import aesara.tensor as at
import aesara.tensor.basic as atb
import aesara.tensor.inplace as ati
import aesara.tensor.math as aem
import aesara.tensor.nnet.basic as nnetb
import aesara.tensor.random.basic as aer
from aesara import config, shared
from aesara.compile.function import function
from aesara.compile.mode import Mode
from aesara.compile.ops import ViewOp, deep_copy_op
from aesara.compile.ops import ViewOp
from aesara.compile.sharedvalue import SharedVariable
from aesara.graph.basic import Apply, Constant
from aesara.graph.fg import FunctionGraph
@@ -34,12 +32,9 @@ from aesara.raise_op import assert_op
from aesara.scalar.basic import Composite
from aesara.scan.basic import scan
from aesara.scan.utils import until
from aesara.tensor import blas
from aesara.tensor import elemwise as at_elemwise
from aesara.tensor import extra_ops, nlinalg, slinalg
from aesara.tensor import blas, extra_ops, nlinalg, slinalg
from aesara.tensor import subtensor as at_subtensor
from aesara.tensor.elemwise import Elemwise
from aesara.tensor.math import All, Any, Max, Mean, Min, Prod, ProdWithoutZeros, Sum
from aesara.tensor.shape import Reshape, Shape, Shape_i, SpecifyShape, Unbroadcast
@@ -319,93 +314,6 @@ def test_box_unbox(input, wrapper_fn, check_fn):
assert check_fn(res, input)
@pytest.mark.parametrize(
"inputs, input_vals, output_fn, exc",
[
(
[at.vector()],
[rng.uniform(size=100).astype(config.floatX)],
lambda x: at.gammaln(x),
None,
),
(
[at.vector()],
[rng.standard_normal(100).astype(config.floatX)],
lambda x: at.sigmoid(x),
None,
),
(
[at.vector()],
[rng.standard_normal(100).astype(config.floatX)],
lambda x: at.log1mexp(x),
None,
),
(
[at.vector()],
[rng.standard_normal(100).astype(config.floatX)],
lambda x: at.erf(x),
None,
),
(
[at.vector()],
[rng.standard_normal(100).astype(config.floatX)],
lambda x: at.erfc(x),
None,
),
(
[at.vector() for i in range(4)],
[rng.standard_normal(100).astype(config.floatX) for i in range(4)],
lambda x, y, x1, y1: (x + y) * (x1 + y1) * y,
None,
),
(
[at.matrix(), at.scalar()],
[rng.normal(size=(2, 2)).astype(config.floatX), 0.0],
lambda a, b: at.switch(a, b, a),
None,
),
(
[at.scalar(), at.scalar()],
[
np.array(1.0, dtype=config.floatX),
np.array(1.0, dtype=config.floatX),
],
lambda x, y: ati.add_inplace(deep_copy_op(x), deep_copy_op(y)),
None,
),
(
[at.vector(), at.vector()],
[
rng.standard_normal(100).astype(config.floatX),
rng.standard_normal(100).astype(config.floatX),
],
lambda x, y: ati.add_inplace(deep_copy_op(x), deep_copy_op(y)),
None,
),
(
[at.vector(), at.vector()],
[
rng.standard_normal(100).astype(config.floatX),
rng.standard_normal(100).astype(config.floatX),
],
lambda x, y: my_multi_out(x, y),
NotImplementedError,
),
],
)
def test_Elemwise(inputs, input_vals, output_fn, exc):
outputs = output_fn(*inputs)
out_fg = FunctionGraph(
outputs=[outputs] if not isinstance(outputs, list) else outputs
)
cm = contextlib.suppress() if exc is None else pytest.raises(exc)
with cm:
compare_numba_and_py(out_fg, input_vals)
@pytest.mark.parametrize(
"inputs, input_values, scalar_fn",
[
@@ -693,78 +601,6 @@ def test_AllocDiag(v, offset):
)
@pytest.mark.parametrize(
"v, new_order",
[
# `{'drop': [], 'shuffle': [], 'augment': [0, 1]}`
(
set_test_value(
at.lscalar(name="a"),
np.array(1, dtype=np.int64),
),
("x", "x"),
),
# I.e. `a_at.T`
# `{'drop': [], 'shuffle': [1, 0], 'augment': []}`
(
set_test_value(
at.matrix("a"), np.array([[1.0, 2.0], [3.0, 4.0]], dtype=config.floatX)
),
(1, 0),
),
# `{'drop': [], 'shuffle': [0, 1], 'augment': [2]}`
(
set_test_value(
at.matrix("a"), np.array([[1.0, 2.0], [3.0, 4.0]], dtype=config.floatX)
),
(1, 0, "x"),
),
# `{'drop': [1], 'shuffle': [2, 0], 'augment': [0, 2, 4]}`
(
set_test_value(
at.tensor(config.floatX, [False, True, False], name="a"),
np.array([[[1.0, 2.0]], [[3.0, 4.0]]], dtype=config.floatX),
),
("x", 2, "x", 0, "x"),
),
# I.e. `a_at.dimshuffle((0,))`
# `{'drop': [1], 'shuffle': [0], 'augment': []}`
(
set_test_value(
at.tensor(config.floatX, [False, True], name="a"),
np.array([[1.0], [2.0], [3.0], [4.0]], dtype=config.floatX),
),
(0,),
),
(
set_test_value(
at.tensor(config.floatX, [False, True], name="a"),
np.array([[1.0], [2.0], [3.0], [4.0]], dtype=config.floatX),
),
(0,),
),
(
set_test_value(
at.tensor(config.floatX, [True, True, True], name="a"),
np.array([[[1.0]]], dtype=config.floatX),
),
(),
),
],
)
def test_Dimshuffle(v, new_order):
g = at_elemwise.DimShuffle(v.broadcastable, new_order)(v)
g_fg = FunctionGraph(outputs=[g])
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
@pytest.mark.parametrize(
"v", [set_test_value(aes.float64(), np.array(1.0, dtype="float64"))]
)
@@ -996,17 +832,6 @@ def test_Clip(v, min, max):
)
def test_scalar_Elemwise_Clip():
a = at.scalar("a")
b = at.scalar("b")
z = at.switch(1, a, b)
c = at.clip(z, 1, 3)
c_fg = FunctionGraph(outputs=[c])
compare_numba_and_py(c_fg, [1, 1])
@pytest.mark.parametrize(
"vals, dtype",
[
@@ -1074,159 +899,6 @@ def test_ARange(start, stop, step, dtype):
)
@pytest.mark.parametrize(
"careduce_fn, axis, v",
[
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: All(axis)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Any(axis)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Mean(axis)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Mean(axis)(x),
0,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
0,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
(0, 1),
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
(1, 0),
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
None,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
1,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Prod(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: ProdWithoutZeros(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Prod(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
0,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Prod(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
1,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Max(axis)(x),
None,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Max(axis)(x),
None,
set_test_value(
at.lmatrix(), np.arange(3 * 2, dtype=np.int64).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Min(axis)(x),
None,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Min(axis)(x),
None,
set_test_value(
at.lmatrix(), np.arange(3 * 2, dtype=np.int64).reshape((3, 2))
),
),
],
)
def test_CAReduce(careduce_fn, axis, v):
g = careduce_fn(v, axis=axis)
g_fg = FunctionGraph(outputs=[g])
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
@pytest.mark.parametrize(
"vals, axis",
[
@@ -2024,123 +1696,6 @@ def test_Dot(x, y, exc):
)
@pytest.mark.parametrize(
"dy, sm, axis, exc",
[
(
set_test_value(
at.matrix(), np.array([[1, 1, 1], [0, 0, 0]], dtype=config.floatX)
),
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
None,
None,
),
(
set_test_value(
at.matrix(), np.array([[1, 1, 1], [0, 0, 0]], dtype=config.floatX)
),
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
0,
None,
),
(
set_test_value(
at.matrix(), np.array([[1, 1, 1], [0, 0, 0]], dtype=config.floatX)
),
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
1,
None,
),
],
)
def test_SoftmaxGrad(dy, sm, axis, exc):
g = nnetb.SoftmaxGrad(axis=axis)(dy, sm)
g_fg = FunctionGraph(outputs=[g])
cm = contextlib.suppress() if exc is None else pytest.warns(exc)
with cm:
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
@pytest.mark.parametrize(
"x, axis, exc",
[
(
set_test_value(at.vector(), rng.random(size=(2,)).astype(config.floatX)),
None,
None,
),
(
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
None,
None,
),
(
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
0,
None,
),
],
)
def test_Softmax(x, axis, exc):
g = nnetb.Softmax(axis=axis)(x)
g_fg = FunctionGraph(outputs=[g])
cm = contextlib.suppress() if exc is None else pytest.warns(exc)
with cm:
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
@pytest.mark.parametrize(
"x, axis, exc",
[
(
set_test_value(at.vector(), rng.random(size=(2,)).astype(config.floatX)),
None,
None,
),
(
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
0,
None,
),
(
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
1,
None,
),
],
)
def test_LogSoftmax(x, axis, exc):
g = nnetb.LogSoftmax(axis=axis)(x)
g_fg = FunctionGraph(outputs=[g])
cm = contextlib.suppress() if exc is None else pytest.warns(exc)
with cm:
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
@pytest.mark.parametrize(
"x, exc",
[
@@ -2186,51 +1741,6 @@ def test_Softplus(x, exc):
)
@pytest.mark.parametrize(
"x, axes, exc",
[
(
set_test_value(at.dscalar(), np.array(0.0, dtype="float64")),
[],
None,
),
(
set_test_value(at.dvector(), rng.random(size=(3,)).astype("float64")),
[0],
None,
),
(
set_test_value(at.dmatrix(), rng.random(size=(3, 2)).astype("float64")),
[0],
None,
),
(
set_test_value(at.dmatrix(), rng.random(size=(3, 2)).astype("float64")),
[0, 1],
None,
),
],
)
def test_MaxAndArgmax(x, axes, exc):
g = aem.MaxAndArgmax(axes)(x)
if isinstance(g, list):
g_fg = FunctionGraph(outputs=g)
else:
g_fg = FunctionGraph(outputs=[g])
cm = contextlib.suppress() if exc is None else pytest.warns(exc)
with cm:
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
@pytest.mark.parametrize(
"x, lower, exc",
[
......
import contextlib
import numpy as np
import pytest
import aesara.tensor as at
import aesara.tensor.inplace as ati
import aesara.tensor.math as aem
import aesara.tensor.nnet.basic as nnetb
from aesara import config
from aesara.compile.ops import deep_copy_op
from aesara.compile.sharedvalue import SharedVariable
from aesara.graph.basic import Constant
from aesara.graph.fg import FunctionGraph
from aesara.tensor import elemwise as at_elemwise
from aesara.tensor.math import All, Any, Max, Mean, Min, Prod, ProdWithoutZeros, Sum
from tests.link.numba.test_basic import (
compare_numba_and_py,
my_multi_out,
set_test_value,
)
rng = np.random.default_rng(42849)
@pytest.mark.parametrize(
"inputs, input_vals, output_fn, exc",
[
(
[at.vector()],
[rng.uniform(size=100).astype(config.floatX)],
lambda x: at.gammaln(x),
None,
),
(
[at.vector()],
[rng.standard_normal(100).astype(config.floatX)],
lambda x: at.sigmoid(x),
None,
),
(
[at.vector()],
[rng.standard_normal(100).astype(config.floatX)],
lambda x: at.log1mexp(x),
None,
),
(
[at.vector()],
[rng.standard_normal(100).astype(config.floatX)],
lambda x: at.erf(x),
None,
),
(
[at.vector()],
[rng.standard_normal(100).astype(config.floatX)],
lambda x: at.erfc(x),
None,
),
(
[at.vector() for i in range(4)],
[rng.standard_normal(100).astype(config.floatX) for i in range(4)],
lambda x, y, x1, y1: (x + y) * (x1 + y1) * y,
None,
),
(
[at.matrix(), at.scalar()],
[rng.normal(size=(2, 2)).astype(config.floatX), 0.0],
lambda a, b: at.switch(a, b, a),
None,
),
(
[at.scalar(), at.scalar()],
[
np.array(1.0, dtype=config.floatX),
np.array(1.0, dtype=config.floatX),
],
lambda x, y: ati.add_inplace(deep_copy_op(x), deep_copy_op(y)),
None,
),
(
[at.vector(), at.vector()],
[
rng.standard_normal(100).astype(config.floatX),
rng.standard_normal(100).astype(config.floatX),
],
lambda x, y: ati.add_inplace(deep_copy_op(x), deep_copy_op(y)),
None,
),
(
[at.vector(), at.vector()],
[
rng.standard_normal(100).astype(config.floatX),
rng.standard_normal(100).astype(config.floatX),
],
lambda x, y: my_multi_out(x, y),
NotImplementedError,
),
],
)
def test_Elemwise(inputs, input_vals, output_fn, exc):
outputs = output_fn(*inputs)
out_fg = FunctionGraph(
outputs=[outputs] if not isinstance(outputs, list) else outputs
)
cm = contextlib.suppress() if exc is None else pytest.raises(exc)
with cm:
compare_numba_and_py(out_fg, input_vals)
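# For reference, a minimal sketch of the kind of graph this test exercises,
# compiled straight through the Numba backend (assuming the "NUMBA" mode name
# that Aesara registers; illustrative only, not part of the test suite):
#
#     import aesara
#     x = at.vector("x")
#     fn = aesara.function([x], at.sigmoid(x), mode="NUMBA")
#     fn(np.zeros(3, dtype=config.floatX))  # elemwise sigmoid via Numba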
@pytest.mark.parametrize(
"v, new_order",
[
# `{'drop': [], 'shuffle': [], 'augment': [0, 1]}`
(
set_test_value(
at.lscalar(name="a"),
np.array(1, dtype=np.int64),
),
("x", "x"),
),
# I.e. `a_at.T`
# `{'drop': [], 'shuffle': [1, 0], 'augment': []}`
(
set_test_value(
at.matrix("a"), np.array([[1.0, 2.0], [3.0, 4.0]], dtype=config.floatX)
),
(1, 0),
),
# `{'drop': [], 'shuffle': [0, 1], 'augment': [2]}`
(
set_test_value(
at.matrix("a"), np.array([[1.0, 2.0], [3.0, 4.0]], dtype=config.floatX)
),
(1, 0, "x"),
),
# `{'drop': [1], 'shuffle': [2, 0], 'augment': [0, 2, 4]}`
(
set_test_value(
at.tensor(config.floatX, [False, True, False], name="a"),
np.array([[[1.0, 2.0]], [[3.0, 4.0]]], dtype=config.floatX),
),
("x", 2, "x", 0, "x"),
),
# I.e. `a_at.dimshuffle((0,))`
# `{'drop': [1], 'shuffle': [0], 'augment': []}`
(
set_test_value(
at.tensor(config.floatX, [False, True], name="a"),
np.array([[1.0], [2.0], [3.0], [4.0]], dtype=config.floatX),
),
(0,),
),
(
set_test_value(
at.tensor(config.floatX, [False, True], name="a"),
np.array([[1.0], [2.0], [3.0], [4.0]], dtype=config.floatX),
),
(0,),
),
(
set_test_value(
at.tensor(config.floatX, [True, True, True], name="a"),
np.array([[[1.0]]], dtype=config.floatX),
),
(),
),
],
)
def test_Dimshuffle(v, new_order):
g = at_elemwise.DimShuffle(v.broadcastable, new_order)(v)
g_fg = FunctionGraph(outputs=[g])
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
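# For intuition, each `new_order` above maps onto a plain NumPy transform;
# a rough sketch for a matrix `a` (not part of the test suite):
#
#     a = np.array([[1.0, 2.0], [3.0, 4.0]])
#     a.transpose(1, 0)             # new_order == (1, 0), i.e. `a_at.T`
#     a.transpose(1, 0)[..., None]  # new_order == (1, 0, "x"): transpose, then
#                                   # augment with a broadcastable trailing axis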
@pytest.mark.parametrize(
"careduce_fn, axis, v",
[
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: All(axis)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Any(axis)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Mean(axis)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Mean(axis)(x),
0,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
0,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
(0, 1),
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
(1, 0),
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
None,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Sum(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
1,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Prod(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: ProdWithoutZeros(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
0,
set_test_value(at.vector(), np.arange(3, dtype=config.floatX)),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Prod(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
0,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Prod(
axis=axis, dtype=dtype, acc_dtype=acc_dtype
)(x),
1,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Max(axis)(x),
None,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Max(axis)(x),
None,
set_test_value(
at.lmatrix(), np.arange(3 * 2, dtype=np.int64).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Min(axis)(x),
None,
set_test_value(
at.matrix(), np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
),
),
(
lambda x, axis=None, dtype=None, acc_dtype=None: Min(axis)(x),
None,
set_test_value(
at.lmatrix(), np.arange(3 * 2, dtype=np.int64).reshape((3, 2))
),
),
],
)
def test_CAReduce(careduce_fn, axis, v):
g = careduce_fn(v, axis=axis)
g_fg = FunctionGraph(outputs=[g])
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
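# Each `CAReduce` case above has a direct NumPy counterpart, which is what
# `compare_numba_and_py` compares against; roughly (illustrative only):
#
#     x = np.arange(3 * 2, dtype=config.floatX).reshape((3, 2))
#     x.sum(axis=0)       # Sum(axis=0)
#     x.prod(axis=1)      # Prod(axis=1)
#     x.max()             # Max(axis=None)
#     v = np.arange(3, dtype=config.floatX)
#     v[v != 0].prod()    # ProdWithoutZeros(axis=0) on a vector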
def test_scalar_Elemwise_Clip():
a = at.scalar("a")
b = at.scalar("b")
z = at.switch(1, a, b)
c = at.clip(z, 1, 3)
c_fg = FunctionGraph(outputs=[c])
compare_numba_and_py(c_fg, [1, 1])
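# Note: `at.switch(1, a, b)` always selects `a`, so the graph above reduces to
# `clip(a, 1, 3)`; the interesting part is presumably that the scalar `Switch`
# and `Clip` are fused into a single scalar `Elemwise`/`Composite` that the
# Numba backend must handle.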
@pytest.mark.parametrize(
"dy, sm, axis, exc",
[
(
set_test_value(
at.matrix(), np.array([[1, 1, 1], [0, 0, 0]], dtype=config.floatX)
),
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
None,
None,
),
(
set_test_value(
at.matrix(), np.array([[1, 1, 1], [0, 0, 0]], dtype=config.floatX)
),
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
0,
None,
),
(
set_test_value(
at.matrix(), np.array([[1, 1, 1], [0, 0, 0]], dtype=config.floatX)
),
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
1,
None,
),
],
)
def test_SoftmaxGrad(dy, sm, axis, exc):
g = nnetb.SoftmaxGrad(axis=axis)(dy, sm)
g_fg = FunctionGraph(outputs=[g])
cm = contextlib.suppress() if exc is None else pytest.warns(exc)
with cm:
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
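# For reference, the standard softmax gradient these cases check against
# (a sketch of the usual formulation, not necessarily Aesara's exact code):
#
#     def softmax_grad(dy, sm, axis=None):
#         return sm * (dy - np.sum(dy * sm, axis=axis, keepdims=True))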
@pytest.mark.parametrize(
"x, axis, exc",
[
(
set_test_value(at.vector(), rng.random(size=(2,)).astype(config.floatX)),
None,
None,
),
(
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
None,
None,
),
(
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
0,
None,
),
],
)
def test_Softmax(x, axis, exc):
g = nnetb.Softmax(axis=axis)(x)
g_fg = FunctionGraph(outputs=[g])
cm = contextlib.suppress() if exc is None else pytest.warns(exc)
with cm:
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
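# The reference computation here is the usual shift-by-max softmax
# (a sketch, not necessarily Aesara's exact implementation):
#
#     def softmax(x, axis=None):
#         e = np.exp(x - np.max(x, axis=axis, keepdims=True))
#         return e / e.sum(axis=axis, keepdims=True)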
@pytest.mark.parametrize(
"x, axis, exc",
[
(
set_test_value(at.vector(), rng.random(size=(2,)).astype(config.floatX)),
None,
None,
),
(
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
0,
None,
),
(
set_test_value(at.matrix(), rng.random(size=(2, 3)).astype(config.floatX)),
1,
None,
),
],
)
def test_LogSoftmax(x, axis, exc):
g = nnetb.LogSoftmax(axis=axis)(x)
g_fg = FunctionGraph(outputs=[g])
cm = contextlib.suppress() if exc is None else pytest.warns(exc)
with cm:
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
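# Likewise, the numerically stable log-softmax (sketch only):
#
#     def log_softmax(x, axis=None):
#         shifted = x - np.max(x, axis=axis, keepdims=True)
#         return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))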
@pytest.mark.parametrize(
"x, axes, exc",
[
(
set_test_value(at.dscalar(), np.array(0.0, dtype="float64")),
[],
None,
),
(
set_test_value(at.dvector(), rng.random(size=(3,)).astype("float64")),
[0],
None,
),
(
set_test_value(at.dmatrix(), rng.random(size=(3, 2)).astype("float64")),
[0],
None,
),
(
set_test_value(at.dmatrix(), rng.random(size=(3, 2)).astype("float64")),
[0, 1],
None,
),
],
)
def test_MaxAndArgmax(x, axes, exc):
g = aem.MaxAndArgmax(axes)(x)
if isinstance(g, list):
g_fg = FunctionGraph(outputs=g)
else:
g_fg = FunctionGraph(outputs=[g])
cm = contextlib.suppress() if exc is None else pytest.warns(exc)
with cm:
compare_numba_and_py(
g_fg,
[
i.tag.test_value
for i in g_fg.inputs
if not isinstance(i, (SharedVariable, Constant))
],
)
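# `MaxAndArgmax` produces both outputs from a single Op; the NumPy reference
# is roughly (illustrative only):
#
#     x = rng.random(size=(3, 2))
#     np.max(x, axis=0), np.argmax(x, axis=0)   # axes == [0]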