提交 39c11472 authored 作者: kc611's avatar kc611 提交者: Brandon T. Willard

Refactor remaining tests to use NumPy Generator

上级 1ff4b9d3
...@@ -64,7 +64,7 @@ class TestPfunc: ...@@ -64,7 +64,7 @@ class TestPfunc:
def test_shared(self): def test_shared(self):
# CHECK: two functions (f1 and f2) can share w # CHECK: two functions (f1 and f2) can share w
w = shared(np.random.rand(2, 2), "w") w = shared(np.random.random((2, 2)), "w")
wval = w.get_value(borrow=False) wval = w.get_value(borrow=False)
x = dmatrix() x = dmatrix()
...@@ -72,7 +72,7 @@ class TestPfunc: ...@@ -72,7 +72,7 @@ class TestPfunc:
out2 = w * x out2 = w * x
f1 = pfunc([x], [out1]) f1 = pfunc([x], [out1])
f2 = pfunc([x], [out2]) f2 = pfunc([x], [out2])
xval = np.random.rand(2, 2) xval = np.random.random((2, 2))
assert np.all(f1(xval) == xval + wval) assert np.all(f1(xval) == xval + wval)
assert np.all(f2(xval) == xval * wval) assert np.all(f2(xval) == xval * wval)
...@@ -89,7 +89,7 @@ class TestPfunc: ...@@ -89,7 +89,7 @@ class TestPfunc:
def test_no_shared_as_input(self): def test_no_shared_as_input(self):
# Test that shared variables cannot be used as function inputs. # Test that shared variables cannot be used as function inputs.
w_init = np.random.rand(2, 2) w_init = np.random.random((2, 2))
w = shared(w_init.copy(), "w") w = shared(w_init.copy(), "w")
with pytest.raises( with pytest.raises(
TypeError, match=r"^Cannot use a shared variable \(w\) as explicit input" TypeError, match=r"^Cannot use a shared variable \(w\) as explicit input"
...@@ -100,8 +100,8 @@ class TestPfunc: ...@@ -100,8 +100,8 @@ class TestPfunc:
# Ensure it is possible to (implicitly) use a shared variable in a # Ensure it is possible to (implicitly) use a shared variable in a
# function, as a 'state' that can be updated at will. # function, as a 'state' that can be updated at will.
rng = np.random.RandomState(1827) rng = np.random.default_rng(1827)
w_init = rng.rand(5) w_init = rng.random((5))
w = shared(w_init.copy(), "w") w = shared(w_init.copy(), "w")
reg = aet_sum(w * w) reg = aet_sum(w * w)
f = pfunc([], reg) f = pfunc([], reg)
...@@ -127,8 +127,8 @@ class TestPfunc: ...@@ -127,8 +127,8 @@ class TestPfunc:
out = a + b out = a + b
f = pfunc([In(a, strict=False)], [out]) f = pfunc([In(a, strict=False)], [out])
# works, rand generates float64 by default # works, random( generates float64 by default
f(np.random.rand(8)) f(np.random.random((8)))
# works, casting is allowed # works, casting is allowed
f(np.array([1, 2, 3, 4], dtype="int32")) f(np.array([1, 2, 3, 4], dtype="int32"))
...@@ -145,14 +145,14 @@ class TestPfunc: ...@@ -145,14 +145,14 @@ class TestPfunc:
# using mutable=True will let fip change the value in aval # using mutable=True will let fip change the value in aval
fip = pfunc([In(a, mutable=True)], [a_out], mode="FAST_RUN") fip = pfunc([In(a, mutable=True)], [a_out], mode="FAST_RUN")
aval = np.random.rand(10) aval = np.random.random((10))
aval2 = aval.copy() aval2 = aval.copy()
assert np.all(fip(aval) == (aval2 * 2)) assert np.all(fip(aval) == (aval2 * 2))
assert not np.all(aval == aval2) assert not np.all(aval == aval2)
# using mutable=False should leave the input untouched # using mutable=False should leave the input untouched
f = pfunc([In(a, mutable=False)], [a_out], mode="FAST_RUN") f = pfunc([In(a, mutable=False)], [a_out], mode="FAST_RUN")
aval = np.random.rand(10) aval = np.random.random((10))
aval2 = aval.copy() aval2 = aval.copy()
assert np.all(f(aval) == (aval2 * 2)) assert np.all(f(aval) == (aval2 * 2))
assert np.all(aval == aval2) assert np.all(aval == aval2)
...@@ -375,7 +375,7 @@ class TestPfunc: ...@@ -375,7 +375,7 @@ class TestPfunc:
def test_update_err_broadcast(self): def test_update_err_broadcast(self):
# Test that broadcastable dimensions raise error # Test that broadcastable dimensions raise error
data = np.random.rand(10, 10).astype("float32") data = np.random.random((10, 10)).astype("float32")
output_var = shared(name="output", value=data) output_var = shared(name="output", value=data)
# the update_var has type matrix, and the update expression # the update_var has type matrix, and the update expression
......
...@@ -736,7 +736,7 @@ class VecAsRowAndCol(Op): ...@@ -736,7 +736,7 @@ class VecAsRowAndCol(Op):
class TestPreallocatedOutput: class TestPreallocatedOutput:
def setup_method(self): def setup_method(self):
self.rng = np.random.RandomState(seed=utt.fetch_seed()) self.rng = np.random.default_rng(seed=utt.fetch_seed())
def test_f_contiguous(self): def test_f_contiguous(self):
a = fmatrix("a") a = fmatrix("a")
...@@ -745,8 +745,8 @@ class TestPreallocatedOutput: ...@@ -745,8 +745,8 @@ class TestPreallocatedOutput:
# In this test, we do not want z to be an output of the graph. # In this test, we do not want z to be an output of the graph.
out = dot(z, np.eye(7)) out = dot(z, np.eye(7))
a_val = self.rng.randn(7, 7).astype("float32") a_val = self.rng.standard_normal((7, 7)).astype("float32")
b_val = self.rng.randn(7, 7).astype("float32") b_val = self.rng.standard_normal((7, 7)).astype("float32")
# Should work # Should work
mode = DebugMode(check_preallocated_output=["c_contiguous"]) mode = DebugMode(check_preallocated_output=["c_contiguous"])
...@@ -776,8 +776,8 @@ class TestPreallocatedOutput: ...@@ -776,8 +776,8 @@ class TestPreallocatedOutput:
b = fmatrix("b") b = fmatrix("b")
out = BrokenCImplementationAdd()(a, b) out = BrokenCImplementationAdd()(a, b)
a_val = self.rng.randn(7, 7).astype("float32") a_val = self.rng.standard_normal((7, 7)).astype("float32")
b_val = self.rng.randn(7, 7).astype("float32") b_val = self.rng.standard_normal((7, 7)).astype("float32")
# Should work # Should work
mode = DebugMode(check_preallocated_output=["c_contiguous"]) mode = DebugMode(check_preallocated_output=["c_contiguous"])
...@@ -805,5 +805,5 @@ class TestPreallocatedOutput: ...@@ -805,5 +805,5 @@ class TestPreallocatedOutput:
c, r = VecAsRowAndCol()(v) c, r = VecAsRowAndCol()(v)
f = function([v], [c, r]) f = function([v], [c, r])
v_val = self.rng.randn(5).astype("float32") v_val = self.rng.standard_normal((5)).astype("float32")
f(v_val) f(v_val)
...@@ -54,8 +54,8 @@ class NNet: ...@@ -54,8 +54,8 @@ class NNet:
def test_nnet(): def test_nnet():
rng = np.random.RandomState(1827) rng = np.random.default_rng(279)
data = rng.rand(10, 4) data = rng.random((10, 4))
nnet = NNet(n_input=3, n_hidden=10) nnet = NNet(n_input=3, n_hidden=10)
for epoch in range(3): for epoch in range(3):
mean_cost = 0 mean_cost = 0
...@@ -66,7 +66,8 @@ def test_nnet(): ...@@ -66,7 +66,8 @@ def test_nnet():
mean_cost += cost mean_cost += cost
mean_cost /= float(len(data)) mean_cost /= float(len(data))
# print 'Mean cost at epoch %s: %s' % (epoch, mean_cost) # print 'Mean cost at epoch %s: %s' % (epoch, mean_cost)
assert abs(mean_cost - 0.20588975452) < 1e-6 # Seed based test
assert abs(mean_cost - 0.2301901) < 1e-6
# Just call functions to make sure they do not crash. # Just call functions to make sure they do not crash.
nnet.compute_output(input) nnet.compute_output(input)
nnet.output_from_hidden(np.ones(10)) nnet.output_from_hidden(np.ones(10))
...@@ -11,7 +11,7 @@ class Mlp: ...@@ -11,7 +11,7 @@ class Mlp:
if rng is None: if rng is None:
rng = 0 rng = 0
if isinstance(rng, int): if isinstance(rng, int):
rng = np.random.RandomState(rng) rng = np.random.default_rng(rng)
self.rng = rng self.rng = rng
self.nfeatures = nfeatures self.nfeatures = nfeatures
self.noutputs = noutputs self.noutputs = noutputs
......
...@@ -19,7 +19,7 @@ if not pydot_imported: ...@@ -19,7 +19,7 @@ if not pydot_imported:
class TestD3Viz: class TestD3Viz:
def setup_method(self): def setup_method(self):
self.rng = np.random.RandomState(0) self.rng = np.random.default_rng(0)
self.data_dir = pt.join("data", "test_d3viz") self.data_dir = pt.join("data", "test_d3viz")
def check(self, f, reference=None, verbose=False): def check(self, f, reference=None, verbose=False):
......
...@@ -13,7 +13,7 @@ from tests.d3viz import models ...@@ -13,7 +13,7 @@ from tests.d3viz import models
class TestPyDotFormatter: class TestPyDotFormatter:
def setup_method(self): def setup_method(self):
self.rng = np.random.RandomState(0) self.rng = np.random.default_rng(0)
def node_counts(self, graph): def node_counts(self, graph):
node_types = [node.get_attributes()["node_type"] for node in graph.get_nodes()] node_types = [node.get_attributes()["node_type"] for node in graph.get_nodes()]
......
...@@ -218,6 +218,8 @@ def test_jax_compile_ops(): ...@@ -218,6 +218,8 @@ def test_jax_compile_ops():
def test_jax_basic(): def test_jax_basic():
rng = np.random.default_rng(28494)
x = matrix("x") x = matrix("x")
y = matrix("y") y = matrix("y")
b = vector("b") b = vector("b")
...@@ -259,7 +261,11 @@ def test_jax_basic(): ...@@ -259,7 +261,11 @@ def test_jax_basic():
out_fg = FunctionGraph([x], [out]) out_fg = FunctionGraph([x], [out])
compare_jax_and_py( compare_jax_and_py(
out_fg, out_fg,
[(np.eye(10) + np.random.randn(10, 10) * 0.01).astype(config.floatX)], [
(np.eye(10) + rng.standard_normal(size=(10, 10)) * 0.01).astype(
config.floatX
)
],
) )
# not sure why this isn't working yet with lower=False # not sure why this isn't working yet with lower=False
...@@ -267,7 +273,11 @@ def test_jax_basic(): ...@@ -267,7 +273,11 @@ def test_jax_basic():
out_fg = FunctionGraph([x], [out]) out_fg = FunctionGraph([x], [out])
compare_jax_and_py( compare_jax_and_py(
out_fg, out_fg,
[(np.eye(10) + np.random.randn(10, 10) * 0.01).astype(config.floatX)], [
(np.eye(10) + rng.standard_normal(size=(10, 10)) * 0.01).astype(
config.floatX
)
],
) )
out = aet_slinalg.solve(x, b) out = aet_slinalg.solve(x, b)
...@@ -294,7 +304,11 @@ def test_jax_basic(): ...@@ -294,7 +304,11 @@ def test_jax_basic():
out_fg = FunctionGraph([x], [out]) out_fg = FunctionGraph([x], [out])
compare_jax_and_py( compare_jax_and_py(
out_fg, out_fg,
[(np.eye(10) + np.random.randn(10, 10) * 0.01).astype(config.floatX)], [
(np.eye(10) + rng.standard_normal(size=(10, 10)) * 0.01).astype(
config.floatX
)
],
) )
...@@ -405,9 +419,9 @@ def test_jax_eye(): ...@@ -405,9 +419,9 @@ def test_jax_eye():
def test_jax_basic_multiout(): def test_jax_basic_multiout():
rng = np.random.default_rng(213234)
np.random.seed(213234) M = rng.normal(size=(3, 3))
M = np.random.normal(size=(3, 3))
X = M.dot(M.T) X = M.dot(M.T)
x = matrix("x") x = matrix("x")
...@@ -638,7 +652,9 @@ def test_jax_Subtensors_omni(): ...@@ -638,7 +652,9 @@ def test_jax_Subtensors_omni():
reason="Omnistaging cannot be disabled", reason="Omnistaging cannot be disabled",
) )
def test_jax_IncSubtensor(): def test_jax_IncSubtensor():
x_np = np.random.uniform(-1, 1, size=(3, 4, 5)).astype(config.floatX) rng = np.random.default_rng(213234)
x_np = rng.uniform(-1, 1, size=(3, 4, 5)).astype(config.floatX)
x_aet = aet.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(config.floatX) x_aet = aet.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(config.floatX)
# "Set" basic indices # "Set" basic indices
...@@ -661,7 +677,7 @@ def test_jax_IncSubtensor(): ...@@ -661,7 +677,7 @@ def test_jax_IncSubtensor():
# "Set" advanced indices # "Set" advanced indices
st_aet = aet.as_tensor_variable( st_aet = aet.as_tensor_variable(
np.random.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX) rng.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX)
) )
out_aet = aet_subtensor.set_subtensor(x_aet[np.r_[0, 2]], st_aet) out_aet = aet_subtensor.set_subtensor(x_aet[np.r_[0, 2]], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor1) assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor1)
...@@ -707,7 +723,7 @@ def test_jax_IncSubtensor(): ...@@ -707,7 +723,7 @@ def test_jax_IncSubtensor():
# "Increment" advanced indices # "Increment" advanced indices
st_aet = aet.as_tensor_variable( st_aet = aet.as_tensor_variable(
np.random.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX) rng.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX)
) )
out_aet = aet_subtensor.inc_subtensor(x_aet[np.r_[0, 2]], st_aet) out_aet = aet_subtensor.inc_subtensor(x_aet[np.r_[0, 2]], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor1) assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor1)
...@@ -1202,6 +1218,7 @@ def test_random_unimplemented(): ...@@ -1202,6 +1218,7 @@ def test_random_unimplemented():
compare_jax_and_py(fgraph, []) compare_jax_and_py(fgraph, [])
@pytest.mark.xfail(reason="Generators not yet supported in JAX")
def test_RandomStream(): def test_RandomStream():
srng = RandomStream(seed=123) srng = RandomStream(seed=123)
out = srng.normal() - srng.normal() out = srng.normal() - srng.normal()
...@@ -1211,3 +1228,11 @@ def test_RandomStream(): ...@@ -1211,3 +1228,11 @@ def test_RandomStream():
jax_res_2 = fn() jax_res_2 = fn()
assert np.array_equal(jax_res_1, jax_res_2) assert np.array_equal(jax_res_1, jax_res_2)
@pytest.mark.xfail(reason="Generators not yet supported in JAX")
def test_random_generators():
rng = shared(np.random.default_rng(123))
out = normal(rng=rng)
fgraph = FunctionGraph([out.owner.inputs[0]], [out], clone=False)
compare_jax_and_py(fgraph, [])
...@@ -88,7 +88,7 @@ opts = OptimizationQuery(include=[None], exclude=["cxx_only", "BlasOpt"]) ...@@ -88,7 +88,7 @@ opts = OptimizationQuery(include=[None], exclude=["cxx_only", "BlasOpt"])
numba_mode = Mode(NumbaLinker(), opts) numba_mode = Mode(NumbaLinker(), opts)
py_mode = Mode("py", opts) py_mode = Mode("py", opts)
rng = np.random.RandomState(42849) rng = np.random.default_rng(42849)
def set_test_value(x, v): def set_test_value(x, v):
...@@ -291,13 +291,13 @@ def test_create_numba_signature(v, expected, force_scalar): ...@@ -291,13 +291,13 @@ def test_create_numba_signature(v, expected, force_scalar):
[ [
( (
[aet.vector()], [aet.vector()],
[rng.randn(100).astype(config.floatX)], [rng.standard_normal(100).astype(config.floatX)],
lambda x: aet.sigmoid(x), lambda x: aet.sigmoid(x),
None, None,
), ),
( (
[aet.vector() for i in range(4)], [aet.vector() for i in range(4)],
[rng.randn(100).astype(config.floatX) for i in range(4)], [rng.standard_normal(100).astype(config.floatX) for i in range(4)],
lambda x, y, x1, y1: (x + y) * (x1 + y1) * y, lambda x, y, x1, y1: (x + y) * (x1 + y1) * y,
None, None,
), ),
...@@ -311,8 +311,8 @@ def test_create_numba_signature(v, expected, force_scalar): ...@@ -311,8 +311,8 @@ def test_create_numba_signature(v, expected, force_scalar):
( (
[aet.vector(), aet.vector()], [aet.vector(), aet.vector()],
[ [
rng.randn(100).astype(config.floatX), rng.standard_normal(100).astype(config.floatX),
rng.randn(100).astype(config.floatX), rng.standard_normal(100).astype(config.floatX),
], ],
lambda x, y: ati.add_inplace(x, y), lambda x, y: ati.add_inplace(x, y),
None, None,
...@@ -320,8 +320,8 @@ def test_create_numba_signature(v, expected, force_scalar): ...@@ -320,8 +320,8 @@ def test_create_numba_signature(v, expected, force_scalar):
( (
[aet.vector(), aet.vector()], [aet.vector(), aet.vector()],
[ [
rng.randn(100).astype(config.floatX), rng.standard_normal(100).astype(config.floatX),
rng.randn(100).astype(config.floatX), rng.standard_normal(100).astype(config.floatX),
], ],
lambda x, y: my_multi_out(x, y), lambda x, y: my_multi_out(x, y),
NotImplementedError, NotImplementedError,
...@@ -1954,7 +1954,9 @@ def test_MaxAndArgmax(x, axes, exc): ...@@ -1954,7 +1954,9 @@ def test_MaxAndArgmax(x, axes, exc):
( (
set_test_value( set_test_value(
aet.lmatrix(), aet.lmatrix(),
(lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")), (lambda x: x.T.dot(x))(
rng.integers(1, 10, size=(3, 3)).astype("int64")
),
), ),
True, True,
None, None,
...@@ -2004,7 +2006,9 @@ def test_Cholesky(x, lower, exc): ...@@ -2004,7 +2006,9 @@ def test_Cholesky(x, lower, exc):
( (
set_test_value( set_test_value(
aet.lmatrix(), aet.lmatrix(),
(lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")), (lambda x: x.T.dot(x))(
rng.integers(1, 10, size=(3, 3)).astype("int64")
),
), ),
set_test_value(aet.dvector(), rng.random(size=(3,)).astype("float64")), set_test_value(aet.dvector(), rng.random(size=(3,)).astype("float64")),
"general", "general",
...@@ -2120,7 +2124,9 @@ y = np.array( ...@@ -2120,7 +2124,9 @@ y = np.array(
( (
set_test_value( set_test_value(
aet.lmatrix(), aet.lmatrix(),
(lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")), (lambda x: x.T.dot(x))(
rng.integers(1, 10, size=(3, 3)).astype("int64")
),
), ),
None, None,
), ),
...@@ -2160,7 +2166,9 @@ def test_Eig(x, exc): ...@@ -2160,7 +2166,9 @@ def test_Eig(x, exc):
( (
set_test_value( set_test_value(
aet.lmatrix(), aet.lmatrix(),
(lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")), (lambda x: x.T.dot(x))(
rng.integers(1, 10, size=(3, 3)).astype("int64")
),
), ),
"U", "U",
UserWarning, UserWarning,
...@@ -2200,7 +2208,9 @@ def test_Eigh(x, uplo, exc): ...@@ -2200,7 +2208,9 @@ def test_Eigh(x, uplo, exc):
( (
set_test_value( set_test_value(
aet.lmatrix(), aet.lmatrix(),
(lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")), (lambda x: x.T.dot(x))(
rng.integers(1, 10, size=(3, 3)).astype("int64")
),
), ),
None, None,
), ),
...@@ -2244,7 +2254,9 @@ def test_MatrixInverse(x, exc): ...@@ -2244,7 +2254,9 @@ def test_MatrixInverse(x, exc):
( (
set_test_value( set_test_value(
aet.lmatrix(), aet.lmatrix(),
(lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")), (lambda x: x.T.dot(x))(
rng.integers(1, 10, size=(3, 3)).astype("int64")
),
), ),
"reduced", "reduced",
None, None,
...@@ -2252,7 +2264,9 @@ def test_MatrixInverse(x, exc): ...@@ -2252,7 +2264,9 @@ def test_MatrixInverse(x, exc):
( (
set_test_value( set_test_value(
aet.lmatrix(), aet.lmatrix(),
(lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")), (lambda x: x.T.dot(x))(
rng.integers(1, 10, size=(3, 3)).astype("int64")
),
), ),
"complete", "complete",
UserWarning, UserWarning,
...@@ -2303,7 +2317,9 @@ def test_QRFull(x, mode, exc): ...@@ -2303,7 +2317,9 @@ def test_QRFull(x, mode, exc):
( (
set_test_value( set_test_value(
aet.lmatrix(), aet.lmatrix(),
(lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")), (lambda x: x.T.dot(x))(
rng.integers(1, 10, size=(3, 3)).astype("int64")
),
), ),
True, True,
True, True,
...@@ -2312,7 +2328,9 @@ def test_QRFull(x, mode, exc): ...@@ -2312,7 +2328,9 @@ def test_QRFull(x, mode, exc):
( (
set_test_value( set_test_value(
aet.lmatrix(), aet.lmatrix(),
(lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")), (lambda x: x.T.dot(x))(
rng.integers(1, 10, size=(3, 3)).astype("int64")
),
), ),
True, True,
False, False,
......
...@@ -41,9 +41,9 @@ def test_rop_lop(): ...@@ -41,9 +41,9 @@ def test_rop_lop():
) )
scan_f = function([mx, mv], sy) scan_f = function([mx, mv], sy)
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
vx = np.asarray(rng.randn(4, 4), aesara.config.floatX) vx = np.asarray(rng.standard_normal((4, 4)), aesara.config.floatX)
vv = np.asarray(rng.randn(4, 4), aesara.config.floatX) vv = np.asarray(rng.standard_normal((4, 4)), aesara.config.floatX)
v1 = rop_f(vx, vv) v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv) v2 = scan_f(vx, vv)
...@@ -75,13 +75,13 @@ def test_rop_lop(): ...@@ -75,13 +75,13 @@ def test_rop_lop():
def test_spectral_radius_bound(): def test_spectral_radius_bound():
tol = 10 ** (-6) tol = 10 ** (-6)
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
x = matrix() x = matrix()
radius_bound = spectral_radius_bound(x, 5) radius_bound = spectral_radius_bound(x, 5)
f = aesara.function([x], radius_bound) f = aesara.function([x], radius_bound)
shp = (3, 4) shp = (3, 4)
m = rng.rand(*shp) m = rng.random(shp)
m = np.cov(m).astype(config.floatX) m = np.cov(m).astype(config.floatX)
radius_bound_aesara = f(m) radius_bound_aesara = f(m)
......
...@@ -16,7 +16,7 @@ class TestMinimal: ...@@ -16,7 +16,7 @@ class TestMinimal:
""" """
def setup_method(self): def setup_method(self):
self.rng = np.random.RandomState(utt.fetch_seed(666)) self.rng = np.random.default_rng(utt.fetch_seed(666))
def test_minimal(self): def test_minimal(self):
A = matrix() A = matrix()
...@@ -26,7 +26,7 @@ class TestMinimal: ...@@ -26,7 +26,7 @@ class TestMinimal:
f = function([A, b], minimal(A, A, b, b, A)) f = function([A, b], minimal(A, A, b, b, A))
print("built") print("built")
Aval = self.rng.randn(5, 5) Aval = self.rng.standard_normal((5, 5))
bval = np.arange(5, dtype=float) bval = np.arange(5, dtype=float)
f(Aval, bval) f(Aval, bval)
print("done") print("done")
...@@ -25,7 +25,6 @@ from tests import unittest_tools as utt ...@@ -25,7 +25,6 @@ from tests import unittest_tools as utt
# TODO: test optimizer mrg_random_make_inplace # TODO: test optimizer mrg_random_make_inplace
utt.seed_rng()
# Results generated by Java code using L'Ecuyer et al.'s code, with: # Results generated by Java code using L'Ecuyer et al.'s code, with:
# main seed: [12345]*6 (default) # main seed: [12345]*6 (default)
...@@ -90,7 +89,9 @@ def test_get_substream_rstates(): ...@@ -90,7 +89,9 @@ def test_get_substream_rstates():
n_streams = 100 n_streams = 100
dtype = "float32" dtype = "float32"
rng = MRG_RandomStream(np.random.randint(2147462579)) rng = MRG_RandomStream(
np.random.default_rng(utt.fetch_seed()).integers(2147462579)
)
rng.get_substream_rstates(n_streams, dtype) rng.get_substream_rstates(n_streams, dtype)
...@@ -889,13 +890,13 @@ def test_multMatVect(): ...@@ -889,13 +890,13 @@ def test_multMatVect():
f0 = function([A1, s1, m1, A2, s2, m2], g0) f0 = function([A1, s1, m1, A2, s2, m2], g0)
i32max = np.iinfo(np.int32).max i32max = np.iinfo(np.int32).max
rng = np.random.default_rng(utt.fetch_seed())
A1 = np.random.randint(0, i32max, (3, 3)).astype("int64") A1 = rng.integers(0, i32max, (3, 3)).astype("int64")
s1 = np.random.randint(0, i32max, 3).astype("int32") s1 = rng.integers(0, i32max, 3).astype("int32")
m1 = np.asarray(np.random.randint(i32max), dtype="int32") m1 = np.asarray(rng.integers(i32max), dtype="int32")
A2 = np.random.randint(0, i32max, (3, 3)).astype("int64") A2 = rng.integers(0, i32max, (3, 3)).astype("int64")
s2 = np.random.randint(0, i32max, 3).astype("int32") s2 = rng.integers(0, i32max, 3).astype("int32")
m2 = np.asarray(np.random.randint(i32max), dtype="int32") m2 = np.asarray(rng.integers(i32max), dtype="int32")
f0.input_storage[0].storage[0] = A1 f0.input_storage[0].storage[0] = A1
f0.input_storage[1].storage[0] = s1 f0.input_storage[1].storage[0] = s1
...@@ -964,7 +965,7 @@ def rng_mrg_overflow(sizes, fct, mode, should_raise_error): ...@@ -964,7 +965,7 @@ def rng_mrg_overflow(sizes, fct, mode, should_raise_error):
@pytest.mark.slow @pytest.mark.slow
def test_overflow_cpu(): def test_overflow_cpu():
# run with AESARA_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32 # run with AESARA_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32
rng = MRG_RandomStream(np.random.randint(1234)) rng = MRG_RandomStream(np.random.default_rng(utt.fetch_seed()).integers(1234))
fct = rng.uniform fct = rng.uniform
with config.change_flags(compute_test_value="off"): with config.change_flags(compute_test_value="off"):
# should raise error as the size overflows # should raise error as the size overflows
...@@ -1107,8 +1108,10 @@ def test_target_parameter(): ...@@ -1107,8 +1108,10 @@ def test_target_parameter():
@config.change_flags(compute_test_value="off") @config.change_flags(compute_test_value="off")
def test_undefined_grad_opt(): def test_undefined_grad_opt():
# Make sure that undefined grad get removed in optimized graph. # Make sure that undefined grad get removed in optimized graph.
random = MRG_RandomStream(np.random.randint(1, 2147462579)) random = MRG_RandomStream(
pvals = shared(np.random.rand(10, 20).astype(config.floatX)) np.random.default_rng(utt.fetch_seed()).integers(1, 2147462579)
)
pvals = shared(np.random.random((10, 20)).astype(config.floatX))
pvals = pvals / pvals.sum(axis=1) pvals = pvals / pvals.sum(axis=1)
pvals = zero_grad(pvals) pvals = zero_grad(pvals)
samples = random.multinomial(pvals=pvals, n=1) samples = random.multinomial(pvals=pvals, n=1)
......
差异被折叠。
...@@ -24,7 +24,7 @@ class TestGaussNewton: ...@@ -24,7 +24,7 @@ class TestGaussNewton:
""" """
def setup_method(self): def setup_method(self):
self.rng = np.random.RandomState(utt.fetch_seed()) self.rng = np.random.default_rng(utt.fetch_seed())
def _run(self, num_features, num_timesteps, batch_size, mode): def _run(self, num_features, num_timesteps, batch_size, mode):
# determine shapes of inputs and targets depending on the batch size # determine shapes of inputs and targets depending on the batch size
......
...@@ -33,9 +33,9 @@ class TestSP: ...@@ -33,9 +33,9 @@ class TestSP:
bias = dvector() bias = dvector()
kerns = dmatrix() kerns = dmatrix()
input = dmatrix() input = dmatrix()
rng = np.random.RandomState(3423489) rng = np.random.default_rng(3423489)
filters = rng.randn(nkern, np.prod(kshp)) filters = rng.standard_normal((nkern, np.prod(kshp)))
biasvals = rng.randn(nkern) biasvals = rng.standard_normal((nkern))
for mode in ("FAST_COMPILE", "FAST_RUN"): for mode in ("FAST_COMPILE", "FAST_RUN"):
ttot, ntot = 0, 0 ttot, ntot = 0, 0
...@@ -133,7 +133,7 @@ class TestSP: ...@@ -133,7 +133,7 @@ class TestSP:
# symbolic stuff # symbolic stuff
kerns = [dmatrix(), dmatrix()] kerns = [dmatrix(), dmatrix()]
input = dmatrix() input = dmatrix()
# rng = np.random.RandomState(3423489) # rng = np.random.default_rng(3423489)
# build actual input images # build actual input images
img2d = np.arange(bsize * np.prod(imshp)).reshape((bsize,) + imshp) img2d = np.arange(bsize * np.prod(imshp)).reshape((bsize,) + imshp)
...@@ -184,7 +184,7 @@ class TestSP: ...@@ -184,7 +184,7 @@ class TestSP:
def test_maxpool(self): def test_maxpool(self):
# generate flatted images # generate flatted images
maxpoolshps = ((2, 2), (3, 3), (4, 4), (5, 5), (6, 6)) maxpoolshps = ((2, 2), (3, 3), (4, 4), (5, 5), (6, 6))
imval = np.random.rand(4, 5, 10, 10) imval = np.random.random((4, 5, 10, 10))
images = dmatrix() images = dmatrix()
for maxpoolshp in maxpoolshps: for maxpoolshp in maxpoolshps:
......
差异被折叠。
...@@ -34,9 +34,10 @@ _all_dtypes = integer_dtypes + float_dtypes ...@@ -34,9 +34,10 @@ _all_dtypes = integer_dtypes + float_dtypes
def gen_unique_vector(size, dtype): def gen_unique_vector(size, dtype):
rng = np.random.default_rng(utt.fetch_seed())
# generate a randomized vector with unique elements # generate a randomized vector with unique elements
retval = np.arange(size) * 3.0 + np.random.uniform(-1.0, 1.0) retval = np.arange(size) * 3.0 + rng.uniform(-1.0, 1.0)
return (retval[np.random.permutation(size)] - size * 1.5).astype(dtype) return (retval[rng.permutation(size)] - size * 1.5).astype(dtype)
class TestSort: class TestSort:
...@@ -97,81 +98,85 @@ class TestSort: ...@@ -97,81 +98,85 @@ class TestSort:
utt.assert_allclose(gv, gt) utt.assert_allclose(gv, gt)
def test_grad_vector(self): def test_grad_vector(self):
data = np.random.random((10)).astype(aesara.config.floatX) data = self.rng.random((10)).astype(aesara.config.floatX)
utt.verify_grad(sort, [data]) utt.verify_grad(sort, [data])
def test_grad_none_axis(self): def test_grad_none_axis(self):
data = np.random.random((10)).astype(aesara.config.floatX) data = self.rng.random((10)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, None), [data]) utt.verify_grad(lambda x: sort(x, None), [data])
utt.verify_grad(lambda x: sort(x, 0), [data]) utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.random((2, 3)).astype(aesara.config.floatX) data = self.rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, None), [data]) utt.verify_grad(lambda x: sort(x, None), [data])
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, None), [data]) utt.verify_grad(lambda x: sort(x, None), [data])
def test_grad_negative_axis_2d(self): def test_grad_negative_axis_2d(self):
data = np.random.random((2, 3)).astype(aesara.config.floatX) data = self.rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -1), [data]) utt.verify_grad(lambda x: sort(x, -1), [data])
data = np.random.random((2, 3)).astype(aesara.config.floatX) data = self.rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -2), [data]) utt.verify_grad(lambda x: sort(x, -2), [data])
def test_grad_negative_axis_3d(self): def test_grad_negative_axis_3d(self):
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -1), [data]) utt.verify_grad(lambda x: sort(x, -1), [data])
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -2), [data]) utt.verify_grad(lambda x: sort(x, -2), [data])
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -3), [data]) utt.verify_grad(lambda x: sort(x, -3), [data])
def test_grad_negative_axis_4d(self): def test_grad_negative_axis_4d(self):
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -1), [data]) utt.verify_grad(lambda x: sort(x, -1), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -2), [data]) utt.verify_grad(lambda x: sort(x, -2), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -3), [data]) utt.verify_grad(lambda x: sort(x, -3), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -4), [data]) utt.verify_grad(lambda x: sort(x, -4), [data])
def test_grad_nonnegative_axis_2d(self): def test_grad_nonnegative_axis_2d(self):
data = np.random.random((2, 3)).astype(aesara.config.floatX) data = self.rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 0), [data]) utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.random((2, 3)).astype(aesara.config.floatX) data = self.rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 1), [data]) utt.verify_grad(lambda x: sort(x, 1), [data])
def test_grad_nonnegative_axis_3d(self): def test_grad_nonnegative_axis_3d(self):
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 0), [data]) utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 1), [data]) utt.verify_grad(lambda x: sort(x, 1), [data])
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 2), [data]) utt.verify_grad(lambda x: sort(x, 2), [data])
def test_grad_nonnegative_axis_4d(self): def test_grad_nonnegative_axis_4d(self):
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 0), [data]) utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 1), [data]) utt.verify_grad(lambda x: sort(x, 1), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 2), [data]) utt.verify_grad(lambda x: sort(x, 2), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX) data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 3), [data]) utt.verify_grad(lambda x: sort(x, 3), [data])
class TestSortInferShape(utt.InferShapeTester):
    """Shape-inference tests for `SortOp`."""

    def setup_method(self):
        # Seeded Generator so the random test data is reproducible.
        self.rng = np.random.default_rng(seed=utt.fetch_seed())
        super().setup_method()

    def test_sort(self):
        x = matrix()
        # Exercise shape inference for the default axis-wise sort and for
        # the flattening axis=None variant; a fresh random input is drawn
        # for each compiled graph.
        for sorted_out in (sort(x), sort(x, axis=None)):
            input_val = self.rng.standard_normal(size=(10, 40)).astype(
                aesara.config.floatX
            )
            self._compile_and_check([x], [sorted_out], [input_val], SortOp)
...@@ -238,14 +243,15 @@ def test_argsort(): ...@@ -238,14 +243,15 @@ def test_argsort():
def test_argsort_grad():
    """Check the gradient of `argsort` for several input ranks and axes."""
    rng = np.random.default_rng(seed=utt.fetch_seed())
    # (input shape, axis to argsort along) — draws happen in this order,
    # matching one `rng.random` call per case.
    cases = (
        ((2, 3), -1),
        ((2, 3, 4, 5), -3),
        ((2, 3, 3), 2),
    )
    for shape, axis in cases:
        data = rng.random(shape).astype(aesara.config.floatX)
        # Default-bound axis avoids late-binding of the loop variable.
        utt.verify_grad(lambda x, a=axis: argsort(x, axis=a), [data])
...@@ -434,10 +440,9 @@ class TestTopK: ...@@ -434,10 +440,9 @@ class TestTopK:
assert any( assert any(
[isinstance(n.op, self.op_class) for n in fn.maker.fgraph.apply_nodes] [isinstance(n.op, self.op_class) for n in fn.maker.fgraph.apply_nodes]
) )
xval = np.repeat( rng = np.random.default_rng(utt.fetch_seed())
np.random.uniform(-100.0, 100.0, size=size // 2).astype(dtype), 2 xval = np.repeat(rng.uniform(-100.0, 100.0, size=size // 2).astype(dtype), 2)
) xval = xval[rng.permutation(size)]
xval = xval[np.random.permutation(size)]
yval = fn(xval) yval = fn(xval)
idx = slice(-k, None) if k > 0 else slice(-k) idx = slice(-k, None) if k > 0 else slice(-k)
goal = np.argsort(xval)[idx].astype("int32") goal = np.argsort(xval)[idx].astype("int32")
......
...@@ -27,7 +27,6 @@ from aesara.typed_list.basic import ( ...@@ -27,7 +27,6 @@ from aesara.typed_list.basic import (
make_list, make_list,
) )
from aesara.typed_list.type import TypedListType from aesara.typed_list.type import TypedListType
from tests import unittest_tools as utt
def rand_ranged_matrix(minimum, maximum, shape): def rand_ranged_matrix(minimum, maximum, shape):
...@@ -55,9 +54,6 @@ def random_lil(shape, dtype, nnz): ...@@ -55,9 +54,6 @@ def random_lil(shape, dtype, nnz):
class TestGetItem: class TestGetItem:
def setup_method(self):
utt.seed_rng()
def test_sanity_check_slice(self): def test_sanity_check_slice(self):
mySymbolicMatricesList = TypedListType( mySymbolicMatricesList = TypedListType(
......
...@@ -7,7 +7,7 @@ from aesara.compile.io import In ...@@ -7,7 +7,7 @@ from aesara.compile.io import In
from aesara.tensor.type import TensorType, matrix, scalar from aesara.tensor.type import TensorType, matrix, scalar
from aesara.typed_list.basic import Append, Extend, Insert, Remove, Reverse from aesara.typed_list.basic import Append, Extend, Insert, Remove, Reverse
from aesara.typed_list.type import TypedListType from aesara.typed_list.type import TypedListType
from tests.tensor.utils import rand_ranged from tests.tensor.utils import random_ranged
class TestInplace: class TestInplace:
...@@ -26,9 +26,9 @@ class TestInplace: ...@@ -26,9 +26,9 @@ class TestInplace:
) )
assert f.maker.fgraph.toposort()[0].op.inplace assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged(-1000, 1000, [100, 101]) x = random_ranged(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101]) y = random_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x, y]), [y, x]) assert np.array_equal(f([x, y]), [y, x])
...@@ -50,9 +50,9 @@ class TestInplace: ...@@ -50,9 +50,9 @@ class TestInplace:
) )
assert f.maker.fgraph.toposort()[0].op.inplace assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged(-1000, 1000, [100, 101]) x = random_ranged(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101]) y = random_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], y), [x, y]) assert np.array_equal(f([x], y), [x, y])
...@@ -77,9 +77,9 @@ class TestInplace: ...@@ -77,9 +77,9 @@ class TestInplace:
) )
assert f.maker.fgraph.toposort()[0].op.inplace assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged(-1000, 1000, [100, 101]) x = random_ranged(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101]) y = random_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], [y]), [x, y]) assert np.array_equal(f([x], [y]), [x, y])
...@@ -105,9 +105,9 @@ class TestInplace: ...@@ -105,9 +105,9 @@ class TestInplace:
) )
assert f.maker.fgraph.toposort()[0].op.inplace assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged(-1000, 1000, [100, 101]) x = random_ranged(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101]) y = random_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], np.asarray(1, dtype="int64"), y), [x, y]) assert np.array_equal(f([x], np.asarray(1, dtype="int64"), y), [x, y])
...@@ -129,9 +129,9 @@ class TestInplace: ...@@ -129,9 +129,9 @@ class TestInplace:
) )
assert f.maker.fgraph.toposort()[0].op.inplace assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged(-1000, 1000, [100, 101]) x = random_ranged(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101]) y = random_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x, y], y), [x]) assert np.array_equal(f([x, y], y), [x])
......
...@@ -5,14 +5,10 @@ import aesara ...@@ -5,14 +5,10 @@ import aesara
from aesara.tensor.type import TensorType from aesara.tensor.type import TensorType
from aesara.typed_list.basic import TypedListVariable from aesara.typed_list.basic import TypedListVariable
from aesara.typed_list.type import TypedListType from aesara.typed_list.type import TypedListType
from tests import unittest_tools as utt from tests.tensor.utils import random_ranged
from tests.tensor.utils import rand_ranged
class TestTypedListType: class TestTypedListType:
def setup_method(self):
utt.seed_rng()
def test_wrong_input_on_creation(self): def test_wrong_input_on_creation(self):
# Typed list type should raises an # Typed list type should raises an
# error if the argument passed for # error if the argument passed for
...@@ -63,7 +59,7 @@ class TestTypedListType: ...@@ -63,7 +59,7 @@ class TestTypedListType:
myType = TypedListType(TensorType(aesara.config.floatX, (False, False))) myType = TypedListType(TensorType(aesara.config.floatX, (False, False)))
x = rand_ranged(-1000, 1000, [100, 100]) x = random_ranged(-1000, 1000, [100, 100])
assert np.array_equal(myType.filter([x]), [x]) assert np.array_equal(myType.filter([x]), [x])
...@@ -81,7 +77,7 @@ class TestTypedListType: ...@@ -81,7 +77,7 @@ class TestTypedListType:
def test_load_alot(self): def test_load_alot(self):
myType = TypedListType(TensorType(aesara.config.floatX, (False, False))) myType = TypedListType(TensorType(aesara.config.floatX, (False, False)))
x = rand_ranged(-1000, 1000, [10, 10]) x = random_ranged(-1000, 1000, [10, 10])
testList = [] testList = []
for i in range(10000): for i in range(10000):
testList.append(x) testList.append(x)
...@@ -95,7 +91,7 @@ class TestTypedListType: ...@@ -95,7 +91,7 @@ class TestTypedListType:
myType = TypedListType(myNestedType) myType = TypedListType(myNestedType)
x = rand_ranged(-1000, 1000, [100, 100]) x = random_ranged(-1000, 1000, [100, 100])
assert np.array_equal(myType.filter([[x]]), [[x]]) assert np.array_equal(myType.filter([[x]]), [[x]])
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论