提交 6cca25e3 authored 作者: Brandon T. Willard's avatar Brandon T. Willard 提交者: Brandon T. Willard

Update numpy.random usage in tests

- Use newer, NumPy-recommended `numpy.random` functions (e.g. replace `rand` with `random`, `randn` with `standard_normal`, etc.) - Seed some unseeded/inconsistently seeded tests - Minor `pytest` usage updates (e.g. use parameterizations, `pytest.raises`, and `pytest.warns`) - Use `np.array_equal` instead of `np.all(... == ...)`
上级 e40c1b29
......@@ -61,17 +61,18 @@ class TestFunctionIn:
out = a + b
f = function([In(a, strict=False)], out)
# works, rand generates float64 by default
f(np.random.rand(8))
assert f(np.random.random((8,)).astype(np.float64)).dtype == np.float64
# works, casting is allowed
f(np.array([1, 2, 3, 4], dtype="int32"))
assert f(np.array([1, 2, 3, 4], dtype="int32")).dtype == np.float64
f = function([In(a, strict=True)], out)
try:
with pytest.raises(TypeError):
# fails, f expects float64
f(np.array([1, 2, 3, 4], dtype="int32"))
except TypeError:
pass
def test_explicit_shared_input(self):
# This is not a test of the In class per se, but the In class relies
......@@ -94,17 +95,17 @@ class TestFunctionIn:
# using mutable=True will let f change the value in aval
f = function([In(a, mutable=True)], a_out, mode="FAST_RUN")
aval = np.random.rand(10)
aval = np.random.random((10,))
aval2 = aval.copy()
assert np.all(f(aval) == (aval2 * 2))
assert not np.all(aval == aval2)
assert np.array_equal(f(aval), (aval2 * 2))
assert not np.array_equal(aval, aval2)
# using mutable=False should leave the input untouched
f = function([In(a, mutable=False)], a_out, mode="FAST_RUN")
aval = np.random.rand(10)
aval = np.random.random((10,))
aval2 = aval.copy()
assert np.all(f(aval) == (aval2 * 2))
assert np.all(aval == aval2)
assert np.array_equal(f(aval), (aval2 * 2))
assert np.array_equal(aval, aval2)
def test_in_update(self):
a = dscalar("a")
......@@ -155,7 +156,7 @@ class TestFunctionIn:
# Both values are in range. Since they're not ndarrays (but lists),
# they will be converted, and their value checked.
assert np.all(f([3], [6], 1) == 10)
assert np.array_equal(f([3], [6], 1), [10])
# Values are in range, but a dtype too large has explicitly been given
# For performance reasons, no check of the data is explicitly performed
......@@ -164,7 +165,7 @@ class TestFunctionIn:
f([3], np.array([6], dtype="int16"), 1)
# Value too big for a, silently ignored
assert np.all(f([2**20], np.ones(1, dtype="int8"), 1) == 2)
assert np.array_equal(f([2**20], np.ones(1, dtype="int8"), 1), [2])
# Value too big for b, raises TypeError
with pytest.raises(TypeError):
......@@ -189,7 +190,7 @@ class TestFunctionIn:
)
# If the values can be accurately represented, everything is OK
assert np.all(f(0, 0, 0) == 0)
assert np.array_equal(f(0, 0, 0), 0)
# If allow_downcast is True, idem
assert np.allclose(f(0.1, 0, 0), 0.1)
......@@ -221,7 +222,7 @@ class TestFunctionIn:
# If the values can be accurately represented, everything is OK
z = [0]
assert np.all(f(z, z, z) == 0)
assert np.array_equal(f(z, z, z), [0])
# If allow_downcast is True, idem
assert np.allclose(f([0.1], z, z), 0.1)
......
......@@ -352,7 +352,7 @@ class TestFunction:
def test_swap_SharedVariable(self):
i = iscalar()
x_list = shared(value=np.random.rand(10).astype(config.floatX))
x_list = shared(value=np.random.random((10,)).astype(config.floatX))
x = scalar("x")
# SharedVariable for tests, one of them has update
......@@ -419,11 +419,11 @@ class TestFunction:
# A special testcase for logistic_sgd.py in Deep Learning Tutorial
# This test assert that SharedVariable in different function have same storage
train_x = shared(value=np.random.rand(10, 10).astype(config.floatX))
test_x = shared(value=np.random.rand(10, 10).astype(config.floatX))
train_x = shared(value=np.random.random((10, 10)).astype(config.floatX))
test_x = shared(value=np.random.random((10, 10)).astype(config.floatX))
train_y = shared(value=np.random.rand(10, 1).astype(config.floatX))
test_y = shared(value=np.random.rand(10, 1).astype(config.floatX))
train_y = shared(value=np.random.random((10, 1)).astype(config.floatX))
test_y = shared(value=np.random.random((10, 1)).astype(config.floatX))
i = iscalar("index")
x = vector("x")
......@@ -604,7 +604,7 @@ class TestFunction:
# when borrow=True is implemented.
a = dmatrix()
aval = np.random.rand(3, 3)
aval = np.random.random((3, 3))
# when borrow=False, test that a destroy map cannot alias output to input
f = function([In(a, borrow=False)], Out(a + 1, borrow=True))
......@@ -699,7 +699,7 @@ class TestFunction:
assert funct(first=1) == x
def test_check_for_aliased_inputs(self):
b = np.random.rand(5, 4)
b = np.random.random((5, 4))
s1 = shared(b)
s2 = shared(b)
x1 = vector()
......@@ -1053,7 +1053,7 @@ class TestPicklefunction:
def pers_load(id):
return saves[id]
b = np.random.rand(5, 4)
b = np.random.random((5, 4))
x = matrix()
y = shared(b)
......
......@@ -124,7 +124,7 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
)
def test_shared(self, cls_ofg):
x, y, z = matrices("xyz")
s = shared(np.random.rand(2, 2).astype(config.floatX))
s = shared(np.random.random((2, 2)).astype(config.floatX))
e = x + y * z + s
op = cls_ofg([x, y, z], [e])
# (1+3*5=array of 16) - (3+1*5=array of 8)
......@@ -144,7 +144,7 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
)
def test_shared_grad(self, cls_ofg):
x, y, z = matrices("xyz")
s = shared(np.random.rand(2, 2).astype(config.floatX))
s = shared(np.random.random((2, 2)).astype(config.floatX))
e = x + y * z + s
op = cls_ofg([x, y, z], [e])
f = op(x, y, z)
......@@ -184,8 +184,8 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
zz = at_sum(op(xx, yy))
dx, dy = grad(zz, [xx, yy])
fn = function([xx, yy], [dx, dy])
xv = np.random.rand(16).astype(config.floatX)
yv = np.random.rand(16).astype(config.floatX)
xv = np.random.random((16,)).astype(config.floatX)
yv = np.random.random((16,)).astype(config.floatX)
dxv, dyv = fn(xv, yv)
np.testing.assert_array_almost_equal(yv * 2, dxv, 4)
np.testing.assert_array_almost_equal(xv * 1.5, dyv, 4)
......@@ -210,9 +210,9 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
zz = at_sum(op_linear(xx, ww, bb))
dx, dw, db = grad(zz, [xx, ww, bb])
fn = function([xx, ww, bb], [dx, dw, db])
xv = np.random.rand(16).astype(config.floatX)
wv = np.random.rand(16).astype(config.floatX)
bv = np.random.rand(16).astype(config.floatX)
xv = np.random.random((16,)).astype(config.floatX)
wv = np.random.random((16,)).astype(config.floatX)
bv = np.random.random((16,)).astype(config.floatX)
dxv, dwv, dbv = fn(xv, wv, bv)
np.testing.assert_array_almost_equal(wv * 2, dxv, 4)
np.testing.assert_array_almost_equal(xv * 1.5, dwv, 4)
......@@ -262,7 +262,7 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
gyy2 = grad(yy2, xx)
fn = function([xx], [gyy1, gyy2])
xval = np.random.rand(32).astype(config.floatX)
xval = np.random.random((32,)).astype(config.floatX)
y1val, y2val = fn(xval)
np.testing.assert_array_almost_equal(y1val, y2val, 4)
......@@ -280,9 +280,9 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
du = vector()
dv = Rop(y, x, du)
fn = function([x, W, du], dv)
xval = np.random.rand(16).astype(config.floatX)
Wval = np.random.rand(16, 16).astype(config.floatX)
duval = np.random.rand(16).astype(config.floatX)
xval = np.random.random((16,)).astype(config.floatX)
Wval = np.random.random((16, 16)).astype(config.floatX)
duval = np.random.random((16,)).astype(config.floatX)
dvval = np.dot(duval, Wval)
dvval2 = fn(xval, Wval, duval)
np.testing.assert_array_almost_equal(dvval2, dvval, 4)
......@@ -310,7 +310,7 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
zz = op_mul(xx, yy)
dw = Rop(zz, [xx, yy], [du, dv])
fn = function([xx, yy, du, dv], dw)
vals = np.random.rand(4, 32).astype(config.floatX)
vals = np.random.random((4, 32)).astype(config.floatX)
dwval = fn(*vals)
np.testing.assert_array_almost_equal(
dwval, vals[0] * vals[3] * 1.5 + vals[1] * vals[2] * 2.0, 4
......@@ -363,8 +363,8 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
xx2, yy2 = op_ift(*op_ft(xx, yy))
fn = function([xx, yy], [xx2, yy2])
xv = np.random.rand(16).astype(config.floatX)
yv = np.random.rand(16).astype(config.floatX)
xv = np.random.random((16,)).astype(config.floatX)
yv = np.random.random((16,)).astype(config.floatX)
xv2, yv2 = fn(xv, yv)
np.testing.assert_array_almost_equal(xv, xv2, 4)
np.testing.assert_array_almost_equal(yv, yv2, 4)
......
......@@ -20,14 +20,19 @@ def test_NanGuardMode():
# Tests if NanGuardMode is working by feeding in numpy.inf and numpy.nans
# intentionally. A working implementation should be able to capture all
# the abnormalties.
rng = np.random.default_rng(2482)
x = matrix()
w = shared(np.random.randn(5, 7).astype(config.floatX))
w = shared(rng.standard_normal((5, 7)).astype(config.floatX))
y = dot(x, w)
fun = function([x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True))
a = np.random.randn(3, 5).astype(config.floatX)
infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 5))
a = rng.standard_normal((3, 5)).astype(config.floatX)
with pytest.warns(RuntimeWarning):
infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 5))
nana = np.tile(np.asarray(np.nan).astype(config.floatX), (3, 5))
biga = np.tile(np.asarray(1e20).astype(config.floatX), (3, 5))
fun(a) # normal values
......@@ -38,7 +43,7 @@ def test_NanGuardMode():
_logger.propagate = False
with pytest.raises(AssertionError):
fun(infa) # INFs
with pytest.raises(AssertionError):
with pytest.raises(AssertionError), pytest.warns(RuntimeWarning):
fun(nana) # NANs
with pytest.raises(AssertionError):
fun(biga) # big values
......@@ -46,9 +51,13 @@ def test_NanGuardMode():
_logger.propagate = True
# slices
a = np.random.randn(3, 4, 5).astype(config.floatX)
infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 4, 5))
a = rng.standard_normal((3, 4, 5)).astype(config.floatX)
with pytest.warns(RuntimeWarning):
infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 4, 5))
nana = np.tile(np.asarray(np.nan).astype(config.floatX), (3, 4, 5))
biga = np.tile(np.asarray(1e20).astype(config.floatX), (3, 4, 5))
x = tensor3()
......@@ -59,7 +68,7 @@ def test_NanGuardMode():
_logger.propagate = False
with pytest.raises(AssertionError):
fun(infa) # INFs
with pytest.raises(AssertionError):
with pytest.raises(AssertionError), pytest.warns(RuntimeWarning):
fun(nana) # NANs
with pytest.raises(AssertionError):
fun(biga) # big values
......
......@@ -37,9 +37,9 @@ class TestSharedVariable:
# test tensor constructor
b = shared(np.zeros((5, 5), dtype="int32"))
assert b.type == TensorType("int32", shape=[False, False])
b = shared(np.random.rand(4, 5))
b = shared(np.random.random((4, 5)))
assert b.type == TensorType("float64", shape=[False, False])
b = shared(np.random.rand(5, 1, 2))
b = shared(np.random.random((5, 1, 2)))
assert b.type == TensorType("float64", shape=[False, False, False])
assert shared([]).type == generic
......@@ -178,7 +178,7 @@ class TestSharedVariable:
b = shared(np.zeros((5, 5), dtype="float32"))
with pytest.raises(TypeError):
f(b, np.random.rand(5, 5))
f(b, np.random.random((5, 5)))
def test_tensor_strict(self):
def f(var, val):
......@@ -228,7 +228,7 @@ class TestSharedVariable:
b = shared(np.zeros((5, 5), dtype="float32"))
with pytest.raises(TypeError):
f(b, np.random.rand(5, 5))
f(b, np.random.random((5, 5)))
def test_scalar_floatX(self):
......@@ -285,7 +285,7 @@ class TestSharedVariable:
b = shared(np.zeros((5, 5), dtype="float32"))
with pytest.raises(TypeError):
f(b, np.random.rand(5, 5))
f(b, np.random.random((5, 5)))
def test_tensor_floatX(self):
def f(var, val):
......@@ -338,9 +338,8 @@ class TestSharedVariable:
b = shared(np.zeros((5, 5), dtype="float32"))
with pytest.raises(TypeError):
f(b, np.random.rand(5, 5))
f(b, np.random.random((5, 5)))
def test_err_symbolic_variable(self):
with pytest.raises(TypeError):
shared(aesara.tensor.ones((2, 3)))
shared(np.ones((2, 4)))
......@@ -339,7 +339,7 @@ class TestAutoName:
Variable.__count__ = count(autoname_id)
r1 = TensorType(dtype="int32", shape=())("myvar")
r2 = TensorVariable(TensorType(dtype="int32", shape=()))
r3 = shared(np.random.randn(3, 4))
r3 = shared(np.random.standard_normal((3, 4)))
assert r1.auto_name == "auto_" + str(autoname_id)
assert r2.auto_name == "auto_" + str(autoname_id + 1)
assert r3.auto_name == "auto_" + str(autoname_id + 2)
......
import warnings
import numpy as np
import pytest
......@@ -22,7 +20,6 @@ def set_aesara_flags():
yield
# Used in TestComputeTestValue.test_no_perform
class IncOneC(COp):
"""
An Op with only a C (c_code) implementation
......@@ -80,9 +77,9 @@ class TestComputeTestValue:
def test_variable_only(self):
x = matrix("x")
x.tag.test_value = np.random.rand(3, 4).astype(config.floatX)
x.tag.test_value = np.random.random((3, 4)).astype(config.floatX)
y = matrix("y")
y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)
y.tag.test_value = np.random.random((4, 5)).astype(config.floatX)
# should work
z = dot(x, y)
......@@ -91,14 +88,14 @@ class TestComputeTestValue:
assert _allclose(f(x.tag.test_value, y.tag.test_value), z.tag.test_value)
# this test should fail
y.tag.test_value = np.random.rand(6, 5).astype(config.floatX)
y.tag.test_value = np.random.random((6, 5)).astype(config.floatX)
with pytest.raises(ValueError):
dot(x, y)
def test_compute_flag(self):
x = matrix("x")
y = matrix("y")
y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)
y.tag.test_value = np.random.random((4, 5)).astype(config.floatX)
# should skip computation of test value
with config.change_flags(compute_test_value="off"):
......@@ -110,18 +107,16 @@ class TestComputeTestValue:
dot(x, y)
# test that a warning is raised if required
with warnings.catch_warnings(), config.change_flags(compute_test_value="warn"):
warnings.simplefilter("error", UserWarning)
with pytest.raises(UserWarning):
dot(x, y)
with pytest.warns(UserWarning), config.change_flags(compute_test_value="warn"):
dot(x, y)
def test_string_var(self):
x = matrix("x")
x.tag.test_value = np.random.rand(3, 4).astype(config.floatX)
x.tag.test_value = np.random.random((3, 4)).astype(config.floatX)
y = matrix("y")
y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)
y.tag.test_value = np.random.random((4, 5)).astype(config.floatX)
z = aesara.shared(np.random.rand(5, 6).astype(config.floatX))
z = aesara.shared(np.random.random((5, 6)).astype(config.floatX))
# should work
out = dot(dot(x, y), z)
......@@ -133,14 +128,14 @@ class TestComputeTestValue:
return dot(dot(x, y), z)
# this test should fail
z.set_value(np.random.rand(7, 6).astype(config.floatX))
z.set_value(np.random.random((7, 6)).astype(config.floatX))
with pytest.raises(ValueError):
f(x, y, z)
def test_shared(self):
x = matrix("x")
x.tag.test_value = np.random.rand(3, 4).astype(config.floatX)
y = aesara.shared(np.random.rand(4, 6).astype(config.floatX), "y")
x.tag.test_value = np.random.random((3, 4)).astype(config.floatX)
y = aesara.shared(np.random.random((4, 6)).astype(config.floatX), "y")
# should work
z = dot(x, y)
......@@ -149,13 +144,13 @@ class TestComputeTestValue:
assert _allclose(f(x.tag.test_value), z.tag.test_value)
# this test should fail
y.set_value(np.random.rand(5, 6).astype(config.floatX))
y.set_value(np.random.random((5, 6)).astype(config.floatX))
with pytest.raises(ValueError):
dot(x, y)
def test_ndarray(self):
x = np.random.rand(2, 3).astype(config.floatX)
y = aesara.shared(np.random.rand(3, 6).astype(config.floatX), "y")
x = np.random.random((2, 3)).astype(config.floatX)
y = aesara.shared(np.random.random((3, 6)).astype(config.floatX), "y")
# should work
z = dot(x, y)
......@@ -164,12 +159,12 @@ class TestComputeTestValue:
assert _allclose(f(), z.tag.test_value)
# this test should fail
x = np.random.rand(2, 4).astype(config.floatX)
x = np.random.random((2, 4)).astype(config.floatX)
with pytest.raises(ValueError):
dot(x, y)
def test_empty_elemwise(self):
x = aesara.shared(np.random.rand(0, 6).astype(config.floatX), "x")
x = aesara.shared(np.random.random((0, 6)).astype(config.floatX), "x")
# should work
z = (x + 2) * 3
......@@ -178,8 +173,8 @@ class TestComputeTestValue:
assert _allclose(f(), z.tag.test_value)
def test_constant(self):
x = at.constant(np.random.rand(2, 3), dtype=config.floatX)
y = aesara.shared(np.random.rand(3, 6).astype(config.floatX), "y")
x = at.constant(np.random.random((2, 3)), dtype=config.floatX)
y = aesara.shared(np.random.random((3, 6)).astype(config.floatX), "y")
# should work
z = dot(x, y)
......@@ -188,7 +183,7 @@ class TestComputeTestValue:
assert _allclose(f(), z.tag.test_value)
# this test should fail
x = at.constant(np.random.rand(2, 4), dtype=config.floatX)
x = at.constant(np.random.random((2, 4)), dtype=config.floatX)
with pytest.raises(ValueError):
dot(x, y)
......@@ -202,7 +197,7 @@ class TestComputeTestValue:
x = fmatrix("x")
with pytest.raises(TypeError):
# Incorrect dtype (float64) for test value
x.tag.test_value = np.random.rand(3, 4)
x.tag.test_value = np.random.random((3, 4))
def test_overided_function(self):
# We need to test those as they mess with Exception
......@@ -219,7 +214,7 @@ class TestComputeTestValue:
k = iscalar("k")
A = vector("A")
k.tag.test_value = 3
A.tag.test_value = np.random.rand(5).astype(config.floatX)
A.tag.test_value = np.random.random((5,)).astype(config.floatX)
def fx(prior_result, A):
return prior_result * A
......@@ -240,7 +235,7 @@ class TestComputeTestValue:
k = iscalar("k")
A = matrix("A")
k.tag.test_value = 3
A.tag.test_value = np.random.rand(5, 3).astype(config.floatX)
A.tag.test_value = np.random.random((5, 3)).astype(config.floatX)
def fx(prior_result, A):
return dot(prior_result, A)
......@@ -258,7 +253,7 @@ class TestComputeTestValue:
k = iscalar("k")
A = matrix("A")
k.tag.test_value = 3
A.tag.test_value = np.random.rand(5, 3).astype(config.floatX)
A.tag.test_value = np.random.random((5, 3)).astype(config.floatX)
def fx(prior_result, A):
return dot(prior_result, A)
......
......@@ -127,7 +127,7 @@ class TestMakeThunk:
x_input = dmatrix("x_input")
f = aesara.function([x_input], DoubleOp()(x_input))
inp = np.random.rand(5, 4)
inp = np.random.random((5, 4))
out = f(inp)
assert np.allclose(inp * 2, out)
......
......@@ -34,8 +34,8 @@ class MyOp(DeepCopyOp):
itype = node.inputs[0].type.__class__
if itype in self.c_code_and_version:
code, version = self.c_code_and_version[itype]
rand = np.random.rand()
return ('printf("%(rand)s\\n");' + code) % locals()
rand = np.random.random()
return f'printf("{rand}\\n");{code % locals()}'
# Else, no C code
return super(DeepCopyOp, self).c_code(node, name, inames, onames, sub)
......
......@@ -81,7 +81,7 @@ def test_cdata():
# This should be a passthrough function for vectors
f = aesara.function([i], i2, mode=mode)
v = np.random.randn(9).astype("float32")
v = np.random.standard_normal((9,)).astype("float32")
v2 = f(v)
assert (v2 == v).all()
......
import gc
import sys
import time
import numpy as np
......@@ -18,8 +16,8 @@ from aesara.link.c.basic import OpWiseCLinker
from aesara.link.c.exceptions import MissingGXX
from aesara.link.utils import map_storage
from aesara.link.vm import VM, Loop, LoopGC, VMLinker
from aesara.tensor.math import cosh, sin, tanh
from aesara.tensor.type import dvector, lscalar, scalar, scalars, vector, vectors
from aesara.tensor.math import cosh, tanh
from aesara.tensor.type import lscalar, scalar, scalars, vector, vectors
from aesara.tensor.var import TensorConstant
......@@ -292,97 +290,6 @@ def test_allow_gc_cvm():
assert f.fn.storage_map[n][0] is None
run_memory_usage_tests = False
if run_memory_usage_tests:
# these are not normal unit tests, do not run them as part of standard
# suite. I ran them while looking at top, and stopped when memory usage
# was stable.
def test_no_leak_many_graphs():
# Verify no memory leaks when creating and deleting a lot of functions
# This isn't really a unit test, you have to run it and look at top to
# see if there's a leak
for i in range(10000):
x = vector()
z = x
for d in range(10):
z = sin(-z + 1)
f = function([x], z, mode=Mode(optimizer=None, linker="cvm"))
if not i % 100:
print(gc.collect())
sys.stdout.flush()
gc.collect()
if 1:
f([2.0])
f([3.0])
f([4.0])
f([5.0])
def test_no_leak_many_call_lazy():
# Verify no memory leaks when calling a function a lot of times
# This isn't really a unit test, you have to run it and look at top to
# see if there's a leak
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = ifelse(z.mean() > 0.5, -z, z)
return z
def time_linker(name, linker):
steps_a = 10
x = dvector()
a = build_graph(x, steps_a)
f_a = function([x], a, mode=Mode(optimizer=None, linker=linker()))
inp = np.random.rand(1000000)
for i in range(100):
f_a(inp)
# this doesn't seem to work, prints 0 for everything
# import resource
#
# pre = resource.getrusage(resource.RUSAGE_SELF)
# post = resource.getrusage(resource.RUSAGE_SELF)
# print(pre.ru_ixrss, post.ru_ixrss)
# print(pre.ru_idrss, post.ru_idrss)
# print(pre.ru_maxrss, post.ru_maxrss)
print(1)
time_linker("vmLinker_C", lambda: VMLinker(allow_gc=False, use_cloop=True))
print(2)
time_linker("vmLinker", lambda: VMLinker(allow_gc=False, use_cloop=False))
def test_no_leak_many_call_nonlazy():
# Verify no memory leaks when calling a function a lot of times
# This isn't really a unit test, you have to run it and look at top to
# see if there's a leak.
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = sin(-z + 1)
return z
def time_linker(name, linker):
steps_a = 10
x = dvector()
a = build_graph(x, steps_a)
f_a = function([x], a, mode=Mode(optimizer=None, linker=linker()))
inp = np.random.rand(1000000)
for i in range(500):
f_a(inp)
print(1)
time_linker("vmLinker_C", lambda: VMLinker(allow_gc=False, use_cloop=True))
print(2)
time_linker("vmLinker", lambda: VMLinker(allow_gc=False, use_cloop=False))
class RunOnce(Op):
__props__ = ("nb_run",)
......
......@@ -72,8 +72,8 @@ def may_share_memory_core(a, b):
def test_may_share_memory():
a = np.random.rand(5, 4)
b = np.random.rand(5, 4)
a = np.random.random((5, 4))
b = np.random.random((5, 4))
may_share_memory_core(a, b)
......
......@@ -15,9 +15,9 @@ def test_n_samples_1():
f = function([p, u, n], m, allow_input_downcast=True)
np.random.seed(12345)
rng = np.random.default_rng(12345)
for i in [1, 5, 10, 100, 1000, 10000]:
uni = np.random.rand(2 * i).astype(config.floatX)
uni = rng.random(2 * i).astype(config.floatX)
res = f([[1.0, 0.0], [0.0, 1.0]], uni, i)
utt.assert_allclose(res, [[i * 1.0, 0.0], [0.0, i * 1.0]])
......@@ -30,17 +30,18 @@ def test_n_samples_2():
f = function([p, u, n], m, allow_input_downcast=True)
np.random.seed(12345)
rng = np.random.default_rng(12345)
for i in [1, 5, 10, 100, 1000]:
uni = np.random.rand(i).astype(config.floatX)
pvals = np.random.randint(1, 1000, (1, 1000)).astype(config.floatX)
uni = rng.random(i).astype(config.floatX)
pvals = rng.integers(1, 1000, (1, 1000)).astype(config.floatX)
pvals /= pvals.sum(1)
res = f(pvals, uni, i)
assert res.sum() == i
for i in [1, 5, 10, 100, 1000]:
uni = np.random.rand(i).astype(config.floatX)
pvals = np.random.randint(1, 1000000, (1, 1000000)).astype(config.floatX)
uni = rng.random(i).astype(config.floatX)
pvals = rng.integers(1, 1000000, (1, 1000000)).astype(config.floatX)
pvals /= pvals.sum(1)
res = f(pvals, uni, i)
assert res.sum() == i
......
......@@ -9,6 +9,9 @@ from aesara.tensor.type import fmatrix, fvector, iscalar
class TestOP:
@pytest.mark.xfail(
reason="This test is designed around very specific random draws from the old NumPy API"
)
def test_select_distinct(self):
# Tests that ChoiceFromUniform always selects distinct elements
......@@ -21,7 +24,9 @@ class TestOP:
n_elements = 1000
all_indices = range(n_elements)
np.random.seed(12345)
rng = np.random.default_rng(12345)
expected = [
np.asarray([[931, 318, 185, 209, 559]]),
np.asarray([[477, 887, 2, 717, 333, 665, 159, 559, 348, 136]]),
......@@ -84,8 +89,8 @@ class TestOP:
]
for i in [5, 10, 50, 100, 500, n_elements]:
uni = np.random.rand(i).astype(config.floatX)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
uni = rng.random(i).astype(config.floatX)
pvals = rng.integers(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
res = f(pvals, uni, i)
for ii in range(len(expected)):
......@@ -108,9 +113,9 @@ class TestOP:
n_elements = 100
n_selected = 200
np.random.seed(12345)
uni = np.random.rand(n_selected).astype(config.floatX)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
rng = np.random.default_rng(12345)
uni = rng.random(n_selected).astype(config.floatX)
pvals = rng.integers(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
with pytest.raises(ValueError):
f(pvals, uni, n_selected)
......@@ -129,13 +134,13 @@ class TestOP:
n_elements = 100
n_selected = 10
mean_rtol = 0.0005
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
rng = np.random.default_rng(12345)
pvals = rng.integers(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
avg_pvals = np.zeros((n_elements,), dtype=config.floatX)
for rep in range(10000):
uni = np.random.rand(n_selected).astype(config.floatX)
uni = rng.random(n_selected).astype(config.floatX)
res = f(pvals, uni, n_selected)
res = np.squeeze(res)
avg_pvals[res] += 1
......@@ -159,9 +164,9 @@ class TestFunction:
n_elements = 1000
all_indices = range(n_elements)
np.random.seed(12345)
rng = np.random.default_rng(12345)
for i in [5, 10, 50, 100, 500, n_elements]:
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals = rng.integers(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
res = f(pvals, i)
res = np.squeeze(res)
......@@ -183,8 +188,8 @@ class TestFunction:
n_elements = 100
n_selected = 200
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
rng = np.random.default_rng(12345)
pvals = rng.integers(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
with pytest.raises(ValueError):
f(pvals, n_selected)
......@@ -204,8 +209,8 @@ class TestFunction:
n_elements = 100
n_selected = 10
mean_rtol = 0.0005
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
rng = np.random.default_rng(12345)
pvals = rng.integers(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
avg_pvals = np.zeros((n_elements,), dtype=config.floatX)
......
......@@ -133,7 +133,6 @@ class TestSP:
# symbolic stuff
kerns = [dmatrix(), dmatrix()]
input = dmatrix()
# rng = np.random.default_rng(3423489)
# build actual input images
img2d = np.arange(bsize * np.prod(imshp)).reshape((bsize,) + imshp)
......@@ -154,7 +153,6 @@ class TestSP:
)
l1propup = function([kerns[0], input], l1hid, mode=mode)
# l1kernvals = np.random.rand(nkerns[0],np.prod(kshp[0]))
l1kernvals = np.arange(nkerns[0] * np.prod(kshp[0])).reshape(
nkerns[0], np.prod(kshp[0])
)
......@@ -172,7 +170,6 @@ class TestSP:
)
l2propup = function([kerns[1], l1hid], l2hid, mode=mode)
# l2kernvals = np.random.rand(nkerns[1],np.prod(kshp[1])*nkerns[0])
l2kernvals = np.arange(
nkerns[1] * np.prod(kshp[1]) * nkerns[0]
).reshape(nkerns[1], np.prod(kshp[1]) * nkerns[0])
......
......@@ -1019,7 +1019,7 @@ class TestComparison:
class TestConversion:
def test_basic(self):
test_val = np.random.rand(5).astype(config.floatX)
test_val = np.random.random((5,)).astype(config.floatX)
a = at.as_tensor_variable(test_val)
s = csc_from_dense(a)
val = eval_outputs([s])
......@@ -2792,11 +2792,11 @@ class TestAddSSData(utt.InferShapeTester):
for format in sparse.sparse_formats:
variable = getattr(aesara.sparse, format + "_matrix")
rand = np.array(
a_val = np.array(
np.random.default_rng(utt.fetch_seed()).integers(1, 4, size=(3, 4)) - 1,
dtype=aesara.config.floatX,
)
constant = as_sparse_format(rand, format)
constant = as_sparse_format(a_val, format)
self.x[format] = [variable() for t in range(2)]
self.a[format] = [constant for t in range(2)]
......
......@@ -164,8 +164,8 @@ def test_local_dense_from_sparse_sparse_from_dense():
def test_sd_csc():
A = sp.sparse.rand(4, 5, density=0.60, format="csc", dtype=np.float32)
b = np.random.rand(5, 2).astype(np.float32)
A = sp.sparse.random(4, 5, density=0.60, format="csc", dtype=np.float32)
b = np.random.random((5, 2)).astype(np.float32)
target = A * b
a_val = as_tensor_variable(A.data)
......
......@@ -27,12 +27,13 @@ class TestPoisson(utt.InferShapeTester):
for format in sparse.sparse_formats:
variable = getattr(aesara.sparse, format + "_matrix")
rand = np.array(
np.random.randint(1, 4, size=(3, 4)) - 1, dtype=aesara.config.floatX
a_val = np.array(
np.random.default_rng(utt.fetch_seed()).integers(1, 4, size=(3, 4)) - 1,
dtype=aesara.config.floatX,
)
x[format] = variable()
a[format] = as_sparse_format(rand, format)
a[format] = as_sparse_format(a_val, format)
def setup_method(self):
super().setup_method()
......
......@@ -10,10 +10,10 @@ from tests.sparse.test_basic import as_sparse_format
def test_hash_from_sparse():
hashes = []
rng = np.random.rand(5, 5)
x = np.random.random((5, 5))
for format in ["csc", "csr"]:
rng = as_sparse_format(rng, format)
x = as_sparse_format(x, format)
for data in [
[[-2]],
[[-1]],
......@@ -32,10 +32,10 @@ def test_hash_from_sparse():
np.zeros((5, 5), dtype="uint32"),
np.zeros((5, 5), dtype="int32"),
# Test slice
rng,
rng[1:],
rng[:4],
rng[1:3],
x,
x[1:],
x[:4],
x[1:3],
# Don't test step as they are not supported by sparse
# rng[::2], rng[::-1]
]:
......@@ -44,8 +44,8 @@ def test_hash_from_sparse():
hashes.append(hash_from_sparse(data))
# test that different type of views and their copy give the same hash
assert hash_from_sparse(rng[1:]) == hash_from_sparse(rng[1:].copy())
assert hash_from_sparse(rng[1:3]) == hash_from_sparse(rng[1:3].copy())
assert hash_from_sparse(rng[:4]) == hash_from_sparse(rng[:4].copy())
assert hash_from_sparse(x[1:]) == hash_from_sparse(x[1:].copy())
assert hash_from_sparse(x[1:3]) == hash_from_sparse(x[1:3].copy())
assert hash_from_sparse(x[:4]) == hash_from_sparse(x[:4].copy())
assert len(set(hashes)) == len(hashes)
......@@ -44,7 +44,7 @@ with config.change_flags(compute_test_value="off"):
f = aesara.function([x], [send_request, z], mode=mode)
xx = np.random.rand(*shape).astype(dtype)
xx = np.random.random(shape).astype(dtype)
expected = (xx + 1) * 2
_, zz = f(xx)
......
......@@ -1761,7 +1761,9 @@ class TestBilinearUpsampling:
# 1D and 2D kernels will generate the same result.
# checking upsampling with ratio 5
input_x = np.random.rand(5, 4, 6, 7).astype(config.floatX)
rng = np.random.default_rng(280284)
input_x = rng.random((5, 4, 6, 7)).astype(config.floatX)
mat_1D = bilinear_upsampling(
input=input_x,
ratio=5,
......@@ -1781,7 +1783,7 @@ class TestBilinearUpsampling:
utt.assert_allclose(f_1D(), f_2D(), rtol=1e-06)
# checking upsampling with ratio 8
input_x = np.random.rand(12, 11, 10, 7).astype(config.floatX)
input_x = rng.random((12, 11, 10, 7)).astype(config.floatX)
mat_1D = bilinear_upsampling(
input=input_x,
ratio=8,
......@@ -1838,7 +1840,7 @@ class TestBilinearUpsampling:
utt.assert_allclose(f_up_x(), num_up_x, rtol=1e-6)
def test_fractional_bilinear_upsampling_shape(self):
x = np.random.rand(1, 1, 200, 200).astype(config.floatX)
x = np.random.random((1, 1, 200, 200)).astype(config.floatX)
resize = (24, 20)
z = bilinear_upsampling(
at.as_tensor_variable(x), frac_ratio=resize, use_1D_kernel=False
......
......@@ -108,7 +108,7 @@ class TestSoftmax(utt.InferShapeTester):
@pytest.mark.parametrize("axis", [None, 0, 1, 2, 3, -1, -2])
def test_perform(self, axis):
x = tensor4("x")
xv = np.random.randn(2, 3, 4, 5).astype(config.floatX)
xv = np.random.standard_normal((2, 3, 4, 5)).astype(config.floatX)
f = aesara.function([x], softmax(x, axis=axis))
assert np.allclose(f(xv), sp.softmax(xv, axis=axis))
......@@ -132,7 +132,7 @@ class TestSoftmax(utt.InferShapeTester):
x = vector()
f = aesara.function([x], softmax(x, axis=None))
xv = np.random.randn(6).astype(config.floatX)
xv = np.random.standard_normal((6,)).astype(config.floatX)
assert np.allclose(f(xv), sp.softmax(xv))
def test_vector_grad(self):
......@@ -187,8 +187,8 @@ class TestSoftmaxWithBias(utt.InferShapeTester):
# print f.maker.fgraph.toposort()
def test_softmax_with_bias_trace(self):
a = aesara.shared(np.random.randn(3).astype(config.floatX))
b = aesara.shared(np.float32(np.random.randn()))
a = aesara.shared(np.random.standard_normal((3,)).astype(config.floatX))
b = aesara.shared(np.float32(np.random.standard_normal()))
sm = softmax(a + b)
f = aesara.function([], sm)
assert check_stack_trace(f, ops_to_check="last")
......@@ -219,7 +219,7 @@ class TestLogSoftmax(utt.InferShapeTester):
x = vector()
f = aesara.function([x], logsoftmax(x, axis=None))
xv = np.random.randn(6).astype(config.floatX)
xv = np.random.standard_normal((6,)).astype(config.floatX)
assert np.allclose(f(xv), sp.log_softmax(xv))
def test_vector_grad(self):
......@@ -1372,7 +1372,7 @@ TestSoftsign = makeBroadcastTester(
class TestSigmoidBinaryCrossentropy:
def _get_test_inputs(self, n=50):
pred, target = np.random.randn(2, n).astype(config.floatX)
pred, target = np.random.standard_normal((2, n)).astype(config.floatX)
# apply sigmoid to target, but not pred
return [pred, 1 / (1 + np.exp(-target))]
......
......@@ -2,7 +2,6 @@
Tests for block sparse dot
"""
import numpy as np
from numpy.random import randn
import aesara
import aesara.tensor as at
......@@ -42,18 +41,21 @@ class TestBlockSparseGemvAndOuter(utt.InferShapeTester):
outputWindowSize = 3
batchSize = 2
input = randn(batchSize, inputWindowSize, inputSize).astype("float32")
permutation = np.random.permutation
rng = np.random.default_rng(230920)
input = rng.standard_normal((batchSize, inputWindowSize, inputSize)).astype(
"float32"
)
inputIndice = np.vstack(
permutation(nInputBlock)[:inputWindowSize] for _ in range(batchSize)
rng.permutation(nInputBlock)[:inputWindowSize] for _ in range(batchSize)
).astype("int32")
outputIndice = np.vstack(
permutation(nOutputBlock)[:outputWindowSize] for _ in range(batchSize)
rng.permutation(nOutputBlock)[:outputWindowSize] for _ in range(batchSize)
).astype("int32")
weight = randn(nInputBlock, nOutputBlock, inputSize, outputSize).astype(
"float32"
)
bias = randn(nOutputBlock, outputSize).astype("float32")
weight = rng.standard_normal(
(nInputBlock, nOutputBlock, inputSize, outputSize)
).astype("float32")
bias = rng.standard_normal((nOutputBlock, outputSize)).astype("float32")
return weight, input, inputIndice, bias, outputIndice
......@@ -67,15 +69,18 @@ class TestBlockSparseGemvAndOuter(utt.InferShapeTester):
yWindowSize = 3
batchSize = 2
o = randn(nInputBlock, nOutputBlock, xSize, ySize).astype("float32")
x = randn(batchSize, xWindowSize, xSize).astype("float32")
y = randn(batchSize, yWindowSize, ySize).astype("float32")
randint = np.random.randint
rng = np.random.default_rng(230920)
o = rng.standard_normal((nInputBlock, nOutputBlock, xSize, ySize)).astype(
"float32"
)
x = rng.standard_normal((batchSize, xWindowSize, xSize)).astype("float32")
y = rng.standard_normal((batchSize, yWindowSize, ySize)).astype("float32")
xIdx = np.vstack(
randint(0, nInputBlock, size=xWindowSize) for _ in range(batchSize)
rng.integers(0, nInputBlock, size=xWindowSize) for _ in range(batchSize)
).astype("int32")
yIdx = np.vstack(
randint(0, nOutputBlock, size=yWindowSize) for _ in range(batchSize)
rng.integers(0, nOutputBlock, size=yWindowSize) for _ in range(batchSize)
).astype("int32")
return o, x, y, xIdx, yIdx
......@@ -223,11 +228,13 @@ class TestBlockSparseGemvAndOuter(utt.InferShapeTester):
def test_sparseblockgemv_grad_1(self):
# Test that we correctly handle cases where dimensions are 1.
h_val = randn(1, 1, 1).astype("float32")
iIdx_val = np.random.permutation(1)[:1][None, :]
oIdx_val = np.random.permutation(1)[:1][None, :]
W_val = randn(1, 1, 1, 1).astype("float32")
b_val = randn(1, 1).astype("float32")
rng = np.random.default_rng(230920)
h_val = rng.standard_normal((1, 1, 1)).astype("float32")
iIdx_val = rng.permutation(1)[:1][None, :]
oIdx_val = rng.permutation(1)[:1][None, :]
W_val = rng.standard_normal((1, 1, 1, 1)).astype("float32")
b_val = rng.standard_normal((1, 1)).astype("float32")
iIdx = at.constant(iIdx_val)
oIdx = at.constant(oIdx_val)
......
......@@ -25,7 +25,7 @@ class TestConv2D(utt.InferShapeTester):
# This will be set to the appropriate function in the inherited classes.
# The call to `staticmethod` is necessary to prevent Python from passing
# `self` as the first argument.
conv2d = staticmethod(conv.conv2d)
conv2d = staticmethod(conv2d)
def setup_method(self):
self.input = tensor4("input", dtype=self.dtype)
......@@ -372,7 +372,6 @@ class TestConv2D(utt.InferShapeTester):
should_raise=True,
)
@pytest.mark.slow
def test_subsample(self):
# Tests convolution where subsampling != (1,1)
self.validate((3, 2, 7, 5), (5, 2, 2, 3), "full", subsample=(2, 2))
......@@ -407,7 +406,6 @@ class TestConv2D(utt.InferShapeTester):
with pytest.raises(AssertionError):
self.validate((3, 2, 8, 8), (4, 3, 5, 5), "valid")
@pytest.mark.slow
def test_invalid_input_shape(self):
# Tests that when the shape given at build time is not the same as
# run time we raise an error
......@@ -627,8 +625,10 @@ class TestConv2D(utt.InferShapeTester):
# Note: infer_shape is incomplete and thus input and filter shapes
# must be provided explicitly
rng = np.random.default_rng(280284)
def rand(*shape):
r = np.asarray(np.random.rand(*shape), dtype="float64")
r = np.asarray(rng.random(shape), dtype="float64")
return r * 2 - 1
adtens = dtensor4()
......@@ -768,11 +768,8 @@ class TestConv2D(utt.InferShapeTester):
# nnet.conv2d() interface. This was reported in #3763, and uses the example
# code from that ticket.
def test_broadcast_grad():
# rng = numpy.random.RandomState(utt.fetch_seed())
x1 = tensor4("x")
# x1_data = rng.randn(1, 1, 300, 300)
sigma = scalar("sigma")
# sigma_data = 20
window_radius = 3
filter_1d = at.arange(-window_radius, window_radius + 1)
......@@ -783,4 +780,5 @@ def test_broadcast_grad():
filter_W = filter_1d.dimshuffle(["x", "x", 0, "x"])
y = conv2d(x1, filter_W, border_mode="full", filter_shape=[1, 1, None, None])
# TODO FIXME: Make this a real test and `assert` something
aesara.grad(y.sum(), sigma)
import time
import numpy as np
import pytest
......@@ -28,7 +26,7 @@ def test_get_diagonal_subtensor_view(wrap=lambda a: a):
xv01 = get_diagonal_subtensor_view(x, 0, 1)
# test that it works in 2d
assert np.all(np.asarray(xv01) == [[12, 9, 6, 3], [16, 13, 10, 7]])
assert np.array_equal(np.asarray(xv01), [[12, 9, 6, 3], [16, 13, 10, 7]])
x = np.arange(24).reshape(4, 3, 2)
xv01 = get_diagonal_subtensor_view(x, 0, 1)
......@@ -38,23 +36,23 @@ def test_get_diagonal_subtensor_view(wrap=lambda a: a):
# print 'x', x
# print 'xv01', xv01
# print 'xv02', xv02
assert np.all(
np.asarray(xv01) == [[[12, 13], [8, 9], [4, 5]], [[18, 19], [14, 15], [10, 11]]]
assert np.array_equal(
np.asarray(xv01), [[[12, 13], [8, 9], [4, 5]], [[18, 19], [14, 15], [10, 11]]]
)
assert np.all(
np.asarray(xv02)
== [
assert np.array_equal(
np.asarray(xv02),
[
[[6, 1], [8, 3], [10, 5]],
[[12, 7], [14, 9], [16, 11]],
[[18, 13], [20, 15], [22, 17]],
]
],
)
# diagonal views of each leading matrix is the same
# as the slices out of the diagonal view of the entire 3d tensor
for xi, xvi in zip(x, xv12):
assert np.all(xvi == get_diagonal_subtensor_view(xi, 0, 1))
assert np.array_equal(xvi, get_diagonal_subtensor_view(xi, 0, 1))
def pyconv3d(signals, filters, border_mode="valid"):
......@@ -129,9 +127,9 @@ def test_conv3d(border_mode):
np.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype("float32")
)
t0 = time.time()
# t0 = time.time()
pyres = pyconv3d(signals, filters, border_mode)
print(time.time() - t0)
# print(time.time() - t0)
s_signals = shared(signals)
s_filters = shared(filters)
......@@ -148,9 +146,9 @@ def test_conv3d(border_mode):
newconv3d = aesara.function([], [], updates={s_output: out}, mode=mode)
check_diagonal_subtensor_view_traces(newconv3d)
t0 = time.time()
# t0 = time.time()
newconv3d()
print(time.time() - t0)
# print(time.time() - t0)
utt.assert_allclose(pyres, s_output.get_value(borrow=True))
gsignals, gfilters = aesara.grad(out.sum(), [s_signals, s_filters])
gnewconv3d = aesara.function(
......@@ -162,15 +160,17 @@ def test_conv3d(border_mode):
)
check_diagonal_subtensor_view_traces(gnewconv3d)
t0 = time.time()
# t0 = time.time()
gnewconv3d()
print("grad", time.time() - t0)
# print("grad", time.time() - t0)
Ns, Ts, C, Hs, Ws = 3, 3, 3, 5, 5
Nf, Tf, C, Hf, Wf = 4, 2, 3, 2, 2
signals = np.random.rand(Ns, Ts, C, Hs, Ws).astype("float32")
filters = np.random.rand(Nf, Tf, C, Hf, Wf).astype("float32")
rng = np.random.default_rng(280284)
signals = rng.random((Ns, Ts, C, Hs, Ws)).astype("float32")
filters = rng.random((Nf, Tf, C, Hf, Wf)).astype("float32")
utt.verify_grad(
lambda s, f: conv3d(s, f, border_mode=border_mode),
[signals, filters],
......@@ -189,9 +189,9 @@ def test_conv3d(border_mode):
np.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype("float32")
)
t0 = time.time()
# t0 = time.time()
pyres = pyconv3d(signals, filters, border_mode)
print(time.time() - t0)
# print(time.time() - t0)
s_signals = shared(signals)
s_filters = shared(filters)
......@@ -207,9 +207,9 @@ def test_conv3d(border_mode):
newconv3d = aesara.function([], [], updates={s_output: out}, mode=mode)
t0 = time.time()
# t0 = time.time()
newconv3d()
print(time.time() - t0)
# print(time.time() - t0)
utt.assert_allclose(pyres, s_output.get_value(borrow=True))
gsignals, gfilters = aesara.grad(out.sum(), [s_signals, s_filters])
gnewconv3d = aesara.function(
......@@ -220,15 +220,15 @@ def test_conv3d(border_mode):
name="grad",
)
t0 = time.time()
# t0 = time.time()
gnewconv3d()
print("grad", time.time() - t0)
# print("grad", time.time() - t0)
Ns, Ts, C, Hs, Ws = 3, 3, 3, 5, 5
Nf, Tf, C, Hf, Wf = 4, 1, 3, 2, 2
signals = np.random.rand(Ns, Ts, C, Hs, Ws).astype("float32")
filters = np.random.rand(Nf, Tf, C, Hf, Wf).astype("float32")
signals = rng.random((Ns, Ts, C, Hs, Ws)).astype("float32")
filters = rng.random((Nf, Tf, C, Hf, Wf)).astype("float32")
utt.verify_grad(
lambda s, f: conv3d(s, f, border_mode=border_mode),
[signals, filters],
......
......@@ -297,8 +297,10 @@ class TestCorr2D(utt.InferShapeTester):
def test_dtype_upcast(self):
# Checks dtype upcast for CorrMM methods.
rng = np.random.default_rng(280284)
def rand(shape, dtype="float64"):
r = np.asarray(np.random.rand(*shape), dtype=dtype)
r = np.asarray(rng.random(shape), dtype=dtype)
return r * 2 - 1
ops = [corr.CorrMM, corr.CorrMM_gradWeights, corr.CorrMM_gradInputs]
......@@ -325,8 +327,11 @@ class TestCorr2D(utt.InferShapeTester):
reason="SciPy and cxx needed",
)
def test_infer_shape_forward(self):
rng = np.random.default_rng(280284)
def rand(*shape):
r = np.asarray(np.random.rand(*shape), dtype="float64")
r = np.asarray(rng.random(shape), dtype="float64")
return r * 2 - 1
corrMM = corr.CorrMM
......@@ -373,8 +378,11 @@ class TestCorr2D(utt.InferShapeTester):
reason="SciPy and cxx needed",
)
def test_infer_shape_gradW(self):
rng = np.random.default_rng(280284)
def rand(*shape):
r = np.asarray(np.random.rand(*shape), dtype="float64")
r = np.asarray(rng.random(shape), dtype="float64")
return r * 2 - 1
corrMM = corr.CorrMM
......@@ -429,8 +437,11 @@ class TestCorr2D(utt.InferShapeTester):
reason="Need cxx for this test",
)
def test_infer_shape_gradI(self):
rng = np.random.default_rng(280284)
def rand(*shape):
r = np.asarray(np.random.rand(*shape), dtype="float64")
r = np.asarray(rng.random(shape), dtype="float64")
return r * 2 - 1
corrMM = corr.CorrMM
......@@ -503,8 +514,9 @@ class TestGroupCorr2d(TestGroupedConvNoOptim):
def test_graph(self):
# define common values first
groups = 3
bottom = np.random.rand(3, 6, 5, 5).astype(aesara.config.floatX)
kern = np.random.rand(9, 2, 3, 3).astype(aesara.config.floatX)
rng = np.random.default_rng(280284)
bottom = rng.random((3, 6, 5, 5)).astype(aesara.config.floatX)
kern = rng.random((9, 2, 3, 3)).astype(aesara.config.floatX)
bottom_sym = tensor4("bottom")
kern_sym = tensor4("kern")
......
......@@ -77,8 +77,10 @@ class TestCorr3D(utt.InferShapeTester):
aesara_corr = aesara.function([input, filters], output, mode=self.mode)
# initialize input and compute result
image_data = np.random.random(N_image_shape).astype(self.dtype)
filter_data = np.random.random(N_filter_shape).astype(self.dtype)
rng = np.random.default_rng(28483)
image_data = rng.random(N_image_shape).astype(self.dtype)
filter_data = rng.random(N_filter_shape).astype(self.dtype)
image_data /= 10
filter_data /= 10
if non_contiguous:
......@@ -337,8 +339,10 @@ class TestCorr3D(utt.InferShapeTester):
def test_dtype_upcast(self):
# Checks dtype upcast for Corr3dMM methods.
rng = np.random.default_rng(28483)
def rand(shape, dtype="float64"):
r = np.asarray(np.random.rand(*shape), dtype=dtype)
r = np.asarray(rng.random(shape), dtype=dtype)
return r * 2 - 1
ops = [corr3d.Corr3dMM, corr3d.Corr3dMMGradWeights, corr3d.Corr3dMMGradInputs]
......@@ -365,8 +369,11 @@ class TestCorr3D(utt.InferShapeTester):
reason="Need cxx for this test",
)
def test_infer_shape_forward(self):
rng = np.random.default_rng(28483)
def rand(*shape):
r = np.asarray(np.random.rand(*shape), dtype="float64")
r = np.asarray(rng.random(shape), dtype="float64")
return r * 2 - 1
corr3dMM = corr3d.Corr3dMM
......@@ -413,8 +420,11 @@ class TestCorr3D(utt.InferShapeTester):
reason="Need cxx for this test",
)
def test_infer_shape_gradW(self):
rng = np.random.default_rng(28483)
def rand(*shape):
r = np.asarray(np.random.rand(*shape), dtype="float64")
r = np.asarray(rng.random(shape), dtype="float64")
return r * 2 - 1
corr3dMM = corr3d.Corr3dMM
......@@ -473,8 +483,11 @@ class TestCorr3D(utt.InferShapeTester):
reason="Need cxx for this test",
)
def test_infer_shape_gradI(self):
rng = np.random.default_rng(28483)
def rand(*shape):
r = np.asarray(np.random.rand(*shape), dtype="float64")
r = np.asarray(rng.random(shape), dtype="float64")
return r * 2 - 1
corr3dMM = corr3d.Corr3dMM
......
......@@ -297,7 +297,7 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
):
for neib_shape in neib_shapes:
for dtype in self.dtypes:
x = aesara.shared(np.random.randn(*shape).astype(dtype))
x = aesara.shared(np.random.standard_normal(shape).astype(dtype))
extra = (neib_shape[0] // 2, neib_shape[1] // 2)
padded_shape = (
x.shape[0],
......@@ -334,7 +334,7 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
):
for neib_shape in neib_shapes:
for dtype in self.dtypes:
x = aesara.shared(np.random.randn(*shape).astype(dtype))
x = aesara.shared(np.random.standard_normal(shape).astype(dtype))
extra = (neib_shape[0] - 1, neib_shape[1] - 1)
padded_shape = (
x.shape[0],
......@@ -398,7 +398,7 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
def test_grad_wrap_centered(self):
# It is not implemented for now. So test that we raise an error.
shape = (2, 3, 6, 6)
images_val = np.random.rand(*shape).astype("float32")
images_val = np.random.random(shape).astype("float32")
def fn(images):
return images2neibs(images, (3, 3), mode="wrap_centered")
......@@ -409,7 +409,8 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
def test_grad_half(self):
# It is not implemented for now. So test that we raise an error.
shape = (2, 3, 6, 6)
images_val = np.random.rand(*shape).astype("float32")
rng = np.random.default_rng(28483)
images_val = rng.random(shape).astype("float32")
def fn(images):
return images2neibs(images, (3, 3), mode="half")
......@@ -420,7 +421,8 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
def test_grad_full(self):
# It is not implemented for now. So test that we raise an error.
shape = (2, 3, 6, 6)
images_val = np.random.rand(*shape).astype("float32")
rng = np.random.default_rng(28483)
images_val = rng.random(shape).astype("float32")
def fn(images):
return images2neibs(images, (3, 3), mode="full")
......@@ -430,7 +432,8 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
def test_grad_valid(self):
shape = (2, 3, 6, 6)
images_val = np.random.rand(*shape).astype("float32")
rng = np.random.default_rng(28483)
images_val = rng.random(shape).astype("float32")
def fn(images):
return images2neibs(images, (2, 2))
......@@ -449,7 +452,8 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
def test_grad_ignore_border(self):
shape = (2, 3, 5, 5)
images_val = np.random.rand(*shape).astype("float32")
rng = np.random.default_rng(28483)
images_val = rng.random(shape).astype("float32")
def fn(images):
return images2neibs(images, (2, 2), mode="ignore_borders")
......@@ -459,7 +463,8 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
def test_neibs2images_grad(self):
# say we had images of size (2, 3, 10, 10)
# then we extracted 2x2 neighbors on this, we get (2 * 3 * 5 * 5, 4)
neibs_val = np.random.rand(150, 4)
rng = np.random.default_rng(28483)
neibs_val = rng.random((150, 4))
def fn(neibs):
return neibs2images(neibs, (2, 2), (2, 3, 10, 10))
......@@ -519,7 +524,10 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
im_val = np.ones((1, 3, 320, 320), dtype=np.float32)
neibs = extractPatches(im_val)
# TODO FIXME: Make this a real test and `assert` something
f(neibs, im_val.shape)
# Wrong number of dimensions
with pytest.raises(ValueError):
f(neibs, (1, 1, 3, 320, 320))
......
import numpy as np
import pytest
import aesara
import aesara.tensor.basic as basic
from aesara import function
from aesara.compile.io import In
from aesara.misc.safe_asarray import _asarray
from aesara.tensor.basic import (
_convert_to_complex64,
_convert_to_complex128,
_convert_to_float32,
_convert_to_float64,
_convert_to_int8,
......@@ -26,29 +28,26 @@ from aesara.tensor.type import (
class TestCasting:
def test_0(self):
for op_fn in [_convert_to_int32, _convert_to_float32, _convert_to_float64]:
for type_fn in bvector, ivector, fvector, dvector:
x = type_fn()
f = function([x], op_fn(x))
xval = _asarray(np.random.rand(10) * 10, dtype=type_fn.dtype)
yval = f(xval)
assert (
str(yval.dtype)
== op_fn.scalar_op.output_types_preference.spec[0].dtype
)
@pytest.mark.parametrize(
"op_fn", [_convert_to_int32, _convert_to_float32, _convert_to_float64]
)
@pytest.mark.parametrize("type_fn", [bvector, ivector, fvector, dvector])
def test_0(self, op_fn, type_fn):
x = type_fn()
f = function([x], op_fn(x))
xval = _asarray(np.random.random(10) * 10, dtype=type_fn.dtype)
yval = f(xval)
assert str(yval.dtype) == op_fn.scalar_op.output_types_preference.spec[0].dtype
def test_illegal(self):
try:
x = zmatrix()
x = zmatrix()
with pytest.raises(TypeError):
function([x], cast(x, "float64"))(np.ones((2, 3), dtype="complex128"))
except TypeError:
return
assert 0
def test_basic(self):
for type1 in [
@pytest.mark.parametrize(
"type1",
[
"uint8",
"uint16",
"uint32",
......@@ -59,24 +58,29 @@ class TestCasting:
"int64",
"float32",
"float64",
]:
x = TensorType(dtype=type1, shape=(False,))()
for type2, converter in zip(
["int8", "int16", "int32", "int64", "float32", "float64"],
[
_convert_to_int8,
_convert_to_int16,
_convert_to_int32,
_convert_to_int64,
_convert_to_float32,
_convert_to_float64,
],
):
y = converter(x)
f = function([In(x, strict=True)], y)
a = np.arange(10, dtype=type1)
b = f(a)
assert np.all(b == np.arange(10, dtype=type2))
],
)
@pytest.mark.parametrize(
"type2, converter",
zip(
["int8", "int16", "int32", "int64", "float32", "float64"],
[
_convert_to_int8,
_convert_to_int16,
_convert_to_int32,
_convert_to_int64,
_convert_to_float32,
_convert_to_float64,
],
),
)
def test_basic(self, type1, type2, converter):
x = TensorType(dtype=type1, shape=(False,))()
y = converter(x)
f = function([In(x, strict=True)], y)
a = np.arange(10, dtype=type1)
b = f(a)
assert np.array_equal(b, np.arange(10, dtype=type2))
def test_convert_to_complex(self):
val64 = np.ones(3, dtype="complex64") + 0.5j
......@@ -85,46 +89,44 @@ class TestCasting:
vec64 = TensorType("complex64", (False,))()
vec128 = TensorType("complex128", (False,))()
f = function([vec64], basic._convert_to_complex128(vec64))
f = function([vec64], _convert_to_complex128(vec64))
# we need to compare with the same type.
assert vec64.type.values_eq_approx(val128, f(val64))
f = function([vec128], basic._convert_to_complex128(vec128))
f = function([vec128], _convert_to_complex128(vec128))
assert vec64.type.values_eq_approx(val128, f(val128))
f = function([vec64], basic._convert_to_complex64(vec64))
f = function([vec64], _convert_to_complex64(vec64))
assert vec64.type.values_eq_approx(val64, f(val64))
f = function([vec128], basic._convert_to_complex64(vec128))
f = function([vec128], _convert_to_complex64(vec128))
assert vec128.type.values_eq_approx(val64, f(val128))
# upcasting to complex128
for t in ["int8", "int16", "int32", "int64", "float32", "float64"]:
a = aesara.shared(np.ones(3, dtype=t))
b = aesara.shared(np.ones(3, dtype="complex128"))
f = function([], basic._convert_to_complex128(a))
f = function([], _convert_to_complex128(a))
assert a.type.values_eq_approx(b.get_value(), f())
# upcasting to complex64
for t in ["int8", "int16", "int32", "int64", "float32"]:
a = aesara.shared(np.ones(3, dtype=t))
b = aesara.shared(np.ones(3, dtype="complex64"))
f = function([], basic._convert_to_complex64(a))
f = function([], _convert_to_complex64(a))
assert a.type.values_eq_approx(b.get_value(), f())
# downcast to complex64
for t in ["float64"]:
a = aesara.shared(np.ones(3, dtype=t))
b = aesara.shared(np.ones(3, dtype="complex64"))
f = function([], basic._convert_to_complex64(a))
f = function([], _convert_to_complex64(a))
assert a.type.values_eq_approx(b.get_value(), f())
def test_bug_complext_10_august_09(self):
v0 = dmatrix()
v1 = basic._convert_to_complex128(v0)
v1 = _convert_to_complex128(v0)
inputs = [v0]
outputs = [v1]
f = function(inputs, outputs)
f = function([v0], v1)
i = np.zeros((2, 2))
assert (f(i) == np.zeros((2, 2))).all()
assert np.array_equal(f(i), i)
......@@ -17,9 +17,9 @@ from aesara.tensor.math import var
from aesara.tensor.type import dtensor3
# this tests other ops to ensure they keep the dimensions of their
# inputs correctly
class TestKeepDims:
r"""This tests other `Op`\s to ensure they keep the dimensions of their inputs correctly."""
def makeKeepDims_local(self, x, y, axis):
if axis is None:
newaxis = list(range(x.ndim))
......@@ -45,18 +45,66 @@ class TestKeepDims:
return DimShuffle(y.type.broadcastable, new_dims)(y)
@pytest.mark.slow
def test_keepdims(self):
@pytest.mark.parametrize(
"axis",
[
0,
1,
2,
[0],
[1],
[2],
None,
[0, 1, 2],
[-1],
[-2],
[-3],
[-1, -2, -3],
[0, -1, -2],
[-2, -3, 2],
],
)
@pytest.mark.parametrize(
"op",
[max_and_argmax],
)
def test_max_and_argmax(self, axis, op):
x = dtensor3()
a = np.random.rand(3, 2, 4)
a = np.random.random((3, 2, 4))
# We don't need to test all opt and C code, as this is tested
# by the ops tests.
mode = Mode(optimizer="fast_compile", linker="py")
# 'max_and_argmax' has two outputs and can be specified with either
# a single or every axis:
for axis in [
f = function(
[x],
[
op(x, axis=axis, keepdims=True)[0],
self.makeKeepDims_local(x, op(x, axis=axis, keepdims=False)[0], axis),
],
mode=mode,
)
ans1, ans2 = f(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
f = function(
[x],
[
op(x, axis=axis, keepdims=True)[1],
self.makeKeepDims_local(x, op(x, axis=axis, keepdims=False)[1], axis),
],
mode=mode,
)
ans1, ans2 = f(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
@pytest.mark.parametrize(
"axis",
[
0,
1,
2,
......@@ -69,75 +117,58 @@ class TestKeepDims:
[-2],
[-3],
[-1, -2, -3],
[0, -1, -2],
[-2, -3, 2],
]:
op = max_and_argmax
f = function(
[x],
[
op(x, axis=axis, keepdims=True)[0],
self.makeKeepDims_local(
x, op(x, axis=axis, keepdims=False)[0], axis
),
],
mode=mode,
)
ans1, ans2 = f(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
f = function(
[x],
[
op(x, axis=axis, keepdims=True)[1],
self.makeKeepDims_local(
x, op(x, axis=axis, keepdims=False)[1], axis
),
],
mode=mode,
)
ans1, ans2 = f(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
[0, -2, 2],
],
)
@pytest.mark.parametrize(
"op",
[argmax, argmin],
)
def test_single_or_any_axis(self, axis, op):
# the following ops can be specified with either a single axis or every
# axis:
for op in [argmax, argmin]:
for axis in [
0,
1,
2,
[0],
[1],
[2],
None,
[0, 1, 2],
[-1],
[-2],
[-3],
[-1, -2, -3],
[0, -2, 2],
]:
f = function(
[x],
[
op(x, axis=axis, keepdims=True),
self.makeKeepDims_local(
x, op(x, axis=axis, keepdims=False), axis
),
],
mode=mode,
)
ans1, ans2 = f(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
x = dtensor3()
a = np.random.random((3, 2, 4))
# We don't need to test all opt and C code, as this is tested
# by the ops tests.
mode = Mode(optimizer="fast_compile", linker="py")
# the following ops can be specified with a freely specified axis
# parameter
for op in [
f = function(
[x],
[
op(x, axis=axis, keepdims=True),
self.makeKeepDims_local(x, op(x, axis=axis, keepdims=False), axis),
],
mode=mode,
)
ans1, ans2 = f(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
@pytest.mark.parametrize(
"axis",
[
0,
1,
2,
[0],
[1],
[2],
None,
[0, 1],
[1, 2],
[0, 1, 2],
[-1],
[-2],
[-3],
[-1, -2],
[-1, -2, -3],
[0, -2, 2],
],
)
@pytest.mark.parametrize(
"op",
[
at_sum,
prod,
mean,
......@@ -147,48 +178,34 @@ class TestKeepDims:
at_any,
at_max,
at_min,
]:
for axis in [
0,
1,
2,
[0],
[1],
[2],
None,
[0, 1],
[1, 2],
[0, 1, 2],
[-1],
[-2],
[-3],
[-1, -2],
[-1, -2, -3],
[0, -2, 2],
]:
f = function(
[x],
[
op(x, axis=axis, keepdims=True),
self.makeKeepDims_local(
x, op(x, axis=axis, keepdims=False), axis
),
],
mode=mode,
)
ans1, ans2 = f(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
def test_norm(self):
],
)
def test_free_axis(self, axis, op):
x = dtensor3()
a = np.random.rand(3, 2, 4).astype(aesara.config.floatX)
a = np.random.random((3, 2, 4))
# We don't need to test all opt and C code, as this is tested
# by the ops tests.
mode = Mode(optimizer="fast_compile", linker="py")
for axis in [
# the following ops can be specified with a freely specified axis
# parameter
f = function(
[x],
[
op(x, axis=axis, keepdims=True),
self.makeKeepDims_local(x, op(x, axis=axis, keepdims=False), axis),
],
mode=mode,
)
ans1, ans2 = f(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
@pytest.mark.parametrize(
"axis",
[
0,
1,
2,
......@@ -205,34 +222,40 @@ class TestKeepDims:
[-1, -2],
[-1, -2, -3],
[0, -2, 2],
]:
f = function(
[x],
[
x.norm(L=1, axis=axis, keepdims=True),
self.makeKeepDims_local(
x, x.norm(L=1, axis=axis, keepdims=False), axis
),
],
mode=mode,
)
ans1, ans2 = f(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
g = function(
[x],
[
x.norm(L=2, axis=axis, keepdims=True),
self.makeKeepDims_local(
x, x.norm(L=2, axis=axis, keepdims=False), axis
),
],
mode=mode,
)
ans1, ans2 = g(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
],
)
def test_norm(self, axis):
x = dtensor3()
a = np.random.random((3, 2, 4)).astype(aesara.config.floatX)
mode = Mode(optimizer="fast_compile", linker="py")
f = function(
[x],
[
x.norm(L=1, axis=axis, keepdims=True),
self.makeKeepDims_local(
x, x.norm(L=1, axis=axis, keepdims=False), axis
),
],
mode=mode,
)
ans1, ans2 = f(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
g = function(
[x],
[
x.norm(L=2, axis=axis, keepdims=True),
self.makeKeepDims_local(
x, x.norm(L=2, axis=axis, keepdims=False), axis
),
],
mode=mode,
)
ans1, ans2 = g(a)
assert np.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
......@@ -20,18 +20,19 @@ from aesara.tensor.type import ivector, lscalar, matrix
def gen_data():
rng = np.random.default_rng(249820)
# generate the dataset
train_set = (
np.asarray(np.random.rand(10000, 784), dtype="float32"),
np.asarray(np.random.rand(10000) * 10, dtype="int64"),
np.asarray(rng.random((10000, 784)), dtype="float32"),
np.asarray(rng.random((10000,)) * 10, dtype="int64"),
)
valid_set = (
np.asarray(np.random.rand(10000, 784), dtype="float32"),
np.asarray(np.random.rand(10000) * 10, dtype="int64"),
np.asarray(rng.random((10000, 784)), dtype="float32"),
np.asarray(rng.random((10000,)) * 10, dtype="int64"),
)
test_set = (
np.asarray(np.random.rand(10000, 784), dtype="float32"),
np.asarray(np.random.rand(10000) * 10, dtype="int64"),
np.asarray(rng.random((10000, 784)), dtype="float32"),
np.asarray(rng.random((10000,)) * 10, dtype="int64"),
)
def shared_dataset(data_xy):
......
......@@ -53,7 +53,7 @@ class TestMinMax:
)
def test_optimization_max(self):
data = np.asarray(np.random.rand(2, 3), dtype=config.floatX)
data = np.asarray(np.random.random((2, 3)), dtype=config.floatX)
n = matrix()
for axis in [0, 1, -1]:
......@@ -86,7 +86,7 @@ class TestMinMax:
f(data)
def test_optimization_min(self):
data = np.asarray(np.random.rand(2, 3), dtype=config.floatX)
data = np.asarray(np.random.random((2, 3)), dtype=config.floatX)
n = matrix()
for axis in [0, 1, -1]:
......@@ -209,7 +209,7 @@ def test_local_dimshuffle_subtensor():
topo = f.maker.fgraph.toposort()
assert not all(isinstance(x, DimShuffle) for x in topo)
assert f(np.random.rand(5, 1, 4, 1), 2).shape == (4,)
assert f(np.random.random((5, 1, 4, 1)), 2).shape == (4,)
# Test a corner case that had Aesara return a bug.
x = dtensor4("x")
......
......@@ -69,8 +69,8 @@ if mode_opt == "FAST_COMPILE":
mode_opt = get_mode(mode_opt)
y = create_aesara_param(np.random.randint(0, 4, size=(2,)))
z = create_aesara_param(np.random.randint(0, 4, size=(2, 2)))
y = create_aesara_param(np.random.default_rng().integers(0, 4, size=(2,)))
z = create_aesara_param(np.random.default_rng().integers(0, 4, size=(2, 2)))
@pytest.mark.parametrize(
......
......@@ -9,7 +9,7 @@ from aesara.tensor.utils import hash_from_ndarray, shape_of_variables
def test_hash_from_ndarray():
hashes = []
rng = np.random.rand(5, 5)
x = np.random.random((5, 5))
for data in [
-2,
......@@ -29,12 +29,12 @@ def test_hash_from_ndarray():
np.zeros((5, 5), dtype="uint32"),
np.zeros((5, 5), dtype="int32"),
# Test slice
rng,
rng[1:],
rng[:4],
rng[1:3],
rng[::2],
rng[::-1],
x,
x[1:],
x[:4],
x[1:3],
x[::2],
x[::-1],
]:
data = np.asarray(data)
hashes.append(hash_from_ndarray(data))
......@@ -42,11 +42,11 @@ def test_hash_from_ndarray():
assert len(set(hashes)) == len(hashes)
# test that different type of views and their copy give the same hash
assert hash_from_ndarray(rng[1:]) == hash_from_ndarray(rng[1:].copy())
assert hash_from_ndarray(rng[1:3]) == hash_from_ndarray(rng[1:3].copy())
assert hash_from_ndarray(rng[:4]) == hash_from_ndarray(rng[:4].copy())
assert hash_from_ndarray(rng[::2]) == hash_from_ndarray(rng[::2].copy())
assert hash_from_ndarray(rng[::-1]) == hash_from_ndarray(rng[::-1].copy())
assert hash_from_ndarray(x[1:]) == hash_from_ndarray(x[1:].copy())
assert hash_from_ndarray(x[1:3]) == hash_from_ndarray(x[1:3].copy())
assert hash_from_ndarray(x[:4]) == hash_from_ndarray(x[:4].copy())
assert hash_from_ndarray(x[::2]) == hash_from_ndarray(x[::2].copy())
assert hash_from_ndarray(x[::-1]) == hash_from_ndarray(x[::-1].copy())
class TestShapeOfVariables:
......
......@@ -93,7 +93,7 @@ def test_empty_list_indexing():
def test_copy():
x = dmatrix("x")
data = np.random.rand(5, 5)
data = np.random.random((5, 5))
y = x.copy(name="y")
f = aesara.function([x], y)
assert_equal(f(data), data)
......
import numpy.random
import numpy as np
import aesara
from aesara.tensor import as_tensor_variable
......@@ -6,25 +6,22 @@ from aesara.tensor.xlogx import xlogx, xlogy0
from tests import unittest_tools as utt
class TestXlogX:
def test_basic(self):
x = as_tensor_variable([1, 0])
y = xlogx(x)
f = aesara.function([], [y])
assert numpy.all(f() == numpy.asarray([0, 0.0]))
def test_xlogx():
x = as_tensor_variable([1, 0])
y = xlogx(x)
f = aesara.function([], y)
assert np.array_equal(f(), np.asarray([0, 0.0]))
# class Dummy(object):
# def make_node(self, a):
# return [xlogx(a)[:,2]]
utt.verify_grad(xlogx, [numpy.random.rand(3, 4)])
rng = np.random.default_rng(24982)
utt.verify_grad(xlogx, [rng.random((3, 4))])
class TestXlogY0:
def test_basic(self):
utt.verify_grad(xlogy0, [numpy.random.rand(3, 4), numpy.random.rand(3, 4)])
def test_xlogy0():
x = as_tensor_variable([1, 0])
y = as_tensor_variable([1, 0])
z = xlogy0(x, y)
f = aesara.function([], z)
assert np.array_equal(f(), np.asarray([0, 0.0]))
x = as_tensor_variable([1, 0])
y = as_tensor_variable([1, 0])
z = xlogy0(x, y)
f = aesara.function([], z)
assert numpy.all(f() == numpy.asarray([0, 0.0]))
rng = np.random.default_rng(24982)
utt.verify_grad(xlogy0, [rng.random((3, 4)), rng.random((3, 4))])
......@@ -31,7 +31,7 @@ from aesara.typed_list.type import TypedListType
def rand_ranged_matrix(minimum, maximum, shape):
return np.asarray(
np.random.rand(*shape) * (maximum - minimum) + minimum,
np.random.random(shape) * (maximum - minimum) + minimum,
dtype=aesara.config.floatX,
)
......@@ -42,8 +42,8 @@ def random_lil(shape, dtype, nnz):
huge = 2**30
for k in range(nnz):
# set non-zeros in random locations (row x, col y)
idx = np.random.randint(1, huge + 1, size=2) % shape
value = np.random.rand()
idx = np.random.default_rng().integers(1, huge + 1, size=2) % shape
value = np.random.random()
# if dtype *int*, value will always be zeros!
if dtype in integer_dtypes:
value = int(value * 100)
......@@ -575,10 +575,10 @@ class TestMakeList:
x = tensor3()
y = tensor3()
A = np.cast[aesara.config.floatX](np.random.rand(5, 3))
B = np.cast[aesara.config.floatX](np.random.rand(7, 2))
X = np.cast[aesara.config.floatX](np.random.rand(5, 6, 1))
Y = np.cast[aesara.config.floatX](np.random.rand(1, 9, 3))
A = np.cast[aesara.config.floatX](np.random.random((5, 3)))
B = np.cast[aesara.config.floatX](np.random.random((7, 2)))
X = np.cast[aesara.config.floatX](np.random.random((5, 6, 1)))
Y = np.cast[aesara.config.floatX](np.random.random((1, 9, 3)))
make_list((3.0, 4.0))
c = make_list((a, b))
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论