提交 8044a412 authored 作者: kc611's avatar kc611 提交者: Brandon T. Willard

Make tests.unittest_tools use NumPy Generators

上级 e0eea331
......@@ -16,9 +16,6 @@ from tests import unittest_tools as utt
class TestScipy:
def setup_method(self):
utt.seed_rng()
def test_scipy_paper_example1(self):
a = vector("a") # declare variable
b = a + a ** 10 # build expression
......@@ -28,11 +25,11 @@ class TestScipy:
@config.change_flags(floatX="float64")
def test_scipy_paper_example2(self):
""" This just sees if things compile well and if they run """
rng = numpy.random
rng = numpy.random.default_rng(utt.fetch_seed())
x = matrix()
y = vector()
w = shared(rng.randn(100))
w = shared(rng.standard_normal((100)))
b = shared(np.zeros(()))
# Construct Aesara expression graph
......@@ -52,7 +49,7 @@ class TestScipy:
N = 4
feats = 100
D = (rng.randn(N, feats), rng.randint(size=4, low=0, high=2))
D = (rng.standard_normal((N, feats)), rng.integers(size=4, low=0, high=2))
training_steps = 10
for i in range(training_steps):
pred, err = train(D[0], D[1])
......@@ -384,9 +384,9 @@ class TestGrad:
def output(x):
return x * x
rng = np.random.RandomState([2012, 8, 28])
rng = np.random.default_rng([2012, 8, 28])
vx = rng.randn(2)
vx = rng.standard_normal((2))
utt.verify_grad(output, [vx])
......@@ -396,10 +396,10 @@ class TestGrad:
def cost(x, A):
return dot(x, dot(A, x))
rng = np.random.RandomState([2012, 8, 28])
rng = np.random.default_rng([2012, 8, 28])
vx = rng.randn(2)
vA = rng.randn(2, 2)
vx = rng.standard_normal((2))
vA = rng.standard_normal((2, 2))
utt.verify_grad(cost, [vx, vA])
......@@ -409,10 +409,10 @@ class TestGrad:
def output(x, A):
return dot(x * x, A)
rng = np.random.RandomState([2012, 8, 28])
rng = np.random.default_rng([2012, 8, 28])
vx = rng.randn(2)
vA = rng.randn(2, 2)
vx = rng.standard_normal((2))
vA = rng.standard_normal((2, 2))
utt.verify_grad(output, [vx, vA])
......@@ -422,10 +422,10 @@ class TestGrad:
def cost(x, A):
return dot(x * x, dot(A, x))
rng = np.random.RandomState([2012, 8, 28])
rng = np.random.default_rng([2012, 8, 28])
vx = rng.randn(2)
vA = rng.randn(2, 2)
vx = rng.standard_normal((2))
vA = rng.standard_normal((2, 2))
utt.verify_grad(cost, [vx, vA])
......@@ -436,10 +436,10 @@ class TestGrad:
orig_cost = dot(x, dot(A, x))
return grad(orig_cost, x)
rng = np.random.RandomState([2012, 8, 28])
rng = np.random.default_rng([2012, 8, 28])
vx = rng.randn(2)
vA = rng.randn(2, 2)
vx = rng.standard_normal((2))
vA = rng.standard_normal((2, 2))
utt.verify_grad(output, [vx, vA])
......@@ -450,10 +450,10 @@ class TestGrad:
orig_cost = dot(x * x, dot(A, x))
return grad(orig_cost, x)
rng = np.random.RandomState([2012, 8, 28])
rng = np.random.default_rng([2012, 8, 28])
vx = rng.randn(2)
vA = rng.randn(2, 2)
vx = rng.standard_normal((2))
vA = rng.standard_normal((2, 2))
utt.verify_grad(output, [vx, vA])
......@@ -480,14 +480,14 @@ class TestGrad:
m = 5
d = 3
n = 4
rng = np.random.RandomState([2012, 9, 5])
rng = np.random.default_rng([2012, 9, 5])
int_type = imatrix().dtype
float_type = "float64"
X = np.cast[int_type](rng.randn(m, d) * 127.0)
W = np.cast[W.dtype](rng.randn(d, n))
b = np.cast[b.dtype](rng.randn(n))
X = np.cast[int_type](rng.standard_normal((m, d)) * 127.0)
W = np.cast[W.dtype](rng.standard_normal((d, n)))
b = np.cast[b.dtype](rng.standard_normal((n)))
int_result = int_func(X, W, b)
float_result = float_func(np.cast[float_type](X), W, b)
......@@ -511,8 +511,8 @@ class TestGrad:
# we still need to pass in x because it determines the shape of
# the output
f = aesara.function([x], g)
rng = np.random.RandomState([2012, 9, 5])
x = np.cast[x.dtype](rng.randn(3))
rng = np.random.default_rng([2012, 9, 5])
x = np.cast[x.dtype](rng.standard_normal((3)))
g = f(x)
assert np.allclose(g, np.ones(x.shape, dtype=x.dtype))
......@@ -632,8 +632,8 @@ def test_known_grads():
inputs = [coeffs, t, x]
rng = np.random.RandomState([2012, 11, 15])
values = [rng.randn(10), rng.randint(10), rng.randn()]
rng = np.random.default_rng([2012, 11, 15])
values = [rng.standard_normal((10)), rng.integers(10), rng.standard_normal()]
values = [np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values)]
true_grads = grad(cost, inputs, disconnected_inputs="ignore")
......@@ -732,8 +732,8 @@ def test_subgraph_grad():
x = fvector("x")
t = fvector("t")
w1 = aesara.shared(np.random.randn(3, 4))
w2 = aesara.shared(np.random.randn(4, 2))
w1 = aesara.shared(np.random.standard_normal((3, 4)))
w2 = aesara.shared(np.random.standard_normal((4, 2)))
a1 = tanh(dot(x, w1))
a2 = tanh(dot(a1, w2))
cost2 = sqr(a2 - t).sum()
......@@ -745,8 +745,8 @@ def test_subgraph_grad():
grad_ends = [[a1], [x]]
inputs = [t, x]
rng = np.random.RandomState([2012, 11, 15])
values = [rng.randn(2), rng.randn(3)]
rng = np.random.default_rng([2012, 11, 15])
values = [rng.standard_normal((2)), rng.standard_normal((3))]
values = [np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values)]
wrt = [w2, w1]
......@@ -772,8 +772,7 @@ def test_subgraph_grad():
class TestConsiderConstant:
def setup_method(self):
utt.seed_rng()
self.rng = np.random.RandomState(seed=utt.fetch_seed())
self.rng = np.random.default_rng(seed=utt.fetch_seed())
def test_op_removed(self):
x = matrix("x")
......@@ -784,7 +783,7 @@ class TestConsiderConstant:
assert consider_constant_ not in [node.op for node in f.maker.fgraph.toposort()]
def test_grad(self):
a = np.asarray(self.rng.randn(5, 5), dtype=config.floatX)
a = np.asarray(self.rng.standard_normal((5, 5)), dtype=config.floatX)
x = matrix("x")
......@@ -807,8 +806,7 @@ class TestConsiderConstant:
class TestZeroGrad:
def setup_method(self):
utt.seed_rng()
self.rng = np.random.RandomState(seed=utt.fetch_seed())
self.rng = np.random.default_rng(seed=utt.fetch_seed())
def test_op_removed(self):
x = matrix("x")
......@@ -819,7 +817,7 @@ class TestZeroGrad:
assert zero_grad_ not in [node.op for node in f.maker.fgraph.toposort()]
def test_grad(self):
a = np.asarray(self.rng.randn(5, 5), dtype=config.floatX)
a = np.asarray(self.rng.standard_normal((5, 5)), dtype=config.floatX)
x = matrix("x")
......@@ -847,16 +845,15 @@ class TestZeroGrad:
rop = Rop(y, x, v)
f = aesara.function([x, v], rop, on_unused_input="ignore")
a = np.asarray(self.rng.randn(5), dtype=config.floatX)
u = np.asarray(self.rng.randn(5), dtype=config.floatX)
a = np.asarray(self.rng.standard_normal((5)), dtype=config.floatX)
u = np.asarray(self.rng.standard_normal((5)), dtype=config.floatX)
assert np.count_nonzero(f(a, u)) == 0
class TestDisconnectedGrad:
def setup_method(self):
utt.seed_rng()
self.rng = np.random.RandomState(seed=utt.fetch_seed())
self.rng = np.random.default_rng(seed=utt.fetch_seed())
def test_op_removed(self):
x = matrix("x")
......@@ -867,7 +864,7 @@ class TestDisconnectedGrad:
assert disconnected_grad_ not in [node.op for node in f.maker.fgraph.toposort()]
def test_grad(self):
a = np.asarray(self.rng.randn(5, 5), dtype=config.floatX)
a = np.asarray(self.rng.standard_normal((5, 5)), dtype=config.floatX)
x = matrix("x")
......@@ -896,7 +893,7 @@ class TestDisconnectedGrad:
def test_disconnected_paths(self):
# Test that taking a gradient through a disconnected
# path raises an exception
a = np.asarray(self.rng.randn(5, 5), dtype=config.floatX)
a = np.asarray(self.rng.standard_normal((5, 5)), dtype=config.floatX)
x = matrix("x")
......@@ -955,9 +952,9 @@ def test_grad_scale():
@config.change_flags(compute_test_value="off")
def test_undefined_grad_opt():
# Make sure that undefined grads get removed in the optimized graph.
random = MRG_RandomStream(np.random.randint(1, 2147462579))
random = MRG_RandomStream(np.random.default_rng().integers(1, 2147462579))
pvals = aesara.shared(np.random.rand(10, 20).astype(config.floatX))
pvals = aesara.shared(np.random.random((10, 20)).astype(config.floatX))
pvals = pvals / pvals.sum(axis=1)
pvals = zero_grad(pvals)
......@@ -977,7 +974,7 @@ def test_undefined_grad_opt():
def test_jacobian_vector():
x = vector()
y = x * 2
rng = np.random.RandomState(seed=utt.fetch_seed())
rng = np.random.default_rng(seed=utt.fetch_seed())
# test when the jacobian is called with a tensor as wrt
Jx = jacobian(y, x)
......@@ -1018,7 +1015,7 @@ def test_jacobian_vector():
def test_jacobian_matrix():
x = matrix()
y = 2 * x.sum(axis=0)
rng = np.random.RandomState(seed=utt.fetch_seed())
rng = np.random.default_rng(seed=utt.fetch_seed())
ev = np.zeros((10, 10, 10))
for dx in range(10):
ev[dx, :, dx] = 2.0
......@@ -1063,7 +1060,7 @@ def test_jacobian_matrix():
def test_jacobian_scalar():
x = scalar()
y = x * 2
rng = np.random.RandomState(seed=utt.fetch_seed())
rng = np.random.default_rng(seed=utt.fetch_seed())
# test when the jacobian is called with a tensor as wrt
Jx = jacobian(y, x)
......
......@@ -59,10 +59,10 @@ class TestIfelse(utt.OptimizationTestMixin):
c = iscalar("c")
f = function([c, x, y], ifelse(c, x, y), mode=self.mode)
self.assertFunctionContains1(f, self.get_ifelse(1))
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
xlen = rng.randint(200)
ylen = rng.randint(200)
xlen = rng.integers(200)
ylen = rng.integers(200)
vx = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy = np.asarray(rng.uniform(size=(ylen,)), self.dtype)
......@@ -91,10 +91,10 @@ class TestIfelse(utt.OptimizationTestMixin):
ifnode = [n for n in f.maker.fgraph.toposort() if isinstance(n.op, IfElse)]
assert len(ifnode) == 1
assert not ifnode[0].op.as_view
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
xlen = rng.randint(200)
ylen = rng.randint(200)
xlen = rng.integers(200)
ylen = rng.integers(200)
vx = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy = np.asarray(rng.uniform(size=(ylen,)), self.dtype)
......@@ -110,10 +110,10 @@ class TestIfelse(utt.OptimizationTestMixin):
c = iscalar("c")
f = function([c, x1, x2, y1, y2], ifelse(c, (x1, x2), (y1, y2)), mode=self.mode)
self.assertFunctionContains1(f, self.get_ifelse(2))
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
xlen = rng.randint(200)
ylen = rng.randint(200)
xlen = rng.integers(200)
ylen = rng.integers(200)
vx1 = np.asarray(rng.uniform(size=(xlen,)) * 3, "int32")
vx2 = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
......@@ -153,10 +153,10 @@ class TestIfelse(utt.OptimizationTestMixin):
# Only 2 of the 3 ifelse ops are moved to the GPU.
# The one that stays on the CPU is for the shape.
self.assertFunctionContains(f, self.get_ifelse(1), min=2, max=3)
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
xlen = rng.randint(200)
ylen = rng.randint(200)
xlen = rng.integers(200)
ylen = rng.integers(200)
vx = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy = np.asarray(rng.uniform(size=(ylen,)), self.dtype)
......@@ -195,12 +195,12 @@ class TestIfelse(utt.OptimizationTestMixin):
ifnode = [x for x in f.maker.fgraph.toposort() if isinstance(x.op, IfElse)][0]
assert len(ifnode.outputs) == 2
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
x1len = rng.randint(200)
x2len = rng.randint(200)
y1len = rng.randint(200)
y2len = rng.randint(200)
x1len = rng.integers(200)
x2len = rng.integers(200)
y1len = rng.integers(200)
y2len = rng.integers(200)
vx1 = np.asarray(rng.uniform(size=(x1len,)), self.dtype)
vx2 = np.asarray(rng.uniform(size=(x2len,)), self.dtype)
......@@ -225,9 +225,9 @@ class TestIfelse(utt.OptimizationTestMixin):
grads = aesara.grad(z[0].sum() + z[1].sum(), [x1, x2, y1, y2])
f = function([c, x1, x2, y1, y2], grads)
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
lens = [rng.randint(200) for i in range(4)]
lens = [rng.integers(200) for i in range(4)]
values = [
np.asarray(rng.uniform(size=(l,)), aesara.config.floatX) for l in lens
]
......@@ -286,8 +286,8 @@ class TestIfelse(utt.OptimizationTestMixin):
f(i1, i2, i3, i4)
def test_dtype_mismatch(self):
rng = np.random.RandomState(utt.fetch_seed())
data = rng.rand(5).astype(self.dtype)
rng = np.random.default_rng(utt.fetch_seed())
data = rng.random((5)).astype(self.dtype)
x = self.shared(data)
y = aet.cast(x * 10, "int8")
cond = iscalar("cond")
......@@ -298,8 +298,8 @@ class TestIfelse(utt.OptimizationTestMixin):
ifelse(cond, y, x)
def test_ndim_mismatch(self):
rng = np.random.RandomState(utt.fetch_seed())
data = rng.rand(5).astype(self.dtype)
rng = np.random.default_rng(utt.fetch_seed())
data = rng.random((5)).astype(self.dtype)
x = self.shared(data)
y = col("y", self.dtype)
cond = iscalar("cond")
......@@ -310,8 +310,8 @@ class TestIfelse(utt.OptimizationTestMixin):
ifelse(cond, y, x)
def test_broadcast_mismatch(self):
rng = np.random.RandomState(utt.fetch_seed())
data = rng.rand(5).astype(self.dtype)
rng = np.random.default_rng(utt.fetch_seed())
data = rng.random((5)).astype(self.dtype)
x = self.shared(data)
# print x.broadcastable
y = row("y", self.dtype)
......@@ -328,8 +328,8 @@ class TestIfelse(utt.OptimizationTestMixin):
import aesara.sparse
rng = np.random.RandomState(utt.fetch_seed())
data = rng.rand(2, 3).astype(self.dtype)
rng = np.random.default_rng(utt.fetch_seed())
data = rng.random((2, 3)).astype(self.dtype)
x = self.shared(data)
y = aesara.sparse.matrix("csc", dtype=self.dtype, name="y")
z = aesara.sparse.matrix("csr", dtype=self.dtype, name="z")
......@@ -400,7 +400,7 @@ class TestIfelse(utt.OptimizationTestMixin):
f = function([x1, x2, y1, y2, w1, w2, c], out, allow_input_downcast=True)
assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
vx1 = rng.uniform()
vx2 = rng.uniform()
vy1 = rng.uniform()
......@@ -426,7 +426,7 @@ class TestIfelse(utt.OptimizationTestMixin):
f = function([x1, y1, y2, c], out, allow_input_downcast=True)
assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
vx1 = rng.uniform()
vy1 = rng.uniform()
vy2 = rng.uniform()
......@@ -449,7 +449,7 @@ class TestIfelse(utt.OptimizationTestMixin):
f = function([x1, x2, y1, y2, w1, w2, c], out, allow_input_downcast=True)
assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
vx1 = rng.uniform()
vx2 = rng.uniform()
vy1 = rng.uniform()
......@@ -488,7 +488,7 @@ class TestIfelse(utt.OptimizationTestMixin):
len([x for x in f.maker.fgraph.toposort() if isinstance(x.op, IfElse)]) == 1
)
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
vx1 = rng.uniform()
vx2 = rng.uniform()
vy1 = rng.uniform()
......@@ -507,8 +507,8 @@ class TestIfelse(utt.OptimizationTestMixin):
aesara.grad(ifelse(0, x, x), x)
def test_grad_int_value(self):
w = aesara.shared(np.random.rand(10))
b = aesara.shared(np.random.rand())
w = aesara.shared(np.random.random((10)))
b = aesara.shared(np.random.random())
params = [w, b]
x = vector()
......
......@@ -66,16 +66,15 @@ class RopLopChecker:
"""
def setup_method(self):
utt.seed_rng()
# Using vectors makes things a lot simpler for generating the same
# computations using scan
self.x = vector("x")
self.v = vector("v")
self.rng = np.random.RandomState(utt.fetch_seed())
self.in_shape = (5 + self.rng.randint(3),)
self.rng = np.random.default_rng(utt.fetch_seed())
self.in_shape = (5 + self.rng.integers(3),)
self.mx = matrix("mx")
self.mv = matrix("mv")
self.mat_in_shape = (5 + self.rng.randint(3), 5 + self.rng.randint(3))
self.mat_in_shape = (5 + self.rng.integers(3), 5 + self.rng.integers(3))
def check_nondiff_rop(self, y):
"""
......@@ -247,7 +246,7 @@ class TestRopLop(RopLopChecker):
@pytest.mark.slow
def test_downsample(self):
rng = np.random.RandomState(utt.fetch_seed())
rng = np.random.default_rng(utt.fetch_seed())
# ws, shp
examples = (
((2,), (16,)),
......@@ -278,8 +277,8 @@ class TestRopLop(RopLopChecker):
for example, ignore_border in itertools.product(examples, [True, False]):
(ws, shp) = example
vx = rng.rand(*shp)
vex = rng.rand(*shp)
vx = rng.random(shp)
vex = rng.random(shp)
x = aesara.shared(vx)
ex = aesara.shared(vex)
......
......@@ -25,8 +25,9 @@ def fetch_seed(pseed=None):
If config.unittests__rseed is set to "random", it will seed the rng with
None, which is equivalent to seeding with a random seed.
Useful for seeding RandomState objects.
Useful for seeding RandomState or Generator objects.
>>> rng = np.random.RandomState(unittest_tools.fetch_seed())
>>> rng = np.random.default_rng(unittest_tools.fetch_seed())
"""
seed = pseed or config.unittests__rseed
......@@ -51,21 +52,21 @@ def fetch_seed(pseed=None):
return seed
def seed_rng(pseed=None):
"""
Seeds numpy's random number generator with the value returned by fetch_seed.
Usage: unittest_tools.seed_rng()
"""
# def seed_rng(pseed=None):
# """
# Seeds numpy's random number generator with the value returned by fetch_seed.
# Usage: unittest_tools.seed_rng()
# """
seed = fetch_seed(pseed)
if pseed and pseed != seed:
print(
"Warning: using seed given by config.unittests__rseed=%i"
"instead of seed %i given as parameter" % (seed, pseed),
file=sys.stderr,
)
np.random.seed(seed)
return seed
# seed = fetch_seed(pseed)
# if pseed and pseed != seed:
# print(
# "Warning: using seed given by config.unittests__rseed=%i"
# "instead of seed %i given as parameter" % (seed, pseed),
# file=sys.stderr,
# )
# np.random.seed(seed)
# return seed
def verify_grad(op, pt, n_tests=2, rng=None, *args, **kwargs):
......@@ -74,8 +75,14 @@ def verify_grad(op, pt, n_tests=2, rng=None, *args, **kwargs):
Takes care of seeding the random number generator if None is given
"""
if rng is None:
seed_rng()
rng = np.random
rng = np.random.default_rng(fetch_seed())
# TODO: Needed to increase tolerance for certain tests when migrating to
# Generators from RandomStates. Caused flaky test failures. Needs further investigation
if "rel_tol" not in kwargs:
kwargs["rel_tol"] = 0.05
if "abs_tol" not in kwargs:
kwargs["abs_tol"] = 0.05
orig_verify_grad(op, pt, n_tests, rng, *args, **kwargs)
......@@ -183,7 +190,6 @@ class OpContractTestMixin:
class InferShapeTester:
def setup_method(self):
seed_rng()
# Take into account any mode that may be defined in a child class
# and it can be None
mode = getattr(self, "mode", None)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论