提交 5611cf71 authored 作者: kc611's avatar kc611 提交者: Brandon T. Willard

Refactor tests.gpuarray to use NumPy Generator

上级 39c11472
...@@ -612,7 +612,6 @@ class BaseTestDnnConv: ...@@ -612,7 +612,6 @@ class BaseTestDnnConv:
return None, None return None, None
def __init__(self): def __init__(self):
utt.seed_rng(1234)
self.dtype_configs = cudnn.get_supported_dtype_configs( self.dtype_configs = cudnn.get_supported_dtype_configs(
check_dtype_config_support check_dtype_config_support
) )
......
...@@ -35,14 +35,13 @@ from tests.tensor.test_basic import ( ...@@ -35,14 +35,13 @@ from tests.tensor.test_basic import (
TestJoinAndSplit, TestJoinAndSplit,
TestReshape, TestReshape,
) )
from tests.tensor.utils import rand, safe_make_node from tests.tensor.utils import random, safe_make_node
pygpu = pytest.importorskip("pygpu") pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray gpuarray = pygpu.gpuarray
utt.seed_rng() rng = np.random.default_rng(seed=utt.fetch_seed())
rng = np.random.RandomState(seed=utt.fetch_seed())
def inplace_func( def inplace_func(
...@@ -79,7 +78,7 @@ def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs): ...@@ -79,7 +78,7 @@ def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
def rand_gpuarray(*shape, **kwargs): def rand_gpuarray(*shape, **kwargs):
r = rng.rand(*shape) * 2 - 1 r = rng.random(shape) * 2 - 1
dtype = kwargs.pop("dtype", aesara.config.floatX) dtype = kwargs.pop("dtype", aesara.config.floatX)
cls = kwargs.pop("cls", None) cls = kwargs.pop("cls", None)
if len(kwargs) != 0: if len(kwargs) != 0:
...@@ -219,7 +218,7 @@ def test_transfer_cpu_gpu(): ...@@ -219,7 +218,7 @@ def test_transfer_cpu_gpu():
a = fmatrix("a") a = fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g") g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 4), dtype="float32") av = np.asarray(rng.random((5, 4)), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name)) gv = gpuarray.array(av, context=get_context(test_ctx_name))
f = aesara.function([a], GpuFromHost(test_ctx_name)(a)) f = aesara.function([a], GpuFromHost(test_ctx_name)(a))
...@@ -236,7 +235,7 @@ def test_transfer_gpu_gpu(): ...@@ -236,7 +235,7 @@ def test_transfer_gpu_gpu():
dtype="float32", broadcastable=(False, False), context_name=test_ctx_name dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
)() )()
av = np.asarray(rng.rand(5, 4), dtype="float32") av = np.asarray(rng.random((5, 4)), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name)) gv = gpuarray.array(av, context=get_context(test_ctx_name))
mode = mode_with_gpu.excluding( mode = mode_with_gpu.excluding(
"cut_gpua_host_transfers", "local_cut_gpua_host_gpua" "cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
...@@ -256,7 +255,7 @@ def test_transfer_strided(): ...@@ -256,7 +255,7 @@ def test_transfer_strided():
a = fmatrix("a") a = fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g") g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 8), dtype="float32") av = np.asarray(rng.random((5, 8)), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name)) gv = gpuarray.array(av, context=get_context(test_ctx_name))
av = av[:, ::2] av = av[:, ::2]
...@@ -283,14 +282,14 @@ TestGpuAlloc = makeTester( ...@@ -283,14 +282,14 @@ TestGpuAlloc = makeTester(
op=lambda *args: alloc(*args) + 1, op=lambda *args: alloc(*args) + 1,
gpu_op=GpuAlloc(test_ctx_name), gpu_op=GpuAlloc(test_ctx_name),
cases=dict( cases=dict(
correct01=(rand(), np.int32(7)), correct01=(random(), np.int32(7)),
# just gives a DeepCopyOp with possibly wrong results on the CPU # just gives a DeepCopyOp with possibly wrong results on the CPU
# correct01_bcast=(rand(1), np.int32(7)), # correct01_bcast=(random(1), np.int32(7)),
correct02=(rand(), np.int32(4), np.int32(7)), correct02=(random(), np.int32(4), np.int32(7)),
correct12=(rand(7), np.int32(4), np.int32(7)), correct12=(random(7), np.int32(4), np.int32(7)),
correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)), correct13=(random(7), np.int32(2), np.int32(4), np.int32(7)),
correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)), correct23=(random(4, 7), np.int32(2), np.int32(4), np.int32(7)),
bad_shape12=(rand(7), np.int32(7), np.int32(5)), bad_shape12=(random(7), np.int32(7), np.int32(5)),
), ),
) )
...@@ -357,7 +356,7 @@ def test_shape(): ...@@ -357,7 +356,7 @@ def test_shape():
def test_gpu_contiguous(): def test_gpu_contiguous():
a = fmatrix("a") a = fmatrix("a")
i = iscalar("i") i = iscalar("i")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32") a_val = np.asarray(np.random.random((4, 5)), dtype="float32")
# The reshape is needed otherwise we make the subtensor on the CPU # The reshape is needed otherwise we make the subtensor on the CPU
# to transfer less data. # to transfer less data.
f = aesara.function( f = aesara.function(
...@@ -390,7 +389,6 @@ class TestGPUReshape(TestReshape): ...@@ -390,7 +389,6 @@ class TestGPUReshape(TestReshape):
class TestGPUComparison(TestComparison): class TestGPUComparison(TestComparison):
def setup_method(self): def setup_method(self):
utt.seed_rng()
self.mode = mode_with_gpu self.mode = mode_with_gpu
self.shared = gpuarray_shared_constructor self.shared = gpuarray_shared_constructor
self.dtypes = ["float64", "float32"] self.dtypes = ["float64", "float32"]
...@@ -415,8 +413,8 @@ class TestGPUJoinAndSplit(TestJoinAndSplit): ...@@ -415,8 +413,8 @@ class TestGPUJoinAndSplit(TestJoinAndSplit):
def test_gpusplit_opt(self): def test_gpusplit_opt(self):
# Test that we move the node to the GPU # Test that we move the node to the GPU
# Also test float16 computation at the same time. # Also test float16 computation at the same time.
rng = np.random.RandomState(seed=utt.fetch_seed()) rng = np.random.default_rng(seed=utt.fetch_seed())
m = self.shared(rng.rand(4, 6).astype("float16")) m = self.shared(rng.random((4, 6)).astype("float16"))
o = Split(2)(m, 0, [2, 2]) o = Split(2)(m, 0, [2, 2])
assert o[0].dtype == "float16" assert o[0].dtype == "float16"
f = aesara.function([], o, mode=self.mode) f = aesara.function([], o, mode=self.mode)
...@@ -433,9 +431,9 @@ class TestGPUJoinAndSplit(TestJoinAndSplit): ...@@ -433,9 +431,9 @@ class TestGPUJoinAndSplit(TestJoinAndSplit):
def test_gpujoin_gpualloc(): def test_gpujoin_gpualloc():
a = fmatrix("a") a = fmatrix("a")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32") a_val = np.asarray(np.random.random((4, 5)), dtype="float32")
b = fmatrix("b") b = fmatrix("b")
b_val = np.asarray(np.random.rand(3, 5), dtype="float32") b_val = np.asarray(np.random.random((3, 5)), dtype="float32")
f = aesara.function( f = aesara.function(
[a, b], [a, b],
...@@ -514,9 +512,9 @@ def test_hostfromgpu_shape_i(): ...@@ -514,9 +512,9 @@ def test_hostfromgpu_shape_i():
) )
a = fmatrix("a") a = fmatrix("a")
ca = aesara.gpuarray.type.GpuArrayType("float32", (False, False))() ca = aesara.gpuarray.type.GpuArrayType("float32", (False, False))()
av = np.asarray(np.random.rand(5, 4), dtype="float32") av = np.asarray(np.random.random((5, 4)), dtype="float32")
cv = gpuarray.asarray( cv = gpuarray.asarray(
np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name) np.random.random((5, 4)), dtype="float32", context=get_context(test_ctx_name)
) )
f = aesara.function([a], GpuFromHost(test_ctx_name)(a), mode=m) f = aesara.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
...@@ -583,12 +581,11 @@ def test_gpu_tril_triu(): ...@@ -583,12 +581,11 @@ def test_gpu_tril_triu():
assert result.dtype == np.dtype(dtype) assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()]) assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
utt.seed_rng() test_rng = np.random.default_rng(seed=utt.fetch_seed())
test_rng = np.random.RandomState(seed=utt.fetch_seed())
for dtype in ["float64", "float32", "float16"]: for dtype in ["float64", "float32", "float16"]:
# try a big one # try a big one
m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype) m = np.asarray(test_rng.random((5000, 5000)) * 2 - 1, dtype=dtype)
check_l(m, 0) check_l(m, 0)
check_l(m, 1) check_l(m, 1)
check_l(m, -1) check_l(m, -1)
...@@ -597,7 +594,7 @@ def test_gpu_tril_triu(): ...@@ -597,7 +594,7 @@ def test_gpu_tril_triu():
check_u(m, 1) check_u(m, 1)
check_u(m, -1) check_u(m, -1)
m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype) m = np.asarray(test_rng.random((10, 10)) * 2 - 1, dtype=dtype)
check_l(m, 0) check_l(m, 0)
check_l(m, 1) check_l(m, 1)
check_l(m, -1) check_l(m, -1)
...@@ -606,7 +603,7 @@ def test_gpu_tril_triu(): ...@@ -606,7 +603,7 @@ def test_gpu_tril_triu():
check_u(m, 1) check_u(m, 1)
check_u(m, -1) check_u(m, -1)
m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype) m = np.asarray(test_rng.random((10, 5)) * 2 - 1, dtype=dtype)
check_l(m, 0) check_l(m, 0)
check_l(m, 1) check_l(m, 1)
check_l(m, -1) check_l(m, -1)
......
...@@ -17,7 +17,6 @@ from tests.tensor.nnet.test_blocksparse import TestBlockSparseGemvAndOuter ...@@ -17,7 +17,6 @@ from tests.tensor.nnet.test_blocksparse import TestBlockSparseGemvAndOuter
class TestBlockSparseGemvAndOuterGPUarray(TestBlockSparseGemvAndOuter): class TestBlockSparseGemvAndOuterGPUarray(TestBlockSparseGemvAndOuter):
def setup_method(self): def setup_method(self):
utt.seed_rng()
self.mode = mode_with_gpu.excluding("constant_folding") self.mode = mode_with_gpu.excluding("constant_folding")
self.gemv_op = gpu_sparse_block_gemv self.gemv_op = gpu_sparse_block_gemv
self.outer_op = gpu_sparse_block_outer self.outer_op = gpu_sparse_block_outer
......
...@@ -170,7 +170,6 @@ def test_dnn_conv_merge(): ...@@ -170,7 +170,6 @@ def test_dnn_conv_merge():
def test_dnn_conv_inplace(): def test_dnn_conv_inplace():
# This test that we have inplace work correctly even when # This test that we have inplace work correctly even when
# GpuAllocEmpty get merged together. # GpuAllocEmpty get merged together.
utt.seed_rng()
img_shp = [2, 5, 6, 8] img_shp = [2, 5, 6, 8]
kern_shp = [3, 5, 5, 6] kern_shp = [3, 5, 5, 6]
...@@ -312,7 +311,6 @@ def test_dnn_conv3d_mixed_dtype(): ...@@ -312,7 +311,6 @@ def test_dnn_conv3d_mixed_dtype():
def test_pooling(): def test_pooling():
utt.seed_rng()
modes = get_dnn_pool_modes() modes = get_dnn_pool_modes()
...@@ -420,7 +418,6 @@ def test_pooling(): ...@@ -420,7 +418,6 @@ def test_pooling():
# This test will be run with different values of 'mode' # This test will be run with different values of 'mode'
# (see next test below). # (see next test below).
def run_pooling_with_tensor_vars(mode): def run_pooling_with_tensor_vars(mode):
utt.seed_rng()
x = tensor4() x = tensor4()
ws = aesara.shared(np.array([2, 2], dtype="int32")) ws = aesara.shared(np.array([2, 2], dtype="int32"))
...@@ -473,7 +470,6 @@ def test_pooling_with_tensor_vars(): ...@@ -473,7 +470,6 @@ def test_pooling_with_tensor_vars():
@pytest.mark.skipif(dnn.version(raises=False) < 3000, reason=dnn.dnn_available.msg) @pytest.mark.skipif(dnn.version(raises=False) < 3000, reason=dnn.dnn_available.msg)
def test_pooling3d(): def test_pooling3d():
# 3d pooling requires version 3 or newer. # 3d pooling requires version 3 or newer.
utt.seed_rng()
# We force the FAST_RUN as we don't want the reference to run in DebugMode. # We force the FAST_RUN as we don't want the reference to run in DebugMode.
mode_without_gpu_ref = aesara.compile.mode.get_mode("FAST_RUN").excluding( mode_without_gpu_ref = aesara.compile.mode.get_mode("FAST_RUN").excluding(
...@@ -582,7 +578,6 @@ def test_pooling3d(): ...@@ -582,7 +578,6 @@ def test_pooling3d():
def test_pooling_opt(): def test_pooling_opt():
utt.seed_rng()
# 2D pooling # 2D pooling
x = matrix() x = matrix()
...@@ -654,7 +649,6 @@ def test_pooling_opt(): ...@@ -654,7 +649,6 @@ def test_pooling_opt():
def test_pooling_opt_arbitrary_dimensions(): def test_pooling_opt_arbitrary_dimensions():
# test if input with an arbitrary number of non-pooling dimensions # test if input with an arbitrary number of non-pooling dimensions
# is correctly reshaped to run on the GPU # is correctly reshaped to run on the GPU
utt.seed_rng()
modes = get_dnn_pool_modes() modes = get_dnn_pool_modes()
...@@ -1081,7 +1075,6 @@ def test_dnn_conv_border_mode(): ...@@ -1081,7 +1075,6 @@ def test_dnn_conv_border_mode():
def test_dnn_conv_alpha_output_merge(): def test_dnn_conv_alpha_output_merge():
utt.seed_rng()
img = tensor4() img = tensor4()
kern = tensor4() kern = tensor4()
...@@ -1151,7 +1144,6 @@ def test_dnn_conv_alpha_output_merge(): ...@@ -1151,7 +1144,6 @@ def test_dnn_conv_alpha_output_merge():
def test_dnn_conv_grad(): def test_dnn_conv_grad():
utt.seed_rng()
b = 1 b = 1
c = 4 c = 4
...@@ -1261,7 +1253,6 @@ def run_conv_small_batched_vs_multicall(inputs_shape, filters_shape, batch_sub): ...@@ -1261,7 +1253,6 @@ def run_conv_small_batched_vs_multicall(inputs_shape, filters_shape, batch_sub):
batch_size = inputs_shape[0] batch_size = inputs_shape[0]
utt.seed_rng()
inputs_val = np.random.random(inputs_shape).astype("float32") inputs_val = np.random.random(inputs_shape).astype("float32")
filters_val = np.random.random(filters_shape).astype("float32") filters_val = np.random.random(filters_shape).astype("float32")
# Scale down the input values to prevent very large absolute errors # Scale down the input values to prevent very large absolute errors
...@@ -1311,8 +1302,6 @@ def test_batched_conv3d_small(): ...@@ -1311,8 +1302,6 @@ def test_batched_conv3d_small():
def test_conv3d_fwd(): def test_conv3d_fwd():
utt.seed_rng()
def run_conv3d_fwd( def run_conv3d_fwd(
inputs_shape, filters_shape, subsample, dilation, border_mode, conv_mode inputs_shape, filters_shape, subsample, dilation, border_mode, conv_mode
): ):
...@@ -1378,8 +1367,6 @@ def test_conv3d_fwd(): ...@@ -1378,8 +1367,6 @@ def test_conv3d_fwd():
def test_conv3d_bwd(): def test_conv3d_bwd():
utt.seed_rng()
def run_conv3d_bwd( def run_conv3d_bwd(
inputs_shape, filters_shape, subsample, dilation, border_mode, conv_mode inputs_shape, filters_shape, subsample, dilation, border_mode, conv_mode
): ):
...@@ -1819,7 +1806,6 @@ def test_dnn_maxandargmax_opt(): ...@@ -1819,7 +1806,6 @@ def test_dnn_maxandargmax_opt():
def test_dnn_batchnorm_train(): def test_dnn_batchnorm_train():
utt.seed_rng()
for mode in ("per-activation", "spatial"): for mode in ("per-activation", "spatial"):
for vartype in ( for vartype in (
...@@ -2022,7 +2008,6 @@ def test_dnn_batchnorm_train(): ...@@ -2022,7 +2008,6 @@ def test_dnn_batchnorm_train():
def test_dnn_batchnorm_train_without_running_averages(): def test_dnn_batchnorm_train_without_running_averages():
# compile and run batch_normalization_train without running averages # compile and run batch_normalization_train without running averages
utt.seed_rng()
x, scale, bias, dy = ( x, scale, bias, dy = (
tensor4("x"), tensor4("x"),
...@@ -2096,7 +2081,6 @@ def test_dnn_batchnorm_train_without_running_averages(): ...@@ -2096,7 +2081,6 @@ def test_dnn_batchnorm_train_without_running_averages():
def test_without_dnn_batchnorm_train_without_running_averages(): def test_without_dnn_batchnorm_train_without_running_averages():
# compile and run batch_normalization_train without running averages # compile and run batch_normalization_train without running averages
# But disable cudnn and make sure it run on the GPU. # But disable cudnn and make sure it run on the GPU.
utt.seed_rng()
x, scale, bias, dy = ( x, scale, bias, dy = (
tensor4("x"), tensor4("x"),
...@@ -2163,7 +2147,6 @@ def test_without_dnn_batchnorm_train_without_running_averages(): ...@@ -2163,7 +2147,6 @@ def test_without_dnn_batchnorm_train_without_running_averages():
@utt.assertFailure_fast @utt.assertFailure_fast
def test_dnn_batchnorm_train_inplace(): def test_dnn_batchnorm_train_inplace():
# test inplace_running_mean and inplace_running_var # test inplace_running_mean and inplace_running_var
utt.seed_rng()
x, scale, bias = tensor4("x"), tensor4("scale"), tensor4("bias") x, scale, bias = tensor4("x"), tensor4("scale"), tensor4("bias")
data_shape = (5, 10, 30, 25) data_shape = (5, 10, 30, 25)
...@@ -2218,7 +2201,6 @@ def test_dnn_batchnorm_train_inplace(): ...@@ -2218,7 +2201,6 @@ def test_dnn_batchnorm_train_inplace():
def test_batchnorm_inference(): def test_batchnorm_inference():
utt.seed_rng()
for mode in ("per-activation", "spatial"): for mode in ("per-activation", "spatial"):
for vartype in ( for vartype in (
...@@ -2344,7 +2326,6 @@ def test_batchnorm_inference(): ...@@ -2344,7 +2326,6 @@ def test_batchnorm_inference():
@utt.assertFailure_fast @utt.assertFailure_fast
def test_batchnorm_inference_inplace(): def test_batchnorm_inference_inplace():
# test inplace # test inplace
utt.seed_rng()
x, scale, bias, mean, var = ( x, scale, bias, mean, var = (
tensor4(n) for n in ("x", "scale", "bias", "mean", "var") tensor4(n) for n in ("x", "scale", "bias", "mean", "var")
...@@ -2460,7 +2441,6 @@ def test_dnn_batchnorm_valid_and_invalid_axes(): ...@@ -2460,7 +2441,6 @@ def test_dnn_batchnorm_valid_and_invalid_axes():
def test_dnn_rnn_gru(): def test_dnn_rnn_gru():
utt.seed_rng()
# test params # test params
input_dim = 32 input_dim = 32
...@@ -2569,7 +2549,6 @@ def test_dnn_rnn_gru(): ...@@ -2569,7 +2549,6 @@ def test_dnn_rnn_gru():
def test_dnn_rnn_gru_bidi(): def test_dnn_rnn_gru_bidi():
utt.seed_rng()
# test params # test params
input_dim = 32 input_dim = 32
...@@ -2630,7 +2609,6 @@ def test_dnn_rnn_gru_bidi(): ...@@ -2630,7 +2609,6 @@ def test_dnn_rnn_gru_bidi():
def test_dnn_rnn_lstm(): def test_dnn_rnn_lstm():
utt.seed_rng()
# test params # test params
input_dim = 32 input_dim = 32
...@@ -2716,7 +2694,6 @@ def test_dnn_rnn_lstm(): ...@@ -2716,7 +2694,6 @@ def test_dnn_rnn_lstm():
def test_dnn_rnn_lstm_grad_c(): def test_dnn_rnn_lstm_grad_c():
utt.seed_rng()
# test params # test params
input_dim = 32 input_dim = 32
...@@ -2819,7 +2796,6 @@ class Cudnn_grouped_conv3d(TestGroupedConv3dNoOptim): ...@@ -2819,7 +2796,6 @@ class Cudnn_grouped_conv3d(TestGroupedConv3dNoOptim):
def test_dnn_spatialtf(): def test_dnn_spatialtf():
utt.seed_rng()
""" """
Spatial Transformer implementation using Aesara from Lasagne Spatial Transformer implementation using Aesara from Lasagne
...@@ -3023,7 +2999,6 @@ def test_dnn_spatialtf_invalid_shapes(): ...@@ -3023,7 +2999,6 @@ def test_dnn_spatialtf_invalid_shapes():
def test_dnn_spatialtf_grad(): def test_dnn_spatialtf_grad():
utt.seed_rng()
inputs = tensor4("inputs") inputs = tensor4("inputs")
theta = tensor3("theta") theta = tensor3("theta")
...@@ -3097,7 +3072,7 @@ class TestDnnConv2DRuntimeAlgorithms: ...@@ -3097,7 +3072,7 @@ class TestDnnConv2DRuntimeAlgorithms:
] ]
def __init__(self): def __init__(self):
utt.seed_rng()
self.runtime_algorithms = ( self.runtime_algorithms = (
"time_once", "time_once",
"guess_once", "guess_once",
...@@ -3286,7 +3261,7 @@ class TestDnnConv3DRuntimeAlgorithms(TestDnnConv2DRuntimeAlgorithms): ...@@ -3286,7 +3261,7 @@ class TestDnnConv3DRuntimeAlgorithms(TestDnnConv2DRuntimeAlgorithms):
def test_conv_guess_once_with_dtypes(): def test_conv_guess_once_with_dtypes():
# This test checks that runtime conv algorithm selection does not raise any exception # This test checks that runtime conv algorithm selection does not raise any exception
# when consecutive functions with different dtypes and precisions are executed. # when consecutive functions with different dtypes and precisions are executed.
utt.seed_rng()
inputs_shape = (2, 3, 5, 5) inputs_shape = (2, 3, 5, 5)
filters_shape = (2, 3, 40, 4) filters_shape = (2, 3, 40, 4)
border_mode = "full" border_mode = "full"
......
...@@ -27,7 +27,7 @@ from aesara.tensor.slinalg import Cholesky, cholesky, imported_scipy ...@@ -27,7 +27,7 @@ from aesara.tensor.slinalg import Cholesky, cholesky, imported_scipy
from aesara.tensor.type import fmatrix, matrix, tensor3, vector from aesara.tensor.type import fmatrix, matrix, tensor3, vector
from tests import unittest_tools as utt from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu from tests.gpuarray.config import mode_with_gpu, mode_without_gpu
from tests.gpuarray.test_basic_ops import rand from tests.gpuarray.test_basic_ops import random
@pytest.mark.skipif( @pytest.mark.skipif(
...@@ -149,7 +149,7 @@ class TestCusolver: ...@@ -149,7 +149,7 @@ class TestCusolver:
utt.verify_grad(solve_op, [A_val, b_val], 3, rng, eps=eps) utt.verify_grad(solve_op, [A_val, b_val], 3, rng, eps=eps)
def test_solve_grad(self): def test_solve_grad(self):
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
structures = ["general", "lower_triangular", "upper_triangular"] structures = ["general", "lower_triangular", "upper_triangular"]
for A_structure in structures: for A_structure in structures:
lower = A_structure == "lower_triangular" lower = A_structure == "lower_triangular"
...@@ -166,9 +166,6 @@ class TestCusolver: ...@@ -166,9 +166,6 @@ class TestCusolver:
reason="Optional package scikits.cuda.cusolver not available", reason="Optional package scikits.cuda.cusolver not available",
) )
class TestGpuCholesky: class TestGpuCholesky:
def setup_method(self):
utt.seed_rng()
def get_gpu_cholesky_func(self, lower=True, inplace=False): def get_gpu_cholesky_func(self, lower=True, inplace=False):
# Helper function to compile function from GPU Cholesky op. # Helper function to compile function from GPU Cholesky op.
A = matrix("A", dtype="float32") A = matrix("A", dtype="float32")
...@@ -267,9 +264,6 @@ class TestGpuCholesky: ...@@ -267,9 +264,6 @@ class TestGpuCholesky:
reason="Optional package scikits.cuda.cusolver not available", reason="Optional package scikits.cuda.cusolver not available",
) )
class TestGpuCholesky64: class TestGpuCholesky64:
def setup_method(self):
utt.seed_rng()
def get_gpu_cholesky_func(self, lower=True, inplace=False): def get_gpu_cholesky_func(self, lower=True, inplace=False):
# Helper function to compile function from GPU Cholesky op. # Helper function to compile function from GPU Cholesky op.
A = matrix("A", dtype="float64") A = matrix("A", dtype="float64")
...@@ -388,18 +382,18 @@ class TestMagma: ...@@ -388,18 +382,18 @@ class TestMagma:
fn = aesara.function([A], gpu_matrix_inverse(A), mode=mode_with_gpu) fn = aesara.function([A], gpu_matrix_inverse(A), mode=mode_with_gpu)
N = 1000 N = 1000
test_rng = np.random.RandomState(seed=1) test_rng = np.random.default_rng(seed=1)
# Copied from tests.tensor.utils.rand. # Copied from tests.tensor.utils.random.
A_val = test_rng.rand(N, N).astype("float32") * 2 - 1 A_val = test_rng.random((N, N)).astype("float32") * 2 - 1
A_val_inv = fn(A_val) A_val_inv = fn(A_val)
utt.assert_allclose(np.eye(N), np.dot(A_val_inv, A_val), atol=1e-2) utt.assert_allclose(np.eye(N), np.dot(A_val_inv, A_val), atol=1e-2)
@utt.assertFailure_fast @utt.assertFailure_fast
def test_gpu_matrix_inverse_inplace(self): def test_gpu_matrix_inverse_inplace(self):
N = 1000 N = 1000
test_rng = np.random.RandomState(seed=1) test_rng = np.random.default_rng(seed=1)
A_val_gpu = gpuarray_shared_constructor( A_val_gpu = gpuarray_shared_constructor(
test_rng.rand(N, N).astype("float32") * 2 - 1 test_rng.random((N, N)).astype("float32") * 2 - 1
) )
A_val_copy = A_val_gpu.get_value() A_val_copy = A_val_gpu.get_value()
A_val_gpu_inv = GpuMagmaMatrixInverse()(A_val_gpu) A_val_gpu_inv = GpuMagmaMatrixInverse()(A_val_gpu)
...@@ -448,7 +442,7 @@ class TestMagma: ...@@ -448,7 +442,7 @@ class TestMagma:
utt.assert_allclose(np.dot(np.dot(U, S_m), VT), A, rtol=rtol, atol=atol) utt.assert_allclose(np.dot(np.dot(U, S_m), VT), A, rtol=rtol, atol=atol)
def test_gpu_svd_wide(self): def test_gpu_svd_wide(self):
A = rand(100, 50).astype("float32") A = random(100, 50).astype("float32")
M, N = A.shape M, N = A.shape
U, S, VT = self.run_gpu_svd(A) U, S, VT = self.run_gpu_svd(A)
...@@ -463,7 +457,7 @@ class TestMagma: ...@@ -463,7 +457,7 @@ class TestMagma:
self.assert_column_orthonormal(VT.T) self.assert_column_orthonormal(VT.T)
def test_gpu_svd_tall(self): def test_gpu_svd_tall(self):
A = rand(50, 100).astype("float32") A = random(50, 100).astype("float32")
M, N = A.shape M, N = A.shape
U, S, VT = self.run_gpu_svd(A) U, S, VT = self.run_gpu_svd(A)
...@@ -484,10 +478,10 @@ class TestMagma: ...@@ -484,10 +478,10 @@ class TestMagma:
) )
f_gpu = aesara.function([A], gpu_svd(A, compute_uv=False), mode=mode_with_gpu) f_gpu = aesara.function([A], gpu_svd(A, compute_uv=False), mode=mode_with_gpu)
A_val = rand(50, 100).astype("float32") A_val = random(50, 100).astype("float32")
utt.assert_allclose(f_cpu(A_val), f_gpu(A_val)) utt.assert_allclose(f_cpu(A_val), f_gpu(A_val))
A_val = rand(100, 50).astype("float32") A_val = random(100, 50).astype("float32")
utt.assert_allclose(f_cpu(A_val), f_gpu(A_val)) utt.assert_allclose(f_cpu(A_val), f_gpu(A_val))
def run_gpu_cholesky(self, A_val, lower=True): def run_gpu_cholesky(self, A_val, lower=True):
...@@ -500,7 +494,7 @@ class TestMagma: ...@@ -500,7 +494,7 @@ class TestMagma:
return f(A_val) return f(A_val)
def rand_symmetric(self, N): def rand_symmetric(self, N):
A = rand(N, N).astype("float32") A = random(N, N).astype("float32")
# ensure that eigenvalues are not too small which sometimes results in # ensure that eigenvalues are not too small which sometimes results in
# magma cholesky failure due to gpu limited numerical precision # magma cholesky failure due to gpu limited numerical precision
D, W = np.linalg.eigh(A) D, W = np.linalg.eigh(A)
...@@ -566,7 +560,7 @@ class TestMagma: ...@@ -566,7 +560,7 @@ class TestMagma:
return fn(A_val) return fn(A_val)
def check_gpu_qr(self, M, N, complete=True, rtol=None, atol=None): def check_gpu_qr(self, M, N, complete=True, rtol=None, atol=None):
A = rand(M, N).astype("float32") A = random(M, N).astype("float32")
if complete: if complete:
Q_gpu, R_gpu = self.run_gpu_qr(A, complete=complete) Q_gpu, R_gpu = self.run_gpu_qr(A, complete=complete)
else: else:
...@@ -611,7 +605,7 @@ class TestMagma: ...@@ -611,7 +605,7 @@ class TestMagma:
return fn(A_val) return fn(A_val)
def check_gpu_eigh(self, N, UPLO="L", compute_v=True, rtol=None, atol=None): def check_gpu_eigh(self, N, UPLO="L", compute_v=True, rtol=None, atol=None):
A = rand(N, N).astype("float32") A = random(N, N).astype("float32")
A = np.dot(A.T, A) A = np.dot(A.T, A)
d_np, v_np = np.linalg.eigh(A, UPLO=UPLO) d_np, v_np = np.linalg.eigh(A, UPLO=UPLO)
if compute_v: if compute_v:
...@@ -643,8 +637,8 @@ class TestMagma: ...@@ -643,8 +637,8 @@ class TestMagma:
# mostly copied from aesara/tensor/tests/test_slinalg.py # mostly copied from aesara/tensor/tests/test_slinalg.py
def test_cholesky_grad(): def test_cholesky_grad():
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
r = rng.randn(5, 5).astype(config.floatX) r = rng.standard_normal((5, 5)).astype(config.floatX)
# The dots are inside the graph since Cholesky needs separable matrices # The dots are inside the graph since Cholesky needs separable matrices
...@@ -681,9 +675,9 @@ def test_lower_triangular_and_cholesky_grad(): ...@@ -681,9 +675,9 @@ def test_lower_triangular_and_cholesky_grad():
N = 100 N = 100
else: else:
N = 5 N = 5
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
r = rng.randn(N, N).astype(config.floatX) r = rng.standard_normal((N, N)).astype(config.floatX)
y = rng.rand(N, 1).astype(config.floatX) y = rng.random((N, 1)).astype(config.floatX)
def f(r, y): def f(r, y):
PD = r.dot(r.T) PD = r.dot(r.T)
......
...@@ -42,9 +42,6 @@ def test_GpuCrossentropySoftmaxArgmax1HotWithBias(): ...@@ -42,9 +42,6 @@ def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
# case. # case.
dot_result = fmatrix("dot_result") dot_result = fmatrix("dot_result")
# Seed numpy.random with config.unittests__rseed
utt.seed_rng()
xx = np.asarray(np.random.rand(batch_size, n_in), dtype=np.float32) xx = np.asarray(np.random.rand(batch_size, n_in), dtype=np.float32)
yy = np.ones((batch_size,), dtype="int32") yy = np.ones((batch_size,), dtype="int32")
b_values = np.zeros((n_out,), dtype="float32") b_values = np.zeros((n_out,), dtype="float32")
...@@ -97,9 +94,6 @@ def test_GpuCrossentropySoftmax1HotWithBiasDx(): ...@@ -97,9 +94,6 @@ def test_GpuCrossentropySoftmax1HotWithBiasDx():
if not isinstance(mode_with_gpu, aesara.compile.debugmode.DebugMode): if not isinstance(mode_with_gpu, aesara.compile.debugmode.DebugMode):
n_out = 4099 n_out = 4099
# Seed numpy.random with config.unittests__rseed
utt.seed_rng()
softmax_output_value = np.random.rand(batch_size, n_out).astype("float32") softmax_output_value = np.random.rand(batch_size, n_out).astype("float32")
dnll_value = np.asarray(np.random.rand(batch_size), dtype="float32") dnll_value = np.asarray(np.random.rand(batch_size), dtype="float32")
y_idx_value = np.random.randint(low=0, high=5, size=batch_size) y_idx_value = np.random.randint(low=0, high=5, size=batch_size)
......
...@@ -595,14 +595,15 @@ def test_many_arg_elemwise(): ...@@ -595,14 +595,15 @@ def test_many_arg_elemwise():
# This test checks whether the + and * elemwise ops can handle # This test checks whether the + and * elemwise ops can handle
# extremely large numbers of arguments on gpu. # extremely large numbers of arguments on gpu.
rng = np.random.RandomState([1, 2, 3]) rng = np.random.default_rng([1, 2, 3])
nb_of_inputs_overflows = [] nb_of_inputs_overflows = []
for num_args in [64]: for num_args in [64]:
for op_to_test in [aesara.tensor.add, aesara.tensor.mul]: for op_to_test in [aesara.tensor.add, aesara.tensor.mul]:
for nb_dim in [2, 8]: for nb_dim in [2, 8]:
shapes = [rng.randint(1, 5) for i in range(nb_dim)] shapes = [rng.integers(1, 5) for i in range(nb_dim)]
args = [ args = [
np.cast["float32"](rng.randn(*shapes)) for arg in range(0, num_args) np.cast["float32"](rng.standard_normal(shapes))
for arg in range(0, num_args)
] ]
symb_args = [ symb_args = [
...@@ -645,8 +646,8 @@ def test_not_useless_scalar_gpuelemwise(): ...@@ -645,8 +646,8 @@ def test_not_useless_scalar_gpuelemwise():
with config.change_flags(warn_float64="ignore"): with config.change_flags(warn_float64="ignore"):
X = fmatrix() X = fmatrix()
x = np.random.randn(32, 32).astype(np.float32) x = np.random.standard_normal((32, 32)).astype(np.float32)
m1 = aesara.shared(np.random.randn(32, 32).astype(np.float32)) m1 = aesara.shared(np.random.standard_normal((32, 32)).astype(np.float32))
loss = (X - dot(X, m1)).norm(L=2) loss = (X - dot(X, m1)).norm(L=2)
lr = aesara.shared(np.asarray(0.001, dtype=np.float32)) lr = aesara.shared(np.asarray(0.001, dtype=np.float32))
grad = aesara.grad(loss, m1) grad = aesara.grad(loss, m1)
...@@ -672,7 +673,7 @@ def test_local_lift_abstractconv_gpu_shape(): ...@@ -672,7 +673,7 @@ def test_local_lift_abstractconv_gpu_shape():
def test_local_assert_no_cpu_op(): def test_local_assert_no_cpu_op():
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
m = rng.uniform(-1, 1, (10, 10)).astype("float32") m = rng.uniform(-1, 1, (10, 10)).astype("float32")
ms = gpuarray_shared_constructor(m, name="m_shared") ms = gpuarray_shared_constructor(m, name="m_shared")
out = tanh(ms).dot(ms.T) out = tanh(ms).dot(ms.T)
...@@ -806,17 +807,17 @@ def test_local_gpua_advanced_incsubtensor(): ...@@ -806,17 +807,17 @@ def test_local_gpua_advanced_incsubtensor():
def test_batched_dot_lifter(): def test_batched_dot_lifter():
# The CPU Op accepts 2D and 3D inputs, as well as mixed dtypes. # The CPU Op accepts 2D and 3D inputs, as well as mixed dtypes.
# Make sure the lifter adds the appropriate dimshuffles and casts # Make sure the lifter adds the appropriate dimshuffles and casts
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
def randX(*args): def randX(*args):
return rng.rand(*args).astype(config.floatX) return rng.random(args).astype(config.floatX)
cases = [ cases = [
(randX(3, 5, 7), randX(3, 7)), (randX(3, 5, 7), randX(3, 7)),
(randX(3, 5), randX(3, 5, 7)), (randX(3, 5), randX(3, 5, 7)),
(randX(3, 5), randX(3, 5)), (randX(3, 5), randX(3, 5)),
(rng.rand(3, 5, 7).astype("float32"), randX(3, 7, 9)), (rng.random((3, 5, 7)).astype("float32"), randX(3, 7, 9)),
(rng.rand(3, 5, 7).astype("float64"), randX(3, 7, 9)), (rng.random((3, 5, 7)).astype("float64"), randX(3, 7, 9)),
] ]
for x_val, y_val in cases: for x_val, y_val in cases:
x = TensorType(broadcastable=[s == 1 for s in x_val.shape], dtype=x_val.dtype)( x = TensorType(broadcastable=[s == 1 for s in x_val.shape], dtype=x_val.dtype)(
...@@ -832,7 +833,7 @@ def test_batched_dot_lifter(): ...@@ -832,7 +833,7 @@ def test_batched_dot_lifter():
def test_crossentropycategorical1hot_lifter(): def test_crossentropycategorical1hot_lifter():
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
x = matrix() x = matrix()
y = lvector() y = lvector()
z = aesara.tensor.nnet.crossentropy_categorical_1hot(x, y) z = aesara.tensor.nnet.crossentropy_categorical_1hot(x, y)
...@@ -850,7 +851,7 @@ def test_crossentropycategorical1hot_lifter(): ...@@ -850,7 +851,7 @@ def test_crossentropycategorical1hot_lifter():
) )
f( f(
rng.uniform(0.1, 0.9, (13, 5)).astype(config.floatX), rng.uniform(0.1, 0.9, (13, 5)).astype(config.floatX),
rng.randint(5, size=(13,)), rng.integers(5, size=(13,)),
) )
......
...@@ -21,13 +21,13 @@ from aesara.tensor.signal.pool import ( ...@@ -21,13 +21,13 @@ from aesara.tensor.signal.pool import (
) )
from tests import unittest_tools as utt from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu from tests.gpuarray.config import mode_with_gpu, mode_without_gpu
from tests.gpuarray.test_basic_ops import rand from tests.gpuarray.test_basic_ops import random
class TestPool: class TestPool:
def test_pool_py_interface(self): def test_pool_py_interface(self):
shp = (2, 2, 2, 2) shp = (2, 2, 2, 2)
inp = aesara.shared(rand(*shp), "a") inp = aesara.shared(random(*shp), "a")
inp = aet.as_tensor_variable(inp) inp = aet.as_tensor_variable(inp)
with pytest.raises(ValueError): with pytest.raises(ValueError):
# test when pad >= ws # test when pad >= ws
...@@ -43,7 +43,7 @@ class TestPool: ...@@ -43,7 +43,7 @@ class TestPool:
gpu_mode.check_py_code = False gpu_mode.check_py_code = False
shp = (2, 2, 2, 2) shp = (2, 2, 2, 2)
inp = aesara.shared(rand(*shp), "a") inp = aesara.shared(random(*shp), "a")
inp = aet.as_tensor_variable(inp) inp = aet.as_tensor_variable(inp)
with pytest.raises(ValueError): with pytest.raises(ValueError):
# test when ignore_border and pad >= 0 # test when ignore_border and pad >= 0
...@@ -57,7 +57,7 @@ class TestPool: ...@@ -57,7 +57,7 @@ class TestPool:
gpu_mode.check_py_code = False gpu_mode.check_py_code = False
shp = (2, 2, 2, 2) shp = (2, 2, 2, 2)
inp = aesara.shared(rand(*shp), "a") inp = aesara.shared(random(*shp), "a")
inp = aet.as_tensor_variable(inp) inp = aet.as_tensor_variable(inp)
ds_op = GpuPool(ignore_border=False, mode="average_exc_pad", ndim=2) ds_op = GpuPool(ignore_border=False, mode="average_exc_pad", ndim=2)
pad = aet.as_tensor_variable([0, 0]) pad = aet.as_tensor_variable([0, 0])
...@@ -102,7 +102,7 @@ def test_pool2d(): ...@@ -102,7 +102,7 @@ def test_pool2d():
(3, 2, 6, 6, 6, 5, 7), (3, 2, 6, 6, 6, 5, 7),
] ]
np.random.RandomState(utt.fetch_seed()).shuffle(shps) np.random.default_rng(utt.fetch_seed()).shuffle(shps)
test_ws = (2, 2), (3, 2), (1, 1) test_ws = (2, 2), (3, 2), (1, 1)
test_st = (2, 2), (3, 2), (1, 1) test_st = (2, 2), (3, 2), (1, 1)
test_mode = ["max", "sum", "average_inc_pad", "average_exc_pad"] test_mode = ["max", "sum", "average_inc_pad", "average_exc_pad"]
...@@ -124,7 +124,7 @@ def test_pool2d(): ...@@ -124,7 +124,7 @@ def test_pool2d():
# print('test_pool2d', shp, ws, st, pad, mode, ignore_border) # print('test_pool2d', shp, ws, st, pad, mode, ignore_border)
ds_op = Pool(ndim=len(ws), mode=mode, ignore_border=ignore_border) ds_op = Pool(ndim=len(ws), mode=mode, ignore_border=ignore_border)
a = aesara.shared(rand(*shp), "a") a = aesara.shared(random(*shp), "a")
a_pooled = ds_op(aet.as_tensor_variable(a), ws, st, pad) a_pooled = ds_op(aet.as_tensor_variable(a), ws, st, pad)
f = aesara.function([], a_pooled, mode=gpu_mode) f = aesara.function([], a_pooled, mode=gpu_mode)
...@@ -163,7 +163,7 @@ def test_pool2d(): ...@@ -163,7 +163,7 @@ def test_pool2d():
if mode != "max": if mode != "max":
continue continue
ea = aesara.shared(rand(*shp), "ea") ea = aesara.shared(random(*shp), "ea")
gr = aesara.function([], Rop(a_pooled, a, ea), mode=gpu_mode) gr = aesara.function([], Rop(a_pooled, a, ea), mode=gpu_mode)
gr2 = aesara.function([], Rop(a_pooled, a, ea), mode=ref_mode) gr2 = aesara.function([], Rop(a_pooled, a, ea), mode=ref_mode)
...@@ -226,7 +226,7 @@ def test_pool3d(): ...@@ -226,7 +226,7 @@ def test_pool3d():
(3, 2, 6, 6, 6, 5, 7), (3, 2, 6, 6, 6, 5, 7),
] ]
np.random.RandomState(utt.fetch_seed()).shuffle(shps) np.random.default_rng(utt.fetch_seed()).shuffle(shps)
test_ws = (2, 2, 2), (3, 2, 3), (1, 1, 1) test_ws = (2, 2, 2), (3, 2, 3), (1, 1, 1)
test_st = (2, 2, 2), (2, 3, 2), (1, 1, 1) test_st = (2, 2, 2), (2, 3, 2), (1, 1, 1)
test_mode = ["max", "sum", "average_inc_pad", "average_exc_pad"] test_mode = ["max", "sum", "average_inc_pad", "average_exc_pad"]
...@@ -250,7 +250,7 @@ def test_pool3d(): ...@@ -250,7 +250,7 @@ def test_pool3d():
# print('test_pool3d', shp, ws, st, pad, mode, ignore_border) # print('test_pool3d', shp, ws, st, pad, mode, ignore_border)
ds_op = Pool(ndim=len(ws), mode=mode, ignore_border=ignore_border) ds_op = Pool(ndim=len(ws), mode=mode, ignore_border=ignore_border)
a = aesara.shared(rand(*shp), "a") a = aesara.shared(random(*shp), "a")
a_pooled = ds_op(aet.as_tensor_variable(a), ws, st, pad) a_pooled = ds_op(aet.as_tensor_variable(a), ws, st, pad)
f = aesara.function([], a_pooled, mode=gpu_mode) f = aesara.function([], a_pooled, mode=gpu_mode)
...@@ -289,7 +289,7 @@ def test_pool3d(): ...@@ -289,7 +289,7 @@ def test_pool3d():
if mode != "max": if mode != "max":
continue continue
ea = aesara.shared(rand(*shp), "ea") ea = aesara.shared(random(*shp), "ea")
gr = aesara.function([], Rop(a_pooled, a, ea), mode=gpu_mode) gr = aesara.function([], Rop(a_pooled, a, ea), mode=gpu_mode)
gr2 = aesara.function([], Rop(a_pooled, a, ea), mode=ref_mode) gr2 = aesara.function([], Rop(a_pooled, a, ea), mode=ref_mode)
......
...@@ -15,9 +15,6 @@ from tests.sandbox.test_rng_mrg import java_samples, rng_mrg_overflow ...@@ -15,9 +15,6 @@ from tests.sandbox.test_rng_mrg import java_samples, rng_mrg_overflow
from tests.sandbox.test_rng_mrg import test_f16_nonzero as cpu_f16_nonzero from tests.sandbox.test_rng_mrg import test_f16_nonzero as cpu_f16_nonzero
utt.seed_rng()
def test_consistency_GPUA_serial(): def test_consistency_GPUA_serial():
# Verify that the random numbers generated by GPUA_mrg_uniform, serially, # Verify that the random numbers generated by GPUA_mrg_uniform, serially,
# are the same as the reference (Java) implementation by L'Ecuyer et al. # are the same as the reference (Java) implementation by L'Ecuyer et al.
......
...@@ -32,9 +32,6 @@ else: ...@@ -32,9 +32,6 @@ else:
class TestScan: class TestScan:
def setup_method(self):
utt.seed_rng()
def test_one_sequence_one_output_weights_gpu1(self): def test_one_sequence_one_output_weights_gpu1(self):
def f_rnn(u_t, x_tm1, W_in, W): def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W return u_t * W_in + x_tm1 * W
...@@ -65,7 +62,7 @@ class TestScan: ...@@ -65,7 +62,7 @@ class TestScan:
mode=mode, mode=mode,
) )
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0) v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform() v_x0 = rng.uniform()
W = rng.uniform() W = rng.uniform()
...@@ -134,7 +131,7 @@ class TestScan: ...@@ -134,7 +131,7 @@ class TestScan:
) )
# get random initial values # get random initial values
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0) v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform() v_x0 = rng.uniform()
W = rng.uniform() W = rng.uniform()
...@@ -193,7 +190,7 @@ class TestScan: ...@@ -193,7 +190,7 @@ class TestScan:
) )
# get random initial values # get random initial values
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0) v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform() v_x0 = rng.uniform()
W = rng.uniform() W = rng.uniform()
...@@ -225,7 +222,7 @@ class TestScan: ...@@ -225,7 +222,7 @@ class TestScan:
assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo]) assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo])
def test_gpu4_gibbs_chain(self): def test_gpu4_gibbs_chain(self):
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
v_vsample = np.array( v_vsample = np.array(
rng.binomial( rng.binomial(
1, 1,
...@@ -313,7 +310,7 @@ class ScanGpuTests: ...@@ -313,7 +310,7 @@ class ScanGpuTests:
) )
# get random initial values # get random initial values
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0) v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform() v_x0 = rng.uniform()
W = rng.uniform() W = rng.uniform()
...@@ -402,7 +399,7 @@ class ScanGpuTests: ...@@ -402,7 +399,7 @@ class ScanGpuTests:
) )
# get random initial values # get random initial values
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0) v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform() v_x0 = rng.uniform()
W = rng.uniform() W = rng.uniform()
...@@ -481,7 +478,7 @@ class ScanGpuTests: ...@@ -481,7 +478,7 @@ class ScanGpuTests:
) )
# get random initial values # get random initial values
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0) v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform() v_x0 = rng.uniform()
W = rng.uniform() W = rng.uniform()
...@@ -507,7 +504,7 @@ class ScanGpuTests: ...@@ -507,7 +504,7 @@ class ScanGpuTests:
assert self.is_scan_on_gpu(scan_node) assert self.is_scan_on_gpu(scan_node)
def test_gibbs_chain(self): def test_gibbs_chain(self):
rng = np.random.RandomState(utt.fetch_seed()) rng = np.random.default_rng(utt.fetch_seed())
v_vsample = np.array( v_vsample = np.array(
rng.binomial( rng.binomial(
1, 1,
...@@ -681,8 +678,6 @@ class TestScanGpuarray(ScanGpuTests): ...@@ -681,8 +678,6 @@ class TestScanGpuarray(ScanGpuTests):
if not self.gpu_backend.pygpu_activated: if not self.gpu_backend.pygpu_activated:
pytest.skip("Optional package pygpu disabled") pytest.skip("Optional package pygpu disabled")
utt.seed_rng()
def is_scan_on_gpu(self, node): def is_scan_on_gpu(self, node):
return node.op.info.get("gpua", False) return node.op.info.get("gpua", False)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论