testgroup / pytensor · Commits

Commit 39c11472, authored Jun 02, 2021 by kc611; committed by Brandon T. Willard on Jun 25, 2021.
Refactor remaining tests to use NumPy Generator
Parent: 1ff4b9d3
Showing 19 changed files with 334 additions and 276 deletions (+334 -276).
tests/compile/function/test_pfunc.py   +10  -10
tests/compile/test_debugmode.py        +6   -6
tests/compile/test_misc.py             +4   -3
tests/d3viz/models.py                  +1   -1
tests/d3viz/test_d3viz.py              +1   -1
tests/d3viz/test_formatting.py         +1   -1
tests/link/test_jax.py                 +33  -8
tests/link/test_numba.py               +34  -16
tests/sandbox/linalg/test_linalg.py    +5   -5
tests/sandbox/test_minimal.py          +2   -2
tests/sandbox/test_rng_mrg.py          +15  -12
tests/scan/test_basic.py               +113 -88
tests/scan/test_opt.py                 +1   -1
tests/sparse/sandbox/test_sp.py        +5   -5
tests/sparse/test_basic.py             +50  -61
tests/tensor/test_sort.py              +38  -33
tests/typed_list/test_basic.py         +0   -4
tests/typed_list/test_opt.py           +11  -11
tests/typed_list/test_type.py          +4   -8
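For reference, the pattern repeated across every file below is NumPy's recommended migration from the legacy RandomState interface to the Generator interface. A minimal sketch of the call mapping (plain NumPy; the seed 42 is only illustrative):

    import numpy as np

    rs = np.random.RandomState(42)   # legacy API, removed throughout this commit
    rng = np.random.default_rng(42)  # replacement Generator

    rs.rand(2, 2)              # becomes rng.random((2, 2)) -- shape is now a tuple
    rs.randn(2, 2)             # becomes rng.standard_normal((2, 2))
    rs.randint(0, 10, size=3)  # becomes rng.integers(0, 10, size=3)

Note that the two APIs produce different streams even for the same seed, which is why some hard-coded expected values in the diffs below change as well.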
tests/compile/function/test_pfunc.py

@@ -64,7 +64,7 @@ class TestPfunc:
     def test_shared(self):
         # CHECK: two functions (f1 and f2) can share w
-        w = shared(np.random.rand(2, 2), "w")
+        w = shared(np.random.random((2, 2)), "w")
         wval = w.get_value(borrow=False)
         x = dmatrix()

@@ -72,7 +72,7 @@ class TestPfunc:
         out2 = w * x
         f1 = pfunc([x], [out1])
         f2 = pfunc([x], [out2])
-        xval = np.random.rand(2, 2)
+        xval = np.random.random((2, 2))
         assert np.all(f1(xval) == xval + wval)
         assert np.all(f2(xval) == xval * wval)

@@ -89,7 +89,7 @@ class TestPfunc:
     def test_no_shared_as_input(self):
         # Test that shared variables cannot be used as function inputs.
-        w_init = np.random.rand(2, 2)
+        w_init = np.random.random((2, 2))
         w = shared(w_init.copy(), "w")
         with pytest.raises(
             TypeError, match=r"^Cannot use a shared variable \(w\) as explicit input"

@@ -100,8 +100,8 @@ class TestPfunc:
         # Ensure it is possible to (implicitly) use a shared variable in a
         # function, as a 'state' that can be updated at will.
-        rng = np.random.RandomState(1827)
-        w_init = rng.rand(5)
+        rng = np.random.default_rng(1827)
+        w_init = rng.random((5))
         w = shared(w_init.copy(), "w")
         reg = aet_sum(w * w)
         f = pfunc([], reg)

@@ -127,8 +127,8 @@ class TestPfunc:
         out = a + b
         f = pfunc([In(a, strict=False)], [out])
-        # works, rand generates float64 by default
-        f(np.random.rand(8))
+        # works, random() generates float64 by default
+        f(np.random.random((8)))
         # works, casting is allowed
         f(np.array([1, 2, 3, 4], dtype="int32"))

@@ -145,14 +145,14 @@ class TestPfunc:
         # using mutable=True will let fip change the value in aval
         fip = pfunc([In(a, mutable=True)], [a_out], mode="FAST_RUN")
-        aval = np.random.rand(10)
+        aval = np.random.random((10))
         aval2 = aval.copy()
         assert np.all(fip(aval) == (aval2 * 2))
         assert not np.all(aval == aval2)

         # using mutable=False should leave the input untouched
         f = pfunc([In(a, mutable=False)], [a_out], mode="FAST_RUN")
-        aval = np.random.rand(10)
+        aval = np.random.random((10))
         aval2 = aval.copy()
         assert np.all(f(aval) == (aval2 * 2))
         assert np.all(aval == aval2)

@@ -375,7 +375,7 @@ class TestPfunc:
     def test_update_err_broadcast(self):
         # Test that broadcastable dimensions raise error
-        data = np.random.rand(10, 10).astype("float32")
+        data = np.random.random((10, 10)).astype("float32")
         output_var = shared(name="output", value=data)
         # the update_var has type matrix, and the update expression
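A side note on spellings like np.random.random((8)) above: both Generator.random and the module-level np.random.random accept an int or a tuple as the size, and (8) is just the int 8, not a 1-tuple, so the extra parentheses are harmless. A quick sanity check (plain NumPy):

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.random((8))   # (8) is the int 8, so this is random(8)
    b = rng.random((8,))  # a true 1-tuple; same resulting shape
    assert a.shape == b.shape == (8,)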
tests/compile/test_debugmode.py

@@ -736,7 +736,7 @@ class VecAsRowAndCol(Op):
 class TestPreallocatedOutput:
     def setup_method(self):
-        self.rng = np.random.RandomState(seed=utt.fetch_seed())
+        self.rng = np.random.default_rng(seed=utt.fetch_seed())

     def test_f_contiguous(self):
         a = fmatrix("a")

@@ -745,8 +745,8 @@ class TestPreallocatedOutput:
         # In this test, we do not want z to be an output of the graph.
         out = dot(z, np.eye(7))
-        a_val = self.rng.randn(7, 7).astype("float32")
-        b_val = self.rng.randn(7, 7).astype("float32")
+        a_val = self.rng.standard_normal((7, 7)).astype("float32")
+        b_val = self.rng.standard_normal((7, 7)).astype("float32")
         # Should work
         mode = DebugMode(check_preallocated_output=["c_contiguous"])

@@ -776,8 +776,8 @@ class TestPreallocatedOutput:
         b = fmatrix("b")
         out = BrokenCImplementationAdd()(a, b)
-        a_val = self.rng.randn(7, 7).astype("float32")
-        b_val = self.rng.randn(7, 7).astype("float32")
+        a_val = self.rng.standard_normal((7, 7)).astype("float32")
+        b_val = self.rng.standard_normal((7, 7)).astype("float32")
         # Should work
         mode = DebugMode(check_preallocated_output=["c_contiguous"])

@@ -805,5 +805,5 @@ class TestPreallocatedOutput:
         c, r = VecAsRowAndCol()(v)
         f = function([v], [c, r])
-        v_val = self.rng.randn(5).astype("float32")
+        v_val = self.rng.standard_normal((5)).astype("float32")
         f(v_val)
tests/compile/test_misc.py

@@ -54,8 +54,8 @@ class NNet:
 def test_nnet():
-    rng = np.random.RandomState(1827)
-    data = rng.rand(10, 4)
+    rng = np.random.default_rng(279)
+    data = rng.random((10, 4))
     nnet = NNet(n_input=3, n_hidden=10)
     for epoch in range(3):
         mean_cost = 0

@@ -66,7 +66,8 @@ def test_nnet():
         mean_cost += cost
     mean_cost /= float(len(data))
     # print 'Mean cost at epoch %s: %s' % (epoch, mean_cost)
-    assert abs(mean_cost - 0.20588975452) < 1e-6
+    # Seed based test
+    assert abs(mean_cost - 0.2301901) < 1e-6
     # Just call functions to make sure they do not crash.
     nnet.compute_output(input)
     nnet.output_from_hidden(np.ones(10))
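The expected mean_cost constant changes (0.20588975452 to 0.2301901) because RandomState (MT19937) and default_rng (PCG64) produce different streams even from the same seed, so seed-based regression values have to be recomputed after the migration. A small demonstration of the divergence (plain NumPy):

    import numpy as np

    # Same seed, different bit generators, different draws:
    legacy = np.random.RandomState(1827).rand(3)
    modern = np.random.default_rng(1827).random(3)
    assert not np.allclose(legacy, modern)  # streams differ, so golden values differ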
tests/d3viz/models.py

@@ -11,7 +11,7 @@ class Mlp:
         if rng is None:
             rng = 0
         if isinstance(rng, int):
-            rng = np.random.RandomState(rng)
+            rng = np.random.default_rng(rng)
         self.rng = rng
         self.nfeatures = nfeatures
         self.noutputs = noutputs
tests/d3viz/test_d3viz.py

@@ -19,7 +19,7 @@ if not pydot_imported:
 class TestD3Viz:
     def setup_method(self):
-        self.rng = np.random.RandomState(0)
+        self.rng = np.random.default_rng(0)
         self.data_dir = pt.join("data", "test_d3viz")

     def check(self, f, reference=None, verbose=False):
tests/d3viz/test_formatting.py

@@ -13,7 +13,7 @@ from tests.d3viz import models
 class TestPyDotFormatter:
     def setup_method(self):
-        self.rng = np.random.RandomState(0)
+        self.rng = np.random.default_rng(0)

     def node_counts(self, graph):
         node_types = [node.get_attributes()["node_type"] for node in graph.get_nodes()]
tests/link/test_jax.py

@@ -218,6 +218,8 @@ def test_jax_compile_ops():
 def test_jax_basic():
+    rng = np.random.default_rng(28494)
+
     x = matrix("x")
     y = matrix("y")
     b = vector("b")

@@ -259,7 +261,11 @@ def test_jax_basic():
     out_fg = FunctionGraph([x], [out])
     compare_jax_and_py(
         out_fg,
-        [(np.eye(10) + np.random.randn(10, 10) * 0.01).astype(config.floatX)],
+        [
+            (np.eye(10) + rng.standard_normal(size=(10, 10)) * 0.01).astype(
+                config.floatX
+            )
+        ],
     )

     # not sure why this isn't working yet with lower=False

@@ -267,7 +273,11 @@ def test_jax_basic():
     out_fg = FunctionGraph([x], [out])
     compare_jax_and_py(
         out_fg,
-        [(np.eye(10) + np.random.randn(10, 10) * 0.01).astype(config.floatX)],
+        [
+            (np.eye(10) + rng.standard_normal(size=(10, 10)) * 0.01).astype(
+                config.floatX
+            )
+        ],
     )

     out = aet_slinalg.solve(x, b)

@@ -294,7 +304,11 @@ def test_jax_basic():
     out_fg = FunctionGraph([x], [out])
     compare_jax_and_py(
         out_fg,
-        [(np.eye(10) + np.random.randn(10, 10) * 0.01).astype(config.floatX)],
+        [
+            (np.eye(10) + rng.standard_normal(size=(10, 10)) * 0.01).astype(
+                config.floatX
+            )
+        ],
     )

@@ -405,9 +419,9 @@ def test_jax_eye():
 def test_jax_basic_multiout():
+    rng = np.random.default_rng(213234)

-    np.random.seed(213234)
-    M = np.random.normal(size=(3, 3))
+    M = rng.normal(size=(3, 3))
     X = M.dot(M.T)

     x = matrix("x")

@@ -638,7 +652,9 @@ def test_jax_Subtensors_omni():
     reason="Omnistaging cannot be disabled",
 )
 def test_jax_IncSubtensor():
-    x_np = np.random.uniform(-1, 1, size=(3, 4, 5)).astype(config.floatX)
+    rng = np.random.default_rng(213234)
+
+    x_np = rng.uniform(-1, 1, size=(3, 4, 5)).astype(config.floatX)
     x_aet = aet.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(config.floatX)

     # "Set" basic indices

@@ -661,7 +677,7 @@ def test_jax_IncSubtensor():
     # "Set" advanced indices
     st_aet = aet.as_tensor_variable(
-        np.random.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX)
+        rng.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX)
     )
     out_aet = aet_subtensor.set_subtensor(x_aet[np.r_[0, 2]], st_aet)
     assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor1)

@@ -707,7 +723,7 @@ def test_jax_IncSubtensor():
     # "Increment" advanced indices
     st_aet = aet.as_tensor_variable(
-        np.random.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX)
+        rng.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX)
     )
     out_aet = aet_subtensor.inc_subtensor(x_aet[np.r_[0, 2]], st_aet)
     assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor1)

@@ -1202,6 +1218,7 @@ def test_random_unimplemented():
     compare_jax_and_py(fgraph, [])

+@pytest.mark.xfail(reason="Generators not yet supported in JAX")
 def test_RandomStream():
     srng = RandomStream(seed=123)
     out = srng.normal() - srng.normal()

@@ -1211,3 +1228,11 @@ def test_RandomStream():
     jax_res_2 = fn()

     assert np.array_equal(jax_res_1, jax_res_2)
+
+
+@pytest.mark.xfail(reason="Generators not yet supported in JAX")
+def test_random_generators():
+    rng = shared(np.random.default_rng(123))
+    out = normal(rng=rng)
+    fgraph = FunctionGraph([out.owner.inputs[0]], [out], clone=False)
+    compare_jax_and_py(fgraph, [])
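Two patterns recur in this file: each test hoists a single seeded Generator and reuses it for every draw, and the tests that push a Generator through the JAX backend are marked xfail with the reason given in the diff ("Generators not yet supported in JAX"). A sketch of the first pattern (plain NumPy; the seed mirrors the one in the diff):

    import numpy as np

    rng = np.random.default_rng(28494)  # one Generator per test, reused below

    # A well-conditioned matrix near the identity, as used for the solve tests:
    m = np.eye(10) + rng.standard_normal(size=(10, 10)) * 0.01
    # Uniform draws replacing the former module-global np.random.uniform:
    x = rng.uniform(-1, 1, size=(3, 4, 5))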
tests/link/test_numba.py

@@ -88,7 +88,7 @@ opts = OptimizationQuery(include=[None], exclude=["cxx_only", "BlasOpt"])
 numba_mode = Mode(NumbaLinker(), opts)
 py_mode = Mode("py", opts)

-rng = np.random.RandomState(42849)
+rng = np.random.default_rng(42849)


 def set_test_value(x, v):

@@ -291,13 +291,13 @@ def test_create_numba_signature(v, expected, force_scalar):
     [
         (
             [aet.vector()],
-            [rng.randn(100).astype(config.floatX)],
+            [rng.standard_normal(100).astype(config.floatX)],
             lambda x: aet.sigmoid(x),
             None,
         ),
         (
             [aet.vector() for i in range(4)],
-            [rng.randn(100).astype(config.floatX) for i in range(4)],
+            [rng.standard_normal(100).astype(config.floatX) for i in range(4)],
             lambda x, y, x1, y1: (x + y) * (x1 + y1) * y,
             None,
         ),

@@ -311,8 +311,8 @@ def test_create_numba_signature(v, expected, force_scalar):
         (
             [aet.vector(), aet.vector()],
             [
-                rng.randn(100).astype(config.floatX),
-                rng.randn(100).astype(config.floatX),
+                rng.standard_normal(100).astype(config.floatX),
+                rng.standard_normal(100).astype(config.floatX),
             ],
             lambda x, y: ati.add_inplace(x, y),
             None,

@@ -320,8 +320,8 @@ def test_create_numba_signature(v, expected, force_scalar):
         (
             [aet.vector(), aet.vector()],
             [
-                rng.randn(100).astype(config.floatX),
-                rng.randn(100).astype(config.floatX),
+                rng.standard_normal(100).astype(config.floatX),
+                rng.standard_normal(100).astype(config.floatX),
             ],
             lambda x, y: my_multi_out(x, y),
             NotImplementedError,

@@ -1954,7 +1954,9 @@ def test_MaxAndArgmax(x, axes, exc):
         (
             set_test_value(
                 aet.lmatrix(),
-                (lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")),
+                (lambda x: x.T.dot(x))(
+                    rng.integers(1, 10, size=(3, 3)).astype("int64")
+                ),
             ),
             True,
             None,

@@ -2004,7 +2006,9 @@ def test_Cholesky(x, lower, exc):
         (
             set_test_value(
                 aet.lmatrix(),
-                (lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")),
+                (lambda x: x.T.dot(x))(
+                    rng.integers(1, 10, size=(3, 3)).astype("int64")
+                ),
             ),
             set_test_value(aet.dvector(), rng.random(size=(3,)).astype("float64")),
             "general",

@@ -2120,7 +2124,9 @@ y = np.array(
         (
             set_test_value(
                 aet.lmatrix(),
-                (lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")),
+                (lambda x: x.T.dot(x))(
+                    rng.integers(1, 10, size=(3, 3)).astype("int64")
+                ),
             ),
             None,
         ),

@@ -2160,7 +2166,9 @@ def test_Eig(x, exc):
         (
             set_test_value(
                 aet.lmatrix(),
-                (lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")),
+                (lambda x: x.T.dot(x))(
+                    rng.integers(1, 10, size=(3, 3)).astype("int64")
+                ),
             ),
             "U",
             UserWarning,

@@ -2200,7 +2208,9 @@ def test_Eigh(x, uplo, exc):
         (
             set_test_value(
                 aet.lmatrix(),
-                (lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")),
+                (lambda x: x.T.dot(x))(
+                    rng.integers(1, 10, size=(3, 3)).astype("int64")
+                ),
             ),
             None,
         ),

@@ -2244,7 +2254,9 @@ def test_MatrixInverse(x, exc):
         (
             set_test_value(
                 aet.lmatrix(),
-                (lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")),
+                (lambda x: x.T.dot(x))(
+                    rng.integers(1, 10, size=(3, 3)).astype("int64")
+                ),
             ),
             "reduced",
             None,

@@ -2252,7 +2264,9 @@ def test_MatrixInverse(x, exc):
         (
             set_test_value(
                 aet.lmatrix(),
-                (lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")),
+                (lambda x: x.T.dot(x))(
+                    rng.integers(1, 10, size=(3, 3)).astype("int64")
+                ),
             ),
             "complete",
             UserWarning,

@@ -2303,7 +2317,9 @@ def test_QRFull(x, mode, exc):
         (
             set_test_value(
                 aet.lmatrix(),
-                (lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")),
+                (lambda x: x.T.dot(x))(
+                    rng.integers(1, 10, size=(3, 3)).astype("int64")
+                ),
             ),
             True,
             True,

@@ -2312,7 +2328,9 @@ def test_QRFull(x, mode, exc):
         (
             set_test_value(
                 aet.lmatrix(),
-                (lambda x: x.T.dot(x))(rng.randint(1, 10, size=(3, 3)).astype("int64")),
+                (lambda x: x.T.dot(x))(
+                    rng.integers(1, 10, size=(3, 3)).astype("int64")
+                ),
             ),
             True,
             False,
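Generator.integers keeps randint's exclusive upper bound by default (an endpoint=True option exists to include it), so the swaps in this file are drop-in. The (lambda x: x.T.dot(x)) wrapper seen throughout the parameterizations builds a symmetric positive semi-definite matrix, which keeps inputs well-behaved for the Cholesky/Eig/QR cases. A sketch (plain NumPy):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.integers(1, 10, size=(3, 3)).astype("int64")  # draws 1..9, like randint
    gram = x.T.dot(x)  # symmetric PSD input for the linear-algebra tests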
tests/sandbox/linalg/test_linalg.py

@@ -41,9 +41,9 @@ def test_rop_lop():
     )
     scan_f = function([mx, mv], sy)

-    rng = np.random.RandomState(utt.fetch_seed())
-    vx = np.asarray(rng.randn(4, 4), aesara.config.floatX)
-    vv = np.asarray(rng.randn(4, 4), aesara.config.floatX)
+    rng = np.random.default_rng(utt.fetch_seed())
+    vx = np.asarray(rng.standard_normal((4, 4)), aesara.config.floatX)
+    vv = np.asarray(rng.standard_normal((4, 4)), aesara.config.floatX)

     v1 = rop_f(vx, vv)
     v2 = scan_f(vx, vv)

@@ -75,13 +75,13 @@ def test_rop_lop():
 def test_spectral_radius_bound():
     tol = 10 ** (-6)
-    rng = np.random.RandomState(utt.fetch_seed())
+    rng = np.random.default_rng(utt.fetch_seed())
     x = matrix()
     radius_bound = spectral_radius_bound(x, 5)
     f = aesara.function([x], radius_bound)

     shp = (3, 4)
-    m = rng.rand(*shp)
+    m = rng.random(shp)
     m = np.cov(m).astype(config.floatX)
     radius_bound_aesara = f(m)
tests/sandbox/test_minimal.py

@@ -16,7 +16,7 @@ class TestMinimal:
     """

     def setup_method(self):
-        self.rng = np.random.RandomState(utt.fetch_seed(666))
+        self.rng = np.random.default_rng(utt.fetch_seed(666))

     def test_minimal(self):
         A = matrix()

@@ -26,7 +26,7 @@ class TestMinimal:
         f = function([A, b], minimal(A, A, b, b, A))
         print("built")

-        Aval = self.rng.randn(5, 5)
+        Aval = self.rng.standard_normal((5, 5))
         bval = np.arange(5, dtype=float)
         f(Aval, bval)
         print("done")
tests/sandbox/test_rng_mrg.py

@@ -25,7 +25,6 @@ from tests import unittest_tools as utt
 # TODO: test optimizer mrg_random_make_inplace

-utt.seed_rng()

 # Results generated by Java code using L'Ecuyer et al.'s code, with:
 # main seed: [12345]*6 (default)

@@ -90,7 +89,9 @@ def test_get_substream_rstates():
     n_streams = 100
     dtype = "float32"
-    rng = MRG_RandomStream(np.random.randint(2147462579))
+    rng = MRG_RandomStream(
+        np.random.default_rng(utt.fetch_seed()).integers(2147462579)
+    )
     rng.get_substream_rstates(n_streams, dtype)

@@ -889,13 +890,13 @@ def test_multMatVect():
     f0 = function([A1, s1, m1, A2, s2, m2], g0)

     i32max = np.iinfo(np.int32).max
-    A1 = np.random.randint(0, i32max, (3, 3)).astype("int64")
-    s1 = np.random.randint(0, i32max, 3).astype("int32")
-    m1 = np.asarray(np.random.randint(i32max), dtype="int32")
-    A2 = np.random.randint(0, i32max, (3, 3)).astype("int64")
-    s2 = np.random.randint(0, i32max, 3).astype("int32")
-    m2 = np.asarray(np.random.randint(i32max), dtype="int32")
+    rng = np.random.default_rng(utt.fetch_seed())
+    A1 = rng.integers(0, i32max, (3, 3)).astype("int64")
+    s1 = rng.integers(0, i32max, 3).astype("int32")
+    m1 = np.asarray(rng.integers(i32max), dtype="int32")
+    A2 = rng.integers(0, i32max, (3, 3)).astype("int64")
+    s2 = rng.integers(0, i32max, 3).astype("int32")
+    m2 = np.asarray(rng.integers(i32max), dtype="int32")

     f0.input_storage[0].storage[0] = A1
     f0.input_storage[1].storage[0] = s1

@@ -964,7 +965,7 @@ def rng_mrg_overflow(sizes, fct, mode, should_raise_error):
 @pytest.mark.slow
 def test_overflow_cpu():
     # run with AESARA_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32
-    rng = MRG_RandomStream(np.random.randint(1234))
+    rng = MRG_RandomStream(np.random.default_rng(utt.fetch_seed()).integers(1234))
     fct = rng.uniform
     with config.change_flags(compute_test_value="off"):
         # should raise error as the size overflows

@@ -1107,8 +1108,10 @@ def test_target_parameter():
 @config.change_flags(compute_test_value="off")
 def test_undefined_grad_opt():
     # Make sure that undefined grad get removed in optimized graph.
-    random = MRG_RandomStream(np.random.randint(1, 2147462579))
-    pvals = shared(np.random.rand(10, 20).astype(config.floatX))
+    random = MRG_RandomStream(
+        np.random.default_rng(utt.fetch_seed()).integers(1, 2147462579)
+    )
+    pvals = shared(np.random.random((10, 20)).astype(config.floatX))
     pvals = pvals / pvals.sum(axis=1)
     pvals = zero_grad(pvals)

     samples = random.multinomial(pvals=pvals, n=1)
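Where the old code seeded MRG_RandomStream from NumPy's global legacy state (np.random.randint(...)), the new code derives the seed from a Generator that is itself seeded by the test suite's fetch_seed(), so the whole chain is reproducible. A sketch of the idiom (the literal 42 stands in for utt.fetch_seed()):

    import numpy as np

    seed = int(np.random.default_rng(42).integers(2147462579))
    # seed is now a deterministic function of fetch_seed(), not of hidden global state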
tests/scan/test_basic.py

@@ -254,8 +254,6 @@ def scan_nodes_from_fct(fct):
 class TestScan:
-    def setup_method(self):
-        utt.seed_rng()

     # generator network, only one output , type scalar ; no sequence or
     # non sequence arguments

@@ -299,7 +297,7 @@ class TestScan:
             if tmpdir is not None:
                 shutil.rmtree(tmpdir)

-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
        state = rng.uniform()
        steps = 5

@@ -332,7 +330,7 @@ class TestScan:
         assert all(i.value is None for i in scan_node.op.fn.input_storage)
         assert all(o.value is None for o in scan_node.op.fn.output_storage)

-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         state = rng.uniform()
         steps = 5

@@ -365,7 +363,7 @@ class TestScan:
             [state, n_steps], output, updates=updates, allow_input_downcast=True
         )
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         state = rng.uniform()
         steps = 5

@@ -404,7 +402,7 @@ class TestScan:
         # This assertion fails if savemem optimization failed on scan
         if config.mode != "FAST_COMPILE":
             assert nodes[0].op._scan_savemem_visited
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         my_f(rng.uniform(size=(3,)), 4, np.int64([2, 2, 3]))

     @pytest.mark.slow

@@ -451,7 +449,7 @@ class TestScan:
             [u, x0, W_in, W], output, updates=updates, allow_input_downcast=True
         )
         # get random initial values
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         v_u = rng.uniform(-5.0, 5.0, size=(4,))
         v_x0 = rng.uniform()
         W = rng.uniform()

@@ -468,7 +466,7 @@ class TestScan:
     # simple rnn, one input, one state, weights for each; input/state
     # are vectors, weights are scalars; using shared variables
     def test_one_sequence_one_output_weights_shared(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         u = vector("u")
         x0 = scalar("x0")
         W_in = shared(asarrayX(rng.uniform()), name="w_in")

@@ -503,7 +501,7 @@ class TestScan:
     # some rnn with multiple outputs and multiple inputs; other
     # dimension instead of scalars/vectors
     def test_multiple_inputs_multiple_outputs(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW_in2 = asarrayX(rng.uniform(-5.0, 5.0, size=(2,)))
         vW = asarrayX(rng.uniform(-5.0, 5.0, size=(2, 2)))
         vWout = asarrayX(rng.uniform(-5.0, 5.0, size=(2,)))

@@ -557,7 +555,7 @@ class TestScan:
     def test_multiple_outs_taps(self):
         l = 5
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW_in2 = asarrayX(rng.uniform(-2.0, 2.0, size=(2,)))
         vW = asarrayX(rng.uniform(-2.0, 2.0, size=(2, 2)))

@@ -778,7 +776,9 @@ class TestScan:
         # Call verify_grad to ensure the correctness of the second gradients
         floatX = config.floatX
-        inputs_test_values = [np.random.random(3).astype(floatX)]
+        inputs_test_values = [
+            np.random.default_rng(utt.fetch_seed()).random(3).astype(floatX)
+        ]
         utt.verify_grad(get_sum_of_grad, inputs_test_values)

     def test_verify_second_grad_mitsot1(self):

@@ -805,11 +805,13 @@ class TestScan:
         # Call verify_grad to ensure the correctness of the second gradients
         floatX = config.floatX
+        rng = np.random.default_rng(utt.fetch_seed())
         inputs_test_values = [
-            np.random.random((2, 3)).astype(floatX),
-            np.random.random(3).astype(floatX),
+            rng.random((2, 3)).astype(floatX),
+            rng.random(3).astype(floatX),
         ]
-        utt.verify_grad(get_sum_of_grad, inputs_test_values)
+        utt.verify_grad(get_sum_of_grad, inputs_test_values, rng=rng)

     def test_grad_two_scans(self):

@@ -818,7 +820,11 @@ class TestScan:
         t = imatrix("t")

         # forward pass
-        W = shared(np.random.randn(2, 2).astype("float32"), name="W", borrow=True)
+        W = shared(
+            np.random.default_rng(utt.fetch_seed()).random((2, 2)).astype("float32"),
+            name="W",
+            borrow=True,
+        )

         def forward_scanner(x_t):
             a2_t = dot(x_t, W)

@@ -841,7 +847,7 @@ class TestScan:
     # vectors, weights are scalars; using shared variables and past
     # taps (sequences and outputs)
     def test_using_taps_input_output(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW = asarrayX(rng.uniform())
         vW_in = asarrayX(rng.uniform())
         vu = asarrayX(rng.uniform(-5.0, 5.0, size=(4,)))

@@ -885,7 +891,7 @@ class TestScan:
     # vectors, weights are scalars; using shared variables and past
     # taps (sequences and outputs) and future taps for sequences
     def test_past_future_taps_shared(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW = asarrayX(rng.uniform())
         vW_in = asarrayX(rng.uniform())
         vu = asarrayX(rng.uniform(-5.0, 5.0, size=(6,)))

@@ -922,7 +928,7 @@ class TestScan:
     # simple rnn ; compute inplace version 1
     @utt.assertFailure_fast
     def test_inplace1(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW = asarrayX(np.random.uniform())
         vW_in = asarrayX(np.random.uniform())
         vu0 = asarrayX(rng.uniform(-5.0, 5.0, size=(3,)))

@@ -989,7 +995,7 @@ class TestScan:
     # simple rnn ; compute inplace version 2
     @utt.assertFailure_fast
     def test_inplace2(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW = asarrayX(np.random.uniform())
         vW_in = asarrayX(np.random.uniform())
         vu0 = asarrayX(rng.uniform(-5.0, 5.0, size=(3,)))

@@ -1057,7 +1063,7 @@ class TestScan:
     @utt.assertFailure_fast
     def test_inplace3(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vx0 = asarrayX(rng.uniform())
         vx1 = asarrayX(rng.uniform())

@@ -1079,15 +1085,15 @@ class TestScan:
     # Shared variable with updates
     def test_shared_arguments_with_updates(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())

-        vW1 = asarrayX(rng.rand(2, 3))
-        vW2 = asarrayX(rng.rand(3, 2))
-        vu1 = asarrayX(rng.rand(3, 2))
-        vu2 = asarrayX(rng.rand(3, 3))
-        vy0 = asarrayX(rng.rand(3, 2))
-        vy1 = asarrayX(rng.rand(2))
-        vu1 = asarrayX(rng.rand(3, 2))
+        vW1 = asarrayX(rng.random((2, 3)))
+        vW2 = asarrayX(rng.random((3, 2)))
+        vu1 = asarrayX(rng.random((3, 2)))
+        vu2 = asarrayX(rng.random((3, 3)))
+        vy0 = asarrayX(rng.random((3, 2)))
+        vy1 = asarrayX(rng.random((2)))
+        vu1 = asarrayX(rng.random((3, 2)))

         W1 = shared(vW1, "W1")
         W2 = shared(vW2, "W2")

@@ -1182,8 +1188,8 @@ class TestScan:
         )
         my_f = function([], values, updates=updates, allow_input_downcast=True)

-        rng_seed = np.random.RandomState(utt.fetch_seed()).randint(2 ** 30)
-        rng = np.random.RandomState(int(rng_seed))  # int() is for 32bit
+        rng_seed = np.random.default_rng(utt.fetch_seed()).integers(2 ** 30)
+        rng = np.random.default_rng(int(rng_seed))  # int() is for 32bit

         numpy_v = np.zeros((10, 2))
         for i in range(10):

@@ -1195,8 +1201,8 @@ class TestScan:
         utt.assert_allclose(aesara_v, numpy_v[5:, :])

     def test_gibbs_chain(self):
-        rng = np.random.RandomState(utt.fetch_seed())
-        v_W = np.array(rng.rand(20, 30) - 0.5, dtype="float32")
+        rng = np.random.default_rng(utt.fetch_seed())
+        v_W = np.array(rng.random((20, 30)) - 0.5, dtype="float32")
         v_vsample = np.array(
             rng.binomial(
                 1,

@@ -1205,8 +1211,8 @@ class TestScan:
             ),
             dtype="float32",
         )
-        v_bvis = np.array(rng.rand(20) - 0.5, dtype="float32")
-        v_bhid = np.array(rng.rand(30) - 0.5, dtype="float32")
+        v_bvis = np.array(rng.random((20)) - 0.5, dtype="float32")
+        v_bhid = np.array(rng.random((30)) - 0.5, dtype="float32")
         W = shared(v_W, "vW")
         bhid = shared(v_bhid, "vbhid")
         bvis = shared(v_bvis, "vbvis")

@@ -1231,12 +1237,12 @@ class TestScan:
             [vsample], aesara_vsamples[-1], updates=updates, allow_input_downcast=True
         )
-        _rng = np.random.RandomState(utt.fetch_seed())
-        rng_seed = _rng.randint(2 ** 30)
-        nrng1 = np.random.RandomState(int(rng_seed))  # int() is for 32bit
+        _rng = np.random.default_rng(utt.fetch_seed())
+        rng_seed = _rng.integers(2 ** 30)
+        nrng1 = np.random.default_rng(int(rng_seed))  # int() is for 32bit

-        rng_seed = _rng.randint(2 ** 30)
-        nrng2 = np.random.RandomState(int(rng_seed))  # int() is for 32bit
+        rng_seed = _rng.integers(2 ** 30)
+        nrng2 = np.random.default_rng(int(rng_seed))  # int() is for 32bit

         def numpy_implementation(vsample):
             for idx in range(10):

@@ -1256,7 +1262,7 @@ class TestScan:
         utt.assert_allclose(t_result, n_result)

     def test_only_shared_no_input_no_output(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         v_state = asarrayX(rng.uniform())
         state = shared(v_state, "vstate")

@@ -1284,7 +1290,7 @@ class TestScan:
         )
         f2 = function([u], outputs, updates=updates, allow_input_downcast=True)

-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         v_u = rng.uniform(-5.0, 5.0, size=(5,))
         numpy_result = v_u + 3

@@ -1299,7 +1305,7 @@ class TestScan:
         f = function([v], abs_expr, updates=abs_updates, allow_input_downcast=True)

-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vals = rng.uniform(-5.0, 5.0, size=(10,))
         abs_vals = abs(vals)
         aesara_vals = f(vals)

@@ -1328,7 +1334,7 @@ class TestScan:
             [u, x0, W_in, W], output, updates=updates, allow_input_downcast=True
         )
         # get random initial values
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         v_u = rng.uniform(-5.0, 5.0, size=(4,))
         v_x0 = rng.uniform()
         W = rng.uniform()

@@ -1349,7 +1355,7 @@ class TestScan:
         result, updates = aet_reduce(lambda x, y: x + y, v, s)

         f = function([v, s], result, updates=updates, allow_input_downcast=True)
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         v_v = rng.uniform(-5.0, 5.0, size=(5,))
         assert abs(np.sum(v_v) - f(v_v, 0.0)) < 1e-3

@@ -1388,7 +1394,7 @@ class TestScan:
         )
         # get random initial values
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         v_u = np.array(rng.uniform(-0.5, 0.5, size=(10,)), dtype=config.floatX)
         v_x0 = np.array(rng.uniform(), dtype=config.floatX)
         W = np.array(rng.uniform(), dtype=config.floatX)

@@ -1401,7 +1407,7 @@ class TestScan:
         assert max_err <= 1e-2

     def test_grad_multiple_outs(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW_in2 = asarrayX(rng.uniform(-0.1, 0.1, size=(2,)))
         vW = asarrayX(rng.uniform(-0.1, 0.1, size=(2, 2)))
         vWout = asarrayX(rng.uniform(-0.1, 0.1, size=(2,)))

@@ -1464,7 +1470,7 @@ class TestScan:
     @pytest.mark.slow
     def test_grad_multiple_outs_taps(self):
         n = 5
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW_in2 = asarrayX(rng.uniform(-0.2, 0.2, size=(2,)))
         vW = asarrayX(rng.uniform(-0.2, 0.2, size=(2, 2)))

@@ -1543,7 +1549,7 @@ class TestScan:
     @pytest.mark.slow
     def test_grad_multiple_outs_taps_backwards(self):
         n = 5
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW_in2 = asarrayX(rng.uniform(-0.2, 0.2, size=(2,)))
         vW = asarrayX(rng.uniform(-0.2, 0.2, size=(2, 2)))
         vWout = asarrayX(rng.uniform(-0.2, 0.2, size=(2,)))

@@ -1603,7 +1609,7 @@ class TestScan:
         assert max_err <= 1e-2

     def test_grad_multiple_outs_some_uncomputable(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW_in = asarrayX(rng.uniform(-3.0, 3.0, size=(2, 2)))
         v_u = asarrayX(rng.uniform(-3.0, 3.0, size=(5, 2)))
         v_u2 = np.array([1, 3, 4, 6, 8], dtype="int32")

@@ -1652,9 +1658,9 @@ class TestScan:
         def reset_rng_fn(fn, *args):
             for idx, arg in enumerate(fn.maker.expanded_inputs):
-                if arg.value and isinstance(arg.value.data, np.random.RandomState):
+                if arg.value and isinstance(arg.value.data, np.random.Generator):
                     obj = fn.maker.expanded_inputs[idx].value
-                    obj.data = np.random.RandomState(123)
+                    obj.data = np.random.default_rng(123)
                     fn.maker.expanded_inputs[idx].value = obj
             return fn(*args)

@@ -1686,7 +1692,7 @@ class TestScan:
             assert result == expected_result

     def test_grad_multiple_outs_some_truncate(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW_in = asarrayX(rng.uniform(-0.1, 0.1, size=(2, 2)))
         v_u = asarrayX(rng.uniform(-0.1, 0.1, size=(5, 2)))
         v_x0 = asarrayX(rng.uniform(-0.1, 0.1, size=(2,)))

@@ -1733,9 +1739,9 @@ class TestScan:
         def reset_rng_fn(fn, *args):
             for idx, arg in enumerate(fn.maker.expanded_inputs):
-                if arg.value and isinstance(arg.value.data, np.random.RandomState):
+                if arg.value and isinstance(arg.value.data, np.random.Generator):
                     obj = fn.maker.expanded_inputs[idx].value
-                    obj.data = np.random.RandomState(123)
+                    obj.data = np.random.default_rng(123)
                     fn.maker.expanded_inputs[idx].value = obj
             out = fn(*args)
             return out

@@ -1763,7 +1769,7 @@ class TestScan:
     def _grad_mout_helper(self, n_iters, mode):
         # Created on Tue Oct 07 13:28:51 2014
         # @author: vaneetke
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         n_hid = 3
         n_in = 1
         n_out = 1

@@ -1842,7 +1848,7 @@ class TestScan:
         f = function([x], [y, z], updates=updates, allow_input_downcast=True)
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         nx = rng.uniform(size=(10, 10))
         ny1, nz1 = f(nx)
         ny2, nz2 = f(nx)

@@ -1921,7 +1927,7 @@ class TestScan:
         a = vector()
         init_a = vector()
-        b = shared(np.random.rand(5, 4))
+        b = shared(np.random.default_rng(utt.fetch_seed()).random((5, 4)))

         def inner_func(a):
             return a + 1, OrderedDict([(b, 2 * b)])

@@ -2064,7 +2070,7 @@ class TestScan:
     # some rnn with multiple outputs and multiple inputs; other
     # dimension instead of scalars/vectors
    def test_reordering(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW_in2 = asarrayX(rng.uniform(-0.5, 0.5, size=(2,)))
         vW = asarrayX(rng.uniform(-0.5, 0.5, size=(2, 2)))

@@ -2141,7 +2147,7 @@ class TestScan:
         function(inputs=[to_scan, seq, f1], outputs=t_grad, allow_input_downcast=True)

     def test_save_mem(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW_in2 = asarrayX(rng.uniform(-0.5, 0.5, size=(2,)))
         vW = asarrayX(rng.uniform(-0.5, 0.5, size=(2, 2)))

@@ -2255,7 +2261,7 @@ class TestScan:
             allow_input_downcast=True,
         )
         # get random initial values
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         v_u = rng.uniform(-5.0, 5.0, size=(20,))

         # compute the output in numpy

@@ -2314,7 +2320,7 @@ class TestScan:
         )
         # get random initial values
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         v_u = rng.uniform(-5.0, 5.0, size=(20,))

         # compute the output in numpy

@@ -2591,7 +2597,7 @@ class TestScan:
         scans = [n for n in topo if isinstance(n.op, Scan)]
         assert len(scans) == 2

-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         x_val = rng.uniform(size=(4,)).astype(config.floatX)
         y_val = rng.uniform(size=(4,)).astype(config.floatX)
         # Run it so DebugMode can detect optimization problems.

@@ -2645,9 +2651,9 @@ class TestScan:
         M = init_predictive_output(inputs, targets, hyp, x_star, s_star)

-        X = np.random.random((10, 4))
-        Y = np.random.random((10, 3))
-        test_m = np.random.random((4,))
+        X = np.random.default_rng(utt.fetch_seed()).random((10, 4))
+        Y = np.random.default_rng(utt.fetch_seed()).random((10, 3))
+        test_m = np.random.default_rng(utt.fetch_seed()).random((4,))
         test_s = np.eye(4)

         # Compute expected outputs (jacobian of M wrt x_star)

@@ -2737,7 +2743,9 @@ class TestScan:
         mem_val = np.zeros((2,), dtype="float32")
         memory = shared(mem_val)
-        W = shared(np.random.random((5, 2)).astype("float32"))
+        W = shared(
+            np.random.default_rng(utt.fetch_seed()).random((5, 2)).astype("float32")
+        )

         def f(inp, mem):
             i = aet.join(0, inp, mem)

@@ -2751,7 +2759,7 @@ class TestScan:
         f = function([x], outs[0])
         f2 = function([x], outs[1])

-        x_val = np.random.random((4, 3)).astype("float32")
+        x_val = np.random.default_rng(utt.fetch_seed()).random((4, 3)).astype("float32")
         f_vals = f(x_val)
         memory.set_value(mem_val)

@@ -2858,7 +2866,7 @@ class TestScan:
     @pytest.mark.slow
     def test_rop2(self):
         seed = utt.fetch_seed()
-        rng = np.random.RandomState(seed)
+        rng = np.random.default_rng(seed)
         floatX = config.floatX
         v_u = np.array(rng.uniform(size=(3, 5)) - 0.5, dtype=floatX)
         v_W = np.array(rng.uniform(size=(5, 5)) - 0.5, dtype=floatX)

@@ -2940,7 +2948,7 @@ class TestScan:
     def test_rop(self):
         seed = utt.fetch_seed()
-        rng = np.random.RandomState(seed)
+        rng = np.random.default_rng(seed)
         floatX = config.floatX
         v_u = np.array(rng.uniform(size=(20, 5)), dtype=floatX)
         v_W = np.array(rng.uniform(size=(5, 5)), dtype=floatX)

@@ -3048,7 +3056,7 @@ class TestScan:
         assert len(scan_nodes) == 0

         seed = utt.fetch_seed()
-        rng = np.random.RandomState(seed)
+        rng = np.random.default_rng(seed)
         floatX = config.floatX
         v_h = np.array(rng.uniform(size=(2,)), dtype=floatX)
         v_W1 = np.array(rng.uniform(size=(2, 2)), dtype=floatX)

@@ -3094,8 +3102,8 @@ class TestScan:
         # Compare the results of the two implementations
         input_values = [
-            np.random.random((5, 5)).astype("float32"),
-            np.random.random((5, 5)).astype("float32"),
+            np.random.default_rng(utt.fetch_seed()).random((5, 5)).astype("float32"),
+            np.random.default_rng(utt.fetch_seed()).random((5, 5)).astype("float32"),
             np.arange(5).astype("float32"),
         ]

@@ -3585,9 +3593,13 @@ class TestScan:
         # Run the function and validate the outputs
         dtype = config.floatX
-        seq_value = np.random.random((10, 3)).astype(dtype)
-        out_init_value = np.random.random((3, 3)).astype(dtype)
-        non_seq_value = np.random.random(3).astype(dtype)
+        seq_value = (
+            np.random.default_rng(utt.fetch_seed()).random((10, 3)).astype(dtype)
+        )
+        out_init_value = (
+            np.random.default_rng(utt.fetch_seed()).random((3, 3)).astype(dtype)
+        )
+        non_seq_value = np.random.default_rng(utt.fetch_seed()).random(3).astype(dtype)

         outputs = fct(seq_value, out_init_value, non_seq_value)

@@ -3689,7 +3701,7 @@ class TestScan:
         assert tf([1.0, 2.0, -3.0, 4.0], 2.0) == 42

     def test_return_steps(self):
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vW_in2 = asarrayX(rng.uniform(-0.5, 0.5, size=(2,)))
         vW = asarrayX(rng.uniform(-0.5, 0.5, size=(2, 2)))

@@ -3817,8 +3829,14 @@ class TestScan:
         f = function(inputs=[x, w], outputs=get_outputs(x, w))

         # Test the function to ensure it returns valid results
-        x_value = np.random.random((2, 2, 3)).astype(config.floatX)
-        w_value = np.random.random((3, 3)).astype(config.floatX)
+        x_value = (
+            np.random.default_rng(utt.fetch_seed())
+            .random((2, 2, 3))
+            .astype(config.floatX)
+        )
+        w_value = (
+            np.random.default_rng(utt.fetch_seed()).random((3, 3)).astype(config.floatX)
+        )
         expected_output = np.tile(x_value[:, 0].sum(0), (3, 1)).transpose()

         output = f(x_value, w_value)

@@ -3846,7 +3864,7 @@ class TestScan:
         gw, gx = grad(loss, [w, xinit])
         grad_fn = function([xinit, w], [gx, gw], allow_input_downcast=True)
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         # If numbers are small, the gradients with respect to x are small
         # and the numeric differentiation becomes unstable.
         # To fix this issue I ensure we are sampling numbers larger in

@@ -4085,7 +4103,7 @@ class TestScan:
         f = function([v], gv)

         # Ensure the output of the function is valid
-        output = f(np.random.random(5))
+        output = f(np.random.default_rng(utt.fetch_seed()).random(5))
         utt.assert_allclose(output, np.ones(5))

     def test_dot_optimization(self):

@@ -4097,7 +4115,7 @@ class TestScan:
             outputs_info=[aet.zeros_like(A)],
         )
         f = function([A, B], S.owner.inputs[0][-1])
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         vA = rng.uniform(size=(5, 5)).astype(config.floatX)
         vB = rng.uniform(size=(5, 5)).astype(config.floatX)
         utt.assert_allclose(f(vA, vB), np.dot(vA.T, vB))

@@ -4159,7 +4177,7 @@ class TestScan:
             mode=Mode(linker="py"),
             allow_input_downcast=True,
         )
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         v_u = asarrayX(rng.uniform(size=(5,)))
         outs = f(v_u, [0, 0, 0], 0)
         utt.assert_allclose(outs[0], v_u + 1)

@@ -4196,7 +4214,7 @@ class TestScan:
             mode=Mode(linker="py"),
             allow_input_downcast=True,
         )
-        rng = np.random.RandomState(utt.fetch_seed())
+        rng = np.random.default_rng(utt.fetch_seed())
         v_w = asarrayX(rng.uniform())
         outs = f(v_w, [0, 0, 0], 0)
         utt.assert_allclose(outs[0], v_w + 1)

@@ -4627,7 +4645,7 @@ def test_speed_rnn():
     np.random.seed(2523452)
     r = np.arange(L * N).astype(config.floatX).reshape(L, N)
-    w = np.random.randn(N, N).astype(config.floatX)
+    w = np.random.default_rng(utt.fetch_seed()).random((N, N)).astype(config.floatX)

     def f_py():
         for i in range(1, L):

@@ -4694,7 +4712,7 @@ def test_speed_batchrnn():
     np.random.seed(2523452)
     r = np.arange(B * L * N).astype(config.floatX).reshape(L, B, N)
-    w = np.random.randn(N, N).astype(config.floatX)
+    w = np.random.default_rng(utt.fetch_seed()).random((N, N)).astype(config.floatX)

     t0 = time.time()
     for i in range(1, L):

@@ -4802,7 +4820,12 @@ def test_compute_test_value_grad_cast():
     h = matrix("h")
     h.tag.test_value = np.array(
         [[1, 2, 3, 4], [5, 6, 7, 8]], dtype=config.floatX
     )

-    w = shared(np.random.randn(4, 3).astype(config.floatX), name="w")
+    w = shared(
+        np.random.default_rng(utt.fetch_seed()).random((4, 3)).astype(config.floatX),
+        name="w",
+    )

     outputs, _ = scan(
         lambda i, h, w: (dot(h[i], w), i),

@@ -4848,7 +4871,9 @@ def test_default_value_broadcasted():
         return np.asarray(X, dtype=config.floatX)

     def init_weights(shape, name):
-        return shared(floatx(np.random.randn(*shape) * 0.1), name)
+        return shared(
+            floatx(np.random.default_rng(utt.fetch_seed()).random(shape) * 0.1), name
+        )

     X = matrix("X")
     in_size = 2

@@ -4869,7 +4894,7 @@ def test_default_value_broadcasted():
     gW_x = grad(cost, W_x)
     updates = [(W_x, W_x - 0.1 * gW_x)]
     f = function([X], outputs=cost, updates=updates)
-    f(np.random.rand(10, in_size).astype(X.dtype))
+    f(np.random.default_rng(utt.fetch_seed()).random((10, in_size)).astype(X.dtype))


 class TestInconsistentBroadcast:
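Note the reset_rng_fn helpers above: code that introspects a compiled function's inputs to swap out RNG state must now test against np.random.Generator, since that is the type stored for shared random state after this change. The two classes are unrelated, so the isinstance check and the replacement value change together:

    import numpy as np

    state = np.random.default_rng(123)
    assert isinstance(state, np.random.Generator)
    assert not isinstance(state, np.random.RandomState)  # unrelated types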
tests/scan/test_opt.py

@@ -24,7 +24,7 @@ class TestGaussNewton:
     """

     def setup_method(self):
-        self.rng = np.random.RandomState(utt.fetch_seed())
+        self.rng = np.random.default_rng(utt.fetch_seed())

     def _run(self, num_features, num_timesteps, batch_size, mode):
         # determine shapes of inputs and targets depending on the batch size
tests/sparse/sandbox/test_sp.py

@@ -33,9 +33,9 @@ class TestSP:
         bias = dvector()
         kerns = dmatrix()
         input = dmatrix()
-        rng = np.random.RandomState(3423489)
-        filters = rng.randn(nkern, np.prod(kshp))
-        biasvals = rng.randn(nkern)
+        rng = np.random.default_rng(3423489)
+        filters = rng.standard_normal((nkern, np.prod(kshp)))
+        biasvals = rng.standard_normal((nkern))

         for mode in ("FAST_COMPILE", "FAST_RUN"):
             ttot, ntot = 0, 0

@@ -133,7 +133,7 @@ class TestSP:
         # symbolic stuff
         kerns = [dmatrix(), dmatrix()]
         input = dmatrix()
-        # rng = np.random.RandomState(3423489)
+        # rng = np.random.default_rng(3423489)

         # build actual input images
         img2d = np.arange(bsize * np.prod(imshp)).reshape((bsize,) + imshp)

@@ -184,7 +184,7 @@ class TestSP:
     def test_maxpool(self):
         # generate flatted images
         maxpoolshps = ((2, 2), (3, 3), (4, 4), (5, 5), (6, 6))
-        imval = np.random.rand(4, 5, 10, 10)
+        imval = np.random.random((4, 5, 10, 10))

         images = dmatrix()
         for maxpoolshp in maxpoolshps:
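Not every call site gains a seeded Generator: test_maxpool only changes the spelling (np.random.rand(*shape) to np.random.random(shape)), and the module-level np.random.random still draws from NumPy's global legacy state. Only the sites rewritten to default_rng(...) become reproducible across runs. Contrast (plain NumPy):

    import numpy as np

    imval = np.random.random((4, 5, 10, 10))  # still NumPy's global state
    rng = np.random.default_rng(3423489)      # seeded, independent stream
    filters = rng.standard_normal((2, 9))     # reproducible across runs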
tests/sparse/test_basic.py
浏览文件 @
39c11472
...
...
@@ -140,8 +140,8 @@ def random_lil(shape, dtype, nnz):
huge
=
2
**
30
for
k
in
range
(
nnz
):
# set non-zeros in random locations (row x, col y)
idx
=
np
.
random
.
randint
(
1
,
huge
+
1
,
size
=
2
)
%
shape
value
=
np
.
random
.
rand
()
idx
=
np
.
random
.
default_rng
()
.
integers
(
1
,
huge
+
1
,
size
=
2
)
%
shape
value
=
np
.
random
.
rand
om
()
# if dtype *int*, value will always be zeros!
if
dtype
in
sparse
.
integer_dtypes
:
value
=
int
(
value
*
100
)
...
...
@@ -201,11 +201,11 @@ def sparse_random_inputs(
if
out_dtype
in
sparse
.
discrete_dtypes
:
if
not
gap
:
value
=
np
.
random
.
randint
(
50
,
size
=
shape
)
value
=
np
.
random
.
default_rng
()
.
integers
(
50
,
size
=
shape
)
elif
len
(
gap
)
==
2
:
value
=
np
.
random
.
randint
(
gap
[
0
],
gap
[
1
],
size
=
shape
)
value
=
np
.
random
.
default_rng
()
.
integers
(
gap
[
0
],
gap
[
1
],
size
=
shape
)
else
:
value
=
np
.
random
.
randint
(
gap
[
0
],
size
=
shape
)
value
=
np
.
random
.
default_rng
()
.
integers
(
gap
[
0
],
size
=
shape
)
else
:
if
not
gap
:
value
=
np
.
random
.
random
(
shape
)
...
...
@@ -240,7 +240,7 @@ def sparse_random_inputs(
if
explicit_zero
:
for
idx
in
range
(
n
):
assert
data
[
idx
]
.
nnz
>
1
,
"can't make a sparse matrix with explicit 0"
d_idx
=
np
.
random
.
randint
(
data
[
idx
]
.
nnz
)
d_idx
=
np
.
random
.
default_rng
()
.
integers
(
data
[
idx
]
.
nnz
)
data
[
idx
]
.
data
[
d_idx
]
=
0
# numpy 1.5.0 with scipy 0.9.0 have sp.sparse.XXX_matrix return
...
...
@@ -379,9 +379,6 @@ class TestVerifyGradSparse:
class
TestTranspose
:
def
setup_method
(
self
):
utt
.
seed_rng
()
def
test_transpose_csc
(
self
):
spe
=
sp
.
sparse
.
csc_matrix
(
sp
.
sparse
.
eye
(
5
,
3
))
a
=
as_sparse_variable
(
spe
)
...
...
@@ -491,7 +488,7 @@ class TestSparseInferShape(utt.InferShapeTester):
[
x
+
y
],
[
sp
.
sparse
.
csr_matrix
(
random_lil
((
10
,
40
),
config
.
floatX
,
3
)),
np
.
random
.
randn
(
10
,
40
)
.
astype
(
config
.
floatX
),
np
.
random
.
standard_normal
((
10
,
40
)
)
.
astype
(
config
.
floatX
),
],
(
AddSD
,
sparse
.
opt
.
AddSD_ccode
),
)
...
...
@@ -517,7 +514,7 @@ class TestSparseInferShape(utt.InferShapeTester):
[
x
*
y
],
[
sp
.
sparse
.
csr_matrix
(
random_lil
((
10
,
40
),
config
.
floatX
,
3
)),
np
.
random
.
randn
(
10
,
40
)
.
astype
(
config
.
floatX
),
np
.
random
.
standard_normal
((
10
,
40
)
)
.
astype
(
config
.
floatX
),
],
MulSD
,
excluding
=
[
"local_mul_s_d"
],
...
...
@@ -621,7 +618,7 @@ class TestSparseInferShape(utt.InferShapeTester):
self
.
_compile_and_check
(
[
x
],
[
csc_from_dense
(
x
)],
[
np
.
random
.
randn
(
10
,
40
)
.
astype
(
config
.
floatX
)],
[
np
.
random
.
standard_normal
((
10
,
40
)
)
.
astype
(
config
.
floatX
)],
csc_from_dense
.
__class__
,
)
...
...
@@ -636,8 +633,8 @@ class TestSparseInferShape(utt.InferShapeTester):
[
out
],
[
np
.
zeros
((
40
,
10
),
dtype
=
config
.
floatX
),
np
.
random
.
randn
(
12
,
10
)
.
astype
(
config
.
floatX
),
np
.
random
.
randint
(
low
=
0
,
high
=
40
,
size
=
(
12
,)),
np
.
random
.
standard_normal
((
12
,
10
)
)
.
astype
(
config
.
floatX
),
np
.
random
.
default_rng
()
.
integers
(
low
=
0
,
high
=
40
,
size
=
(
12
,)),
],
ConstructSparseFromList
,
)
...
...
@@ -670,8 +667,8 @@ class TestConstructSparseFromList:
assert
isinstance
(
g
.
owner
.
op
,
ConstructSparseFromList
)
# Test the sparse grad
valm
=
np
.
random
.
rand
(
5
,
4
)
.
astype
(
config
.
floatX
)
valv
=
np
.
random
.
randint
(
0
,
5
,
10
)
valm
=
np
.
random
.
rand
om
((
5
,
4
)
)
.
astype
(
config
.
floatX
)
valv
=
np
.
random
.
default_rng
()
.
integers
(
0
,
5
,
10
)
m
=
matrix
()
shared_v
=
aesara
.
shared
(
valv
)
...
...
@@ -884,13 +881,11 @@ class TestAddMul:
class
TestComparison
:
def
setup_method
(
self
):
utt
.
seed_rng
()
# took from tensor basic_test.py
def
_rand_ranged
(
self
,
min
,
max
,
shape
):
return
np
.
asarray
(
np
.
random
.
rand
(
*
shape
)
*
(
max
-
min
)
+
min
,
dtype
=
config
.
floatX
np
.
random
.
rand
om
(
shape
)
*
(
max
-
min
)
+
min
,
dtype
=
config
.
floatX
)
tests
=
[
...
...
@@ -1014,12 +1009,9 @@ class TestComparison:
class
TestConversion
:
def
setup_method
(
self
):
utt
.
seed_rng
()
@pytest.mark.skip
def
test_basic
(
self
):
a
=
aet
.
as_tensor_variable
(
np
.
random
.
rand
(
5
))
a
=
aet
.
as_tensor_variable
(
np
.
random
.
rand
om
((
5
)
))
s
=
csc_from_dense
(
a
)
val
=
eval_outputs
([
s
])
assert
str
(
val
.
dtype
)
==
"float64"
...
...
@@ -1027,7 +1019,7 @@ class TestConversion:
@pytest.mark.skip
def
test_basic_1
(
self
):
a
=
aet
.
as_tensor_variable
(
np
.
random
.
rand
(
5
))
a
=
aet
.
as_tensor_variable
(
np
.
random
.
rand
om
((
5
)
))
s
=
csr_from_dense
(
a
)
val
=
eval_outputs
([
s
])
assert
str
(
val
.
dtype
)
==
"float64"
...
...
@@ -1078,9 +1070,6 @@ class TestConversion:
class
TestCsmProperties
:
def
setup_method
(
self
):
utt
.
seed_rng
()
def
test_csm_properties_grad
(
self
):
sp_types
=
{
"csc"
:
sp
.
sparse
.
csc_matrix
,
"csr"
:
sp
.
sparse
.
csr_matrix
}
...
...
@@ -1123,9 +1112,6 @@ class TestCsmProperties:
class
TestCsm
:
def
setup_method
(
self
):
utt
.
seed_rng
()
def
test_csm_grad
(
self
):
sp_types
=
{
"csc"
:
sp
.
sparse
.
csc_matrix
,
"csr"
:
sp
.
sparse
.
csr_matrix
}
...
...
@@ -1223,9 +1209,6 @@ class TestCsm:
class
TestStructuredDot
:
def
setup_method
(
self
):
utt
.
seed_rng
()
def
test_structureddot_csc_grad
(
self
):
# shortcut: testing csc in float32, testing csr in float64
...
...
@@ -1233,7 +1216,7 @@ class TestStructuredDot:
# allocate a random sparse matrix
spmat
=
sp
.
sparse
.
csc_matrix
(
random_lil
((
4
,
3
),
"float32"
,
3
))
mat
=
np
.
asarray
(
np
.
random
.
randn
(
3
,
2
),
"float32"
)
mat
=
np
.
asarray
(
np
.
random
.
standard_normal
((
3
,
2
)
),
"float32"
)
verify_grad_sparse
(
structured_dot
,
[
spmat
,
mat
],
structured
=
True
)
...
...
@@ -1249,7 +1232,7 @@ class TestStructuredDot:
# allocate a random sparse matrix
spmat
=
sp
.
sparse
.
csr_matrix
(
random_lil
((
4
,
3
),
"float64"
,
3
))
mat
=
np
.
asarray
(
np
.
random
.
randn
(
3
,
2
),
"float64"
)
mat
=
np
.
asarray
(
np
.
random
.
standard_normal
((
3
,
2
)
),
"float64"
)
verify_grad_sparse
(
structured_dot
,
[
spmat
,
mat
],
structured
=
True
)
...
...
@@ -1289,7 +1272,9 @@ class TestStructuredDot:
# The lil makes an intc on my computer when sparse_dtype
# is int32.
spmat
.
dtype
=
np
.
dtype
(
sparse_dtype
)
mat
=
np
.
asarray
(
np
.
random
.
randn
(
N
,
K
)
*
9
,
dtype
=
dense_dtype
)
mat
=
np
.
asarray
(
np
.
random
.
standard_normal
((
N
,
K
))
*
9
,
dtype
=
dense_dtype
)
# print 'DTYPES', sparse_dtype, dense_dtype
# print 'sym types', a.type, b.type
# print 'dtype strings', spmat.dtype, mat.dtype
...
...
@@ -1316,9 +1301,9 @@ class TestStructuredDot:
spmat
=
sp
.
sparse
.
lil_matrix
((
4
,
6
),
dtype
=
"int64"
)
for
i
in
range
(
5
):
# set non-zeros in random locations (row x, col y)
x
=
np
.
floor
(
np
.
random
.
rand
()
*
spmat
.
shape
[
0
])
y
=
np
.
floor
(
np
.
random
.
rand
()
*
spmat
.
shape
[
1
])
spmat
[
x
,
y
]
=
np
.
random
.
rand
()
*
10
x
=
np
.
floor
(
np
.
random
.
rand
om
()
*
spmat
.
shape
[
0
])
y
=
np
.
floor
(
np
.
random
.
rand
om
()
*
spmat
.
shape
[
1
])
spmat
[
x
,
y
]
=
np
.
random
.
rand
om
()
*
10
spmat
=
sp
.
sparse
.
csc_matrix
(
spmat
)
images
=
TensorType
(
dtype
=
"float32"
,
broadcastable
=
[
False
,
False
])(
"images"
)
...
...
@@ -1392,7 +1377,7 @@ class TestStructuredDot:
(
400
,
3000
,
200
,
6000
),
]:
spmat
=
sp
.
sparse
.
csc_matrix
(
random_lil
((
M
,
N
),
sparse_dtype
,
nnz
))
mat
=
np
.
asarray
(
np
.
random
.
randn
(
N
,
K
),
dense_dtype
)
mat
=
np
.
asarray
(
np
.
random
.
standard_normal
((
N
,
K
)
),
dense_dtype
)
aesara_times
=
[]
scipy_times
=
[]
for
i
in
range
(
5
):
...
...
@@ -1440,7 +1425,7 @@ class TestStructuredDot:
(
400
,
3000
,
200
,
6000
),
]:
spmat
=
sp
.
sparse
.
csr_matrix
(
random_lil
((
M
,
N
),
sparse_dtype
,
nnz
))
mat
=
np
.
asarray
(
np
.
random
.
randn
(
N
,
K
),
dense_dtype
)
mat
=
np
.
asarray
(
np
.
random
.
standard_normal
((
N
,
K
)
),
dense_dtype
)
t0
=
time
.
time
()
aesara_result
=
f
(
spmat
,
mat
)
t1
=
time
.
time
()
...
...
@@ -1469,7 +1454,6 @@ class TestDots(utt.InferShapeTester):
super
()
.
setup_method
()
x_size
=
(
10
,
100
)
y_size
=
(
100
,
1000
)
utt
.
seed_rng
()
self
.
x_csr
=
sp
.
sparse
.
csr_matrix
(
np
.
random
.
binomial
(
1
,
0.5
,
x_size
),
dtype
=
aesara
.
config
.
floatX
...
...
@@ -1604,7 +1588,9 @@ class TestDots(utt.InferShapeTester):
f
=
aesara
.
function
(
inputs
=
[
I
,
C
],
outputs
=
y
)
i
=
np
.
asarray
([[
4
,
3
,
7
,
7
],
[
2
,
8
,
4
,
5
]],
dtype
=
intX
)
a
=
np
.
asarray
(
np
.
random
.
randint
(
0
,
100
,
(
size
,
size
)),
dtype
=
intX
)
a
=
np
.
asarray
(
np
.
random
.
default_rng
()
.
integers
(
0
,
100
,
(
size
,
size
)),
dtype
=
intX
)
f
(
i
,
a
)
def
test_csr_dense_grad
(
self
):
...
...
@@ -1614,7 +1600,7 @@ class TestDots(utt.InferShapeTester):
# allocate a random sparse matrix
spmat
=
sp
.
sparse
.
csr_matrix
(
random_lil
((
4
,
3
),
"float64"
,
3
))
mat
=
np
.
asarray
(
np
.
random
.
randn
(
2
,
4
),
"float64"
)
mat
=
np
.
asarray
(
np
.
random
.
standard_normal
((
2
,
4
)
),
"float64"
)
def
buildgraph_T
(
mat
):
return
Dot
()(
mat
,
spmat
)
...
...
@@ -1632,7 +1618,7 @@ class TestUsmm:
y_size = (100, 200)
z_size = (x_size[0], y_size[1])
self.rng = np.random.RandomState(seed=utt.fetch_seed())
self.rng = np.random.default_rng(seed=utt.fetch_seed())
self.x = np.asarray(self.rng.binomial(1, 0.5, x_size), dtype=aesara.config.floatX)
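Seeded RandomState instances map directly onto seeded Generators, so fixtures that need reproducible draws keep the same structure. A minimal sketch, with a literal seed standing in for utt.fetch_seed():

import numpy as np

rng = np.random.default_rng(seed=1827)  # seed value is illustrative
x = np.asarray(rng.binomial(1, 0.5, (10, 100)), dtype="float64")

# re-creating the Generator with the same seed reproduces the same draws
rng2 = np.random.default_rng(seed=1827)
assert np.array_equal(x, np.asarray(rng2.binomial(1, 0.5, (10, 100)), dtype="float64"))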
...
...
@@ -2305,7 +2291,7 @@ class TestRemove0(utt.InferShapeTester):
class TestGetItem:
def setup_method(self):
self.rng = np.random.RandomState(utt.fetch_seed())
self.rng = np.random.default_rng(utt.fetch_seed())
def test_GetItemList(self):
...
...
@@ -2759,7 +2745,8 @@ class TestAddSSData(utt.InferShapeTester):
variable = getattr(aesara.sparse, format + "_matrix")
rand = np.array(
np.random.randint(1, 4, size=(3, 4)) - 1, dtype=aesara.config.floatX
np.random.default_rng(utt.fetch_seed()).integers(1, 4, size=(3, 4)) - 1,
dtype=aesara.config.floatX,
)
constant = as_sparse_format(rand, format)
...
...
@@ -2833,7 +2820,6 @@ def elemwise_checker(
else:
self.gap_grad = gap
# Ensure the test's name is correct.
utt.seed_rng()
assert eval(self.__class__.__name__) is self.__class__
def test_op(self):
...
...
@@ -3143,16 +3129,13 @@ ConjTester = elemwise_checker(sparse.conj, np.conj, grad_test=False)
class TestMulSV:
def setup_method(self):
utt.seed_rng()
def test_mul_s_v_grad(self):
sp_types = {"csc": sp.sparse.csc_matrix, "csr": sp.sparse.csr_matrix}
for format in ["csr", "csc"]:
for dtype in ["float32", "float64"]:
spmat = sp_types[format](random_lil((4, 3), dtype, 3))
mat = np.asarray(np.random.rand(3), dtype=dtype)
mat = np.asarray(np.random.random((3)), dtype=dtype)
verify_grad_sparse(mul_s_v, [spmat, mat], structured=True)
...
...
@@ -3166,7 +3149,7 @@ class TestMulSV:
f = aesara.function([x, y], mul_s_v(x, y))
spmat = sp_types[format](random_lil((4, 3), dtype, 3))
mat = np.asarray(np.random.rand(3), dtype=dtype)
mat = np.asarray(np.random.random((3)), dtype=dtype)
out = f(spmat, mat)
...
...
@@ -3174,16 +3157,13 @@ class TestMulSV:
class TestStructuredAddSV:
def setup_method(self):
utt.seed_rng()
def test_structured_add_s_v_grad(self):
sp_types = {"csc": sp.sparse.csc_matrix, "csr": sp.sparse.csr_matrix}
for format in ["csr", "csc"]:
for dtype in ["float32", "float64"]:
spmat = sp_types[format](random_lil((4, 3), dtype, 3))
mat = np.asarray(np.random.rand(3), dtype=dtype)
mat = np.asarray(np.random.random((3)), dtype=dtype)
verify_grad_sparse(structured_add_s_v, [spmat, mat], structured=True)
...
...
@@ -3199,7 +3179,7 @@ class TestStructuredAddSV:
spmat = sp_types[format](random_lil((4, 3), dtype, 3))
spones = spmat.copy()
spones.data = np.ones_like(spones.data)
mat = np.asarray(np.random.rand(3), dtype=dtype)
mat = np.asarray(np.random.random((3)), dtype=dtype)
out = f(spmat, mat)
...
...
@@ -3285,9 +3265,18 @@ class TestSamplingDot(utt.InferShapeTester):
x.append(sparse.csr_matrix())
# unsquare shape
a = [
np.array(np.random.randint(1, 6, size=(4, 3)) - 1, dtype=aesara.config.floatX),
np.array(np.random.randint(1, 6, size=(5, 3)) - 1, dtype=aesara.config.floatX),
np.array(np.random.randint(1, 3, size=(4, 5)) - 1, dtype=aesara.config.floatX),
np.array(
np.random.default_rng().integers(1, 6, size=(4, 3)) - 1,
dtype=aesara.config.floatX,
),
np.array(
np.random.default_rng().integers(1, 6, size=(5, 3)) - 1,
dtype=aesara.config.floatX,
),
np.array(
np.random.default_rng().integers(1, 3, size=(4, 5)) - 1,
dtype=aesara.config.floatX,
),
]
a[2] = sp.sparse.csr_matrix(a[2])
...
...
tests/tensor/test_sort.py View file @ 39c11472
...
...
@@ -34,9 +34,10 @@ _all_dtypes = integer_dtypes + float_dtypes
def gen_unique_vector(size, dtype):
rng = np.random.default_rng(utt.fetch_seed())
# generate a randomized vector with unique elements
retval = np.arange(size) * 3.0 + np.random.uniform(-1.0, 1.0)
return (retval[np.random.permutation(size)] - size * 1.5).astype(dtype)
retval = np.arange(size) * 3.0 + rng.uniform(-1.0, 1.0)
return (retval[rng.permutation(size)] - size * 1.5).astype(dtype)
class TestSort:
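Because gen_unique_vector now builds its own Generator from the fetched seed instead of consuming global np.random state, repeated calls with the same seed are deterministic. A runnable sketch, with a hypothetical seed argument standing in for utt.fetch_seed():

import numpy as np

def gen_unique_vector_sketch(size, dtype, seed=42):  # seed stands in for utt.fetch_seed()
    rng = np.random.default_rng(seed)
    # arange * 3.0 plus one scalar offset keeps all elements unique
    retval = np.arange(size) * 3.0 + rng.uniform(-1.0, 1.0)
    return (retval[rng.permutation(size)] - size * 1.5).astype(dtype)

# same seed, same vector: the helper no longer depends on hidden global state
assert np.array_equal(gen_unique_vector_sketch(8, "float64"), gen_unique_vector_sketch(8, "float64"))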
...
...
@@ -97,81 +98,85 @@ class TestSort:
utt.assert_allclose(gv, gt)
def test_grad_vector(self):
data = np.random.random((10)).astype(aesara.config.floatX)
data = self.rng.random((10)).astype(aesara.config.floatX)
utt.verify_grad(sort, [data])
def test_grad_none_axis(self):
data = np.random.random((10)).astype(aesara.config.floatX)
data = self.rng.random((10)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, None), [data])
utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.random((2, 3)).astype(aesara.config.floatX)
data = self.rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, None), [data])
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, None), [data])
def test_grad_negative_axis_2d(self):
data = np.random.random((2, 3)).astype(aesara.config.floatX)
data = self.rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -1), [data])
data = np.random.random((2, 3)).astype(aesara.config.floatX)
data = self.rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -2), [data])
def test_grad_negative_axis_3d(self):
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -1), [data])
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -2), [data])
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -3), [data])
def test_grad_negative_axis_4d(self):
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -1), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -2), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -3), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, -4), [data])
def test_grad_nonnegative_axis_2d(self):
data = np.random.random((2, 3)).astype(aesara.config.floatX)
data = self.rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.random((2, 3)).astype(aesara.config.floatX)
data = self.rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 1), [data])
def test_grad_nonnegative_axis_3d(self):
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 1), [data])
data = np.random.random((2, 3, 4)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 2), [data])
def test_grad_nonnegative_axis_4d(self):
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 1), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 2), [data])
data = np.random.random((2, 3, 4, 2)).astype(aesara.config.floatX)
data = self.rng.random((2, 3, 4, 2)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: sort(x, 3), [data])
class TestSortInferShape(utt.InferShapeTester):
def setup_method(self):
self.rng = np.random.default_rng(seed=utt.fetch_seed())
super().setup_method()
def test_sort(self):
x = matrix()
self._compile_and_check(
[x],
[sort(x)],
[np.random.randn(10, 40).astype(aesara.config.floatX)],
[self.rng.standard_normal(size=(10, 40)).astype(aesara.config.floatX)],
SortOp,
)
self._compile_and_check(
[x],
[sort(x, axis=None)],
[np.random.randn(10, 40).astype(aesara.config.floatX)],
[self.rng.standard_normal(size=(10, 40)).astype(aesara.config.floatX)],
SortOp,
)
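Where a test class already carries a seeded Generator, the module-level call becomes a method on that instance; Generator.standard_normal accepts an explicit size keyword. A minimal sketch with an illustrative seed and dtype:

import numpy as np

rng = np.random.default_rng(42)  # illustrative seed
vals = rng.standard_normal(size=(10, 40)).astype("float32")
assert vals.shape == (10, 40)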
...
...
@@ -238,14 +243,15 @@ def test_argsort():
def test_argsort_grad():
rng = np.random.default_rng(seed=utt.fetch_seed())
# Testing grad of argsort
data = np.random.random((2, 3)).astype(aesara.config.floatX)
data = rng.random((2, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: argsort(x, axis=-1), [data])
data = np.random.random((2, 3, 4, 5)).astype(aesara.config.floatX)
data = rng.random((2, 3, 4, 5)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: argsort(x, axis=-3), [data])
data = np.random.random((2, 3, 3)).astype(aesara.config.floatX)
data = rng.random((2, 3, 3)).astype(aesara.config.floatX)
utt.verify_grad(lambda x: argsort(x, axis=2), [data])
...
...
@@ -434,10 +440,9 @@ class TestTopK:
assert any(
[isinstance(n.op, self.op_class) for n in fn.maker.fgraph.apply_nodes]
)
xval = np.repeat(np.random.uniform(-100.0, 100.0, size=size // 2).astype(dtype), 2)
xval = xval[np.random.permutation(size)]
rng = np.random.default_rng(utt.fetch_seed())
xval = np.repeat(rng.uniform(-100.0, 100.0, size=size // 2).astype(dtype), 2)
xval = xval[rng.permutation(size)]
yval = fn(xval)
idx = slice(-k, None) if k > 0 else slice(-k)
goal = np.argsort(xval)[idx].astype("int32")
...
...
tests/typed_list/test_basic.py View file @ 39c11472
...
...
@@ -27,7 +27,6 @@ from aesara.typed_list.basic import (
make_list,
)
from aesara.typed_list.type import TypedListType
from tests import unittest_tools as utt
def rand_ranged_matrix(minimum, maximum, shape):
...
...
@@ -55,9 +54,6 @@ def random_lil(shape, dtype, nnz):
class TestGetItem:
def setup_method(self):
utt.seed_rng()
def test_sanity_check_slice(self):
mySymbolicMatricesList = TypedListType(
...
...
tests/typed_list/test_opt.py View file @ 39c11472
...
...
@@ -7,7 +7,7 @@ from aesara.compile.io import In
from aesara.tensor.type import TensorType, matrix, scalar
from aesara.typed_list.basic import Append, Extend, Insert, Remove, Reverse
from aesara.typed_list.type import TypedListType
from tests.tensor.utils import rand_ranged
from tests.tensor.utils import random_ranged
class TestInplace:
...
...
@@ -26,9 +26,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged(-1000, 1000, [100, 101])
x = random_ranged(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
y = random_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x, y]), [y, x])
...
...
@@ -50,9 +50,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged(-1000, 1000, [100, 101])
x = random_ranged(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
y = random_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], y), [x, y])
...
...
@@ -77,9 +77,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged(-1000, 1000, [100, 101])
x = random_ranged(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
y = random_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], [y]), [x, y])
...
...
@@ -105,9 +105,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged(-1000, 1000, [100, 101])
x = random_ranged(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
y = random_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], np.asarray(1, dtype="int64"), y), [x, y])
...
...
@@ -129,9 +129,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged(-1000, 1000, [100, 101])
x = random_ranged(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
y = random_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x, y], y), [x])
...
...
tests/typed_list/test_type.py View file @ 39c11472
...
...
@@ -5,14 +5,10 @@ import aesara
from aesara.tensor.type import TensorType
from aesara.typed_list.basic import TypedListVariable
from aesara.typed_list.type import TypedListType
from tests import unittest_tools as utt
from tests.tensor.utils import rand_ranged
from tests.tensor.utils import random_ranged
class TestTypedListType:
def setup_method(self):
utt.seed_rng()
def test_wrong_input_on_creation(self):
# Typed list type should raises an
# error if the argument passed for
...
...
@@ -63,7 +59,7 @@ class TestTypedListType:
myType = TypedListType(TensorType(aesara.config.floatX, (False, False)))
x = rand_ranged(-1000, 1000, [100, 100])
x = random_ranged(-1000, 1000, [100, 100])
assert np.array_equal(myType.filter([x]), [x])
...
...
@@ -81,7 +77,7 @@ class TestTypedListType:
def test_load_alot(self):
myType = TypedListType(TensorType(aesara.config.floatX, (False, False)))
x = rand_ranged(-1000, 1000, [10, 10])
x = random_ranged(-1000, 1000, [10, 10])
testList = []
for i in range(10000):
testList.append(x)
...
...
@@ -95,7 +91,7 @@ class TestTypedListType:
myType = TypedListType(myNestedType)
x = rand_ranged(-1000, 1000, [100, 100])
x = random_ranged(-1000, 1000, [100, 100])
assert np.array_equal(myType.filter([[x]]), [[x]])
...
...