testgroup / pytensor · Commits

Commit f970c385
Authored Nov 24, 2025 by ricardoV94
Committed by Ricardo Vieira, Dec 15, 2025

Specify mode for tests that are C-specific

Parent: 23d6b3cd

Showing 14 changed files with 57 additions and 76 deletions (+57 −76)
tests/compile/function/test_types.py   +2  −2
tests/compile/test_profiling.py        +1  −6
tests/link/c/test_cmodule.py           +5  −11
tests/link/c/test_op.py                +1  −3
tests/link/c/test_params_type.py       +2  −2
tests/link/c/test_type.py              +3  −5
tests/link/test_vm.py                  +3  −4
tests/scan/test_basic.py               +1  −1
tests/scan/test_rewriting.py           +3  −3
tests/tensor/test_blas.py              +25 −28
tests/tensor/test_blas_c.py            +4  −6
tests/tensor/test_math.py              +1  −1
tests/tensor/test_sharedvar.py         +3  −0
tests/test_printing.py                 +3  −4
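The recurring change below: tests that exercise the C backend used to derive their mode from the global `config.mode` (special-casing `FAST_COMPILE`, `DebugMode`, or an empty `config.cxx`), and now pass an explicit `mode="CVM"` instead, so they always run under the C VM linker regardless of the suite's default mode. A minimal sketch of the before/after shape, not part of the commit, assuming a working C++ compiler so the CVM linker is available:

    import pytensor
    import pytensor.tensor as pt

    x = pt.vector("x")

    # Before: the compiled function's backend depended on the globally
    # configured mode, so tests branched on pytensor.config.mode.
    f_default = pytensor.function([x], 2 * x)

    # After: the C VM linker is requested explicitly, making the test
    # independent of whatever default mode the suite runs under.
    f_cvm = pytensor.function([x], 2 * x, mode="CVM")
    print(f_cvm([1.0, 2.0]))  # [2. 4.]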
tests/compile/function/test_types.py

@@ -624,7 +624,7 @@ class TestFunction:
     def test_constant_output(self):
         # Test that if the output is a constant, we respect the pytensor memory interface
-        f = function([], pt.constant([4]))
+        f = function([], pt.constant([4]), mode="CVM")
         # print f.maker.fgraph.toposort()
         out = f()
         assert (out == 4).all()
@@ -635,7 +635,7 @@ class TestFunction:
         assert (out2 == 4).all()
         # Test that if the output is a constant and borrow, we respect the pytensor memory interface
-        f = function([], Out(pt.constant([4]), borrow=True))
+        f = function([], Out(pt.constant([4]), borrow=True), mode="CVM")
         # print f.maker.fgraph.toposort()
         out = f()
         assert (out == 4).all()
tests/compile/test_profiling.py

@@ -33,12 +33,7 @@ class TestProfiling:
         p = ProfileStats(False, gpu_checks=False)
-        if config.mode in ("DebugMode", "DEBUG_MODE", "FAST_COMPILE"):
-            m = "FAST_RUN"
-        else:
-            m = None
-        f = function(x, z, profile=p, name="test_profiling", mode=m)
+        f = function(x, z, profile=p, name="test_profiling", mode="CVM")
         inp = [np.arange(1024, dtype="float32") + 1 for i in range(len(x))]
         f(*inp)
tests/link/c/test_cmodule.py

@@ -92,21 +92,15 @@ def test_inter_process_cache():
     """
     x, y = dvectors("xy")
-    f = function([x, y], [MyOp()(x), MyOp()(y)])
+    f = function([x, y], [MyOp()(x), MyOp()(y)], mode="CVM")
     f(np.arange(60), np.arange(60))
-    if config.mode == "FAST_COMPILE" or config.cxx == "":
-        assert MyOp.nb_called == 0
-    else:
-        assert MyOp.nb_called == 1
+    assert MyOp.nb_called == 1

     # What if we compile a new function with new variables?
     x, y = dvectors("xy")
-    f = function([x, y], [MyOp()(x), MyOp()(y)])
+    f = function([x, y], [MyOp()(x), MyOp()(y)], mode="CVM")
     f(np.arange(60), np.arange(60))
-    if config.mode == "FAST_COMPILE" or config.cxx == "":
-        assert MyOp.nb_called == 0
-    else:
-        assert MyOp.nb_called == 1
+    assert MyOp.nb_called == 1


 @pytest.mark.filterwarnings("error")
@@ -401,7 +395,7 @@ def _f_build_cache_race_condition(factor):
     # optimization passes, so we need these config changes to prevent the
     # exceptions from being caught
     a = pt.vector()
-    f = pytensor.function([a], factor * a)
+    f = pytensor.function([a], factor * a, mode="CVM")
     return f(np.array([1], dtype=config.floatX))
tests/link/c/test_op.py

@@ -98,9 +98,7 @@ class TestCOp:
     def test_op_struct(self):
         sop = StructOp()
         c = sop(pytensor.tensor.constant(0))
-        mode = None
-        if config.mode == "FAST_COMPILE":
-            mode = "FAST_RUN"
+        mode = "CVM"
         f = pytensor.function([], c, mode=mode)
         rval = f()
         assert rval == 0
tests/link/c/test_params_type.py

@@ -339,8 +339,8 @@ class TestParamsType:
         x = matrix(dtype="float64")
         y1 = QuadraticOpFunc(a, b, c)(x)
         y2 = QuadraticCOpFunc(a, b, c)(x)
-        f1 = pytensor.function([x], y1)
-        f2 = pytensor.function([x], y2)
+        f1 = pytensor.function([x], y1, mode="CVM")
+        f2 = pytensor.function([x], y2, mode="CVM")
         shape = (100, 100)
         vx = (
             np.random.normal(size=shape[0] * shape[1]).astype("float64").reshape(*shape)
tests/link/c/test_type.py

@@ -73,9 +73,7 @@ def test_cdata():
     i = TensorType("float32", shape=(None,))()
     c = ProdOp()(i)
     i2 = GetOp()(c)
-    mode = None
-    if pytensor.config.mode == "FAST_COMPILE":
-        mode = "FAST_RUN"
+    mode = "CVM"

     # This should be a passthrough function for vectors
     f = pytensor.function([i], i2, mode=mode)
@@ -266,7 +264,7 @@ class TestEnumTypes:
             c_sub = MyOpEnumList("-")(a, b)
             c_multiply = MyOpEnumList("*")(a, b)
             c_divide = MyOpEnumList("/")(a, b)
-            f = pytensor.function([a, b], [c_add, c_sub, c_multiply, c_divide])
+            f = pytensor.function([a, b], [c_add, c_sub, c_multiply, c_divide], mode="CVM")
             va = 12
             vb = 15
             ref = [va + vb, va - vb, va * vb, va // vb]
@@ -281,7 +279,7 @@ class TestEnumTypes:
         million = MyOpCEnumType("million")()
         billion = MyOpCEnumType("billion")()
         two_billions = MyOpCEnumType("two_billions")()
-        f = pytensor.function([], [million, billion, two_billions])
+        f = pytensor.function([], [million, billion, two_billions], mode="CVM")
         val_million, val_billion, val_two_billions = f()
         assert val_million == 1000000
         assert val_billion == val_million * 1000
tests/link/test_vm.py

@@ -13,7 +13,6 @@ from pytensor.graph.fg import FunctionGraph
 from pytensor.graph.op import Op
 from pytensor.ifelse import ifelse
 from pytensor.link.c.basic import OpWiseCLinker
-from pytensor.link.c.exceptions import MissingGXX
 from pytensor.link.utils import map_storage
 from pytensor.link.vm import VM, Loop, Stack, VMLinker
 from pytensor.tensor.math import cosh, tanh
@@ -388,10 +387,10 @@ def test_VMLinker_make_vm_no_cvm():
     with config.change_flags(cxx=""):
         # Make sure that GXX isn't present
-        with pytest.raises(MissingGXX):
-            import pytensor.link.c.cvm
-
-            reload(pytensor.link.c.cvm)
+        # with pytest.raises(MissingGXX):
+        import pytensor.link.c.cvm
+
+        reload(pytensor.link.c.cvm)

         # Make sure that `cvm` module is missing
         with patch.dict("sys.modules", {"pytensor.link.c.cvm": None}):
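For context on the hunk above: "CVM" names a mode whose linker is the C implementation of PyTensor's virtual machine, which is why `pytensor.link.c.cvm` is tied to `config.cxx` being set. A pure-Python VM can still be requested explicitly; this sketch is an illustration, not part of the commit, using the `VMLinker` and `Mode` import paths that appear in this commit's diffs:

    from pytensor.compile.mode import Mode
    from pytensor.link.vm import VMLinker

    # use_cloop=False keeps the evaluation loop in Python, so no C++
    # compiler is needed; use_cloop=True would try the compiled CVM.
    py_vm_mode = Mode(linker=VMLinker(use_cloop=False), optimizer="fast_run")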
tests/scan/test_basic.py

@@ -1112,7 +1112,7 @@ class TestScan:
         final_result = result[-1]
-        f = function(inputs=[A, k], outputs=final_result)
+        f = function(inputs=[A, k], outputs=final_result, mode="CVM")
         f(np.asarray([2, 3, 0.1, 0, 1], dtype=config.floatX), 4)

         # There should be 3 outputs greater than 10: prior_result[0] at step 3,
tests/scan/test_rewriting.py

@@ -6,7 +6,7 @@ import pytensor.tensor as pt
 from pytensor import function, scan, shared
 from pytensor.compile.builders import OpFromGraph
 from pytensor.compile.io import In
-from pytensor.compile.mode import get_default_mode
+from pytensor.compile.mode import get_default_mode, get_mode
 from pytensor.configdefaults import config
 from pytensor.gradient import grad, jacobian
 from pytensor.graph.basic import Constant, equal_computations
@@ -1796,7 +1796,7 @@ def test_inner_replace_dot():
     W = matrix("W")
     h = matrix("h")
-    mode = get_default_mode().including("scan")  # .excluding("BlasOpt")
+    mode = get_mode("CVM").including("scan")  # .excluding("BlasOpt")

     o = scan(
         lambda hi, him1, W: (hi, dot(hi + him1, W)),
@@ -1922,7 +1922,7 @@ def test_opt_order():
     A = matrix("A")
     z = scan(dot, sequences=[], non_sequences=[x, A], n_steps=2, return_updates=False)
-    f = function([x, A], z, mode="FAST_RUN")
+    f = function([x, A], z, mode="CVM")
     topo = f.maker.fgraph.toposort()
     assert any(isinstance(node.op, Dot22) for node in topo)
tests/tensor/test_blas.py

@@ -11,7 +11,7 @@ import pytensor.scalar as ps
 import pytensor.tensor as pt
 from pytensor.compile.function import function
 from pytensor.compile.io import In
-from pytensor.compile.mode import Mode
+from pytensor.compile.mode import Mode, get_mode
 from pytensor.compile.sharedvalue import shared
 from pytensor.configdefaults import config
 from pytensor.gradient import grad
@@ -71,15 +71,7 @@ from tests import unittest_tools as utt
 from tests.tensor.utils import inplace_func, makeTester, random

-if config.mode == "FAST_COMPILE":
-    mode_not_fast_compile = "FAST_RUN"
-else:
-    mode_not_fast_compile = config.mode
-
-mode_blas_opt = pytensor.compile.get_default_mode().including(
-    "BlasOpt", "specialize", "InplaceBlasOpt"
-)
-mode_blas_opt = mode_blas_opt.excluding("c_blas")
+mode_blas_opt = get_mode("CVM").excluding("c_blas")


 def test_dot_eq():
@@ -214,7 +206,7 @@ class TestGemm:
         f = function(
             [a, b],
             updates=[(s, lr1 * dot(a, b) + l2_reg * lr2 * s)],
-            mode=mode_not_fast_compile,
+            mode="CVM",
         ).maker.fgraph.toposort()
         # [Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
         # <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
@@ -226,7 +218,7 @@ class TestGemm:
         f = function(
             [a, b],
             updates=[(s, lr1 * (dot(a, b) - l2_reg * s))],
-            mode=mode_not_fast_compile,
+            mode="CVM",
         ).maker.fgraph.toposort()
         # [Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
         # <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
@@ -238,7 +230,7 @@ class TestGemm:
         f = function(
             [a, b],
             updates=[(s, s - lr1 * (s * 0.0002 + dot(a, b)))],
-            mode=mode_not_fast_compile,
+            mode="CVM",
         ).maker.fgraph.toposort()
         # [Gemm{inplace}(<TensorType(float64, matrix)>, -0.01,
         # <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
@@ -448,7 +440,11 @@ class TestGemmNoFlags:
         B1 = self.get_variable(B, transpose_B, slice_B)
         C1 = self.get_variable(C, transpose_C, slice_C)
-        return function([alpha, A, B, beta, C], self.gemm(C1, alpha, A1, B1, beta))
+        return function(
+            [alpha, A, B, beta, C],
+            self.gemm(C1, alpha, A1, B1, beta),
+            mode=mode_blas_opt,
+        )

     def generate_value(self, dtype, width, height, to_transpose, to_slice, rng):
         if to_slice:
@@ -583,7 +579,7 @@ def just_gemm(i, o, ishapes=None, max_graphlen=0, expected_nb_gemm=1):
     f = inplace_func(
         [In(ii, mutable=True, allow_downcast=True) for ii in i],
         o,
-        mode="FAST_RUN",
+        mode=mode_blas_opt,
         on_unused_input="ignore",
     )
     nb_gemm = 0
@@ -680,7 +676,7 @@ def test_gemm_opt_double_gemm():
     f = inplace_func(
         [In(ii, mutable=True) for ii in i],
         o,
-        mode="FAST_RUN",
+        mode=mode_blas_opt,
         on_unused_input="ignore",
     )
     for node in f.maker.fgraph.apply_nodes:
@@ -818,10 +814,10 @@ def test_gemm_opt_vector_stuff():
     X, Y, a = matrix(), matrix(), scalar()
     u, v = vector(), vector()

-    f = inplace_func([a, u, v], a + dot(u, v), mode="FAST_RUN")
+    f = inplace_func([a, u, v], a + dot(u, v), mode=mode_blas_opt)
     assert gemm_inplace not in [n.op for n in f.maker.fgraph.apply_nodes]

-    f = inplace_func([a, u, X, Y], a * u + dot(X, Y), mode="FAST_RUN")
+    f = inplace_func([a, u, X, Y], a * u + dot(X, Y), mode=mode_blas_opt)
     assert gemm_inplace not in [n.op for n in f.maker.fgraph.apply_nodes]
@@ -886,7 +882,7 @@ def test_inplace0():
     )
     R, S, c = matrix("R"), matrix("S"), scalar("c")

-    f = inplace_func([Z, b, R, S], [Z * (Z + b * dot(R, S).T)], mode="FAST_RUN")
+    f = inplace_func([Z, b, R, S], [Z * (Z + b * dot(R, S).T)], mode=mode_blas_opt)
     assert gemm_inplace not in [n.op for n in f.maker.fgraph.apply_nodes]
     assert gemm_no_inplace in [n.op for n in f.maker.fgraph.apply_nodes]
@@ -894,7 +890,7 @@ def test_inplace0():
     f = inplace_func(
         [X, Y, Z, a, b, R, S, c],
         [Z * (c * Z + a * dot(X, Y) + b * dot(R, S).T)],
-        mode="FAST_RUN",
+        mode=mode_blas_opt,
     )
     assert gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]
@@ -902,7 +898,7 @@
 def test_inplace1():
     X, Y, Z, _a, _b = XYZab()
     # with > 2 terms in the overall addition
-    f = inplace_func([X, Y, Z], [Z + Z + dot(X, Y)], mode="FAST_RUN")
+    f = inplace_func([X, Y, Z], [Z + Z + dot(X, Y)], mode=mode_blas_opt)
     # pytensor.printing.debugprint(f)
     # it doesn't work inplace because we didn't mark Z as mutable input
     assert [n.op for n in f.maker.fgraph.apply_nodes] == [gemm_no_inplace]
@@ -1119,7 +1115,7 @@ def test_dot22scalar_cast():
 def test_local_dot22_to_dot22scalar():
     # This test that the bug in gh-1507 is really fixed
     A = dmatrix()
-    mode = pytensor.compile.mode.get_default_mode()
+    mode = get_mode("CVM")
     opt = in2out(local_dot22_to_dot22scalar)
     mode = mode.__class__(optimizer=opt)
@@ -1359,7 +1355,7 @@ class TestGemv(unittest_tools.OptimizationTestMixin):
         beta = shared(np.asarray(1.0, dtype=config.floatX), name="beta")

         z = beta * y + alpha * dot(A, x)
-        f = function([A, x, y], z)
+        f = function([A, x, y], z, mode=mode_blas_opt)

         # Matrix value
         A_val = np.ones((5, 3), dtype=config.floatX)
@@ -1726,8 +1722,7 @@ class TestGer(unittest_tools.OptimizationTestMixin):
     shared = staticmethod(shared)

     def setup_method(self):
-        self.mode = pytensor.compile.get_default_mode().including("fast_run")
-        self.mode = self.mode.excluding("c_blas", "scipy_blas")
+        self.mode = get_mode("cvm").excluding("c_blas", "scipy_blas")
         dtype = self.dtype = "float64"  # optimization isn't dtype-dependent
         self.A = tensor(dtype=dtype, shape=(None, None))
         self.a = tensor(dtype=dtype, shape=())
@@ -1940,8 +1935,7 @@ class TestGer(unittest_tools.OptimizationTestMixin):
 class TestBlasStrides:
     dtype = "float64"
-    mode = pytensor.compile.get_default_mode()
-    mode = mode.including("fast_run").excluding("gpu", "c_blas", "scipy_blas")
+    mode = get_mode("cvm").excluding("c_blas", "scipy_blas")

     def random(self, *shape, rng=None):
         return np.asarray(rng.random(shape), dtype=self.dtype)
@@ -2326,6 +2320,8 @@ class TestBlasStrides:
 class TestInferShape(unittest_tools.InferShapeTester):
+    mode = mode_blas_opt
+
     def test_dot22(self):
         rng = np.random.default_rng(unittest_tools.fetch_seed())
         x, y = matrices("xy")
@@ -2475,6 +2471,7 @@ TestBatchedDot = makeTester(
         bad_dim1=(random(3, 5, 7, rng=rng), random(3, 5, 7, rng=rng)),
         bad_dim2=(random(3, 5, 7, rng=rng), random(3, 8, 3, rng=rng)),
     ),
+    mode=mode_blas_opt,
 )
@@ -2536,7 +2533,7 @@ def test_batched_dot_not_contiguous():
 def test_batched_dot_blas_flags():
     """Test that BatchedDot works regardless of presence of BLAS flags"""
-    mode = "FAST_RUN"
+    mode = mode_blas_opt
     rng = np.random.default_rng(2708)
     x = tensor("x", shape=(2, 5, 3))
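A note on the `mode_blas_opt` rewrite in this file: `get_mode("CVM")` resolves the mode name to a `Mode` object, and `.excluding("c_blas")` returns a copy whose rewrite query filters out the C-BLAS rewrites, so these tests still compile under the C VM while exercising the generic `Gemm` path instead of the C-BLAS specializations. A short usage sketch, illustration only:

    import numpy as np
    import pytensor
    import pytensor.tensor as pt
    from pytensor.compile.mode import get_mode

    a = pt.matrix("a")
    b = pt.matrix("b")
    # Compile with the C VM linker but without the c_blas-tagged rewrites.
    mode = get_mode("CVM").excluding("c_blas")
    f = pytensor.function([a, b], a @ b, mode=mode)
    print(f(np.eye(2), np.full((2, 2), 3.0)))  # [[3. 3.] [3. 3.]]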
tests/tensor/test_blas_c.py

@@ -5,6 +5,7 @@ import pytest
 import pytensor
 import pytensor.tensor as pt
+from pytensor.compile import get_mode
 from pytensor.tensor.basic import AllocEmpty
 from pytensor.tensor.blas import Ger
 from pytensor.tensor.blas_c import CGemv, CGer, must_initialize_y_gemv
@@ -22,9 +23,7 @@ from tests.tensor.test_blas import BaseGemv, TestBlasStrides
 from tests.unittest_tools import OptimizationTestMixin

-mode_blas_opt = pytensor.compile.get_default_mode().including(
-    "BlasOpt", "specialize", "InplaceBlasOpt", "c_blas"
-)
+mode_blas_opt = get_mode("CVM")


 def skip_if_blas_ldflags_empty(*functions_detected):
@@ -46,7 +45,7 @@ class TestCGer(OptimizationTestMixin):
     def manual_setup_method(self, dtype="float64"):
         # This tests can run even when pytensor.config.blas__ldflags is empty.
         self.dtype = dtype
-        self.mode = pytensor.compile.get_default_mode().including("fast_run")
+        self.mode = mode_blas_opt
         self.A = tensor(dtype=dtype, shape=(None, None))
         self.a = tensor(dtype=dtype, shape=())
         self.x = tensor(dtype=dtype, shape=(None,))
@@ -130,10 +129,9 @@ class TestCGemv(OptimizationTestMixin):
     """

     def setup_method(self):
-        # This tests can run even when pytensor.config.blas__ldflags is empty.
         dtype = "float64"
         self.dtype = dtype
-        self.mode = pytensor.compile.get_default_mode().including("fast_run")
+        self.mode = mode_blas_opt
         # matrix
         self.A = tensor("A", dtype=dtype, shape=(None, None))
         self.Aval = np.ones((2, 3), dtype=dtype)
tests/tensor/test_math.py

@@ -3752,7 +3752,7 @@ class TestMatMul:
     def test_dot22_opt(self):
         x, y = matrices("xy")
-        fn = function([x, y], x @ y, mode="FAST_RUN")
+        fn = function([x, y], x @ y, mode="CVM")
         [node] = fn.maker.fgraph.apply_nodes
         assert isinstance(node.op, Dot22)
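The assertion style in this hunk generalizes: a compiled function exposes its rewritten graph via `fn.maker.fgraph`, so a test can pin down which op a given mode produced. A sketch of that inspection pattern; the `Dot22` import path is an assumption, not shown in this commit:

    import pytensor
    import pytensor.tensor as pt
    from pytensor.tensor.blas import Dot22  # assumed import path

    x, y = pt.matrices("x", "y")
    fn = pytensor.function([x, y], x @ y, mode="CVM")

    # Every node in the rewritten graph is visible here; under CVM the
    # matmul is expected to lower to Dot22, as test_dot22_opt asserts.
    ops = [node.op for node in fn.maker.fgraph.apply_nodes]
    assert any(isinstance(op, Dot22) for op in ops)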
tests/tensor/test_sharedvar.py

@@ -513,6 +513,7 @@ def makeSharedTester(
             updates=[
                 (s_shared, pytensor.tensor.dot(a_shared, b_shared) + s_shared)
             ],
+            mode="CVM",
         )
         topo = f.maker.fgraph.toposort()
         f()
@@ -546,6 +547,7 @@ def makeSharedTester(
                     pytensor.tensor.dot(a_shared, b_shared)
                     + s_shared_specify,
                 )
             ],
+            mode="CVM",
         )
         topo = f.maker.fgraph.toposort()
         shp = f()
@@ -577,6 +579,7 @@ def makeSharedTester(
                     pytensor.tensor.dot(a_shared, b_shared)
                     + s_shared_specify,
                 )
             ],
+            mode="CVM",
         )
         topo = f.maker.fgraph.toposort()
         shp = f()
tests/test_printing.py

@@ -158,8 +158,7 @@ def test_debugprint():
     F = D + E
     G = C + F

-    mode = pytensor.compile.get_default_mode().including("fusion")
-    g = pytensor.function([A, B, D, E], G, mode=mode)
+    g = pytensor.function([A, B, D, E], G)

     # just test that it work
     s = StringIO()
@@ -250,7 +249,7 @@ def test_debugprint():
     assert s == reference

     # Test the `profile` handling when profile data is missing
-    g = pytensor.function([A, B, D, E], G, mode=mode, profile=True)
+    g = pytensor.function([A, B, D, E], G, profile=True)

     s = StringIO()
     debugprint(g, file=s, id_type="", print_storage=True)
@@ -291,7 +290,7 @@ def test_debugprint():
     J = dvector()
     s = StringIO()
     debugprint(
-        pytensor.function([A, B, D, J], A + (B.dot(J) - D), mode="FAST_RUN"),
+        pytensor.function([A, B, D, J], A + (B.dot(J) - D), mode="CVM"),
         file=s,
         id_type="",
         print_destroy_map=True,