testgroup / pytensor / Commits

Commit d3dd34e7
Authored Jan 27, 2024 by Virgile Andreani
Committed by Ricardo Vieira, Feb 05, 2024

Manual simplification of RUF005 fixes

Parent: 8aeda39b

Showing 30 changed files with 75 additions and 81 deletions (+75 −81)
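For readers unfamiliar with the rule: RUF005 is Ruff's collection-literal-concatenation check, and its automated fixes can leave redundant list()/tuple() wrappers around iterables that are immediately unpacked with *. This commit removes those wrappers by hand; the semantics are unchanged. A minimal sketch of the before/after pattern, using hypothetical values rather than code from this repository:

condition = True
monitored_vars = ("a", "b", "c")  # any iterable

# Before: the list() call is redundant, since * already iterates its operand
inputs_before = [condition, *list(monitored_vars)]

# After: unpack the iterable directly; the resulting list is identical
inputs_after = [condition, *monitored_vars]

assert inputs_before == inputs_after  # same elements, same order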
pytensor/breakpoint.py                       +1 −1
pytensor/graph/rewriting/basic.py            +1 −1
pytensor/graph/rewriting/unify.py            +1 −1
pytensor/ifelse.py                           +1 −1
pytensor/link/c/type.py                      +2 −2
pytensor/link/numba/dispatch/elemwise.py     +1 −1
pytensor/link/numba/dispatch/extra_ops.py    +1 −1
pytensor/link/numba/dispatch/random.py       +1 −1
pytensor/link/numba/dispatch/scalar.py       +1 −1
pytensor/scalar/basic.py                     +3 −3
pytensor/scan/rewriting.py                   +5 −5
pytensor/tensor/basic.py                     +3 −3
pytensor/tensor/conv/abstract_conv.py        +27 −31
pytensor/tensor/extra_ops.py                 +1 −1
pytensor/tensor/fourier.py                   +4 −4
pytensor/tensor/random/basic.py              +1 −1
pytensor/tensor/rewriting/blockwise.py       +1 −1
pytensor/tensor/rewriting/linalg.py          +1 −1
pytensor/tensor/rewriting/math.py            +2 −2
pytensor/tensor/slinalg.py                   +2 −2
pytensor/tensor/subtensor.py                 +2 −2
tests/link/jax/test_random.py                +1 −1
tests/link/numba/test_random.py              +1 −1
tests/sparse/sandbox/test_sp.py              +1 −1
tests/tensor/conv/test_abstract_conv.py      +1 −1
tests/tensor/rewriting/test_basic.py         +1 −1
tests/tensor/rewriting/test_subtensor.py     +5 −7
tests/tensor/test_basic.py                   +1 −1
tests/tensor/test_math.py                    +1 −1
tests/test_ifelse.py                         +1 −1
pytensor/breakpoint.py
@@ -92,7 +92,7 @@ class PdbBreakpoint(Op):
             new_op.inp_types.append(monitored_vars[i].type)

         # Build the Apply node
-        inputs = [condition, *list(monitored_vars)]
+        inputs = [condition, *monitored_vars]
         outputs = [inp.type() for inp in monitored_vars]
         return Apply(op=new_op, inputs=inputs, outputs=outputs)
pytensor/graph/rewriting/basic.py
@@ -1139,7 +1139,7 @@ def node_rewriter(
         if inplace:
             dh_handler = dh.DestroyHandler
             req = (
-                *tuple(requirements),
+                *requirements,
                 lambda fgraph: fgraph.attach_feature(dh_handler()),
             )
         rval = FromFunctionNodeRewriter(f, tracks, req)
pytensor/graph/rewriting/unify.py
@@ -283,7 +283,7 @@ def convert_strs_to_vars(
                 var_map[pattern] = v
             return v
         elif isinstance(y, tuple):
-            return etuple(*tuple(_convert(e) for e in y))
+            return etuple(*(_convert(e) for e in y))
         elif isinstance(y, (Number, np.ndarray)):
             from pytensor.tensor import as_tensor_variable
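One detail worth noting in generator-expression cases such as the one above: when a generator expression is unpacked with *, Python's grammar requires it to stay parenthesized, so the simplification keeps *(... for ...) and only drops the needless tuple(...) materialization. A small self-contained illustration with made-up values:

ys = (1, 2, 3)

# Unpacking the parenthesized generator directly skips the intermediate
# tuple that tuple(...) would have built:
doubled = [*(e * 2 for e in ys)]
assert doubled == [2, 4, 6]
# `[*e * 2 for e in ys]` without the parentheses is a SyntaxError.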
pytensor/ifelse.py
@@ -397,7 +397,7 @@ def ifelse(
         new_ifelse = IfElse(n_outs=len(then_branch), as_view=False, name=name)

-        ins = [condition, *list(then_branch), *list(else_branch)]
+        ins = [condition, *then_branch, *else_branch]
         rval = new_ifelse(*ins, return_list=True)

     if rval_type is None:
pytensor/link/c/type.py
@@ -508,8 +508,8 @@ class EnumType(CType, dict):
             (
                 type(self),
                 self.ctype,
-                *tuple((k, self[k]) for k in sorted(self.keys())),
-                *tuple((a, self.aliases[a]) for a in sorted(self.aliases.keys())),
+                *((k, self[k]) for k in sorted(self.keys())),
+                *((a, self.aliases[a]) for a in sorted(self.aliases.keys())),
             )
         )
pytensor/link/numba/dispatch/elemwise.py
@@ -447,7 +447,7 @@ def jit_compile_reducer(
 def create_axis_apply_fn(fn, axis, ndim, dtype):
     axis = normalize_axis_index(axis, ndim)

-    reaxis_first = (*tuple(i for i in range(ndim) if i != axis), axis)
+    reaxis_first = (*(i for i in range(ndim) if i != axis), axis)

     @numba_basic.numba_njit(boundscheck=False)
     def axis_apply_fn(x):
pytensor/link/numba/dispatch/extra_ops.py
@@ -44,7 +44,7 @@ def numba_funcify_CumOp(op: CumOp, node: Apply, **kwargs):
     if axis < 0 or axis >= ndim:
         raise ValueError(f"Invalid axis {axis} for array with ndim {ndim}")

-    reaxis_first = (axis, *tuple(i for i in range(ndim) if i != axis))
+    reaxis_first = (axis, *(i for i in range(ndim) if i != axis))
     reaxis_first_inv = tuple(np.argsort(reaxis_first))

     if mode == "add":
pytensor/link/numba/dispatch/random.py
@@ -240,7 +240,7 @@ def create_numba_random_fn(
         np_global_env["numba_vectorize"] = numba_basic.numba_vectorize

     unique_names = unique_name_generator(
-        [np_random_fn_name, *list(np_global_env.keys()), "rng", "size", "dtype"],
+        [np_random_fn_name, *np_global_env.keys(), "rng", "size", "dtype"],
         suffix_sep="_",
     )
pytensor/link/numba/dispatch/scalar.py
@@ -115,7 +115,7 @@ def {scalar_op_fn_name}({input_names}):
     global_env.update(input_tmp_dtype_names)

     unique_names = unique_name_generator(
-        [scalar_op_fn_name, "scalar_func_numba", *list(global_env.keys())],
+        [scalar_op_fn_name, "scalar_func_numba", *global_env.keys()],
         suffix_sep="_",
     )
pytensor/scalar/basic.py
@@ -416,7 +416,7 @@ class ScalarType(CType, HasDataType, HasShape):
         )

     def upcast(self, *others):
-        return upcast(*[x.dtype for x in [self, *list(others)]])
+        return upcast(*[x.dtype for x in [self, *others]])

     def make_variable(self, name=None):
         return ScalarVariable(self, None, name=name)

@@ -1501,7 +1501,7 @@ class IsNan(FixedLogicalComparison):
     def c_code_cache_version(self):
         scalarop_version = super().c_code_cache_version()
-        return (*tuple(scalarop_version), 3)
+        return (*scalarop_version, 3)


 isnan = IsNan()

@@ -1529,7 +1529,7 @@ class IsInf(FixedLogicalComparison):
     def c_code_cache_version(self):
         scalarop_version = super().c_code_cache_version()
-        return (*tuple(scalarop_version), 3)
+        return (*scalarop_version, 3)


 isinf = IsInf()
pytensor/scan/rewriting.py
@@ -203,7 +203,7 @@ def remove_constants_and_unused_inputs_scan(fgraph, node):
             allow_gc=op.allow_gc,
         )
         nw_outs = nwScan(*nw_outer, return_list=True)
-        return dict([("remove", [node]), *list(zip(node.outputs, nw_outs))])
+        return dict([("remove", [node]), *zip(node.outputs, nw_outs)])
     else:
         return False

@@ -1664,7 +1664,7 @@ def save_mem_new_scan(fgraph, node):
                     )
                 else:
                     fslice = sanitize(cnf_slice[0])
-                nw_slice = (fslice, *tuple(old_slices[1:]))
+                nw_slice = (fslice, *old_slices[1:])
                 nw_pos = inv_compress_map[idx]

@@ -1711,7 +1711,7 @@ def save_mem_new_scan(fgraph, node):
                         sanitize(stop),
                         sanitize(cnf_slice[0].step),
                     ),
-                    *tuple(old_slices[1:]),
+                    *old_slices[1:],
                 )
             else:

@@ -1726,7 +1726,7 @@ def save_mem_new_scan(fgraph, node):
                     cnf_slice[0] - nw_steps - init_l[pos] + store_steps[pos]
                 )
-                nw_slice = (sanitize(position), *tuple(old_slices[1:]))
+                nw_slice = (sanitize(position), *old_slices[1:])
             subtens = Subtensor(nw_slice)
             sl_ins = get_slice_elements(
                 nw_slice, lambda entry: isinstance(entry, Variable)
             )

@@ -2275,7 +2275,7 @@ def scan_merge_inouts(fgraph, node):
         new_outer_out_mit_mot.append(outer_omm)
     na.outer_out_mit_mot = new_outer_out_mit_mot
     if remove:
-        return dict([("remove", remove), *list(zip(node.outputs, na.outer_outputs))])
+        return dict([("remove", remove), *zip(node.outputs, na.outer_outputs)])
     return na.outer_outputs
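A remark on the dict([...]) rewrites in this file: zip(...) already yields the (key, value) pairs that dict accepts, so spreading it into the list literal without a list() wrapper builds the same mapping. A quick sketch with made-up names:

node_outputs = ["o1", "o2"]
nw_outs = ["n1", "n2"]

# Both forms produce {"remove": ["node"], "o1": "n1", "o2": "n2"}
before = dict([("remove", ["node"]), *list(zip(node_outputs, nw_outs))])
after = dict([("remove", ["node"]), *zip(node_outputs, nw_outs)])
assert before == after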
pytensor/tensor/basic.py
@@ -2394,7 +2394,7 @@ class Join(COp):
                 "Only tensors with the same number of dimensions can be joined"
             )

-        inputs = [as_tensor_variable(axis), *list(tensors)]
+        inputs = [as_tensor_variable(axis), *tensors]

         if inputs[0].type.dtype not in int_dtypes:
             raise TypeError(f"Axis value {inputs[0]} must be an integer type")

@@ -2854,7 +2854,7 @@ def flatten(x, ndim=1):
         raise ValueError(f"ndim {ndim} out of bound [1, {_x.ndim + 1})")

     if ndim > 1:
-        dims = (*tuple(_x.shape[: ndim - 1]), -1)
+        dims = (*_x.shape[: ndim - 1], -1)
     else:
         dims = (-1,)

@@ -4217,7 +4217,7 @@ def _make_along_axis_idx(arr_shape, indices, axis):
         raise IndexError("`indices` must be an integer array")

     shape_ones = (1,) * indices.ndim
-    dest_dims = [*list(range(axis)), None, *list(range(axis + 1, indices.ndim))]
+    dest_dims = [*range(axis), None, *range(axis + 1, indices.ndim)]

     # build a fancy index, consisting of orthogonal aranges, with the
     # requested index inserted at the right location
pytensor/tensor/conv/abstract_conv.py
@@ -1883,12 +1883,12 @@ def frac_bilinear_upsampling(input, frac_ratio):
     pad_kern = pt.concatenate(
         (
             pt.zeros(
-                (*tuple(kern.shape[:2]), pad[0], kern.shape[-1]),
+                (*kern.shape[:2], pad[0], kern.shape[-1]),
                 dtype=config.floatX,
             ),
             kern,
             pt.zeros(
-                (*tuple(kern.shape[:2]), double_pad[0] - pad[0], kern.shape[-1]),
+                (*kern.shape[:2], double_pad[0] - pad[0], kern.shape[-1]),
                 dtype=config.floatX,
             ),
         ),

@@ -1896,10 +1896,10 @@ def frac_bilinear_upsampling(input, frac_ratio):
     )
     pad_kern = pt.concatenate(
         (
-            pt.zeros((*tuple(pad_kern.shape[:3]), pad[1]), dtype=config.floatX),
+            pt.zeros((*pad_kern.shape[:3], pad[1]), dtype=config.floatX),
             pad_kern,
             pt.zeros(
-                (*tuple(pad_kern.shape[:3]), double_pad[1] - pad[1]),
+                (*pad_kern.shape[:3], double_pad[1] - pad[1]),
                 dtype=config.floatX,
             ),
         ),

@@ -2520,7 +2520,7 @@ class AbstractConv(BaseAbstractConv):
                 (
                     img.shape[0],
                     img.shape[1],
-                    *tuple(
+                    *(
                         img.shape[i + 2] + pad[i][0] + pad[i][1]
                         for i in range(self.convdim)
                     ),

@@ -2531,7 +2531,7 @@ class AbstractConv(BaseAbstractConv):
                 (
                     slice(None),
                     slice(None),
-                    *tuple(
+                    *(
                         slice(pad[i][0], img.shape[i + 2] + pad[i][0])
                         for i in range(self.convdim)
                     ),

@@ -2584,8 +2584,8 @@ class AbstractConv(BaseAbstractConv):
             axes_order = (
                 0,
                 1 + self.convdim,
-                *tuple(range(1, 1 + self.convdim)),
-                *tuple(range(2 + self.convdim, kern.ndim)),
+                *range(1, 1 + self.convdim),
+                *range(2 + self.convdim, kern.ndim),
             )
             kern = kern.transpose(axes_order)

@@ -2601,9 +2601,7 @@ class AbstractConv(BaseAbstractConv):
                 (
                     slice(None),
                     slice(None),
-                    *tuple(
-                        slice(None, None, self.subsample[i]) for i in range(self.convdim)
-                    ),
+                    *(slice(None, None, self.subsample[i]) for i in range(self.convdim)),
                 )
             ]
         o[0] = node.outputs[0].type.filter(conv_out)

@@ -2860,7 +2858,7 @@ class AbstractConv_gradWeights(BaseAbstractConv):
                 (
                     img.shape[0],
                     img.shape[1],
-                    *tuple(
+                    *(
                         img.shape[i + 2] + pad[i][0] + pad[i][1]
                         for i in range(self.convdim)
                     ),

@@ -2871,7 +2869,7 @@ class AbstractConv_gradWeights(BaseAbstractConv):
                 (
                     slice(None),
                     slice(None),
-                    *tuple(
+                    *(
                         slice(pad[i][0], img.shape[i + 2] + pad[i][0])
                         for i in range(self.convdim)
                     ),

@@ -2883,16 +2881,14 @@ class AbstractConv_gradWeights(BaseAbstractConv):
             new_shape = (
                 topgrad.shape[0],
                 topgrad.shape[1],
-                *tuple(
-                    img.shape[i + 2] - dil_shape[i] + 1 for i in range(self.convdim)
-                ),
+                *(img.shape[i + 2] - dil_shape[i] + 1 for i in range(self.convdim)),
             )
             new_topgrad = np.zeros((new_shape), dtype=topgrad.dtype)
             new_topgrad[
                 (
                     slice(None),
                     slice(None),
-                    *tuple(
+                    *(
                         slice(None, None, self.subsample[i])
                         for i in range(self.convdim)
                     ),

@@ -2900,7 +2896,7 @@ class AbstractConv_gradWeights(BaseAbstractConv):
             ] = topgrad
             topgrad = new_topgrad

-        axes_order = (1, 0, *tuple(range(2, self.convdim + 2)))
+        axes_order = (1, 0, *range(2, self.convdim + 2))
         topgrad = topgrad.transpose(axes_order)
         img = img.transpose(axes_order)

@@ -2908,7 +2904,7 @@ class AbstractConv_gradWeights(BaseAbstractConv):
             mshp0 = mat.shape[0] // self.num_groups
             mshp1 = mat.shape[1] * self.num_groups
             mat = mat.reshape((self.num_groups, mshp0) + mat.shape[1:])
-            mat = mat.transpose((1, 0, 2, *tuple(range(3, 3 + self.convdim))))
+            mat = mat.transpose((1, 0, 2, *range(3, 3 + self.convdim)))
             mat = mat.reshape((mshp0, mshp1) + mat.shape[-self.convdim :])
             return mat

@@ -2941,9 +2937,9 @@ class AbstractConv_gradWeights(BaseAbstractConv):
             # to (nFilters, out_rows, out_cols, nChannels, kH, kW)
             kern_axes = (
                 1,
-                *tuple(range(2, self.convdim + 2)),
+                *range(2, self.convdim + 2),
                 0,
-                *tuple(range(self.convdim + 2, kern.ndim)),
+                *range(self.convdim + 2, kern.ndim),
             )
         else:
             flip_topgrad = flip_kern = (slice(None), slice(None)) + (

@@ -2951,7 +2947,7 @@ class AbstractConv_gradWeights(BaseAbstractConv):
             ) * self.convdim
             topgrad = topgrad[flip_topgrad]
             kern = self.conv(img, topgrad, mode="valid", num_groups=self.num_groups)
-            kern_axes = (1, 0, *tuple(range(2, self.convdim + 2)))
+            kern_axes = (1, 0, *range(2, self.convdim + 2))
         kern = kern.transpose(kern_axes)

@@ -3249,7 +3245,7 @@ class AbstractConv_gradInputs(BaseAbstractConv):
             new_shape = (
                 topgrad.shape[0],
                 topgrad.shape[1],
-                *tuple(
+                *(
                     shape[i] + pad[i][0] + pad[i][1] - dil_kernshp[i] + 1
                     for i in range(self.convdim)
                 ),

@@ -3259,7 +3255,7 @@ class AbstractConv_gradInputs(BaseAbstractConv):
                 (
                     slice(None),
                     slice(None),
-                    *tuple(
+                    *(
                         slice(None, None, self.subsample[i])
                         for i in range(self.convdim)
                     ),

@@ -3291,9 +3287,9 @@ class AbstractConv_gradInputs(BaseAbstractConv):
             # for 2D -> (1, 2, 3, 0, 4, 5, 6)
             mat = mat.transpose(
                 (
-                    *tuple(range(1, 2 + self.convdim)),
+                    *range(1, 2 + self.convdim),
                     0,
-                    *tuple(range(2 + self.convdim, mat.ndim)),
+                    *range(2 + self.convdim, mat.ndim),
                 )
             )
             mat = mat.reshape(

@@ -3303,7 +3299,7 @@ class AbstractConv_gradInputs(BaseAbstractConv):
                 + mat.shape[-self.convdim :]
             )
         else:
-            mat = mat.transpose((1, 0, 2, *tuple(range(3, 3 + self.convdim))))
+            mat = mat.transpose((1, 0, 2, *range(3, 3 + self.convdim)))
             mat = mat.reshape((mshp0, mshp1) + mat.shape[-self.convdim :])
         return mat

@@ -3315,8 +3311,8 @@ class AbstractConv_gradInputs(BaseAbstractConv):
         axes_order = (
             1 + self.convdim,
             0,
-            *tuple(range(1, 1 + self.convdim)),
-            *tuple(range(2 + self.convdim, kern.ndim)),
+            *range(1, 1 + self.convdim),
+            *range(2 + self.convdim, kern.ndim),
         )
         kern = kern.transpose(axes_order)
         if not self.filter_flip:

@@ -3334,7 +3330,7 @@ class AbstractConv_gradInputs(BaseAbstractConv):
                 direction="backprop inputs",
             )
         else:
-            axes_order = (1, 0, *tuple(range(2, 2 + self.convdim)))
+            axes_order = (1, 0, *range(2, 2 + self.convdim))
             kern = kern.transpose(axes_order)
             flip_filters = (slice(None), slice(None)) + (
                 slice(None, None, -1),

@@ -3356,7 +3352,7 @@ class AbstractConv_gradInputs(BaseAbstractConv):
                 (
                     slice(None),
                     slice(None),
-                    *tuple(
+                    *(
                         slice(pad[i][0], img.shape[i + 2] - pad[i][1])
                         for i in range(self.convdim)
                     ),
pytensor/tensor/extra_ops.py
@@ -1427,7 +1427,7 @@ def ravel_multi_index(multi_index, dims, mode="raise", order="C"):
     """
     if not isinstance(multi_index, (tuple, list)):
         raise TypeError("multi_index must be a tuple or a list.")
-    args = (*tuple(multi_index), dims)
+    args = (*multi_index, dims)
     return RavelMultiIndex(mode=mode, order=order)(*args)
pytensor/tensor/fourier.py
@@ -108,9 +108,9 @@ class Fourier(Op):
             return [(n,)]
         elif isinstance(axis, TensorConstant):
             out_shape = [
-                *list(shape_a[0 : axis.data.item()]),
+                *shape_a[0 : axis.data.item()],
                 n,
-                *list(shape_a[axis.data + 1 :]),
+                *shape_a[axis.data + 1 :],
             ]
         else:
             l = len(shape_a)

@@ -172,9 +172,9 @@ class Fourier(Op):
         # insures that gradient shape conforms to input shape:
         out_shape = [
-            *list(np.arange(0, axis)),
+            *np.arange(0, axis),
             a.ndim - 1,
-            *list(np.arange(axis, a.ndim - 1)),
+            *np.arange(axis, a.ndim - 1),
         ]
         res = res.dimshuffle(*out_shape)
         return [res, None, None]
pytensor/tensor/random/basic.py
@@ -841,7 +841,7 @@ def safe_multivariate_normal(mean, cov, size=None, rng=None):
     )

     if size is not None:
-        res = res.reshape([*list(size), -1])
+        res = res.reshape([*size, -1])

     return res
pytensor/tensor/rewriting/blockwise.py
@@ -193,7 +193,7 @@ def local_blockwise_alloc(fgraph, node):
             alloc(
                 new_out,
                 *batch_shape,
-                *tuple(new_out.shape)[batch_ndim - missing_ndim :],
+                *new_out.shape[batch_ndim - missing_ndim :],
             )
             for new_out in new_outs
         ]
pytensor/tensor/rewriting/linalg.py
@@ -38,7 +38,7 @@ def is_matrix_transpose(x: TensorVariable) -> bool:
         ndims = inp.type.ndim
         if ndims < 2:
             return False
-        transpose_order = (*tuple(range(ndims - 2)), ndims - 1, ndims - 2)
+        transpose_order = (*range(ndims - 2), ndims - 1, ndims - 2)
         return cast(bool, node.op.new_order == transpose_order)
     return False
pytensor/tensor/rewriting/math.py
@@ -1697,7 +1697,7 @@ def local_reduce_join(fgraph, node):
             return
         if not isinstance(inp.op, DimShuffle) or inp.op.new_order != (
             "x",
-            *tuple(range(inp.inputs[0].ndim)),
+            *range(inp.inputs[0].ndim),
         ):
             return
         new_inp.append(inp.inputs[0])

@@ -3354,7 +3354,7 @@ def compute_mul(tree):
         )
     elif isinstance(inputs, list):
         # Recurse through inputs.
-        rval = mul(*list(map(compute_mul, inputs)))
+        rval = mul(*map(compute_mul, inputs))
     else:
         rval = inputs
     if neg:
pytensor/tensor/slinalg.py
@@ -589,7 +589,7 @@ def kron(a, b):
         )
     o = ptm.outer(a, b)
     o = o.reshape(ptb.concatenate((a.shape, b.shape)), ndim=a.ndim + b.ndim)
-    shf = o.dimshuffle(0, 2, 1, *list(range(3, o.ndim)))
+    shf = o.dimshuffle(0, 2, 1, *range(3, o.ndim))
     if shf.ndim == 3:
         shf = o.dimshuffle(1, 0, 2)
         o = shf.flatten()

@@ -598,7 +598,7 @@ def kron(a, b):
             (
                 o.shape[0] * o.shape[2],
                 o.shape[1] * o.shape[3],
-                *tuple(o.shape[i] for i in range(4, o.ndim)),
+                *(o.shape[i] for i in range(4, o.ndim)),
             )
         )
     return o
pytensor/tensor/subtensor.py
@@ -1941,7 +1941,7 @@ def _sum_grad_over_bcasted_dims(x, gx):
         assert gx.ndim > x.ndim
         for i in range(x_dim_added):
             assert gx.broadcastable[i]
-        gx = gx.dimshuffle(*list(range(x_dim_added, gx.ndim)))
+        gx = gx.dimshuffle(*range(x_dim_added, gx.ndim))
         assert gx.broadcastable == x.broadcastable
     return gx

@@ -2719,7 +2719,7 @@ class AdvancedIncSubtensor(Op):
             new_inputs.append(inp)
         return Apply(
             self,
-            (x, y, *tuple(new_inputs)),
+            (x, y, *new_inputs),
             [
                 tensor(
                     dtype=x.type.dtype,
tests/link/jax/test_random.py
@@ -501,7 +501,7 @@ def test_random_RandomVariable(rv_op, dist_params, base_size, cdf_name, params_conv):
     bcast_dist_args = np.broadcast_arrays(*[i.tag.test_value for i in dist_params])

     for idx in np.ndindex(*base_size):
-        cdf_params = params_conv(*tuple(arg[idx] for arg in bcast_dist_args))
+        cdf_params = params_conv(*(arg[idx] for arg in bcast_dist_args))
         test_res = stats.cramervonmises(
             samples[(Ellipsis, *idx)], cdf_name, args=cdf_params
         )
tests/link/numba/test_random.py
@@ -435,7 +435,7 @@ def test_unaligned_RandomVariable(rv_op, dist_args, base_size, cdf_name, params_conv):
     bcast_dist_args = np.broadcast_arrays(*[i.tag.test_value for i in dist_args])

     for idx in np.ndindex(*base_size):
-        cdf_params = params_conv(*tuple(arg[idx] for arg in bcast_dist_args))
+        cdf_params = params_conv(*(arg[idx] for arg in bcast_dist_args))
         test_res = stats.cramervonmises(
             samples[(Ellipsis, *idx)], cdf_name, args=cdf_params
         )
tests/sparse/sandbox/test_sp.py
@@ -64,7 +64,7 @@ class TestSP:
                 else:
                     fulloutshp = np.array(imshp) + np.array(kshp) - 1
                 ntime1 = time.perf_counter()
-                refout = np.zeros((bsize, *tuple(fulloutshp), nkern))
+                refout = np.zeros((bsize, *fulloutshp, nkern))
                 for b in range(bsize):
                     for n in range(nkern):
                         refout[b, ..., n] = convolve2d(
tests/tensor/conv/test_abstract_conv.py
@@ -474,7 +474,7 @@ class BaseTestConv:
         return (
             batch_size,
             num_filters,
-            *tuple(
+            *(
                 None
                 if i is None or k is None
                 else (i + 2 * pad - ((k - 1) * fd + 1)) // d + 1
tests/tensor/rewriting/test_basic.py
@@ -1442,7 +1442,7 @@ def test_local_flatten_lift(i):
     x_np = np.random.random((5, 4, 3, 2)).astype(config.floatX)
     out_np = f(x_np)
     topo = f.maker.fgraph.toposort()
-    shape_out_np = (*tuple(x_np.shape[: i - 1]), np.prod(x_np.shape[i - 1 :]))
+    shape_out_np = (*x_np.shape[: i - 1], np.prod(x_np.shape[i - 1 :]))
     assert shape_out_np == out_np.shape
     reshape_nodes = [n for n in topo if isinstance(n.op, Reshape)]
tests/tensor/rewriting/test_subtensor.py
@@ -535,7 +535,7 @@ class TestSubtensorIncSubtensor:
         y = set_subtensor((2 * x)[indices], val, inplace=False)
         assert y.owner.op.inplace is False
         f = function(
-            [x, val, *list(indices)],
+            [x, val, *indices],
             y,
             mode=self.mode.including("inplace"),
         )

@@ -2015,10 +2015,8 @@ def test_local_subtensor_SpecifyShape_lift(x, s, idx, x_val, s_val):
     rewrites = RewriteDatabaseQuery(include=[None])
     no_rewrites_mode = Mode(optimizer=rewrites)
-    y_val_fn = function(
-        [x, *list(s)], y, on_unused_input="ignore", mode=no_rewrites_mode
-    )
-    y_val = y_val_fn(*([x_val, *list(s_val)]))
+    y_val_fn = function([x, *s], y, on_unused_input="ignore", mode=no_rewrites_mode)
+    y_val = y_val_fn(*([x_val, *s_val]))

     # This optimization should appear in the canonicalizations
     y_opt = rewrite_graph(y, clone=False)

@@ -2030,8 +2028,8 @@ def test_local_subtensor_SpecifyShape_lift(x, s, idx, x_val, s_val):
     else:
         assert isinstance(y_opt.owner.op, SpecifyShape)

-    y_opt_fn = function([x, *list(s)], y_opt, on_unused_input="ignore")
-    y_opt_val = y_opt_fn(*([x_val, *list(s_val)]))
+    y_opt_fn = function([x, *s], y_opt, on_unused_input="ignore")
+    y_opt_val = y_opt_fn(*([x_val, *s_val]))

     assert np.allclose(y_val, y_opt_val)
tests/tensor/test_basic.py
@@ -2380,7 +2380,7 @@ def test_tile():
         if use_symbolic_reps:
             rep_symbols = [iscalar() for _ in range(len(reps))]
             f = function([x, *rep_symbols], tile(x, rep_symbols))
-            return f(*([x_, *list(reps)]))
+            return f(*([x_, *reps]))
         else:
             f = function([x], tile(x, reps))
             return f(x_)
tests/tensor/test_math.py
@@ -2456,7 +2456,7 @@ class TestArithmeticCast:
                         op(numpy_arg_1, numpy_arg_2).dtype,
                         op(numpy_arg_2, numpy_arg_1).dtype,
                     ]
-                    numpy_dtype = ps.upcast(*list(map(str, numpy_dtypes)))
+                    numpy_dtype = ps.upcast(*map(str, numpy_dtypes))

                     if numpy_dtype == pytensor_dtype:
                         # Same data type found, all is good!
tests/test_ifelse.py
@@ -90,7 +90,7 @@ class TestIfelse(utt.OptimizationTestMixin):
             "constant_folding",
             "constant_folding",
         )
-        y2 = reduce(lambda x, y: x + y, [y, *list(range(200))])
+        y2 = reduce(lambda x, y: x + y, [y, *range(200)])
         f = function([c, x, y], ifelse(c, x, y2), mode=mode)
         # For not inplace ifelse
         ifnode = [n for n in f.maker.fgraph.toposort() if isinstance(n.op, IfElse)]