Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
7b0a3924
提交
7b0a3924
authored
1月 20, 2025
作者:
Ricardo Vieira
提交者:
Ricardo Vieira
5月 09, 2025
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Lift Subtensor over AdvancedSubtensor
上级
58f1fd2b
显示空白字符变更
内嵌
并排
正在显示
2 个修改的文件
包含
125 行增加
和
2 行删除
+125
-2
subtensor_lift.py
pytensor/tensor/rewriting/subtensor_lift.py
+80
-1
test_subtensor_lift.py
tests/tensor/rewriting/test_subtensor_lift.py
+45
-1
没有找到文件。
pytensor/tensor/rewriting/subtensor_lift.py
浏览文件 @
7b0a3924
...
...
@@ -4,6 +4,7 @@ from typing import cast
import
numpy
as
np
from
pytensor
import
Variable
from
pytensor.compile
import
optdb
from
pytensor.graph
import
Constant
,
FunctionGraph
,
node_rewriter
from
pytensor.graph.rewriting.basic
import
NodeRewriter
,
copy_stack_trace
from
pytensor.npy_2_compat
import
normalize_axis_index
,
normalize_axis_tuple
...
...
@@ -37,8 +38,10 @@ from pytensor.tensor.shape import (
)
from
pytensor.tensor.special
import
Softmax
,
softmax
from
pytensor.tensor.subtensor
import
(
AdvancedSubtensor
,
AdvancedSubtensor1
,
Subtensor
,
_non_consecutive_adv_indexing
,
as_index_literal
,
get_canonical_form_slice
,
get_constant_idx
,
...
...
@@ -46,7 +49,7 @@ from pytensor.tensor.subtensor import (
indices_from_subtensor
,
)
from
pytensor.tensor.type
import
TensorType
from
pytensor.tensor.type_other
import
SliceType
from
pytensor.tensor.type_other
import
NoneTypeT
,
SliceType
from
pytensor.tensor.variable
import
TensorVariable
...
...
@@ -769,3 +772,79 @@ def local_subtensor_shape_constant(fgraph, node):
return
[
as_tensor
([
1
]
*
len
(
shape_parts
),
dtype
=
np
.
int64
,
ndim
=
1
)]
elif
shape_parts
:
return
[
as_tensor
(
1
,
dtype
=
np
.
int64
)]
@node_rewriter([Subtensor])
def local_subtensor_of_adv_subtensor(fgraph, node):
    """Lift a simple Subtensor through an AdvancedSubtensor, when basic index dimensions are to the left of any advanced ones.

    x[:, :, vec_idx][i, j] -> x[i, j][vec_idx]
    x[:, vec_idx][i, j, k] -> x[i][vec_idx][j, k]

    Restricted to a single advanced indexing dimension.

    An alternative approach could have fused the basic and advanced indices,
    so it is not clear this rewrite should be canonical or a specialization.
    Users must include it manually if it fits their use case.
    """
    adv_out, *basic_raw_idxs = node.inputs

    if adv_out.owner is None or not isinstance(adv_out.owner.op, AdvancedSubtensor):
        return None

    # AdvancedSubtensor involves a full_copy, so we don't want to do it twice
    if len(fgraph.clients[adv_out]) > 1:
        return None

    x, *adv_indices = adv_out.owner.inputs

    # Advanced indexing is a minefield, avoid all cases except for consecutive
    # integer indices (no newaxis, no boolean masks, no partial slices).
    def _unsupported_adv_index(idx):
        if isinstance(idx.type, NoneTypeT):
            return True
        if isinstance(idx.type, TensorType) and idx.type.dtype == "bool":
            return True
        return isinstance(idx.type, SliceType) and not is_full_slice(idx)

    if any(
        _unsupported_adv_index(idx) for idx in adv_indices
    ) or _non_consecutive_adv_indexing(adv_indices):
        return None

    # Position of the first integer-tensor index; everything before it can
    # only be full slices (checked above).
    first_adv_dim = next(
        (i for i, idx in enumerate(adv_indices) if isinstance(idx.type, TensorType)),
        None,
    )
    if first_adv_dim is None:
        # Not sure if this should ever happen, but better safe than sorry
        return None

    basic_idxs = indices_from_subtensor(basic_raw_idxs, node.op.idx_list)
    lifted = basic_idxs[:first_adv_dim]
    kept = ((slice(None),) * len(lifted)) + basic_idxs[first_adv_dim:]

    if all(idx == slice(None) for idx in lifted):
        # All basic indices happen to the right of the advanced indices
        return None

    [basic_subtensor] = node.outputs
    dropped_dims = _dims_dropped_by_basic_index(lifted)

    # Apply the lifted basic indices directly on x, then restore the dropped
    # dims so the advanced op sees the rank it expects.
    x_indexed = x[lifted]
    copy_stack_trace([basic_subtensor, adv_out], x_indexed)

    x_expanded = expand_dims(x_indexed, dropped_dims)
    x_adv_indexed = adv_out.owner.op(x_expanded, *adv_indices)
    copy_stack_trace([basic_subtensor, adv_out], x_adv_indexed)

    new_out = squeeze(x_adv_indexed[kept], dropped_dims)
    return [new_out]
# Opt-in rewrite: registered without using the database name as a tag, so it
# only runs when explicitly requested by name.
_rw = local_subtensor_of_adv_subtensor
for _phase in ("canonicalize", "specialize"):
    optdb[_phase].register(_rw.__name__, _rw, use_db_name_as_tag=False)
del _rw, _phase
tests/tensor/rewriting/test_subtensor_lift.py
浏览文件 @
7b0a3924
...
...
@@ -46,7 +46,7 @@ from pytensor.tensor.rewriting.subtensor_lift import (
)
from
pytensor.tensor.shape
import
SpecifyShape
,
_shape
from
pytensor.tensor.special
import
softmax
from
pytensor.tensor.subtensor
import
Subtensor
from
pytensor.tensor.subtensor
import
AdvancedSubtensor
,
Subtensor
mode_opt
=
config
.
mode
...
...
@@ -695,3 +695,47 @@ def test_local_subtensor_shape_constant():
x
=
shape
(
Variable
(
MyType
(),
None
,
None
))[
0
]
assert
not
local_subtensor_shape_constant
.
transform
(
None
,
x
.
owner
)
@pytest.mark.parametrize(
    "original_fn, supported",
    [
        (lambda x: x[:, [0, 1]][0], True),
        (lambda x: x[:, [0, 1], [0, 0]][1:], True),
        (lambda x: x[:, [[0, 1], [0, 0]]][1:], True),
        # Not supported, basic indexing on advanced indexing dim
        (lambda x: x[[0, 1]][0], False),
        # Not implemented, basic indexing on the right of advanced indexing
        (lambda x: x[[0, 1]][:, 0], False),
        # Not implemented, complex flavors of advanced indexing
        (lambda x: x[:, None, [0, 1]][0], False),
        (lambda x: x[:, 5:, [0, 1]][0], False),
        (lambda x: x[:, :, np.array([True, False, False])][0], False),
        (lambda x: x[[0, 1], :, [0, 1]][:, 0], False),
    ],
)
def test_local_subtensor_of_adv_subtensor(original_fn, supported):
    rng = np.random.default_rng(257)
    x = pt.tensor3("x", shape=(7, 5, 3))
    x_val = rng.normal(size=x.type.shape).astype(x.dtype)

    expected = original_fn(x)
    rewritten = rewrite_graph(
        expected, include=("canonicalize", "local_subtensor_of_adv_subtensor")
    )

    # The resulting graphs are too complicated to assert on directly; instead
    # check whether the basic Subtensor ended up before the AdvancedSubtensor
    # in the topological order (i.e. whether the lift happened).
    order = FunctionGraph(outputs=[rewritten], clone=False).toposort()
    [pos_basic] = [i for i, n in enumerate(order) if isinstance(n.op, Subtensor)]
    [pos_adv] = [i for i, n in enumerate(order) if isinstance(n.op, AdvancedSubtensor)]

    lifted = pos_basic < pos_adv
    ok = lifted if supported else not lifted
    assert ok, debugprint(rewritten, print_type=True)

    # Regardless of whether the rewrite fired, numerical results must agree.
    np.testing.assert_allclose(
        rewritten.eval({x: x_val}, mode=NO_OPTIMIZATION_MODE),
        expected.eval({x: x_val}, mode=NO_OPTIMIZATION_MODE),
    )
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论