testgroup / pytensor / Commits

Commit e1ce1c35
Authored Jul 01, 2025 by Ricardo Vieira
Committed by Ricardo Vieira on Jul 02, 2025
Refactor `lower_aligned` helper
Parent: 45a33ada
Showing 3 changed files with 23 additions and 40 deletions
pytensor/xtensor/rewriting/shape.py           +2  -9
pytensor/xtensor/rewriting/utils.py           +12 -0
pytensor/xtensor/rewriting/vectorization.py   +9  -31
pytensor/xtensor/rewriting/shape.py

@@ -9,6 +9,7 @@ from pytensor.tensor import (
 )
 from pytensor.xtensor.basic import tensor_from_xtensor, xtensor_from_tensor
 from pytensor.xtensor.rewriting.basic import register_lower_xtensor
+from pytensor.xtensor.rewriting.utils import lower_aligned
 from pytensor.xtensor.shape import (
     Concat,
     ExpandDims,

@@ -70,15 +71,7 @@ def lower_concat(fgraph, node):
     concat_axis = out_dims.index(concat_dim)

     # Convert input XTensors to Tensors and align batch dimensions
-    tensor_inputs = []
-    for inp in node.inputs:
-        inp_dims = inp.type.dims
-        order = [
-            inp_dims.index(out_dim) if out_dim in inp_dims else "x"
-            for out_dim in out_dims
-        ]
-        tensor_inp = tensor_from_xtensor(inp).dimshuffle(order)
-        tensor_inputs.append(tensor_inp)
+    tensor_inputs = [lower_aligned(inp, out_dims) for inp in node.inputs]

     # Broadcast non-concatenated dimensions of each input
     non_concat_shape = [None] * len(out_dims)
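The one-line replacement works because `lower_aligned` (added to pytensor/xtensor/rewriting/utils.py below) computes the same dimshuffle order that the removed loop built by hand, trading the per-dim `list.index` scan for a dict lookup. A minimal plain-Python sketch of that equivalence, using hypothetical dimension names:

# Runnable without pytensor: compare the removed loop's order computation
# against the helper's dict-based one. Dim names are hypothetical.
inp_dims = ("city", "time")
out_dims = ("time", "city", "variant")  # "variant" is absent from the input

# Old lower_concat logic: linear search per output dim
order = [
    inp_dims.index(out_dim) if out_dim in inp_dims else "x"
    for out_dim in out_dims
]

# New lower_aligned logic: build a position map once, then look up;
# missing dims become "x", i.e. a broadcastable axis for dimshuffle
pos = {d: i for i, d in enumerate(inp_dims)}
ds_order = tuple(pos.get(dim, "x") for dim in out_dims)

assert tuple(order) == ds_order == (1, 0, "x")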
pytensor/xtensor/rewriting/utils.py

+import typing
+from collections.abc import Sequence
+
 from pytensor.compile import optdb
 from pytensor.graph.rewriting.basic import NodeRewriter, in2out
 from pytensor.graph.rewriting.db import EquilibriumDB, RewriteDatabase
 from pytensor.tensor.rewriting.ofg import inline_ofg_expansion
+from pytensor.tensor.variable import TensorVariable
+from pytensor.xtensor.type import XTensorVariable

 lower_xtensor_db = EquilibriumDB(ignore_newtrees=False)

@@ -49,3 +54,10 @@ def register_lower_xtensor(
         **kwargs,
     )
     return node_rewriter
+
+
+def lower_aligned(x: XTensorVariable, out_dims: Sequence[str]) -> TensorVariable:
+    """Lower an XTensorVariable to a TensorVariable so that it's dimensions are aligned with "out_dims"."""
+    inp_dims = {d: i for i, d in enumerate(x.type.dims)}
+    ds_order = tuple(inp_dims.get(dim, "x") for dim in out_dims)
+    return typing.cast(TensorVariable, x.values.dimshuffle(ds_order))
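As a usage illustration of the new helper, a hedged sketch on a small graph; the shapes and dim names are invented, and it assumes `xtensor_from_tensor` accepts a `dims` argument as used elsewhere in this module:

import pytensor.tensor as pt
from pytensor.xtensor.basic import xtensor_from_tensor
from pytensor.xtensor.rewriting.utils import lower_aligned

# Hypothetical example: label a 2x3 tensor with dims ("b", "a").
x = xtensor_from_tensor(pt.zeros((2, 3)), dims=("b", "a"))

# Align to ("a", "b", "c"): swap "a"/"b" and insert a broadcastable
# axis for the missing "c" dim, i.e. dimshuffle((1, 0, "x")).
y = lower_aligned(x, ("a", "b", "c"))
print(y.type.shape)  # expected: (3, 2, 1)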
pytensor/xtensor/rewriting/vectorization.py

@@ -2,8 +2,8 @@ from pytensor.graph import node_rewriter
 from pytensor.tensor.blockwise import Blockwise
 from pytensor.tensor.elemwise import Elemwise
 from pytensor.tensor.random.utils import compute_batch_shape
-from pytensor.xtensor.basic import tensor_from_xtensor, xtensor_from_tensor
+from pytensor.xtensor.basic import xtensor_from_tensor
-from pytensor.xtensor.rewriting.utils import register_lower_xtensor
+from pytensor.xtensor.rewriting.utils import lower_aligned, register_lower_xtensor
 from pytensor.xtensor.vectorization import XRV, XBlockwise, XElemwise

@@ -13,15 +13,7 @@ def lower_elemwise(fgraph, node):
     out_dims = node.outputs[0].type.dims

     # Convert input XTensors to Tensors and align batch dimensions
-    tensor_inputs = []
-    for inp in node.inputs:
-        inp_dims = inp.type.dims
-        order = [
-            inp_dims.index(out_dim) if out_dim in inp_dims else "x"
-            for out_dim in out_dims
-        ]
-        tensor_inp = tensor_from_xtensor(inp).dimshuffle(order)
-        tensor_inputs.append(tensor_inp)
+    tensor_inputs = [lower_aligned(inp, out_dims) for inp in node.inputs]

     tensor_outs = Elemwise(scalar_op=node.op.scalar_op)(
         *tensor_inputs, return_list=True

@@ -42,17 +34,10 @@ def lower_blockwise(fgraph, node):
     batch_dims = node.outputs[0].type.dims[:batch_ndim]

     # Convert input Tensors to XTensors, align batch dimensions and place core dimension at the end
-    tensor_inputs = []
-    for inp, core_dims in zip(node.inputs, op.core_dims[0]):
-        inp_dims = inp.type.dims
-        # Align the batch dims of the input, and place the core dims on the right
-        batch_order = [
-            inp_dims.index(batch_dim) if batch_dim in inp_dims else "x"
-            for batch_dim in batch_dims
-        ]
-        core_order = [inp_dims.index(core_dim) for core_dim in core_dims]
-        tensor_inp = tensor_from_xtensor(inp).dimshuffle(batch_order + core_order)
-        tensor_inputs.append(tensor_inp)
+    tensor_inputs = [
+        lower_aligned(inp, batch_dims + core_dims)
+        for inp, core_dims in zip(node.inputs, op.core_dims[0], strict=True)
+    ]

     signature = op.signature or getattr(op.core_op, "gufunc_signature", None)
     if signature is None:

@@ -92,17 +77,10 @@ def lower_rv(fgraph, node):
     param_batch_dims = old_out.type.dims[len(op.extra_dims) : batch_ndim]

     # Convert params Tensors to XTensors, align batch dimensions and place core dimension at the end
-    tensor_params = []
-    for inp, core_dims in zip(params, op.core_dims[0]):
-        inp_dims = inp.type.dims
-        # Align the batch dims of the input, and place the core dims on the right
-        batch_order = [
-            inp_dims.index(batch_dim) if batch_dim in inp_dims else "x"
-            for batch_dim in param_batch_dims
-        ]
-        core_order = [inp_dims.index(core_dim) for core_dim in core_dims]
-        tensor_inp = tensor_from_xtensor(inp).dimshuffle(batch_order + core_order)
-        tensor_params.append(tensor_inp)
+    tensor_params = [
+        lower_aligned(inp, param_batch_dims + core_dims)
+        for inp, core_dims in zip(params, op.core_dims[0], strict=True)
+    ]

     size = None
     if op.extra_dims:
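In `lower_blockwise` and `lower_rv` the target dims are the concatenation `batch_dims + core_dims`, which reproduces the old `batch_order + core_order` dimshuffle: core dims are guaranteed to be present in each input, so only batch dims can be missing, and those become broadcastable "x" axes. A plain-Python sketch with hypothetical dim names:

# Runnable without pytensor: the two removed order lists, concatenated,
# equal the single order lower_aligned derives from batch_dims + core_dims.
inp_dims = ("core2", "batch1", "core1")
batch_dims = ("batch0", "batch1")  # "batch0" missing -> broadcast in
core_dims = ("core1", "core2")

# Old logic: separate batch and core order lists
batch_order = [inp_dims.index(d) if d in inp_dims else "x" for d in batch_dims]
core_order = [inp_dims.index(d) for d in core_dims]

# New logic: one aligned target list
pos = {d: i for i, d in enumerate(inp_dims)}
ds_order = tuple(pos.get(d, "x") for d in batch_dims + core_dims)

assert tuple(batch_order + core_order) == ds_order == ("x", 1, 2, 0)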