testgroup / pytensor · Commits · 61d10d72

Commit 61d10d72, authored May 17, 2008 by Olivier Breuleux

added ShuffleRule

Parent: d2bc9167

Showing 1 changed file with 85 additions and 15 deletions.

elemwise.py  +85 -15
@@ -2,7 +2,7 @@
 import elemwise_cgen as cgen
 import numpy
-from gof import Op, Apply
+from gof import Op, Macro, Apply
 import scalar
 from scalar import Scalar
 import gof
@@ -29,6 +29,50 @@ def TensorConstant(*inputs, **kwargs):
 ### DimShuffle ###
 ##################
 
+## TODO: rule-based version of DimShuffle
+## would allow for Transpose, LComplete, RComplete, etc.
+## Can be optimized into DimShuffle later on.
+class ShuffleRule(Macro):
+    """
+    ABSTRACT Op - it has no perform and no c_code
+    Apply ExpandMacros to this node to obtain
+    an equivalent DimShuffle which can be performed.
+    """
+
+    def __init__(self, rule = None, name = None):
+        if rule is not None:
+            self.rule = rule
+        self.name = name
+
+    def make_node(self, input, *models):
+        pattern = self.rule(input.type.broadcastable, *(model.type.broadcastable for model in models))
+        return gof.Apply(self,
+                         (input,) + models,
+                         [Tensor(dtype = input.type.dtype,
+                                 broadcastable = [x == 'x' for x in pattern]).make_result()])
+
+    def expand(self, r):
+        input, models = r.owner.inputs[0], r.owner.inputs[1:]
+        new_order = self.rule(input.type.broadcastable, *(model.type.broadcastable for model in models))
+        return DimShuffle(input.type.broadcastable, new_order)(input)
+
+    def __eq__(self, other):
+        return type(self) == type(other) and self.rule == other.rule
+
+    def __hash__(self, other):
+        return hash(self.rule)
+
+    def __str__(self):
+        if self.name is not None:
+            return self.name
+        else:
+            return "ShuffleRule{%s}" % self.role
+
+_transpose = ShuffleRule(rule = lambda input: range(len(input) - 1, -1, -1),
+                         name = 'transpose')
+
+lcomplete = ShuffleRule(rule = lambda input, *models: ['x'] * (max([0] + map(len, models)) - len(input)) + range(len(input)),
+                        name = 'lcomplete')
+
+rcomplete = ShuffleRule(rule = lambda input, *models: range(len(input)) + ['x'] * (max(map(len, models)) - len(input)),
+                        name = 'rcomplete')
+
 class DimShuffle(Op):
     """
@@ -182,6 +226,23 @@ class DimShuffle(Op):
         return DimShuffle(gz.type.broadcastable, grad_order)(gz),
 
+# class LComplete(Op):
+#     view_map = {0: [0]}
+#     def make_node(self, x, y):
+#         x, y = map(as_tensor, (x, y))
+#         xd, yd = x.type.ndim, y.type.ndim
+#         if xd > yd:
+#             raise TypeError("The tensor to left-complete has more dimensions than the model.")
+#         return gof.Apply(self,
+#                          [x, y],
+#                          [Tensor(dtype = x.type.dtype,
+#                                  broadcastable = (True,)*(yd-xd) + x.type.broadcastable).make_result()])
+#     def perform(self, node, (x, y), (z, )):
+#         return x.reshape((1, )*(y.ndim - x.ndim) + tuple(x.shape))
+#     def grad(self, node, (x, ), (gz, )):
+#         xd, gzd = x.type.ndim, gz.type.ndim
+#         return DimShuffle(gz.broadcastable, range(gzd-xd, xd))(gz)
+
 ################
 ### Elemwise ###
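Note (not part of the commit): the commented-out LComplete sketch above uses Python 2 tuple parameters (def perform(self, node, (x, y), (z, ))), which Python 3 rejects. Its perform step is ordinary numpy; a minimal standalone version of the same left-completion, with a hypothetical helper name:

import numpy

def left_complete(x, y):
    # prepend length-1 (broadcastable) axes to x until it has y's ndim,
    # mirroring LComplete.perform above
    return x.reshape((1,) * (y.ndim - x.ndim) + tuple(x.shape))

x = numpy.ones((3, 4))
y = numpy.zeros((2, 3, 4))
print(left_complete(x, y).shape)        # (1, 3, 4)
print((left_complete(x, y) + y).shape)  # broadcasts to (2, 3, 4)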
@@ -243,20 +304,23 @@ class Elemwise(Op):
         shadow = self.scalar_op.make_node(*[Scalar(dtype = t.type.dtype)() for t in inputs])
         target_length = max([input.type.ndim for input in inputs])
 
-        args = []
-        for input in inputs:
-            length = input.type.ndim
-            difference = target_length - length
-            if not difference:
-                args.append(input)
-            else:
-                args.append(DimShuffle(range(length), ['x']*difference + range(length))(input))
-        inputs = args
+        if len(inputs) > 1:
+            inputs = [lcomplete(input, *inputs) for input in inputs]
+#         args = []
+#         for input in inputs:
+#             length = input.type.ndim
+#             difference = target_length - length
+#             if not difference:
+#                 args.append(input)
+#             else:
+#                 # TODO: use LComplete instead
+#                 args.append(DimShuffle(input.type.broadcastable, ['x']*difference + range(length))(input))
+#         inputs = args
         try:
             assert len(set([len(input.type.broadcastable) for input in inputs])) == 1
         except (AssertionError, AttributeError):
             raise TypeError("All inputs to a Broadcast subclass must be Tensor instances and their broadcastable fields must all have the same length.", inputs)
 
         out_broadcastables = [[all(bcast) for bcast in zip(*[input.type.broadcastable for input in inputs])]] * shadow.nout
         inplace_pattern = self.inplace_pattern
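Note (not part of the commit): the out_broadcastables expression above marks an output dimension broadcastable only when that dimension is broadcastable in every input. A standalone sketch with an illustrative name:

def output_broadcastable(*input_patterns):
    # each output dimension is broadcastable (length 1) only if the
    # corresponding dimension is broadcastable in all inputs
    return [all(bcast) for bcast in zip(*input_patterns)]

print(output_broadcastable((True, False), (True, True)))  # [True, False]
print(output_broadcastable((True, True), (True, True)))   # [True, True]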
@@ -509,6 +573,12 @@ class CAReduce(Op):
                                 broadcastable = [x for i, x in enumerate(input.type.broadcastable) if i not in axis])()
         return Apply(self, [input], [output])
 
+    def __eq__(self, other):
+        return type(self) == type(other) and self.scalar_op == other.scalar_op and self.axis == other.axis
+
+    def __hash__(self):
+        return hash(self.scalar_op) ^ hash(self.axis)
+
     def __str__(self):
         if self.axis is not None:
             return "Reduce{%s}{%s}" % (self.scalar_op, ", ".join(str(x) for x in self.axis))
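Note (not part of the commit): the new __eq__/__hash__ pair on CAReduce keeps Python's contract that objects comparing equal must hash equal, which is what lets two structurally identical reduce ops collapse to one entry in a set or dict during graph rewriting. A hypothetical stand-in showing the effect:

class ReduceKey:
    # illustrative stand-in, not the commit's CAReduce
    def __init__(self, scalar_op, axis):
        self.scalar_op, self.axis = scalar_op, axis
    def __eq__(self, other):
        return (type(self) == type(other)
                and self.scalar_op == other.scalar_op
                and self.axis == other.axis)
    def __hash__(self):
        return hash(self.scalar_op) ^ hash(self.axis)

a, b = ReduceKey('add', (0, 1)), ReduceKey('add', (0, 1))
assert a == b and hash(a) == hash(b)
assert len({a, b}) == 1  # duplicates collapse in a set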