Commit 93a7a5e3
authored Sep 06, 2013 by Frédéric Bastien

Merge pull request #1515 from lamblin/fix_1507

Fix in local_dot22_to_dot22scalar

Parents: 959d7cd4, 9a5e2e97
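The rewrite named in the commit message, local_dot22_to_dot22scalar, folds a scalar factor of an elementwise mul into the BLAS-backed matrix product, replacing mul(_dot22(A, B), x) with a single _dot22scalar(A, B, x) node; the fix merged here also lets it handle a scalar hidden one level down inside a nested mul. A minimal sketch of invoking the optimizer directly, using only names that appear in this commit's diff (Theano-era import paths, assumed current as of 2013):

    import theano
    import theano.tensor as T
    from theano.tensor.blas import _dot22, local_dot22_to_dot22scalar

    A = T.dmatrix()
    x = T.dscalar()
    out = T.mul(_dot22(A, A), x)  # scalar-scaled matrix product

    # Ask the local optimizer for a replacement of the mul node; it returns
    # a list of new outputs (here a _dot22scalar apply) or False.
    replacement = local_dot22_to_dot22scalar.transform(out.owner)
    assert replacement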
Showing 2 changed files with 73 additions and 21 deletions.

theano/tensor/blas.py               +19  -21
theano/tensor/tests/test_blas.py    +54   -0
theano/tensor/blas.py
@@ -1906,21 +1906,22 @@ def local_dot22_to_dot22scalar(node):
     d = node.inputs[dot22_idx]
     i_scalar = [_as_scalar(x, dtype=d.dtype) for x in node.inputs]
     if not any(i_scalar):
-        i_mul = [x.owner and x.owner.op == T.mul
-                 for x in node.inputs]
+        # Check if we can reorder the graph as this mul have a mul in inputs.
+        # We support only 1 additional level of mul.
+        # The canonizer should have merged those mul together.
+        i_mul = [x.owner and x.owner.op == T.mul and
+                 any([_as_scalar(x_i, dtype=d.dtype)
+                      for x_i in x.owner.inputs])
+                 for x in node.inputs]
         if not any(i_mul):
             #no scalar in input and no multiplication
             #if their was a multiplication we couls reorder the graph
             #by the associativity of the graph.
             return False
-        #maybe we can reorder the graph as this mul have a mul in input.
-        #The canonizer should have merged those mul together.
-        #We support only 1 additional level of mul.
-        mul_idx = i_mul.index(True)  # The first one should always work
+        mul_idx = i_mul.index(True)  # we take the first mul!
         m = node.inputs[mul_idx]
         if len(m.owner.inputs) == 2 and any([_as_scalar(x, dtype=d.dtype)
                                              for x in m.owner.inputs]):
             scalar_idx = -1
             for i, x in enumerate(m.owner.inputs):
                 if _as_scalar(x, dtype=d.dtype) and (theano.scalar.upcast(

@@ -1940,21 +1941,18 @@ def local_dot22_to_dot22scalar(node):
             assert not a.type.ndim
             dot = _dot22scalar(d.owner.inputs[0],
                                d.owner.inputs[1], a)
-            # What about the other inputs to the original node that were
-            # neither part of the dot22 or this mul?
-            # I'm asserting there are no such inputs here:
+            # The other inputs to the original node that were
+            # neither part of the dot22 or this mul should be
+            # factors in the returned "mul" node.
             assert dot22_idx != mul_idx
-            assert all((i in (dot22_idx, mul_idx))
-                       for i in xrange(len(node.inputs)))
-            return [T.mul(m.owner.inputs[1 - i], dot)]
-        elif m.owner and m.owner.op == T.mul:
-            _logger.info('Not optimizing dot22 with inputs %s %s %s %s. '
-                         'we need to check in a recursive way in the mul if we can '
-                         'reorder the graph. The canonizer should have done this.',
-                         d, m, d.type, m.type)
-        else:
-            return False
+            other_factors = [inpt
+                             for i, inpt in enumerate(node.inputs)
+                             if i not in (dot22_idx, mul_idx)]
+            other_m_inputs = [inpt
+                              for i, inpt in enumerate(m.owner.inputs)
+                              if i != scalar_idx]
+
+            return [T.mul(dot, *(other_factors + other_m_inputs))]
+
         scalar_idx = -1
         for i, x in enumerate(node.inputs):
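The heart of the new code path above is the bookkeeping around other_factors and other_m_inputs: every input of the original mul that is neither the dot22 nor the nested mul, plus every input of the nested mul except the extracted scalar, must come back as a factor of the returned mul. A sketch of that bookkeeping on plain Python lists (an illustration, not part of the commit), for the gh-1515-style case mul(dot22(A, A), m1, mul(m2, y)) where y is the scalar pulled into _dot22scalar:

    node_inputs = ['dot22', 'm1', 'mul(m2, y)']  # inputs of the outer mul
    dot22_idx, mul_idx = 0, 2                    # positions the optimizer found
    m_owner_inputs = ['m2', 'y']                 # inputs of the nested mul
    scalar_idx = 1                               # y is the scalar factor

    other_factors = [inpt for i, inpt in enumerate(node_inputs)
                     if i not in (dot22_idx, mul_idx)]   # ['m1']
    other_m_inputs = [inpt for i, inpt in enumerate(m_owner_inputs)
                      if i != scalar_idx]                # ['m2']

    # The patch returns T.mul(dot, *(other_factors + other_m_inputs)),
    # i.e. mul(dot22scalar(A, A, y), m1, m2) for this example.
    print(other_factors + other_m_inputs)               # ['m1', 'm2']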
theano/tensor/tests/test_blas.py
@@ -1024,6 +1024,60 @@ def test_dot22scalar_cast():
     assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]


+def test_local_dot22_to_dot22scalar():
+    """
+    This test that the bug in gh-1507 is really fixed
+    """
+    A = T.dmatrix()
+    mode = theano.compile.mode.get_default_mode()
+    opt = theano.tensor.opt.in2out(
+        theano.tensor.blas.local_dot22_to_dot22scalar)
+    mode = mode.__class__(optimizer=opt)
+
+    x = T.dscalar()
+    y = T.dscalar()
+    z = T.dscalar()
+    # make sure to don't have dimshuffle as we don't opt those cases
+    m = T.dmatrix()
+    r = T.drow()
+    for idx, node in enumerate([
+            #Old working cases
+            T.mul(_dot22(A, A), x),
+            T.mul(_dot22(A, A), x, y),
+            T.mul(_dot22(A, A), x, r),
+            T.mul(_dot22(A, A), m, x),
+            T.mul(_dot22(A, A), x, m),
+            T.mul(_dot22(A, A), x, (m * y)),
+            T.mul(_dot22(A, A), (m * y), x),
+            T.mul(_dot22(A, A), x, (r * y)),
+            T.mul(_dot22(A, A), (r * y), x),
+            T.mul(_dot22(A, A), (x * y), (m * x)),
+            T.mul(_dot22(A, A), (r * y), (y * x)),
+            # Case that was raising an assert that is fixed in gh-1507
+            T.mul(_dot22(A, A), (m * y), m),
+            T.mul(_dot22(A, A), m, (m * y)),
+            T.mul(_dot22(A, A), (r * y), (m * x)),
+            # assert fixed in gh-1507 and opt case added in gh-1515
+            T.mul(_dot22(A, A), (m * y * z), m),
+            T.mul(_dot22(A, A), m, (m * y * z)),
+            # Opt case added in gh-1515
+            T.mul(_dot22(A, A), T.mul(m, y, z), m),
+            T.mul(_dot22(A, A), m, T.mul(m, y, z)),
+            #Case that opt later in gh-1515
+            T.mul(_dot22(A, A), (r * m), (m * x)),
+            ]):
+        node2 = theano.tensor.blas.local_dot22_to_dot22scalar.transform(
+            node.owner)
+        assert node2
+        f = theano.function([x, y, z, m, r, A], node,
+                            mode=mode, on_unused_input='ignore')
+        f(.1, .2, .3, [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10]])
+
+
 def test_dot_w_self():
     # This can trigger problems in the optimization because what would
     # normally be a gemm must not be because the output is aliased to
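The new test drives the optimizer through a mode whose optimizer is just in2out(local_dot22_to_dot22scalar), so it does not depend on the rest of the optimization pipeline. An end-to-end sketch of the same pattern (a hedged illustration reusing only calls that appear in this diff and in test_dot22scalar_cast above), checking the optimized graph the way that earlier test does:

    import theano
    import theano.tensor as T
    from theano.tensor.blas import (_dot22, _dot22scalar,
                                    local_dot22_to_dot22scalar)

    A = T.dmatrix()
    x = T.dscalar()
    opt = theano.tensor.opt.in2out(local_dot22_to_dot22scalar)
    mode = theano.compile.mode.get_default_mode().__class__(optimizer=opt)
    f = theano.function([A, x], T.mul(_dot22(A, A), x), mode=mode)
    # The scaled product should now compile to a single _dot22scalar op.
    assert _dot22scalar in [n.op for n in f.maker.fgraph.toposort()]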