pytensor · Commit 6be35ca3
Authored Feb 25, 2015 by Pascal Lamblin
Merge pull request #2533 from nouiz/cleanup
Code simplification/small speed up for downsample
Parents: 6f346499, 6b4c592f
Showing 4 changed files with 47 additions and 25 deletions.
theano/gof/opt.py                      +23 -11
theano/sandbox/cuda/opt.py             +5  -1
theano/sandbox/cuda/tests/test_opt.py  +12 -1
theano/tensor/signal/downsample.py     +7  -12
theano/gof/opt.py
@@ -1676,8 +1676,10 @@ class OpKeyOptimizer(NavigatorOptimizer):
 class ChangeTracker:
     def __init__(self):
         self.changed = False
+        self.nb_imported = 0

     def on_import(self, fgraph, node, reason):
+        self.nb_imported += 1
         self.changed = True

     def on_change_input(self, fgraph, node, i, r, new_r, reason):
@@ -1742,13 +1744,14 @@ class EquilibriumOptimizer(NavigatorOptimizer):
     def add_requirements(self, fgraph):
         super(EquilibriumOptimizer, self).add_requirements(fgraph)
-        fgraph.attach_feature(ChangeTracker())
         for opt in self.get_local_optimizers():
             opt.add_requirements(fgraph)
         for opt in self.global_optimizers:
             opt.add_requirements(fgraph)

     def apply(self, fgraph, start_from=None):
+        change_tracker = ChangeTracker()
+        fgraph.attach_feature(change_tracker)
         if start_from is None:
             start_from = fgraph.outputs
         else:
@@ -1769,9 +1772,11 @@ class EquilibriumOptimizer(NavigatorOptimizer):
         time_opts = {}
         io_toposort_timing = []
         nb_nodes = []
+        node_created = {}
         for opt in self.global_optimizers + list(self.get_local_optimizers()):
             global_process_count.setdefault(opt, 0)
             time_opts.setdefault(opt, 0)
+            node_created.setdefault(opt, 0)

         while changed and not max_use_abort:
             process_count = {}
@@ -1780,15 +1785,17 @@ class EquilibriumOptimizer(NavigatorOptimizer):
             #apply global optimizers
             for gopt in self.global_optimizers:
-                fgraph.change_tracker.reset()
+                change_tracker.reset()
+                nb = change_tracker.nb_imported
                 t_opt = time.time()
                 gopt.apply(fgraph)
                 time_opts[gopt] += time.time() - t_opt
-                if fgraph.change_tracker.changed:
+                if change_tracker.changed:
                     process_count.setdefault(gopt, 0)
                     process_count[gopt] += 1
                     global_process_count[gopt] += 1
                     changed = True
+                    node_created[gopt] += change_tracker.nb_imported - nb
                     if global_process_count[gopt] > max_use:
                         max_use_abort = True
                         opt_name = (getattr(gopt, "name", None)
@@ -1825,6 +1832,7 @@ class EquilibriumOptimizer(NavigatorOptimizer):
                 for lopt in (self.local_optimizers_all +
                              self.local_optimizers_map.get(type(node.op), []) +
                              self.local_optimizers_map.get(node.op, [])):
+                    nb = change_tracker.nb_imported
                     t_opt = time.time()
                     lopt_change = self.process_node(fgraph, node, lopt)
                     time_opts[lopt] += time.time() - t_opt
@@ -1833,6 +1841,7 @@ class EquilibriumOptimizer(NavigatorOptimizer):
                         process_count[lopt] += 1
                         global_process_count[lopt] += 1
                         changed = True
+                        node_created[lopt] += change_tracker.nb_imported - nb
                         if global_process_count[lopt] > max_use:
                             max_use_abort = True
                             opt_name = (getattr(lopt, "name", None)
@@ -1853,10 +1862,11 @@ class EquilibriumOptimizer(NavigatorOptimizer):
                           + ". You can safely raise the current threshold of "
                           + "%f with the theano flag 'optdb.max_use_ratio'." %
                           config.optdb.max_use_ratio)
+        fgraph.remove_feature(change_tracker)
         return (self, loop_timing, loop_process_count,
                 (start_nb_nodes, end_nb_nodes, max_nb_nodes),
-                global_opt_timing, nb_nodes, time_opts, io_toposort_timing)
+                global_opt_timing, nb_nodes, time_opts, io_toposort_timing,
+                node_created)

     def print_summary(self, stream=sys.stdout, level=0, depth=-1):
         name = getattr(self, 'name', None)
@@ -1871,7 +1881,8 @@ class EquilibriumOptimizer(NavigatorOptimizer):
     def print_profile(stream, prof, level=0):
         (opt, loop_timing, loop_process_count,
          (start_nb_nodes, end_nb_nodes, max_nb_nodes),
-         global_opt_timing, nb_nodes, time_opts, io_toposort_timing) = prof
+         global_opt_timing, nb_nodes, time_opts, io_toposort_timing,
+         node_created) = prof
         blanc = ('    ' * level)
         print >> stream, blanc, "EquilibriumOptimizer",
@@ -1915,18 +1926,19 @@ class EquilibriumOptimizer(NavigatorOptimizer):
                 process_count[o] += v
         for opt, count in process_count.iteritems():
             if count > 0:
-                count_opt.append((time_opts[opt], count, opt))
+                count_opt.append((time_opts[opt], count,
+                                  node_created[opt], opt))
             else:
                 not_used.append((time_opts[opt], opt))
                 not_used_time += time_opts[opt]

         if count_opt:
             print >> stream, blanc, \
-                    '  times - times applied - name:'
+                    '  times - times applied - nb node created - name:'
             count_opt.sort()
-            for (t, count, opt) in count_opt[::-1]:
-                print >> stream, blanc, '  %.3fs - %d - %s' % (
-                    t, count, opt)
+            for (t, count, n_created, opt) in count_opt[::-1]:
+                print >> stream, blanc, '  %.3fs - %d - %d - %s' % (
+                    t, count, n_created, opt)
             print >> stream, blanc, '  %.3fs - in %d optimization that where not used (display only those with a runtime > 0)' % (
                 not_used_time, len(not_used))
             not_used.sort()
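Taken together, the theano/gof/opt.py changes make EquilibriumOptimizer track how many nodes each optimizer creates: ChangeTracker gains an nb_imported counter, apply() attaches its own tracker (and removes it on exit, instead of leaving one permanently attached by add_requirements()), and the per-optimizer delta of nb_imported is returned in the profile and printed as a new "nb node created" column. A minimal runnable sketch of this counting pattern follows; Tracker and Graph are simplified stand-ins, not Theano's real classes.

    # Minimal sketch of the node-creation counting pattern introduced here.
    # Tracker and Graph are simplified stand-ins, not Theano's real classes.
    class Tracker(object):
        def __init__(self):
            self.changed = False
            self.nb_imported = 0

        def on_import(self, node):
            self.nb_imported += 1
            self.changed = True

        def reset(self):
            self.changed = False

    class Graph(object):
        def __init__(self):
            self.features = []

        def attach_feature(self, f):
            self.features.append(f)

        def remove_feature(self, f):
            self.features.remove(f)

        def import_node(self, node):
            for f in self.features:
                f.on_import(node)

    graph = Graph()
    tracker = Tracker()
    graph.attach_feature(tracker)      # attached per apply() call

    node_created = {}
    for opt_name in ["opt_a", "opt_b"]:
        tracker.reset()
        before = tracker.nb_imported   # snapshot before the pass runs
        graph.import_node(object())    # pretend the pass created a node
        node_created[opt_name] = tracker.nb_imported - before

    graph.remove_feature(tracker)      # detached on exit, as in the patch
    print(node_created)                # {'opt_a': 1, 'opt_b': 1}

Attaching the tracker inside apply() also means its counter lives exactly as long as one optimization run, so deltas taken around each pass are not polluted by earlier runs.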
theano/sandbox/cuda/opt.py
@@ -304,7 +304,11 @@ def local_gpu_elemwise_1(node):
 def local_gpu_split(node):
     if isinstance(node.op, tensor.Split):
         input = node.inputs[0]
-        if input.owner and isinstance(input.owner.op, HostFromGpu):
+        outs_clients = reduce(list.__add__,
+                              [out.clients for out in node.outputs])
+        if (input.owner and isinstance(input.owner.op, HostFromGpu) or
+            any([c != 'output' and isinstance(c.op, GpuFromHost)
+                 for c, idx in outs_clients])):
             new_op = GpuSplit(node.op.len_splits)
             split_res = new_op(gpu_from_host(input), *node.inputs[1:])
             return [host_from_gpu(o) for o in split_res]
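The change to local_gpu_split makes the optimizer fire in both transfer directions: as before, when the Split input is already a HostFromGpu result, and now also when any client of a Split output immediately moves it back to the GPU with GpuFromHost. Below is a small, self-contained sketch of the client-scanning idiom; FakeApply and the op classes are hypothetical stand-ins for Theano's Apply nodes and ops.

    from functools import reduce

    # Hypothetical stand-ins for Theano's op classes and Apply nodes.
    class GpuFromHost(object): pass
    class SomeCpuOp(object): pass

    class FakeApply(object):
        def __init__(self, op):
            self.op = op

    # Each output carries a list of (client, input_index) pairs; a client
    # is an Apply-like node, or the string 'output' for graph outputs.
    outputs_clients = [
        [(FakeApply(GpuFromHost()), 0)],               # moved to the GPU
        [('output', 0), (FakeApply(SomeCpuOp()), 1)],  # stays on the host
    ]

    # Flatten the per-output client lists, as the patched optimizer does.
    outs_clients = reduce(list.__add__, outputs_clients)

    # True if at least one client transfers a Split output to the GPU.
    moved = any([c != 'output' and isinstance(c.op, GpuFromHost)
                 for c, idx in outs_clients])
    print(moved)  # True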
theano/sandbox/cuda/tests/test_opt.py
@@ -289,7 +289,7 @@ def test_local_gpu_subtensor():
     assert any([isinstance(node.op, cuda.GpuElemwise) for node in topo])


-def test_local_split():
+def test_local_gpu_split():
     """ Test that the GpuSplit op is being applied and works """
     # Construct symbolic split
     x = tensor.fvector()
@@ -310,6 +310,17 @@ def test_local_split():
     # Check equality
     assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])

+    # Test the other path of the optimizer, when it is the output that
+    # is moved to the GPU.
+    ra = cuda.gpu_from_host(ra)
+    f = theano.function([x, splits], [ra, rb, rc],
+                        mode=mode_with_gpu.excluding("InputToGpuOptimizer"))
+    gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
+    l = f.maker.fgraph.toposort()
+    assert any([isinstance(o.op, theano.sandbox.cuda.GpuSplit) for o in l])
+    # Check equality
+    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
+

 def test_print_op():
     """ Test that print ops don't block gpu optimization"""
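The renamed test exercises the new output-side path: it re-wraps one output in cuda.gpu_from_host and excludes InputToGpuOptimizer so the input is not moved to the GPU first, then asserts that a GpuSplit node still appears in the compiled graph. As a plain-NumPy reference for the values the test compares, tensor.Split with sizes [3, 2, 1] on a 6-element vector behaves like this sketch; split_sizes is an illustrative helper, not a Theano function.

    import numpy

    # CPU reference for the behaviour under test: split a vector into
    # consecutive pieces of the given sizes.
    def split_sizes(x, sizes):
        out, start = [], 0
        for s in sizes:
            out.append(x[start:start + s])
            start += s
        return out

    x = numpy.asarray([0, 1, 2, 3, 4, 5], dtype='float32')
    ra, rb, rc = split_sizes(x, [3, 2, 1])
    assert (ra == [0, 1, 2]).all()
    assert (rb == [3, 4]).all()
    assert (rc == [5]).all()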
theano/tensor/signal/downsample.py
@@ -197,13 +197,11 @@ class DownsampleFactorMax(Op):
                 'DownsampleFactorMax requires 4D input for now')
         z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st)
         if (z[0] is None) or (z[0].shape != z_shape):
-            z[0] = numpy.zeros(self.out_shape(x.shape, self.ds,
-                                              self.ignore_border, self.st))
-            z[0] = theano._asarray(z[0], dtype=x.dtype)
+            z[0] = numpy.empty(self.out_shape(x.shape, self.ds,
+                                              self.ignore_border, self.st),
+                               dtype=x.dtype)
         zz = z[0]
-        ## zz needs to be initialized with -inf for the following to work
-        zz -= numpy.inf
         #number of pooling output rows
         pr = zz.shape[-2]
         #number of pooling output cols
@@ -221,11 +219,8 @@ class DownsampleFactorMax(Op):
                     for c in xrange(pc):
                         col_st = c * st1
                         col_end = __builtin__.min(col_st + ds1, img_cols)
-                        for row_ind in xrange(row_st, row_end):
-                            for col_ind in xrange(col_st, col_end):
-                                zz[n, k, r, c] = \
-                                    __builtin__.max(zz[n, k, r, c],
-                                                    x[n, k, row_ind, col_ind])
+                        zz[n, k, r, c] = x[n, k,
+                                           row_st:row_end,
+                                           col_st:col_end].max()

     def infer_shape(self, node, in_shapes):
         shp = self.out_shape(in_shapes[0], self.ds,
@@ -594,8 +589,8 @@ class DownsampleFactorMaxGradGrad(Op):
...
@@ -594,8 +589,8 @@ class DownsampleFactorMaxGradGrad(Op):
z_shape
=
self
.
out_shape
(
x
.
shape
,
self
.
ds
,
self
.
ignore_border
,
self
.
st
)
z_shape
=
self
.
out_shape
(
x
.
shape
,
self
.
ds
,
self
.
ignore_border
,
self
.
st
)
if
(
z
[
0
]
is
None
)
or
(
z
[
0
]
.
shape
!=
z_shape
):
if
(
z
[
0
]
is
None
)
or
(
z
[
0
]
.
shape
!=
z_shape
):
z
[
0
]
=
numpy
.
zeros
(
self
.
out_shape
(
x
.
shape
,
self
.
ds
,
z
[
0
]
=
numpy
.
zeros
(
self
.
out_shape
(
x
.
shape
,
self
.
ds
,
self
.
ignore_border
,
self
.
st
)
)
self
.
ignore_border
,
self
.
st
)
,
z
[
0
]
=
theano
.
_asarray
(
z
[
0
],
dtype
=
x
.
dtype
)
dtype
=
x
.
dtype
)
ggz
=
z
[
0
]
ggz
=
z
[
0
]
#number of pooling output rows
#number of pooling output rows
...
...
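In downsample.py, the Python perform() of max-pooling gets two simplifications: the output buffer is allocated once with numpy.empty(..., dtype=x.dtype), removing both the zeros-then-_asarray copy and the -inf pre-fill, and the innermost two loops collapse into a single .max() over the pooling window slice. DownsampleFactorMaxGradGrad keeps numpy.zeros but likewise passes dtype directly. A hedged NumPy check that the slice-based max matches the old -inf accumulation loop; shapes and names here are illustrative, not Theano's internals.

    import numpy

    rng = numpy.random.RandomState(0)
    x = rng.rand(2, 3, 6, 6).astype('float32')  # (batch, channel, rows, cols)
    ds = (2, 2)                                 # pooling window; stride == window
    pr, pc = x.shape[-2] // ds[0], x.shape[-1] // ds[1]

    # Old approach: zero-init, subtract inf, fold in one element at a time.
    old = numpy.zeros((2, 3, pr, pc), dtype=x.dtype)
    old -= numpy.inf
    # New approach: allocate empty and write each window's max directly.
    new = numpy.empty((2, 3, pr, pc), dtype=x.dtype)
    for n in range(2):
        for k in range(3):
            for r in range(pr):
                for c in range(pc):
                    rs, cs = r * ds[0], c * ds[1]
                    for i in range(rs, rs + ds[0]):
                        for j in range(cs, cs + ds[1]):
                            old[n, k, r, c] = max(old[n, k, r, c],
                                                  x[n, k, i, j])
                    new[n, k, r, c] = x[n, k,
                                        rs:rs + ds[0],
                                        cs:cs + ds[1]].max()

    assert (old == new).all()

Beyond being shorter, the slice form lets NumPy compute each window's max in C instead of looping over scalars in Python, which is where the commit's "small speed up" comes from.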