Commit c972873e, authored Mar 27, 2015 by Pascal Lamblin
Merge pull request #2688 from carriepl/scan_mem_usage
Add test for gpu memory usage
Parents: bec10668, 2b155686
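For context on what the new test guards against: Theano's scan has a PushOutScanOutput optimization that moves computation applied to a scan's outputs (for example, a dot product in the step function whose result never feeds back into the recurrence) out of the loop, so it runs once on the stacked result instead of once per time step. Below is a minimal, hedged sketch of the two equivalent formulations, with hypothetical small shapes and on CPU rather than `mode_with_gpu`; it illustrates the idea and is not the PR's code:

```python
import numpy
import theano
import theano.tensor as tensor

# Hypothetical toy dimensions, chosen only for illustration.
W = theano.shared(numpy.ones((4, 4), dtype='float32'), name='W')
xin = tensor.ftensor3('xin')  # assumed shape: (time, batch, 4)

# Inside-the-loop form: y_t is recomputed at every time step even
# though it never feeds back into the recurrence.
def step(x_t, h_tm1):
    h_t = x_t + h_tm1
    return h_t, tensor.dot(h_t, W)

(h, y_inner), _ = theano.scan(step, sequences=[xin],
                              outputs_info=[tensor.zeros_like(xin[0]),
                                            None])

# Pushed-out form: scan only carries the recurrence, and the dot is
# applied once to the whole stacked output. PushOutScanOutput is meant
# to rewrite the first form into something like this; when it fails to
# apply, the scan node keeps the large per-step output, which is the
# kind of memory blow-up the new test checks for.
def step_recur(x_t, h_tm1):
    return x_t + h_tm1

h2, _ = theano.scan(step_recur, sequences=[xin],
                    outputs_info=[tensor.zeros_like(xin[0])])
y_outer = tensor.dot(h2, W)
```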
Showing 1 changed file with 80 additions and 0 deletions:

theano/scan_module/tests/test_scan.py (+80, -0)
```diff
@@ -207,6 +207,7 @@ class T_Scan(unittest.TestCase):
     def setUp(self):
         utt.seed_rng()
+        super(T_Scan, self).setUp()
         # generator network, only one output , type scalar ; no sequence or
         # non sequence arguments
@@ -2696,6 +2697,85 @@ class T_Scan(unittest.TestCase):
         f2_vals = f2(x_val)
         utt.assert_allclose(f_vals, f2_vals)
 
+    def test_gpu_memory_usage(self):
+        # This test validates that the memory usage of the defined theano
+        # function is reasonnable when executed on the GPU. It checks for
+        # a bug in which one of scan's optimization was not applied which
+        # made the scan node compute large and unnecessary outputs which
+        # brought memory usage on the GPU to ~12G.
+
+        # The test must be performed on the GPU
+        from theano.sandbox import cuda
+        if not cuda.cuda_available:
+            raise SkipTest('Optional package cuda disabled')
+
+        # Dimensionality of input and output data (not one-hot coded)
+        n_in = 100
+        n_out = 100
+        # Number of neurons in hidden layer
+        n_hid = 4000
+        # Number of minibatches
+        mb_size = 2
+        # Time steps in minibatch
+        mb_length = 200
+
+        # Define input variables
+        xin = tensor.ftensor3(name='xin')
+        yout = tensor.ftensor3(name='yout')
+
+        # Initialize the network parameters
+        floatX = theano.config.floatX
+        U = theano.shared(numpy.zeros((n_in, n_hid), dtype="float32"),
+                          name='W_xin_to_l1')
+        V = theano.shared(numpy.zeros((n_hid, n_hid), dtype="float32"),
+                          name='W_l1_to_l1')
+        W = theano.shared(numpy.zeros((n_hid, n_out), dtype="float32"),
+                          name='W_l1_to_l2')
+        nparams = [U, V, W]
+
+        # Build the forward pass
+        l1_base = tensor.dot(xin, U)
+
+        def scan_l(baseline, last_step):
+            return baseline + tensor.dot(last_step, V)
+
+        zero_output = tensor.alloc(numpy.asarray(0., dtype="float32"),
+                                   mb_size, n_hid)
+
+        l1_out, _ = theano.scan(scan_l, sequences=[l1_base],
+                                outputs_info=[zero_output],
+                                mode=mode_with_gpu)
+
+        l2_out = tensor.dot(l1_out, W)
+
+        # Compute the cost and take the gradient wrt params
+        cost = tensor.sum((l2_out - yout) ** 2)
+        grads = tensor.grad(cost, nparams)
+        updates = zip(nparams, [n - g for n, g in zip(nparams, grads)])
+
+        # Compile the theano function
+        feval_backprop = theano.function([xin, yout], cost,
+                                         updates=updates,
+                                         mode=mode_with_gpu)
+
+        # Validate that the PushOutScanOutput optimization has been applied
+        # by checking the number of outputs of the grad Scan node in the
+        # compiled function.
+        nodes = feval_backprop.maker.fgraph.toposort()
+        scan_nodes = [n for n in nodes if isinstance(
+            n.op, theano.scan_module.scan_op.Scan)]
+
+        # The grad scan is always the 2nd one according to toposort. If the
+        # optimization has been applied, it has 2 outputs, otherwise 3.
+        grad_scan_node = scan_nodes[1]
+        assert len(grad_scan_node.outputs) == 2
+
+        # Call the theano function to ensure the absence of a memory error
+        feval_backprop(numpy.zeros((mb_length, mb_size, n_in),
+                                   dtype="float32"),
+                       numpy.zeros((mb_length, mb_size, n_out),
+                                   dtype="float32"))
+
     def test_reduce_memory_consumption(self):
         x = theano.shared(numpy.asarray(
```
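The assertion in the new test relies on a general idiom: compile a function, walk its optimized graph with `toposort()`, and inspect the Scan nodes. Here is a self-contained sketch of that idiom on a toy graph, using the default CPU mode rather than `mode_with_gpu`; the shapes and names are hypothetical:

```python
import numpy
import theano
import theano.tensor as tensor

x = tensor.fmatrix('x')  # assumed shape: (time, 3)
w = theano.shared(numpy.ones((3, 3), dtype='float32'), name='w')

def step(x_t, h_tm1):
    return x_t + tensor.dot(h_tm1, w)

h, _ = theano.scan(step, sequences=[x],
                   outputs_info=[tensor.zeros((3,), dtype='float32')])
cost = h.sum()
f = theano.function([x], theano.grad(cost, w))

# Locate the Scan nodes in the optimized graph, exactly as the test
# does; with a gradient in the graph there is typically a forward scan
# and a gradient scan, and each node's outputs can be counted.
nodes = f.maker.fgraph.toposort()
scan_nodes = [n for n in nodes
              if isinstance(n.op, theano.scan_module.scan_op.Scan)]
for node in scan_nodes:
    print(len(node.outputs))
```

On a CUDA machine, the committed test itself would presumably be selected with nose's `file:class.method` syntax, e.g. `nosetests theano/scan_module/tests/test_scan.py:T_Scan.test_gpu_memory_usage` under `THEANO_FLAGS=device=gpu`; the exact invocation is an assumption, not part of the commit.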