Commit 992f9046
Authored Jun 30, 2015 by Frederic
Move softmaxgrad dnn test to test_dnn.py file
Parent: 0844dcb4
Showing 2 changed files with 135 additions and 169 deletions.
theano/sandbox/gpuarray/tests/test_dnn.py: +119 -0
theano/sandbox/gpuarray/tests/test_nnet.py: +16 -169
theano/sandbox/gpuarray/tests/test_dnn.py
@@ -16,6 +16,7 @@ from .. import dnn
 from ..basic_ops import GpuAllocEmpty
 from .test_basic_ops import mode_with_gpu, mode_without_gpu
+from . import test_nnet


 def test_dnn_conv_desc_merge():
@@ -686,3 +687,121 @@ def test_version():
     if not dnn.dnn_available():
         raise SkipTest(dnn.dnn_available.msg)
     assert isinstance(dnn.version(), (int, tuple))
+
+
+class test_SoftMax(test_nnet.test_SoftMax):
+    gpu_op = dnn.GpuDnnSoftmax
+    gpu_grad_op = dnn.GpuDnnSoftmaxGrad
+    mode = mode_with_gpu
+
+    def test_softmax_shape_0(self):
+        raise SkipTest("Cudnn do not suport 0 shapes")
+
+    def test_softmax_grad(self):
+        def cmp(n, m, f, f_gpu):
+            data = numpy.arange(n * m, dtype='float32').reshape(n, m)
+            gdata = numpy.asarray(data)[:, :, None, None]
+
+            out = f(data)
+            gout = numpy.asarray(f_gpu(gdata))[:, :, 0, 0]
+            assert numpy.allclose(out, gout), numpy.absolute(out - gout)
+
+        x = T.matrix('x', 'float32')
+        x_gpu = T.tensor4('x_gpu', 'float32')
+        f_z = T.nnet.softmax_op
+        f_gpu = dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')
+
+        # Verify the grad operation
+        dims = (2, 3, 4, 5)
+        gdata = numpy.arange(numpy.product(dims),
+                             dtype='float32').reshape(dims)
+        T.verify_grad(f_gpu, [gdata], rng=numpy.random, mode=mode_with_gpu)
+
+        # Verify that the CPU and GPU implementations return the same results
+        # up to a tolerance.
+        self._test_softmax(
+            x,
+            x_gpu,
+            f_z,
+            f_gpu,
+            cmp,
+            self.mode,
+        )
+
+        self._test_softmax(x, x, f_z, f_z, self._cmp, self.mode)
+
+        # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad
+        # optimization is applied when cudnn is required
+        y = T.fvector('y')
+        f = theano.function(
+            [y],
+            T.grad(T.nnet.softmax(y).mean(), y),
+            mode=mode_with_gpu
+        )
+        sorted_f = f.maker.fgraph.toposort()
+        assert (len([i for i in sorted_f
+                     if isinstance(i.op, self.gpu_grad_op)]) == 1)
+        assert (len([i for i in sorted_f
+                     if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0)
+
+        # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad
+        # optimization is not applied when cudnn is excluded or not
+        # available
+        mode_wo_cudnn = mode_with_gpu.excluding("cudnn")
+        y = T.fvector('y')
+        f = theano.function(
+            [y],
+            T.grad(T.nnet.softmax(y).mean(), y),
+            mode=mode_wo_cudnn
+        )
+        sorted_f = f.maker.fgraph.toposort()
+        assert (len([i for i in sorted_f
+                     if isinstance(i.op, self.gpu_grad_op)]) == 0)
+        assert (len([i for i in sorted_f
+                     if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 1)
+
+        # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad do not
+        # crash with manual graph
+        y = T.fvector('y')
+        o = theano.tensor.nnet.SoftmaxGrad()(y, y * 2)
+        f = theano.function([y], o, mode=mode_with_gpu)
+        sorted_f = f.maker.fgraph.toposort()
+        assert (len([i for i in sorted_f
+                     if isinstance(i.op, self.gpu_grad_op)]) == 1)
+        assert (len([i for i in sorted_f
+                     if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0)
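Note on the pattern used here: the new class reuses every test from test_nnet.test_SoftMax by subclassing it, overriding only the class attributes (gpu_op, gpu_grad_op, mode) and the cases cuDNN cannot handle. A minimal, framework-free sketch of this parameterize-by-subclass idiom follows; the names BaseOpTest, BackendOpTest, and op_under_test are illustrative, not from this commit:

import unittest

class BaseOpTest(unittest.TestCase):
    # Subclasses retarget every inherited test by overriding this attribute.
    op_under_test = staticmethod(lambda x: x * 2)

    def test_basic(self):
        self.assertEqual(self.op_under_test(3), 6)

    def test_edge_case(self):
        # A case some backends cannot handle; subclasses may skip it.
        self.assertEqual(self.op_under_test(0), 0)

class BackendOpTest(BaseOpTest):
    # Same test bodies, different implementation under test.
    op_under_test = staticmethod(lambda x: x << 1)

    def test_edge_case(self):
        # Mirrors the SkipTest override in the diff above.
        raise unittest.SkipTest("backend does not support this case")

if __name__ == '__main__':
    unittest.main()

Running this executes both test classes against their respective implementations, which is exactly how the inherited softmax tests get re-run against the cuDNN ops.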
theano/sandbox/gpuarray/tests/test_nnet.py
@@ -12,12 +12,13 @@ from theano.sandbox import gpuarray
 # We let that import do the init of the back-end if needed.
 from .test_basic_ops import (mode_with_gpu,
                              mode_without_gpu)
 from ..nnet import (GpuCrossentropySoftmaxArgmax1HotWithBias,
                     GpuCrossentropySoftmax1HotWithBiasDx,
                     GpuSoftmaxWithBias, GpuSoftmax)

+mode_wo_cudnn = mode_with_gpu.excluding("cudnn")

 def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
     """
@@ -294,6 +295,9 @@ def softmax_unittest_template(dtypeInput):


 class test_SoftMax(unittest.TestCase):
+    gpu_op = GpuSoftmax
+    mode = mode_wo_cudnn
+
     def _test_softmax(
             self,
             x,
@@ -301,8 +305,7 @@ class test_SoftMax(unittest.TestCase):
             f_z,
             f_gpu_z,
             cmp,
-            gpu_mode,
-            check_types):
+            gpu_mode):
         """
         This is basic test for GpuSoftmax and GpuDnnSoftmax
@@ -314,8 +317,8 @@ class test_SoftMax(unittest.TestCase):
         f_gpu_z_out = f_gpu_z(x_gpu)

         f = theano.function([x], f_z_out, mode=mode_without_gpu)
-        f_gpu = theano.function([x_gpu], f_gpu_z_out, mode=gpu_mode)
-        check_types(f, f_gpu)
+        f_gpu = theano.function([x_gpu], f_gpu_z_out, mode=self.mode)
+        self._check_types(f, f_gpu, T.nnet.Softmax, self.gpu_op)

         # we need to test n>32*1024 to check that we make the block loop.
         cmp(1, 5, f, f_gpu)
@@ -349,195 +352,39 @@ class test_SoftMax(unittest.TestCase):
             gout = f_gpu(data)
             assert numpy.allclose(out, gout), numpy.absolute(out - gout)

-    def _check_types(self, graph, graph_gpu, topo_idx, f_type, f_gpu_type):
+    def _check_types(self, graph, graph_gpu, f_type, f_gpu_type):
         assert isinstance(graph.maker.fgraph.toposort()[-1].op, f_type)
-        assert isinstance(
-            graph_gpu.maker.fgraph.toposort()[topo_idx].op,
-            f_gpu_type)
+        assert len([node for node in graph_gpu.maker.fgraph.toposort()
+                    if isinstance(node.op, f_gpu_type)]) == 1

     def test_softmax(self):
         x = T.fmatrix('x')
         z = T.nnet.softmax_op

-        def check_types_without_cudnn(graph, graph_gpu):
-            self._check_types(graph, graph_gpu, -2, type(z), GpuSoftmax)
-
-        mode_wo_cudnn = mode_with_gpu.excluding("cudnn")
         f, f_gpu = self._test_softmax(
             x,
             x,
             z,
             z,
             self._cmp,
-            mode_wo_cudnn,
-            check_types_without_cudnn)
+            self.mode,
+        )

         # cuDNN R1 cannot handle these test cases but the Theano softmax can so
         # we test them only for the Theano softmax.
         self._cmp(2 << 15, 5, f, f_gpu)
-        self._cmp(0, 10, f, f_gpu)

-    def test_softmax_cudnn(self):
-        from .. import dnn
-        if not dnn.dnn_available():
-            raise SkipTest(dnn.dnn_available.msg)
-
+    def test_softmax_shape_0(self):
         x = T.fmatrix('x')
         z = T.nnet.softmax_op
-
-        def check_types_with_cudnn(graph, graph_gpu):
-            self._check_types(graph, graph_gpu, -3, type(z),
-                              dnn.GpuDnnSoftmax)
-
         f, f_gpu = self._test_softmax(
             x,
             x,
             z,
             z,
             self._cmp,
-            mode_with_gpu,
-            check_types_with_cudnn)
+            self.mode,
+        )
+        # Theano can handle that case, but cudnn can't
+        self._cmp(0, 10, f, f_gpu)

-    def test_cudnn_softmax_grad(self):
-        from .. import dnn
-        if not dnn.dnn_available():
-            raise SkipTest(dnn.dnn_available.msg)
-
-        def cmp(n, m, f, f_gpu):
-            data = numpy.arange(n * m, dtype='float32').reshape(n, m)
-            gdata = numpy.asarray(data)[:, :, None, None]
-
-            out = f(data)
-            gout = numpy.asarray(f_gpu(gdata))[:, :, 0, 0]
-            assert numpy.allclose(out, gout), numpy.absolute(out - gout)
-
-        x = T.matrix('x', 'float32')
-        x_gpu = T.tensor4('x_gpu', 'float32')
-        f_z = T.nnet.softmax_op
-        f_gpu = dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')
-
-        # Verify the grad operation
-        dims = (2, 3, 4, 5)
-        gdata = numpy.arange(numpy.product(dims),
-                             dtype='float32').reshape(dims)
-        T.verify_grad(f_gpu, [gdata], rng=numpy.random, mode=mode_with_gpu)
-
-        def check_types(graph, graph_gpu):
-            self._check_types(graph, graph_gpu, -1, type(f_z),
-                              dnn.GpuDnnSoftmax)
-
-        def check_types_opt(graph, graph_gpu):
-            assert isinstance(graph.maker.fgraph.toposort()[-1].op,
-                              type(f_z))
-            assert len([n for n in graph_gpu.maker.fgraph.toposort()
-                        if isinstance(n.op, dnn.GpuDnnSoftmax)]) == 1
-
-        # Verify that the CPU and GPU implementations return the same results
-        # up to a tolerance.
-        self._test_softmax(x, x_gpu, f_z, f_gpu, cmp, mode_with_gpu,
-                           check_types)
-
-        mode_w_cudnn = mode_with_gpu.including("cudnn")
-        self._test_softmax(x, x, f_z, f_z, self._cmp, mode_w_cudnn,
-                           check_types_opt)
-
-        # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad optimization is
-        # applied when cudnn is required
-        y = T.fvector('y')
-        f = theano.function(
-            [y],
-            T.grad(T.nnet.softmax(y).mean(), y),
-            mode=mode_with_gpu
-        )
-        sorted_f = f.maker.fgraph.toposort()
-        assert (len([i for i in sorted_f
-                     if isinstance(i.op, dnn.GpuDnnSoftmaxGrad)]) == 1)
-        assert (len([i for i in sorted_f
-                     if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0)
-
-        # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad optimization is not
-        # applied when cudnn is excluded or not available
-        mode_wo_cudnn = mode_with_gpu.excluding("cudnn")
-        y = T.fvector('y')
-        f = theano.function(
-            [y],
-            T.grad(T.nnet.softmax(y).mean(), y),
-            mode=mode_wo_cudnn
-        )
-        sorted_f = f.maker.fgraph.toposort()
-        assert (len([i for i in sorted_f
-                     if isinstance(i.op, dnn.GpuDnnSoftmaxGrad)]) == 0)
-        assert (len([i for i in sorted_f
-                     if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 1)
-
-        # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad do not
-        # crash with manual graph
-        y = T.fvector('y')
-        o = theano.tensor.nnet.SoftmaxGrad()(y, y * 2)
-        f = theano.function([y], o, mode=mode_with_gpu)
-        sorted_f = f.maker.fgraph.toposort()
-        assert (len([i for i in sorted_f
-                     if isinstance(i.op, dnn.GpuDnnSoftmaxGrad)]) == 1)
-        assert (len([i for i in sorted_f
-                     if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0)
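A side note on the assertion style used throughout these tests: f.maker.fgraph.toposort() returns the apply nodes of the compiled graph in execution order, so counting instances of a given op class is how the tests verify whether a graph optimization fired. A minimal CPU-only sketch of the same inspection pattern, assuming only a working Theano install (no GPU or cuDNN needed):

import theano
import theano.tensor as T

y = T.fvector('y')
# Compile the gradient of a softmax; the optimizer decides which
# grad op ends up in the final graph.
f = theano.function([y], T.grad(T.nnet.softmax(y).mean(), y))

# List the compiled graph's apply nodes in execution order and
# count occurrences of a given op class.
nodes = f.maker.fgraph.toposort()
n_grad = len([n for n in nodes
              if isinstance(n.op, theano.tensor.nnet.SoftmaxGrad)])
print("SoftmaxGrad nodes in the compiled graph:", n_grad)

On a GPU with cuDNN enabled, the same count would be asserted to be 0 and the GpuDnnSoftmaxGrad count to be 1, which is exactly what the moved test_softmax_grad does.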