testgroup / pytensor / Commits

Commit c81f8c36, authored Nov 10, 2015 by Samira Shabanian
Parent: 33c97605

    pep8 typed list directory

Showing 4 changed files with 60 additions and 58 deletions.

Note: as the commit message suggests, most hunks are pep8 whitespace fixes (continuation-line indentation, spacing around operators, blank lines between definitions); where a removed and an added line below read identically, only the whitespace differs.
Changed files:

- theano/tensor/signal/conv.py (+7, -5)
- theano/tensor/signal/downsample.py (+29, -23)
- theano/tensor/signal/tests/test_conv.py (+5, -6)
- theano/tensor/signal/tests/test_downsample.py (+19, -24)
theano/tensor/signal/conv.py (+7, -5)

```diff
@@ -3,8 +3,6 @@ Contains a wrapper function for tensor.nnet.ConvOp, which can be used to perform
 generic 2D convolution.
 """
-__docformat__ = "restructuredtext en"
-
 import warnings
 import theano
@@ -12,6 +10,10 @@ import theano.tensor as tensor
 from theano.tensor.nnet import conv
 
 import logging
+
+__docformat__ = "restructuredtext en"
+
 _logger = logging.getLogger("theano.tensor.signal.conv")
@@ -52,7 +54,7 @@ def conv2d(input, filters, image_shape=None, filter_shape=None,
     assert input.ndim in (2, 3)
     assert filters.ndim in (2, 3)
 
-    ### use shape information if it is given to us ###
+    # use shape information if it is given to us ###
     if filter_shape and image_shape:
         if input.ndim == 3:
             bsize = image_shape[0]
@@ -69,7 +71,7 @@ def conv2d(input, filters, image_shape=None, filter_shape=None,
         nkern, kshp = None, None
         bsize, imshp = None, None
 
-    ### reshape tensors to 4D, for compatibility with ConvOp ###
+    # reshape tensors to 4D, for compatibility with ConvOp ###
    if input.ndim == 3:
        sym_bsize = input.shape[0]
    else:
@@ -86,7 +88,7 @@ def conv2d(input, filters, image_shape=None, filter_shape=None,
         new_filter_shape = tensor.join(0, tensor.stack([sym_nkern, 1]),
                                        filters.shape[-2:])
         filters4D = tensor.reshape(filters, new_filter_shape, ndim=4)
 
-    ### perform actual convolution ###
+    # perform actual convolution ###
     op = conv.ConvOp(output_mode=border_mode,
                      dx=subsample[0], dy=subsample[1],
                      imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize,
                      **kargs)
```
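For orientation: `conv2d` here is a thin wrapper that promotes 2D/3D inputs and filters to the 4D layout `ConvOp` expects, then applies a valid-mode 2D convolution (with the kernel flipped, as in true convolution). Below is a minimal NumPy sketch of that core computation; the helper name and shapes are hypothetical, not code from this commit:

```python
import numpy

def conv2d_valid(image, kernel):
    # 'valid'-mode 2D convolution: correlation with the flipped kernel
    kh, kw = kernel.shape
    oh = image.shape[0] - kh + 1
    ow = image.shape[1] - kw + 1
    flipped = kernel[::-1, ::-1]
    out = numpy.empty((oh, ow), dtype=image.dtype)
    for i in range(oh):
        for j in range(ow):
            out[i, j] = (image[i:i + kh, j:j + kw] * flipped).sum()
    return out

img = numpy.arange(25.0).reshape(5, 5)
ker = numpy.ones((3, 3))
assert conv2d_valid(img, ker).shape == (3, 3)  # (5 - 3 + 1, 5 - 3 + 1)
```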
theano/tensor/signal/downsample.py (+29, -23)

```diff
@@ -18,6 +18,7 @@ from theano import gof, Op, tensor, Variable, Apply
 from theano.tensor.opt import register_canonicalize
 
+
 def max_pool2D(*args, **kwargs):
     import sys
     print("DEPRECATION: max_pool2D renamed to max_pool_2d", file=sys.stderr)
```
```diff
@@ -206,8 +207,8 @@ class DownsampleFactorMax(Op):
         if isinstance(r, theano.Variable):
             nr = tensor.switch(tensor.ge(st[0], ds[0]),
                                (r - 1) // st[0] + 1,
-                               tensor.maximum(0, (r - 1 - ds[0])
-                               // st[0] + 1) + 1)
+                               tensor.maximum(0, (r - 1 - ds[0])
+                                              // st[0] + 1) + 1)
         elif st[0] >= ds[0]:
             nr = (r - 1) // st[0] + 1
         else:
```
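The `tensor.switch` above picks between two closed forms for the pooled length along one axis (this is the `ignore_border=False` branch of `out_shape`): `(r - 1) // st + 1` when the stride covers the pool size, otherwise `max(0, (r - 1 - ds) // st + 1) + 1`. A quick sketch checking that arithmetic against a direct window count (plain Python, illustrative only, not code from the commit):

```python
def pooled_len(r, ds, st):
    # closed form used by out_shape (ignore_border=False branch)
    if st >= ds:
        return (r - 1) // st + 1
    return max(0, (r - 1 - ds) // st + 1) + 1

def pooled_len_by_counting(r, ds, st):
    # walk the window starts; pooling stops at the first window
    # that reaches the right edge of the input
    n, start = 0, 0
    while start < r:
        n += 1
        if start + ds >= r:
            break
        start += st
    return n

for r in range(1, 30):
    for ds in range(1, 6):
        for st in range(1, 6):
            assert pooled_len(r, ds, st) == pooled_len_by_counting(r, ds, st)
```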
The same change is applied to the column computation:

```diff
@@ -216,8 +217,8 @@ class DownsampleFactorMax(Op):
         if isinstance(c, theano.Variable):
             nc = tensor.switch(tensor.ge(st[1], ds[1]),
                                (c - 1) // st[1] + 1,
-                               tensor.maximum(0, (c - 1 - ds[1])
-                               // st[1] + 1) + 1)
+                               tensor.maximum(0, (c - 1 - ds[1])
+                                              // st[1] + 1) + 1)
         elif st[1] >= ds[1]:
             nc = (c - 1) // st[1] + 1
         else:
```
```diff
@@ -289,7 +290,7 @@ class DownsampleFactorMax(Op):
             y = numpy.zeros(
                 (x.shape[0], x.shape[1], img_rows, img_cols),
                 dtype=x.dtype)
-            y[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)] = x
+            y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
         else:
             y = x
         func = numpy.max
```
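In `perform`, padding is done by allocating a zero array of the padded size and copying `x` into its interior, with `img_rows = x.shape[2] + 2 * pad_h` and likewise for columns. A standalone equivalent of that slice assignment (variable values chosen arbitrarily for illustration):

```python
import numpy

x = numpy.arange(2 * 3 * 4 * 4, dtype='float64').reshape(2, 3, 4, 4)
pad_h, pad_w = 1, 2
img_rows = x.shape[2] + 2 * pad_h
img_cols = x.shape[3] + 2 * pad_w

# allocate the padded array and place x in its interior
y = numpy.zeros((x.shape[0], x.shape[1], img_rows, img_cols), dtype=x.dtype)
y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x

# same result as numpy.pad with zeros on the last two axes
assert numpy.array_equal(
    y, numpy.pad(x, ((0, 0), (0, 0), (pad_h, pad_h), (pad_w, pad_w)),
                 mode='constant'))
```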
```diff
@@ -336,6 +337,7 @@ class DownsampleFactorMax(Op):
                               st=self.st, padding=self.padding,
                               mode=self.mode)(x, gz)]
+
     def c_headers(self):
         return ['<algorithm>']
```
```diff
@@ -522,6 +524,7 @@ class DownsampleFactorMax(Op):
     def c_code_cache_version(self):
         return (0, 6, 8, 3)
 
+
 class PoolGrad(Op):
     __props__ = ('ds', 'ignore_border', 'st', 'padding', 'mode')
```
`PoolGrad.out_shape` gets the same continuation-line fixes as `DownsampleFactorMax.out_shape`:

```diff
@@ -582,8 +585,8 @@ class PoolGrad(Op):
         if isinstance(r, theano.Variable):
             nr = tensor.switch(tensor.ge(st[0], ds[0]),
                                (r - 1) // st[0] + 1,
-                               tensor.maximum(0, (r - 1 - ds[0])
-                               // st[0] + 1) + 1)
+                               tensor.maximum(0, (r - 1 - ds[0])
+                                              // st[0] + 1) + 1)
         elif st[0] >= ds[0]:
             nr = (r - 1) // st[0] + 1
         else:
@@ -592,8 +595,8 @@ class PoolGrad(Op):
         if isinstance(c, theano.Variable):
             nc = tensor.switch(tensor.ge(st[1], ds[1]),
                                (c - 1) // st[1] + 1,
-                               tensor.maximum(0, (c - 1 - ds[1])
-                               // st[1] + 1) + 1)
+                               tensor.maximum(0, (c - 1 - ds[1])
+                                              // st[1] + 1) + 1)
         elif st[1] >= ds[1]:
             nc = (c - 1) // st[1] + 1
         else:
```
```diff
@@ -656,7 +659,7 @@ class MaxPoolGrad(PoolGrad):
             y = numpy.zeros(
                 (x.shape[0], x.shape[1], img_rows, img_cols),
                 dtype=x.dtype)
-            y[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)] = x
+            y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
         else:
             y = x
         gx = numpy.zeros_like(y)
```
```diff
@@ -673,7 +676,7 @@ class MaxPoolGrad(PoolGrad):
                                 if (maxout[n, k, r, c] == y[n, k, row_ind, col_ind]):
                                     gx[n, k, row_ind, col_ind] += gz[n, k, r, c]
         # unpad the image
-        gx = gx[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)]
+        gx = gx[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)]
         gx_stg[0] = gx
 
     def grad(self, inp, grads):
```
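The loop touched above routes the upstream gradient back to the argmax: an input cell receives `gz` whenever its value equals the window's max (`maxout`), so ties receive the gradient more than once, and the pad border is sliced away afterwards. A stripped-down 1-D sketch of that routing (non-overlapping windows, `ignore_border=True`; illustrative only, not the Op's actual code):

```python
import numpy

def max_pool_grad_1d(x, gz, ds):
    """Route gz back to the max of each non-overlapping window."""
    n = len(x) // ds
    gx = numpy.zeros_like(x)
    for i in range(n):
        window = x[i * ds:(i + 1) * ds]
        m = window.max()
        for j in range(ds):
            # every element equal to the max receives the gradient,
            # matching the `maxout[...] == y[...]` test above
            if window[j] == m:
                gx[i * ds + j] += gz[i]
    return gx

x = numpy.array([1.0, 3.0, 2.0, 2.0])
gz = numpy.array([10.0, 5.0])      # upstream gradient, one per window
print(max_pool_grad_1d(x, gz, 2))  # [ 0. 10.  5.  5.]
```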
```diff
@@ -804,6 +807,7 @@ class MaxPoolGrad(PoolGrad):
 DownsampleFactorMaxGrad = MaxPoolGrad
 
+
 class AveragePoolGrad(PoolGrad):
     def __init__(self, ds, ignore_border, st=None, padding=(0, 0),
                  mode='average_inc_pad'):
```
```diff
@@ -848,7 +852,7 @@ class AveragePoolGrad(PoolGrad):
             y = numpy.zeros(
                 (x.shape[0], x.shape[1], img_rows, img_cols),
                 dtype=x.dtype)
-            y[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)] = x
+            y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
         else:
             y = x
         gx = numpy.zeros_like(y)
```
```diff
@@ -874,7 +878,7 @@ class AveragePoolGrad(PoolGrad):
                                    (col_end - col_st))
                             gx[n, k, row_st:row_end, col_st:col_end] += val
         # unpad the image
-        gx = gx[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)]
+        gx = gx[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)]
         gx_stg[0] = gx
 
     def grad(self, inp, grads):
```
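Here the average-pool gradient divides each window's upstream gradient by the window area (`(row_end - row_st) * (col_end - col_st)`) and adds that share uniformly over the slice, via `gx[n, k, row_st:row_end, col_st:col_end] += val`. A 1-D sketch of the same idea (hypothetical helper, not the Op's code):

```python
import numpy

def average_pool_grad_1d(gz, ds, n_in):
    """Spread each window's gradient evenly over the inputs it averaged."""
    gx = numpy.zeros(n_in)
    for i in range(len(gz)):
        start, end = i * ds, min((i + 1) * ds, n_in)
        # each contributing input gets an equal share, mirroring the
        # slice-assignment `gx[..., row_st:row_end, col_st:col_end] += val`
        gx[start:end] += gz[i] / (end - start)
    return gx

print(average_pool_grad_1d(numpy.array([6.0, 4.0]), 2, 4))  # [3. 3. 2. 2.]
```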
```diff
@@ -885,6 +889,7 @@ class AveragePoolGrad(PoolGrad):
                                  self.ds, ignore_border=self.ignore_border,
                                  st=self.st, padding=self.padding,
                                  mode=self.mode)(ggx)]
 
+
 class DownsampleFactorMaxGradGrad(Op):
     __props__ = ('ds', 'ignore_border', 'st', 'padding', 'mode')
```
`DownsampleFactorMaxGradGrad.out_shape` repeats the same pattern once more:

```diff
@@ -946,8 +951,8 @@ class DownsampleFactorMaxGradGrad(Op):
         if isinstance(r, theano.Variable):
             nr = tensor.switch(tensor.ge(st[0], ds[0]),
                                (r - 1) // st[0] + 1,
-                               tensor.maximum(0, (r - 1 - ds[0])
-                               // st[0] + 1) + 1)
+                               tensor.maximum(0, (r - 1 - ds[0])
+                                              // st[0] + 1) + 1)
         elif st[0] >= ds[0]:
             nr = (r - 1) // st[0] + 1
         else:
@@ -956,8 +961,8 @@ class DownsampleFactorMaxGradGrad(Op):
         if isinstance(c, theano.Variable):
             nc = tensor.switch(tensor.ge(st[1], ds[1]),
                                (c - 1) // st[1] + 1,
-                               tensor.maximum(0, (c - 1 - ds[1])
-                               // st[1] + 1) + 1)
+                               tensor.maximum(0, (c - 1 - ds[1])
+                                              // st[1] + 1) + 1)
         elif st[1] >= ds[1]:
             nc = (c - 1) // st[1] + 1
         else:
```
```diff
@@ -966,7 +971,7 @@ class DownsampleFactorMaxGradGrad(Op):
         rval = list(imgshape[:-2]) + [nr, nc]
         return rval
 
-    def __init__(self, ds, ignore_border, st=None, padding=(0,0), mode='max'):
+    def __init__(self, ds, ignore_border, st=None, padding=(0, 0), mode='max'):
         self.ds = tuple(ds)
         if not all([isinstance(d, int) for d in ds]):
             raise ValueError(
```
```diff
@@ -1025,11 +1030,11 @@ class DownsampleFactorMaxGradGrad(Op):
             y_padded = numpy.zeros(
                 (x.shape[0], x.shape[1], img_rows, img_cols),
                 dtype=x.dtype) + x.min() - 1
-            y_padded[:, :, pd0:(img_rows-pd0), pd1:(img_cols-pd1)] = x
+            y_padded[:, :, pd0:(img_rows - pd0), pd1:(img_cols - pd1)] = x
             ggx_padded = numpy.zeros(
                 (x.shape[0], x.shape[1], img_rows, img_cols),
                 dtype=x.dtype)
-            ggx_padded[:, :, pd0:(img_rows-pd0), pd1:(img_cols-pd1)] = ggx
+            ggx_padded[:, :, pd0:(img_rows - pd0), pd1:(img_cols - pd1)] = ggx
         else:
             y_padded = x
```
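One detail worth noting in this hunk: `y_padded` is initialized to `x.min() - 1` rather than zero, so a padding cell can never tie with or beat a real input when the max is recomputed (zero padding would win over all-negative inputs). A tiny demonstration of why that matters (illustrative values, not from the commit):

```python
import numpy

x = numpy.array([[-5.0, -2.0], [-7.0, -1.0]])
pad = 1
rows, cols = x.shape[0] + 2 * pad, x.shape[1] + 2 * pad

# zero padding would be wrong here: 0 beats every (negative) input,
# so pad with a value strictly below the smallest input instead
y_padded = numpy.zeros((rows, cols), dtype=x.dtype) + x.min() - 1
y_padded[pad:rows - pad, pad:cols - pad] = x

# the maximum over any window containing padding still comes from x
assert y_padded.max() == x.max()
```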
```diff
@@ -1130,10 +1135,11 @@ class DownsampleFactorMaxGradGrad(Op):
         }
         }
         }
         """ % locals()
 
     def c_code_cache_version(self):
         return (0, 1)
 
+
 @register_canonicalize('fast_compile')
 @gof.local_optimizer([MaxPoolGrad])
```
```diff
@@ -1141,7 +1147,7 @@ def local_average_pool_grad(node):
     # To assure backward compatibility with
     # DownsampleFactorMaxGrad
     if (not isinstance(node.op, MaxPoolGrad) or node.op.mode not in
-        ['sum', 'average_exc_pad', 'average_inc_pad']):
+            ['sum', 'average_exc_pad', 'average_inc_pad']):
         return False
     return [AveragePoolGrad(ds=node.op.ds,
                             ignore_border=node.op.ignore_border,
```
theano/tensor/signal/tests/test_conv.py (+5, -6)

```diff
@@ -31,7 +31,7 @@ class TestSignalConv2D(unittest.TestCase):
         if filter_dim != 3:
             nkern = 1
 
-        ############# THEANO IMPLEMENTATION ############
+        # THEANO IMPLEMENTATION ############
         # we create a symbolic function so that verify_grad can work
         def sym_conv2d(input, filters):
             return conv.conv2d(input, filters)
```
```diff
@@ -44,9 +44,8 @@ class TestSignalConv2D(unittest.TestCase):
         filter_data = numpy.random.random(filter_shape)
         theano_output = theano_conv(image_data, filter_data)
 
-        ############# REFERENCE IMPLEMENTATION ############
-        out_shape2d = numpy.array(image_shape[-2:]) - \
-                      numpy.array(filter_shape[-2:]) + 1
+        # REFERENCE IMPLEMENTATION ############
+        out_shape2d = numpy.array(image_shape[-2:]) - \
+            numpy.array(filter_shape[-2:]) + 1
         ref_output = numpy.zeros(tuple(out_shape2d))
         # reshape as 3D input tensors to make life easier
```
```diff
@@ -76,7 +75,7 @@ class TestSignalConv2D(unittest.TestCase):
                     self.assertTrue(_allclose(theano_output4d[b, k, :, :],
                                               output2d))
 
-        ############# TEST GRADIENT ############
+        # TEST GRADIENT ############
         if verify_grad:
             utt.verify_grad(sym_conv2d, [image_data, filter_data])
```
```diff
@@ -87,7 +86,7 @@ class TestSignalConv2D(unittest.TestCase):
         signal.conv.conv2d can support inputs and filters of type
         matrix or tensor3.
         """
         if (not theano.tensor.nnet.conv.imported_scipy_signal and
-            theano.config.cxx == ""):
+                theano.config.cxx == ""):
             raise SkipTest("conv2d tests need SciPy or a c++ compiler")
```
theano/tensor/signal/tests/test_downsample.py (+19, -24)

```diff
@@ -63,9 +63,9 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
             def pad_img(x):
                 y = numpy.zeros(
                     (x.shape[0], x.shape[1],
-                     x.shape[2]+pad_h*2, x.shape[3]+pad_w*2),
-                    dtype=x.dtype)
-                y[:, :, pad_h:(x.shape[2]+pad_h), pad_w:(x.shape[3]+pad_w)] = x
+                     x.shape[2] + pad_h * 2, x.shape[3] + pad_w * 2),
+                    dtype=x.dtype)
+                y[:, :, pad_h:(x.shape[2] + pad_h), pad_w:(x.shape[3] + pad_w)] = x
                 return y
 
             img_rows = h + 2 * pad_h
```
```diff
@@ -78,7 +78,6 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
         ds0, ds1 = ds
         st0, st1 = st
         output_val = numpy.zeros(out_shp)
-        tt = []
         y = pad_img(x)
         func = numpy.max
         if mode == 'sum':
```
```diff
@@ -117,8 +116,6 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
         if st is None:
             st = ds
-        xi = 0
-        yi = 0
         img_rows = input.shape[-2]
         img_cols = input.shape[-1]
```
```diff
@@ -581,10 +578,12 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
             stridesize = stridesizes[i]
             paddingsize = paddingsizes[i]
             grad_shape = DownsampleFactorMaxGradGrad.out_shape(
                 imval.shape, maxpoolsize, st=stridesize,
                 ignore_border=True, padding=paddingsize)
             grad_val = rng.rand(*grad_shape) * 10.0
+
             def mp(input, grad):
                 out = DownsampleFactorMax(
                     maxpoolsize, ignore_border=True,
```
```diff
@@ -610,12 +609,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
             stridesize = stridesizes[i]
             paddingsize = paddingsizes[i]
-            #'average_exc_pad' with non-zero padding is not implemented
+            # 'average_exc_pad' with non-zero padding is not implemented
             for mode in ['sum', 'average_inc_pad']:
                 grad_shape = DownsampleFactorMax.out_shape(
                     imval.shape, avgpoolsize, st=stridesize,
                     ignore_border=True, padding=paddingsize)
                 grad_val = rng.rand(*grad_shape) * 10.0
+
                 def mp(input, grad):
                     grad_op = AveragePoolGrad(avgpoolsize, ignore_border=True,
                                               st=stridesize, padding=paddingsize,
```
```diff
@@ -637,7 +637,8 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
         # The value has been manually computed from the theoretical gradient,
         # and confirmed by the implementation.
-        assert numpy.allclose(fn_hess([1, 2]), [[0., 0.], [0., 982.7667]])
+        assert numpy.allclose(fn_hess(
+            [1, 2]), [[0., 0.], [0., 982.7667]])
 
     def test_max_pool_2d_2D(self):
         rng = numpy.random.RandomState(utt.fetch_seed())
```
This hunk also fixes a real bug in the assertion message, which referenced the undefined `numpy_output_val` instead of `test_answer_array`:

```diff
@@ -683,9 +684,11 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
         op_output = function([input], op)(test_input_array)
         assert numpy.all(op_output == test_answer_array), (
             "op_output is %s, test_answer_array is %s" % (
-                op_output, numpy_output_val
+                op_output, test_answer_array
             )
         )
+
         def mp(input):
             return max_pool_2d_same_size(input, patch_size)
         utt.verify_grad(mp, [test_input_array], rng=rng)
```
```diff
@@ -712,14 +715,6 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
         assert numpy.all(output_val == numpy_output_val), (
             "output_val is %s, numpy_output_val is %s"
             % (output_val, numpy_output_val))
-        c = tensor.sum(output)
-        c_val = function([images], c)(imval)
-        g = tensor.grad(c, images)
-        g_val = function([images],
-                         [g.shape,
-                          tensor.min(g, axis=(0, 1, 2)),
-                          tensor.max(g, axis=(0, 1, 2))]
-                         )(imval)
         # removed as already tested in test_max_pool_2d_2D
         # This make test in debug mode too slow.
```
```diff
@@ -780,7 +775,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
         for i, maxpoolshp in enumerate(maxpoolshps):
             for j, ignore_border in enumerate([True, False]):
-                for k, padding in enumerate([(0,0), (1,1), (1,2)]):
+                for k, padding in enumerate([(0, 0), (1, 1), (1, 2)]):
                     if out_shapes[k][i][j] is None:
                         continue
                     # checking shapes generated by DownsampleFactorMax
```
```diff
@@ -817,12 +812,12 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
         maxout = theano.tensor.tensor4()
         grad = theano.tensor.tensor4()
         compilation_mode = theano.compile.get_default_mode().including(
             'local_average_pool_grad')
         for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
             f = theano.function([im, maxout, grad],
-                DownsampleFactorMaxGrad(ds=(3, 3),
-                    ignore_border=False,
-                    mode=mode)(im, maxout, grad),
-                mode=compilation_mode)
+                                DownsampleFactorMaxGrad(ds=(3, 3),
+                                                        ignore_border=False,
+                                                        mode=mode)(im, maxout, grad),
+                                mode=compilation_mode)
```