Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
673a6713
提交
673a6713
authored
1月 31, 2017
作者:
Pascal Lamblin
提交者:
GitHub
1月 31, 2017
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #5398 from vdumoulin/conv2d_transpose
Implement conv2d_transpose convenience function
上级
f09900c5
56ae0cf0
隐藏空白字符变更
内嵌
并排
正在显示
3 个修改的文件
包含
127 行增加
和
1 行删除
+127
-1
test_abstractconv.py
theano/gpuarray/tests/test_abstractconv.py
+4
-0
__init__.py
theano/tensor/nnet/__init__.py
+84
-0
test_abstract_conv.py
theano/tensor/nnet/tests/test_abstract_conv.py
+39
-1
没有找到文件。
theano/gpuarray/tests/test_abstractconv.py
浏览文件 @
673a6713
...
@@ -252,3 +252,7 @@ class TestDnnConvTypes(test_abstract_conv.TestConvTypes):
...
@@ -252,3 +252,7 @@ class TestDnnConvTypes(test_abstract_conv.TestConvTypes):
self
.
constant_tensor
=
gpuarray
.
array
(
self
.
constant_tensor
=
gpuarray
.
array
(
numpy
.
zeros
((
3
,
5
,
7
,
11
),
dtype
=
'float32'
),
numpy
.
zeros
((
3
,
5
,
7
,
11
),
dtype
=
'float32'
),
context
=
get_context
(
test_ctx_name
))
context
=
get_context
(
test_ctx_name
))
class TestConv2dTranspose(test_abstract_conv.TestConv2dTranspose):
    """Re-run the CPU conv2d_transpose interface tests on the GPU.

    Inherits every test from the CPU suite unchanged; only the
    compilation mode is overridden so graphs compile for the GPU.
    """

    # GPU-enabled compilation mode defined elsewhere in this test module.
    mode = mode_with_gpu
theano/tensor/nnet/__init__.py
浏览文件 @
673a6713
...
@@ -32,6 +32,7 @@ from .bn import batch_normalization
...
@@ -32,6 +32,7 @@ from .bn import batch_normalization
import
warnings
import
warnings
from
.abstract_conv
import
conv2d
as
abstract_conv2d
from
.abstract_conv
import
conv2d
as
abstract_conv2d
from
.abstract_conv
import
conv2d_grad_wrt_inputs
from
.abstract_conv
import
conv3d
from
.abstract_conv
import
conv3d
...
@@ -151,3 +152,86 @@ def conv2d(input, filters, input_shape=None, filter_shape=None,
...
@@ -151,3 +152,86 @@ def conv2d(input, filters, input_shape=None, filter_shape=None,
return
abstract_conv2d
(
input
,
filters
,
input_shape
,
filter_shape
,
return
abstract_conv2d
(
input
,
filters
,
input_shape
,
filter_shape
,
border_mode
,
subsample
,
filter_flip
,
border_mode
,
subsample
,
filter_flip
,
filter_dilation
)
filter_dilation
)
def conv2d_transpose(input, filters, output_shape, filter_shape=None,
                     border_mode='valid', input_dilation=(1, 1),
                     filter_flip=True, filter_dilation=(1, 1)):
    """Build the symbolic graph of a transposed 2D convolution.

    Applies a transposed convolution (sometimes called "deconvolution")
    over a mini-batch of stacks of 2D inputs with a set of 2D filters.
    Internally this is the gradient of a forward convolution with respect
    to its inputs, so every parameter below is phrased in terms of that
    corresponding forward (non-transposed) convolution.

    Parameters
    ----------
    input: symbolic 4D tensor
        Mini-batch of feature map stacks, of shape
        (batch size, input channels, input rows, input columns).
    filters: symbolic 4D tensor
        Set of filters of shape
        (input channels, output channels, filter rows, filter columns).
        **Note: the order of ``output_channels`` and ``input_channels``
        is reversed with respect to ``conv2d``.**
    output_shape: tuple/list of len 4 of int or Constant variable
        Shape of the result of ``conv2d_transpose``. The last two
        elements may be ``tensor.scalar`` variables.
    filter_shape: None, tuple/list of len 4 of int or Constant variable
        Shape of the ``filters`` parameter, possibly used to choose an
        optimal implementation. Any element may be ``None`` if unknown
        at compile time.
    border_mode: str, int or tuple of two int
        The ``border_mode`` of the corresponding forward convolution
        (see ``conv2d``). Forward ``padding`` becomes ``cropping`` of
        the transposed output: ``valid`` crops nothing, ``full`` crops
        maximally.
    input_dilation: tuple of len 2
        The ``subsample`` (strides) of the forward convolution.
    filter_flip: bool
        If ``True`` (default), flip filter rows and columns before
        sliding them over the input (a true convolution). If ``False``,
        the operation is a cross-correlation.
    filter_dilation: tuple of len 2
        Dilation factor of the filters (`dilated convolution
        <https://arxiv.org/pdf/1511.07122v3.pdf>`_).

    Returns
    -------
    Symbolic 4D tensor
        Feature maps of shape
        (batch size, output channels, output rows, output columns).

    Notes
    -----
    If cuDNN is available it is used on the GPU; otherwise the *CorrMM*
    ("caffe style") convolution is used.
    """
    # The transposed convolution is exactly the gradient of the forward
    # convolution w.r.t. its inputs; map our argument names onto that op.
    grad_kwargs = {
        'output_grad': input,
        'filters': filters,
        'input_shape': output_shape,
        'filter_shape': filter_shape,
        'border_mode': border_mode,
        'subsample': input_dilation,
        'filter_flip': filter_flip,
        'filter_dilation': filter_dilation,
    }
    return conv2d_grad_wrt_inputs(**grad_kwargs)
theano/tensor/nnet/tests/test_abstract_conv.py
浏览文件 @
673a6713
...
@@ -9,7 +9,8 @@ import theano
...
@@ -9,7 +9,8 @@ import theano
from
theano
import
tensor
from
theano
import
tensor
from
theano.gof.opt
import
check_stack_trace
from
theano.gof.opt
import
check_stack_trace
from
theano.tests
import
unittest_tools
as
utt
from
theano.tests
import
unittest_tools
as
utt
from
theano.tensor.nnet
import
corr
,
corr3d
,
abstract_conv
as
conv
from
theano.tensor.nnet
import
(
corr
,
corr3d
,
conv2d_transpose
,
abstract_conv
as
conv
)
from
theano.tensor.nnet.abstract_conv
import
(
get_conv_output_shape
,
from
theano.tensor.nnet.abstract_conv
import
(
get_conv_output_shape
,
get_conv_gradweights_shape
,
get_conv_gradweights_shape
,
get_conv_gradinputs_shape
,
get_conv_gradinputs_shape
,
...
@@ -1548,3 +1549,40 @@ class TestBilinearUpsampling(unittest.TestCase):
...
@@ -1548,3 +1549,40 @@ class TestBilinearUpsampling(unittest.TestCase):
f_1D
=
theano
.
function
([],
mat_1D
,
mode
=
self
.
compile_mode
)
f_1D
=
theano
.
function
([],
mat_1D
,
mode
=
self
.
compile_mode
)
f_2D
=
theano
.
function
([],
mat_2D
,
mode
=
self
.
compile_mode
)
f_2D
=
theano
.
function
([],
mat_2D
,
mode
=
self
.
compile_mode
)
utt
.
assert_allclose
(
f_1D
(),
f_2D
(),
rtol
=
1e-06
)
utt
.
assert_allclose
(
f_1D
(),
f_2D
(),
rtol
=
1e-06
)
class TestConv2dTranspose(unittest.TestCase):
    """Interface tests for the conv2d_transpose convenience wrapper."""

    # Compilation mode; subclasses (e.g. the GPU suite) override this.
    mode = None

    def test_interface(self):
        """Test conv2d_transpose wrapper.

        This method tests that the order of the filter's
        axes expected by the function produces the correct
        output shape.
        """
        compile_mode = self.mode
        if theano.config.mode == "FAST_COMPILE":
            # Under FAST_COMPILE, strip optimizations that would rewrite
            # or reject the abstract convolution graph.
            compile_mode = (theano.compile.get_mode(compile_mode)
                            .excluding("conv_gemm")
                            .excluding("AbstractConvCheck"))

        transposed = conv2d_transpose(input=tensor.ones((2, 2, 4, 4)),
                                      filters=tensor.ones((2, 1, 4, 4)),
                                      output_shape=(2, 1, 10, 10),
                                      input_dilation=(2, 2))
        output = theano.function(inputs=[], outputs=transposed,
                                 mode=compile_mode)()

        # 4x4 all-ones filters over a dilated 4x4 all-ones input produce
        # a 10x10 map whose border rows/cols receive fewer contributions.
        edge_row = [2, 2, 4, 4, 4, 4, 4, 4, 2, 2]
        inner_row = [4, 4, 8, 8, 8, 8, 8, 8, 4, 4]
        grid = [edge_row] * 2 + [inner_row] * 6 + [edge_row] * 2
        expected_output = numpy.array([[grid]] * 2)
        numpy.testing.assert_equal(output, expected_output)
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论