提交 0d2bee04 authored 作者: erakra's avatar erakra

finalizing bilinear_upsampling

上级 83a288fd
......@@ -1556,8 +1556,7 @@ def bilinear_kernel_1D(ratio, normalize=True):
def frac_bilinear_upsampling(input,
ratio=None,
frac_ratio=None,
use_1D_kernel=False):
frac_ratio=None):
"""Compute bilinear upsampling
This function will build the symbolic graph for upsampling
a tensor by the given ratio using bilinear interpolation.
......@@ -1576,11 +1575,6 @@ def frac_bilinear_upsampling(input,
represented as (numerator, denominator). If row and col ratios are
different frac_ratio should be a tuple of fractional ratios, i.e
a tuple of tuples.
use_1D_kernel: bool
if set to true, row and column will be upsampled seperately by 1D
kernels, otherwise they are upsampled together using a 2D kernel. The
final result is the same, only the speed can differ, given factors such
as upsampling ratio.
Returns
-------
symbolic 4D tensor
......@@ -1598,15 +1592,11 @@ def frac_bilinear_upsampling(input,
never both at once.
"""
if ratio and frac_ratio:
raise ValueError("can't use ratio and frac_ratio together")
if not (ratio or frac_ratio):
raise ValueError("No ratio (or frac_ratio) provided")
T = theano.tensor
row, col = input.shape[2:]
up_input = input.reshape((-1, 1, row, col))
# redefince the ratio depending of the case
    # redefine the ratio depending on the case
if frac_ratio is None:
if not isinstance(ratio, tuple):
ratio = (ratio, ratio)
......@@ -1642,45 +1632,33 @@ def frac_bilinear_upsampling(input,
pad = double_pad // 2
# build pyramidal kernel
if use_1D_kernel:
kern = bilinear_kernel_1D(ratio=ratio[0])[np.newaxis, np.newaxis,
:, np.newaxis]
else:
kern = bilinear_kernel_2D(ratio=ratio)[np.newaxis, np.newaxis, :, :]
# add corresponding padding
pad_kern = T.concatenate((T.zeros(tuple(kern.shape[:2]) + (pad[0], kern.shape[-1])),
kern,
T.zeros(tuple(kern.shape[:2]) + (double_pad[0]-pad[0], kern.shape[-1]))),
axis=2)
if use_1D_kernel:
# for 1D kernel, upsample along rows
upsamp = T.nnet.conv2d(pad_kern, concat_mat, border_mode='valid', filter_dilation=(ratio[0], 1))
upsamp = upsamp.dimshuffle((1, 0, 2, 3))
pad_kern = bilinear_kernel_1D(ratio=ratio[1])[np.newaxis, np.newaxis, np.newaxis, :]
pad_kern = T.concatenate((T.zeros(tuple(pad_kern.shape[:3]) + (pad[1],)),
pad_kern,
T.zeros(tuple(pad_kern.shape[:3]) + (double_pad[1]-pad[1],))),
axis=3)
if use_1D_kernel:
upsamp = T.nnet.conv2d(pad_kern, upsamp, border_mode='valid', filter_dilation=(1, ratio[1]),
subsample=(1, 1))
else:
upsamp = T.nnet.conv2d(pad_kern, concat_mat, border_mode='valid', filter_dilation=ratio,
subsample=subsample)
up_img_sh = T.ceil(T.as_tensor([row, col]) * np.array(ratio) / np.array(subsample)).astype('int64')
    # upsample the input by passing it as kernel of conv and using filter_dilation
upsamp = T.nnet.conv2d(pad_kern, concat_mat, border_mode='valid',
filter_dilation=ratio, subsample=subsample)
up_img_sh = T.ceil(T.as_tensor([row, col]) * np.array(ratio) / np.array(subsample)).astype('int64')
return upsamp.reshape((input.shape[0], input.shape[1], up_img_sh[0], up_img_sh[1]))
def bilinear_upsampling(input,
ratio,
ratio=None,
frac_ratio=None,
batch_size=None,
num_input_channels=None,
use_1D_kernel=True):
"""Compute bilinear upsampling
This function will build the symbolic graph for upsampling
a tensor by the given ratio using bilinear interpolation.
......@@ -1689,46 +1667,54 @@ def bilinear_upsampling(input,
input: symbolic 4D tensor
mini-batch of feature map stacks, of shape (batch size,
input channels, input rows, input columns) that will be upsampled.
ratio: `int or Constant or Scalar Tensor of int* dtype`
the ratio by which the input is upsampled in the 2D space (row and
col size).
batch_size: None, int or Constant variable
The size of the first dimension of the input variable.
Optional, possibly used to choose an optimal implementation.
batch_size will be used only if num_input_channels is not None.
num_input_channels: None, int or Constant variable
The size of the second dimension of the input variable.
Optional, possibly used to choose an optimal implementation.
num_input_channels will be used only if batch_size is not None.
frac_ratio: None, tuple of int or tuple of tuples of int
The tuple defining the fractional ratio by which the input is
upsampled in the 2D space. One fractional ratio should be
represented as (numerator, denominator). If row and col ratios are
different frac_ratio should be a tuple of fractional ratios, i.e
a tuple of tuples.
use_1D_kernel: bool
        if set to true, row and column will be upsampled separately by 1D
kernels, otherwise they are upsampled together using a 2D kernel. The
final result is the same, only the speed can differ, given factors such
as upsampling ratio.
Returns
-------
symbolic 4D tensor
set of feature maps generated by bilinear upsampling. Tensor
is of shape (batch size, num_input_channels, input row size * ratio,
input column size * ratio)
is of shape (batch size, num_input_channels, input row size * row ratio,
input column size * column ratio). Each of these ratios can be fractional.
Notes
-----
:note: The kernel used for bilinear interpolation is fixed (not learned).
:note: When the upsampling ratio is even, the last row and column is
repeated one extra time compared to the first row and column which makes
the upsampled tensor asymmetrical on both sides. This does not happen when
the upsampling ratio is odd.
:note: This function must get either ratio or frac_ratio as parameter and
never both at once.
"""
if ratio and frac_ratio:
raise ValueError("can't use ratio and frac_ratio together")
if not (ratio or frac_ratio):
raise ValueError("No ratio (or frac_ratio) provided")
if frac_ratio:
if use_1D_kernel:
raise ValueError('For fractional ratios 1D kernel'
'method not implemented. You may want to pass '
'use_1D_kernel as False')
if not use_1D_kernel:
return frac_bilinear_upsampling(input,
ratio=ratio,
frac_ratio=frac_ratio)
# the remaining case if integer ratio with use_1D_kernel
T = theano.tensor
try:
up_bs = batch_size * num_input_channels
......
......@@ -23,7 +23,6 @@ from theano.tensor.nnet.abstract_conv import AbstractConv2d_gradWeights
from theano.tensor.nnet.abstract_conv import bilinear_kernel_1D
from theano.tensor.nnet.abstract_conv import bilinear_kernel_2D
from theano.tensor.nnet.abstract_conv import bilinear_upsampling
from theano.tensor.nnet.abstract_conv import frac_bilinear_upsampling
from theano.tensor.nnet.abstract_conv import separable_conv2d, separable_conv3d
from theano.tensor.nnet.conv import ConvOp
from theano.tensor.nnet.corr import (CorrMM, CorrMM_gradWeights,
......@@ -1299,8 +1298,9 @@ class TestBilinearUpsampling(unittest.TestCase):
[[5, 6], [7, 8]],
[[9, 10], [11, 12]]],
ndmin=4).astype(theano.config.floatX)
up_x = frac_bilinear_upsampling(input=input_x,
frac_ratio=((7, 4), (5, 3)))
up_x = bilinear_upsampling(input=input_x,
frac_ratio=((7, 4), (5, 3)))#,
# use_1D_kernel=False)
num_up_x = np.array(
[[[[1., 1.2, 1.8, 2.],
[1.28571429, 1.48571429, 2.08571429, 2.28571429],
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论