提交 09605e7c 作者: Pascal Lamblin

Merge pull request #3683 from laurent-dinh/conv_infer_shape

Factoring inference of convolution output shape (continuation)
......@@ -11,6 +11,7 @@ from theano.gof.type import CDataType, Generic
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.tensor.nnet import SoftmaxGrad
from theano.tensor.nnet.abstract_conv2d import get_conv_output_shape
from theano.tensor.signal.downsample import (
DownsampleFactorMax, MaxPoolGrad, AveragePoolGrad)
from theano.sandbox.cuda.type import CudaNdarrayType
......@@ -549,29 +550,11 @@ class GpuDnnConv(DnnBase, COp):
or scalar.
"""
b = ishape[0] # Number of inputs
h = ishape[2] # Height of input feature maps
w = ishape[3] # Width of input feature maps
nb = kshape[0] # Number of output feature maps
kh = kshape[2] # Height of each filter
kw = kshape[3] # Width of each filter
sh, sw = subsample
if border_mode == 'full':
padh = kh - 1
padw = kw - 1
elif isinstance(border_mode, tuple):
padh, padw = border_mode
else:
assert border_mode == 'valid'
padh = 0
padw = 0
return (
b, nb,
(h + 2*padh - kh)//sh + 1,
(w + 2*padw - kw)//sw + 1
)
return get_conv_output_shape(
ishape,
kshape,
border_mode,
subsample)
def infer_shape(self, node, shape):
    """Report the op's output shape: identical to the shape of input 2."""
    # The shape tuple at index 2 is forwarded unchanged, wrapped in a
    # one-element list (one output).
    out_shape = shape[2]
    return [out_shape]
......@@ -648,34 +631,11 @@ class GpuDnnConv3d(GpuDnnConv):
the specified parameters. `ishape` and `kshape` can be symbolic
or scalar.
"""
b = ishape[0] # Number of inputs
d = ishape[2] # Depth of input feature maps
h = ishape[3] # Height of input feature maps
w = ishape[4] # Width of input feature maps
nb = kshape[0] # Number of output feature maps
kd = kshape[2] # Depth of each filter
kh = kshape[3] # Height of each filter
kw = kshape[4] # Width of each filter
sd, sh, sw = subsample
if border_mode == 'full':
padd = kd - 1
padh = kh - 1
padw = kw - 1
elif isinstance(border_mode, tuple):
padd, padh, padw = border_mode
else:
assert border_mode == 'valid'
padd = 0
padh = 0
padw = 0
return (
b, nb,
(d + 2*padd - kd)//sd + 1,
(h + 2*padh - kh)//sh + 1,
(w + 2*padw - kw)//sw + 1
)
return get_conv_output_shape(
ishape,
kshape,
border_mode,
subsample)
class GpuDnnConvGradW(DnnBase, COp):
......
......@@ -21,7 +21,8 @@ from theano import OpenMPOp
from theano.tensor import (as_tensor_variable, blas, get_scalar_constant_value,
patternbroadcast, NotScalarConstantError)
from theano.gof import Apply
from theano.tensor.nnet.abstract_conv2d import get_conv_output_shape
from theano.tensor.nnet.abstract_conv2d import (get_conv_output_shape,
get_conv_shape_1axis)
try:
# TODO: move these back out to global scope when they no longer
......@@ -367,11 +368,8 @@ class ConvOp(OpenMPOp):
# To support symbolic shapes, we express this with integer arithmetics.
warnings.warn("The method `getOutputShape` is deprecated use"
"`get_conv_output_shape` instead.")
return get_conv_output_shape(
image_shape=(None, None, inshp[0], inshp[1]),
kernel_shape=(None, None, kshp[0], kshp[1]),
border_mode=mode,
subsample=stride)
return tuple(get_conv_shape_1axis(i, k, mode, d)
for i, k, d in zip(inshp, kshp, stride))
def __init__(self, imshp=None, kshp=None, nkern=None, bsize=None,
dx=1, dy=1,
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论