提交 ee75b95b authored 作者: Gijs van Tulder's avatar Gijs van Tulder

Deprecate ds, st, padding parameters in pooling.

Closes #4933.
上级 e6a3b009
......@@ -158,9 +158,9 @@ def test_pooling():
continue
# We will check that the opt introduced it.
out = pool_2d(x, (ws, ws),
st=(stride, stride),
stride=(stride, stride),
ignore_border=True,
padding=pad, mode=mode)
pad=pad, mode=mode)
mode_without_gpu2 = mode_without_gpu.including()
mode_without_gpu2.check_isfinite = False
......@@ -199,7 +199,7 @@ def test_pooling():
# This tests the CPU grad + opt + GPU implementation
def fn(x):
return pool_2d(x, (ws, ws), ignore_border=True,
padding=pad, mode=mode)
pad=pad, mode=mode)
utt.verify_grad(fn, [data], mode=mode_with_gpu)
# Confirm that the opt would have inserted it.
fg = theano.function([x], theano.grad(fn(x).sum(), x),
......@@ -228,14 +228,14 @@ def test_pooling_with_tensor_vars():
raise SkipTest(dnn.dnn_available.msg)
x = T.ftensor4()
ws = theano.shared(numpy.array([2, 2], dtype='int32'))
st = theano.shared(numpy.array([1, 1], dtype='int32'))
stride = theano.shared(numpy.array([1, 1], dtype='int32'))
pad = theano.shared(numpy.array([0, 0], dtype='int32'))
mode = 'max'
def fn(x):
dnn_op = dnn.dnn_pool(
x, ws=ws,
stride=st,
stride=stride,
pad=pad,
mode=mode)
return dnn_op
......@@ -255,7 +255,7 @@ def test_pooling_with_tensor_vars():
for node in f_gpu.maker.fgraph.apply_nodes])
# CPU implementation
out_cpu = pool_2d(x, ws, ignore_border=True, st=st, padding=pad, mode=mode)
out_cpu = pool_2d(x, ws, ignore_border=True, stride=stride, pad=pad, mode=mode)
f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2)
assert not any([isinstance(node.op, dnn.GpuDnnPool)
for node in f_cpu.maker.fgraph.apply_nodes])
......@@ -307,9 +307,9 @@ def test_pooling3d():
# Not implemented
continue
out = pool_3d(x, (ws, ws, ws),
st=(stride, stride, stride),
stride=(stride, stride, stride),
ignore_border=True,
padding=pad, mode=mode)
pad=pad, mode=mode)
# GPU implementation
f_gpu = theano.function([x], out, mode=mode_with_gpu)
......@@ -374,7 +374,7 @@ def test_pooling_opt():
f = theano.function(
[x],
pool_2d(x, ds=(2, 2), mode='average_inc_pad',
pool_2d(x, ws=(2, 2), mode='average_inc_pad',
ignore_border=True),
mode=mode_with_gpu)
......@@ -386,7 +386,7 @@ def test_pooling_opt():
# gradient of 2D pooling
f = theano.function(
[x],
T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad',
T.grad(pool_2d(x, ws=(2, 2), mode='average_inc_pad',
ignore_border=True).sum(),
x),
mode=mode_with_gpu.including("cudnn"))
......@@ -399,7 +399,7 @@ def test_pooling_opt():
# Test sum pooling
f = theano.function(
[x],
pool_2d(x, ds=(2, 3), mode='sum',
pool_2d(x, ws=(2, 3), mode='sum',
ignore_border=True),
mode=mode_with_gpu)
......@@ -413,7 +413,7 @@ def test_pooling_opt():
f = theano.function(
[x],
pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad',
pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad',
ignore_border=True),
mode=mode_with_gpu)
......@@ -425,7 +425,7 @@ def test_pooling_opt():
# gradient of 3D pooling
f = theano.function(
[x],
T.grad(pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad',
T.grad(pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad',
ignore_border=True).sum(),
x),
mode=mode_with_gpu.including("cudnn"))
......@@ -504,7 +504,7 @@ def test_dnn_tag():
try:
f = theano.function(
[x],
pool_2d(x, ds=(2, 2), ignore_border=True),
pool_2d(x, ws=(2, 2), ignore_border=True),
mode=mode_with_gpu.including("cudnn"))
except (AssertionError, RuntimeError):
assert not dnn.dnn_available(test_ctx_name)
......
......@@ -194,9 +194,9 @@ def test_pooling():
continue
# We will check that the opt introduced it.
out = pool_2d(x, (ws, ws),
st=(stride, stride),
stride=(stride, stride),
ignore_border=True,
padding=pad, mode=mode)
pad=pad, mode=mode)
mode_without_gpu2 = mode_without_gpu.including()
mode_without_gpu2.check_isfinite = False
......@@ -235,7 +235,7 @@ def test_pooling():
# This tests the CPU grad + opt + GPU implementation
def fn(x):
return pool_2d(x, (ws, ws), ignore_border=True,
padding=pad, mode=mode)
pad=pad, mode=mode)
utt.verify_grad(fn, [data], mode=mode_with_gpu)
# Confirm that the opt would have inserted it.
fg = theano.function([x], theano.grad(fn(x).sum(), x),
......@@ -264,14 +264,14 @@ def test_pooling_with_tensor_vars():
raise SkipTest(cuda.dnn.dnn_available.msg)
x = T.ftensor4()
ws = theano.shared(numpy.array([2, 2], dtype='int32'))
st = theano.shared(numpy.array([1, 1], dtype='int32'))
stride = theano.shared(numpy.array([1, 1], dtype='int32'))
pad = theano.shared(numpy.array([0, 0], dtype='int32'))
mode = 'max'
def fn(x):
dnn_op = cuda.dnn.dnn_pool(
x, ws=ws,
stride=st,
stride=stride,
pad=pad,
mode=mode)
return dnn_op
......@@ -291,7 +291,7 @@ def test_pooling_with_tensor_vars():
for node in f_gpu.maker.fgraph.apply_nodes])
# CPU implementation
out_cpu = pool_2d(x, ws, ignore_border=True, st=st, padding=pad, mode=mode)
out_cpu = pool_2d(x, ws, ignore_border=True, stride=stride, pad=pad, mode=mode)
f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2)
assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)
for node in f_cpu.maker.fgraph.apply_nodes])
......@@ -364,9 +364,9 @@ def test_pooling3d():
# Not implemented
continue
out = pool_3d(x, (ws, ws, ws),
st=(stride, stride, stride),
stride=(stride, stride, stride),
ignore_border=True,
padding=pad, mode=mode)
pad=pad, mode=mode)
# GPU implementation
f_gpu = theano.function([x], out, mode=mode_with_gpu)
......@@ -431,7 +431,7 @@ def test_pooling_opt():
f = theano.function(
[x],
pool_2d(x, ds=(2, 2), mode='average_inc_pad', ignore_border=True),
pool_2d(x, ws=(2, 2), mode='average_inc_pad', ignore_border=True),
mode=mode_with_gpu)
assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
......@@ -442,7 +442,7 @@ def test_pooling_opt():
# gradient of 2D pooling
f = theano.function(
[x],
T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad',
T.grad(pool_2d(x, ws=(2, 2), mode='average_inc_pad',
ignore_border=True).sum(), x),
mode=mode_with_gpu.including("cudnn"))
......@@ -454,7 +454,7 @@ def test_pooling_opt():
# Test sum pooling
f = theano.function(
[x],
pool_2d(x, ds=(2, 3), mode='sum',
pool_2d(x, ws=(2, 3), mode='sum',
ignore_border=True),
mode=mode_with_gpu)
......@@ -468,7 +468,7 @@ def test_pooling_opt():
f = theano.function(
[x],
pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad', ignore_border=True),
pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad', ignore_border=True),
mode=mode_with_gpu)
assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
......@@ -479,7 +479,7 @@ def test_pooling_opt():
# gradient of 3D pooling
f = theano.function(
[x],
T.grad(pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad',
T.grad(pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad',
ignore_border=True).sum(), x),
mode=mode_with_gpu.including("cudnn"))
......@@ -849,7 +849,7 @@ def test_dnn_tag():
try:
f = theano.function(
[x],
pool_2d(x, ds=(2, 2), ignore_border=True),
pool_2d(x, ws=(2, 2), ignore_border=True),
mode=mode_with_gpu.including("cudnn"))
except (AssertionError, RuntimeError):
assert not cuda.dnn.dnn_available()
......
......@@ -38,29 +38,29 @@ def max_pool_2d_same_size(input, patch_size):
return outs
def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
mode='max'):
def pool_2d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0),
mode='max', ds=None, st=None, padding=None):
"""Downscale the input by a specified factor
Takes as input a N-D tensor, where N >= 2. It downscales the input image by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1])
patches of size (ws[0],ws[1])
Parameters
----------
input : N-D theano tensor of input images
Input images. Max pooling will be done over the 2 last dimensions.
ds : tuple of length 2 or theano vector of ints of size 2.
Factor by which to downscale (vertical ds, horizontal ds).
ws : tuple of length 2 or theano vector of ints of size 2.
Factor by which to downscale (vertical ws, horizontal ws).
(2,2) will halve the image in each dimension.
ignore_border : bool (default None, will print a warning and set to False)
When True, (5,5) input with ds=(2,2) will generate a (2,2) output.
When True, (5,5) input with ws=(2,2) will generate a (2,2) output.
(3,3) otherwise.
st : tuple of two ints or theano vector of ints of size 2.
stride : tuple of two ints or theano vector of ints of size 2.
Stride size, which is the number of shifts over rows/cols to get the
next pool region. If st is None, it is considered equal to ds
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
padding : tuple of two ints or theano vector of ints of size 2.
pad : tuple of two ints or theano vector of ints of size 2.
(pad_h, pad_w), pad zeros to extend beyond four borders of the
images, pad_h is the size of the top and bottom margins, and
pad_w is the size of the left and right margins.
......@@ -68,8 +68,33 @@ def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
Operation executed on each window. `max` and `sum` always exclude
the padding in the computation. `average` gives you the choice to
include or exclude it.
ds
*deprecated*, use parameter ws instead.
st
*deprecated*, use parameter stride instead.
padding
*deprecated*, use parameter pad instead.
"""
# check for deprecated parameter names
if ds is not None:
warnings.warn(
"pool_2d() ds parameter is deprecated, please use ws",
stacklevel=2)
ws = ds
if st is not None:
warnings.warn(
"pool_2d() st parameter is deprecated, please use stride",
stacklevel=2)
stride = st
if padding is not None:
warnings.warn(
"pool_2d() padding parameter is deprecated, please use pad",
stacklevel=2)
pad = padding
if ws is None:
raise ValueError('pool_2d() ws parameter can not be None')
if input.ndim < 2:
raise NotImplementedError('pool_2d requires a dimension >= 2')
if ignore_border is None:
......@@ -81,38 +106,38 @@ def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
" On the GPU, using ignore_border=True is needed to use cuDNN."
" When using ignore_border=False and not using cuDNN, the only"
" GPU combination supported is when"
" `ds == st and padding == (0, 0) and mode == 'max'`."
" `ws == stride and pad == (0, 0) and mode == 'max'`."
" Otherwise, the convolution will be executed on CPU.",
stacklevel=2)
ignore_border = False
op = Pool(ignore_border, ndim=2, mode=mode)
output = op(input, ds, st, padding)
output = op(input, ws, stride, pad)
return output
def pool_3d(input, ds, ignore_border=None, st=None, padding=(0, 0, 0),
mode='max'):
def pool_3d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0, 0),
mode='max', ds=None, st=None, padding=None):
"""Downscale the input by a specified factor
Takes as input a N-D tensor, where N >= 3. It downscales the input image by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1],ds[2])
patches of size (ws[0],ws[1],ws[2])
Parameters
----------
input : N-D theano tensor of input images
Input images. Max pooling will be done over the 3 last dimensions.
ds : tuple of length 3 or theano vector of ints of size 3
Factor by which to downscale (vertical ds, horizontal ds, depth ds).
ws : tuple of length 3 or theano vector of ints of size 3
Factor by which to downscale (vertical ws, horizontal ws, depth ws).
(2,2,2) will halve the image in each dimension.
ignore_border : bool (default None, will print a warning and set to False)
When True, (5,5,5) input with ds=(2,2,2) will generate a (2,2,2) output.
When True, (5,5,5) input with ws=(2,2,2) will generate a (2,2,2) output.
(3,3,3) otherwise.
stride : tuple of three ints or theano vector of ints of size 3
Stride size, which is the number of shifts over rows/cols/slices to get
the next pool region. If st is None, it is considered equal to ds
the next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
padding : tuple of two ints or theano vector of ints of size 3
pad : tuple of three ints or theano vector of ints of size 3
(pad_h, pad_w, pad_d), pad zeros to extend beyond six borders of the
images, pad_h is the size of the top and bottom margins,
pad_w is the size of the left and right margins, and pad_d is the size
......@@ -121,8 +146,33 @@ def pool_3d(input, ds, ignore_border=None, st=None, padding=(0, 0, 0),
Operation executed on each window. `max` and `sum` always exclude
the padding in the computation. `average` gives you the choice to
include or exclude it.
ds
*deprecated*, use parameter ws instead.
st
*deprecated*, use parameter stride instead.
padding
*deprecated*, use parameter pad instead.
"""
# check for deprecated parameter names
if ds is not None:
warnings.warn(
"pool_3d() ds parameter is deprecated, please use ws",
stacklevel=2)
ws = ds
if st is not None:
warnings.warn(
"pool_3d() st parameter is deprecated, please use stride",
stacklevel=2)
stride = st
if padding is not None:
warnings.warn(
"pool_3d() padding parameter is deprecated, please use pad",
stacklevel=2)
pad = padding
if ws is None:
raise ValueError('pool_3d() ws parameter can not be None')
if input.ndim < 3:
raise NotImplementedError('pool_3d requires a dimension >= 3')
if ignore_border is None:
......@@ -134,37 +184,36 @@ def pool_3d(input, ds, ignore_border=None, st=None, padding=(0, 0, 0),
" On the GPU, using ignore_border=True is needed to use cuDNN."
" When using ignore_border=False and not using cuDNN, the only"
" GPU combination supported is when"
" `ds == st and padding == (0, 0, 0) and mode == 'max'`."
" `ws == stride and pad == (0, 0, 0) and mode == 'max'`."
" Otherwise, the convolution will be executed on CPU.",
stacklevel=2)
ignore_border = False
op = Pool(ignore_border, ndim=3, mode=mode)
output = op(input, ds, st, padding)
output = op(input, ws, stride, pad)
return output
class Pool(OpenMPOp):
"""
This Op downsamples the last N dimensions of the input by taking the max,
sum or average over different patches.
Parameters
----------
ds : list or tuple of N ints
ws : list or tuple of N ints
Downsample factor over rows, columns etc.
ds indicates the size of the pooling region.
ws indicates the size of the pooling region.
ignore_border : bool
If ds doesn't divide imgshape, do we include an extra row/col/slice
If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True).
st : list or tuple of N ints or None
stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If st is None, it is considered equal to ds
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
padding : tuple of N ints or None
pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if padding is None.
right margins. No padding is added if pad is None.
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
('average_inc_pad' excludes the padding from the count,
'average_exc_pad' include it)
......@@ -177,7 +226,8 @@ class Pool(OpenMPOp):
__props__ = ('ignore_border', 'mode', 'ndim')
@staticmethod
def out_shape(imgshape, ds, ignore_border=False, st=None, padding=None, ndim=2):
def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None, ndim=2,
ds=None, st=None, padding=None):
"""
Return the shape of the output from this op, for input of given
shape and flags.
......@@ -187,21 +237,21 @@ class Pool(OpenMPOp):
imgshape : tuple, list, or similar of integer or scalar Theano variable
The shape of a tensor of images. The last N elements are
interpreted as the number of rows, and the number of cols.
ds : list or tuple of N ints
ws : list or tuple of N ints
Downsample factor over rows and column.
ds indicates the pool region size.
ws indicates the pool region size.
ignore_border : bool
If ds doesn't divide imgshape, do we include an extra row/col/slice
If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True).
st : list or tuple of N ints or None
stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If st is None, it is considered equal to ds
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
padding : tuple of N ints or None
pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if padding is None.
right margins. No padding is added if pad is None.
ndim : int
The number of pooling dimensions N.
The default is 2.
......@@ -214,17 +264,36 @@ class Pool(OpenMPOp):
elements reduced as per the downsampling & ignore_border flags.
"""
# check for deprecated parameter names
if ds is not None:
warnings.warn(
"Pool ds parameter is deprecated, please use ws",
stacklevel=2)
ws = ds
if st is not None:
warnings.warn(
"Pool st parameter is deprecated, please use stride",
stacklevel=2)
stride = st
if padding is not None:
warnings.warn(
"Pool padding parameter is deprecated, please use pad",
stacklevel=2)
pad = padding
if ws is None:
raise ValueError('Pool ws parameter can not be None')
if ndim is None:
ndim = 2
assert ndim > 0
if len(imgshape) < ndim:
raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
if st is None:
st = ds
if padding is None:
padding = (0,) * ndim
patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + padding[i] * 2
if stride is None:
stride = ws
if pad is None:
pad = (0,) * ndim
patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2
for i in xrange(ndim))
def compute_out(v, downsample, stride):
......@@ -248,7 +317,7 @@ class Pool(OpenMPOp):
else:
return max(0, (v - 1 - downsample + stride) // stride) + 1
out_shape = [compute_out(patch_shape[i], ds[i], st[i]) for i in xrange(ndim)]
out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]
rval = list(imgshape[:-ndim]) + out_shape
return rval
......@@ -308,7 +377,7 @@ class Pool(OpenMPOp):
if isinstance(ws, (tuple, list)):
if any(pad[i] >= ws[i] for i in range(nd)):
raise NotImplementedError(
'padding_h and padding_w must be smaller than strides')
'padding must be smaller than strides')
ws = tensor.as_tensor_variable(ws)
stride = tensor.as_tensor_variable(stride)
pad = tensor.as_tensor_variable(pad)
......@@ -715,7 +784,8 @@ class PoolGrad(OpenMPOp):
__props__ = ('ignore_border', 'mode', 'ndim')
@staticmethod
def out_shape(imgshape, ds, ignore_border=False, st=None, padding=None, ndim=2):
def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None, ndim=2,
ds=None, st=None, padding=None):
"""Return the shape of the output from this op, for input of given
shape and flags.
......@@ -724,21 +794,21 @@ class PoolGrad(OpenMPOp):
imgshape : tuple of integers or scalar Theano variables
the shape of a tensor of images. The last N elements are
interpreted as the downsampling dimensions.
ds : tuple of N ints
ws : tuple of N ints
downsample factor over rows and columns this parameter
indicates the size of the pooling region
ignore_border : bool
If ds doesn't divide imgshape, do we include an extra row/col/slice
If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True).
st : list or tuple of N ints or None
stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If st is None, it is considered equal to ds
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
padding : tuple of N ints or None
pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if padding is None.
right margins. No padding is added if pad is None.
ndim : int
The number of pooling dimensions N.
The default is 2.
......@@ -752,15 +822,33 @@ class PoolGrad(OpenMPOp):
ignore_border flags.
"""
# check for deprecated parameter names
if ds is not None:
warnings.warn(
"PoolGrad ds parameter is deprecated, please use ws",
stacklevel=2)
ws = ds
if st is not None:
warnings.warn(
"PoolGrad st parameter is deprecated, please use stride",
stacklevel=2)
stride = st
if padding is not None:
warnings.warn(
"PoolGrad padding parameter is deprecated, please use pad",
stacklevel=2)
pad = padding
if ws is None:
raise ValueError('PoolGrad ws parameter can not be None')
if len(imgshape) < ndim:
raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
if st is None:
st = ds
if padding is None:
padding = (0,) * ndim
patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + padding[i] * 2
if stride is None:
stride = ws
if pad is None:
pad = (0,) * ndim
patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2
for i in xrange(ndim))
def compute_out(v, downsample, stride):
......@@ -781,7 +869,7 @@ class PoolGrad(OpenMPOp):
else:
return max(0, (v - 1 - downsample) // stride + 1) + 1
out_shape = [compute_out(patch_shape[i], ds[i], st[i]) for i in xrange(ndim)]
out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]
rval = list(imgshape[:-ndim]) + out_shape
return rval
......@@ -1506,7 +1594,7 @@ class DownsampleFactorMaxGradGrad(OpenMPOp):
if isinstance(ws, (tuple, list)):
if any(pad[i] >= ws[i] for i in range(nd)):
raise NotImplementedError(
'padding_h and padding_w must be smaller than strides')
'padding must be smaller than strides')
ws = tensor.as_tensor_variable(ws)
stride = tensor.as_tensor_variable(stride)
pad = tensor.as_tensor_variable(pad)
......
......@@ -30,7 +30,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
assert Pool.out_shape((8, 6), (2, 2)) == [4, 3]
@staticmethod
def numpy_max_pool_2d(input, ds, ignore_border=False, mode='max'):
def numpy_max_pool_2d(input, ws, ignore_border=False, mode='max'):
'''Helper function, implementing pool_2d in pure numpy'''
if len(input.shape) < 2:
raise NotImplementedError('input should have at least 2 dim,'
......@@ -39,13 +39,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
xi = 0
yi = 0
if not ignore_border:
if input.shape[-2] % ds[0]:
if input.shape[-2] % ws[0]:
xi += 1
if input.shape[-1] % ds[1]:
if input.shape[-1] % ws[1]:
yi += 1
out_shp = list(input.shape[:-2])
out_shp.append(input.shape[-2] // ds[0] + xi)
out_shp.append(input.shape[-1] // ds[1] + yi)
out_shp.append(input.shape[-2] // ws[0] + xi)
out_shp.append(input.shape[-1] // ws[1] + yi)
output_val = numpy.zeros(out_shp)
func = numpy.max
if mode == 'sum':
......@@ -55,29 +55,29 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for k in numpy.ndindex(*input.shape[:-2]):
for i in range(output_val.shape[-2]):
ii = i * ds[0]
ii = i * ws[0]
for j in range(output_val.shape[-1]):
jj = j * ds[1]
patch = input[k][ii:ii + ds[0], jj:jj + ds[1]]
jj = j * ws[1]
patch = input[k][ii:ii + ws[0], jj:jj + ws[1]]
output_val[k][i, j] = func(patch)
return output_val
@staticmethod
def numpy_max_pool_nd(input, ds, ignore_border=False, mode='max'):
def numpy_max_pool_nd(input, ws, ignore_border=False, mode='max'):
'''Helper function, implementing pool_nd in pure numpy'''
if len(input.shape) < len(ds):
if len(input.shape) < len(ws):
raise NotImplementedError('input should have at least %s dim,'
' shape is %s'
% (str(ds), str(input.shape)))
nd = len(ds)
% (str(ws), str(input.shape)))
nd = len(ws)
si = [0] * nd
if not ignore_border:
for i in range(nd):
if input.shape[-nd + i] % ds[i]:
if input.shape[-nd + i] % ws[i]:
si[i] += 1
out_shp = list(input.shape[:-nd])
for i in range(nd):
out_shp.append(input.shape[-nd + i] // ds[i] + si[i])
out_shp.append(input.shape[-nd + i] // ws[i] + si[i])
output_val = numpy.zeros(out_shp)
func = numpy.max
if mode == 'sum':
......@@ -87,21 +87,21 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for l in numpy.ndindex(*input.shape[:-nd]):
for r in numpy.ndindex(*output_val.shape[-nd:]):
patch = input[l][tuple(slice(r[i] * ds[i], (r[i] + 1) * ds[i])
patch = input[l][tuple(slice(r[i] * ws[i], (r[i] + 1) * ws[i])
for i in range(nd))]
output_val[l][r] = func(patch)
return output_val
@staticmethod
def numpy_max_pool_2d_stride_padding(
x, ds, ignore_border=True, st=None, padding=(0, 0), mode='max'):
def numpy_max_pool_2d_stride_pad(
x, ws, ignore_border=True, stride=None, pad=(0, 0), mode='max'):
assert ignore_border
pad_h = padding[0]
pad_w = padding[1]
pad_h = pad[0]
pad_w = pad[1]
h = x.shape[-2]
w = x.shape[-1]
assert ds[0] > pad_h
assert ds[1] > pad_w
assert ws[0] > pad_h
assert ws[1] > pad_w
def pad_img(x):
y = numpy.zeros(
......@@ -113,13 +113,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
return y
img_rows = h + 2 * pad_h
img_cols = w + 2 * pad_w
out_r = (img_rows - ds[0]) // st[0] + 1
out_c = (img_cols - ds[1]) // st[1] + 1
out_r = (img_rows - ws[0]) // stride[0] + 1
out_c = (img_cols - ws[1]) // stride[1] + 1
out_shp = list(x.shape[:-2])
out_shp.append(out_r)
out_shp.append(out_c)
ds0, ds1 = ds
st0, st1 = st
ws0, ws1 = ws
stride0, stride1 = stride
output_val = numpy.zeros(out_shp)
y = pad_img(x)
func = numpy.max
......@@ -131,42 +131,42 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for k in numpy.ndindex(*x.shape[:-2]):
for i in range(output_val.shape[-2]):
ii_st = i * st[0]
ii_end = builtins.min(ii_st + ds[0], img_rows)
ii_stride = i * stride[0]
ii_end = builtins.min(ii_stride + ws[0], img_rows)
if not inc_pad:
ii_st = builtins.max(ii_st, pad_h)
ii_stride = builtins.max(ii_stride, pad_h)
ii_end = builtins.min(ii_end, h + pad_h)
for j in range(output_val.shape[-1]):
jj_st = j * st[1]
jj_end = builtins.min(jj_st + ds[1], img_cols)
jj_stride = j * stride[1]
jj_end = builtins.min(jj_stride + ws[1], img_cols)
if not inc_pad:
jj_st = builtins.max(jj_st, pad_w)
jj_stride = builtins.max(jj_stride, pad_w)
jj_end = builtins.min(jj_end, w + pad_w)
patch = y[k][ii_st:ii_end, jj_st:jj_end]
patch = y[k][ii_stride:ii_end, jj_stride:jj_end]
output_val[k][i, j] = func(patch)
return output_val
@staticmethod
def numpy_max_pool_nd_stride_padding(
input, ds, ignore_border=True, st=None, padding=None, mode='max'):
def numpy_max_pool_nd_stride_pad(
input, ws, ignore_border=True, stride=None, pad=None, mode='max'):
assert ignore_border
nd = len(ds)
if padding is None:
padding = (0,) * nd
if st is None:
st = (0,) * nd
assert len(padding) == len(ds) == len(st)
assert all(ds[i] > padding[i] for i in range(nd))
nd = len(ws)
if pad is None:
pad = (0,) * nd
if stride is None:
stride = (0,) * nd
assert len(pad) == len(ws) == len(stride)
assert all(ws[i] > pad[i] for i in range(nd))
def pad_img(x):
# initialize padded input
y = numpy.zeros(
x.shape[0:-nd] +
tuple(x.shape[-nd + i] + padding[i] * 2 for i in range(nd)),
tuple(x.shape[-nd + i] + pad[i] * 2 for i in range(nd)),
dtype=x.dtype)
# place the unpadded input in the center
block = ((slice(None),) * (len(x.shape) - nd) +
tuple(slice(padding[i], x.shape[-nd + i] + padding[i])
tuple(slice(pad[i], x.shape[-nd + i] + pad[i])
for i in range(nd)))
y[block] = x
return y
......@@ -174,9 +174,9 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
pad_img_shp = list(input.shape[:-nd])
out_shp = list(input.shape[:-nd])
for i in range(nd):
padded_size = input.shape[-nd + i] + 2 * padding[i]
padded_size = input.shape[-nd + i] + 2 * pad[i]
pad_img_shp.append(padded_size)
out_shp.append((padded_size - ds[i]) // st[i] + 1)
out_shp.append((padded_size - ws[i]) // stride[i] + 1)
output_val = numpy.zeros(out_shp)
padded_input = pad_img(input)
func = numpy.max
......@@ -190,51 +190,51 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for r in numpy.ndindex(*output_val.shape[-nd:]):
region = []
for i in range(nd):
r_st = r[i] * st[i]
r_end = builtins.min(r_st + ds[i], pad_img_shp[-nd + i])
r_stride = r[i] * stride[i]
r_end = builtins.min(r_stride + ws[i], pad_img_shp[-nd + i])
if not inc_pad:
r_st = builtins.max(r_st, padding[i])
r_end = builtins.min(r_end, input.shape[-nd + i] + padding[i])
region.append(slice(r_st, r_end))
r_stride = builtins.max(r_stride, pad[i])
r_end = builtins.min(r_end, input.shape[-nd + i] + pad[i])
region.append(slice(r_stride, r_end))
patch = padded_input[l][region]
output_val[l][r] = func(patch)
return output_val
@staticmethod
def numpy_max_pool_2d_stride(input, ds, ignore_border=False, st=None,
def numpy_max_pool_2d_stride(input, ws, ignore_border=False, stride=None,
mode='max'):
'''Helper function, implementing pool_2d in pure numpy
this function provides st input to indicate the stide size
for the pooling regions. if not indicated, st == sd.'''
this function provides stride input to indicate the stride size
for the pooling regions. if not indicated, stride == ws.'''
if len(input.shape) < 2:
raise NotImplementedError('input should have at least 2 dim,'
' shape is %s'
% str(input.shape))
if st is None:
st = ds
if stride is None:
stride = ws
img_rows = input.shape[-2]
img_cols = input.shape[-1]
out_r = 0
out_c = 0
if img_rows - ds[0] >= 0:
out_r = (img_rows - ds[0]) // st[0] + 1
if img_cols - ds[1] >= 0:
out_c = (img_cols - ds[1]) // st[1] + 1
if img_rows - ws[0] >= 0:
out_r = (img_rows - ws[0]) // stride[0] + 1
if img_cols - ws[1] >= 0:
out_c = (img_cols - ws[1]) // stride[1] + 1
if not ignore_border:
if out_r > 0:
if img_rows - ((out_r - 1) * st[0] + ds[0]) > 0:
rr = img_rows - out_r * st[0]
if img_rows - ((out_r - 1) * stride[0] + ws[0]) > 0:
rr = img_rows - out_r * stride[0]
if rr > 0:
out_r += 1
else:
if img_rows > 0:
out_r += 1
if out_c > 0:
if img_cols - ((out_c - 1) * st[1] + ds[1]) > 0:
cr = img_cols - out_c * st[1]
if img_cols - ((out_c - 1) * stride[1] + ws[1]) > 0:
cr = img_cols - out_c * stride[1]
if cr > 0:
out_c += 1
else:
......@@ -254,35 +254,35 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
output_val = numpy.zeros(out_shp)
for k in numpy.ndindex(*input.shape[:-2]):
for i in range(output_val.shape[-2]):
ii_st = i * st[0]
ii_end = builtins.min(ii_st + ds[0], img_rows)
ii_stride = i * stride[0]
ii_end = builtins.min(ii_stride + ws[0], img_rows)
for j in range(output_val.shape[-1]):
jj_st = j * st[1]
jj_end = builtins.min(jj_st + ds[1], img_cols)
patch = input[k][ii_st:ii_end, jj_st:jj_end]
jj_stride = j * stride[1]
jj_end = builtins.min(jj_stride + ws[1], img_cols)
patch = input[k][ii_stride:ii_end, jj_stride:jj_end]
output_val[k][i, j] = func(patch)
return output_val
@staticmethod
def numpy_max_pool_nd_stride(input, ds, ignore_border=False, st=None,
def numpy_max_pool_nd_stride(input, ws, ignore_border=False, stride=None,
mode='max'):
'''Helper function, implementing pooling in pure numpy
this function provides st input to indicate the stide size
for the pooling regions. if not indicated, st == sd.'''
nd = len(ds)
if st is None:
st = ds
assert len(st) == len(ds)
this function provides stride input to indicate the stride size
for the pooling regions. if not indicated, stride == ws.'''
nd = len(ws)
if stride is None:
stride = ws
assert len(stride) == len(ws)
out_shp = list(input.shape[:-nd])
for i in range(nd):
out = 0
if input.shape[-nd + i] - ds[i] >= 0:
out = (input.shape[-nd + i] - ds[i]) // st[i] + 1
if input.shape[-nd + i] - ws[i] >= 0:
out = (input.shape[-nd + i] - ws[i]) // stride[i] + 1
if not ignore_border:
if out > 0:
if input.shape[-nd + i] - ((out - 1) * st[i] + ds[i]) > 0:
if input.shape[-nd + i] - out * st[i] > 0:
if input.shape[-nd + i] - ((out - 1) * stride[i] + ws[i]) > 0:
if input.shape[-nd + i] - out * stride[i] > 0:
out += 1
else:
if input.shape[-nd + i] > 0:
......@@ -300,9 +300,9 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for r in numpy.ndindex(*output_val.shape[-nd:]):
region = []
for i in range(nd):
r_st = r[i] * st[i]
r_end = builtins.min(r_st + ds[i], input.shape[-nd + i])
region.append(slice(r_st, r_end))
r_stride = r[i] * stride[i]
r_end = builtins.min(r_stride + ws[i], input.shape[-nd + i])
region.append(slice(r_stride, r_end))
patch = input[l][region]
output_val[l][r] = func(patch)
return output_val
......@@ -469,7 +469,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
def test_DownsampleFactorMaxPaddingStride(self):
ignore_border = True # padding does not support ignore_border=False
rng = numpy.random.RandomState(utt.fetch_seed())
# maxpool, stride, padding, input sizes
# maxpool, stride, pad, input sizes
examples = (
((3,), (2,), (2,), (5,)),
((3,), (2,), (2,), (4, 5)),
......@@ -486,25 +486,25 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for example, mode in product(examples,
['max', 'sum', 'average_inc_pad',
'average_exc_pad']):
(maxpoolshp, stridesize, paddingsize, inputsize) = example
(maxpoolshp, stridesize, padsize, inputsize) = example
imval = rng.rand(*inputsize) - 0.5
images = theano.shared(imval)
numpy_output_val = self.numpy_max_pool_nd_stride_padding(
numpy_output_val = self.numpy_max_pool_nd_stride_pad(
imval, maxpoolshp, ignore_border,
stridesize, paddingsize, mode)
stridesize, padsize, mode)
maxpool_op = Pool(
ndim=len(maxpoolshp),
ignore_border=ignore_border,
mode=mode
)(images, maxpoolshp, stridesize, paddingsize)
)(images, maxpoolshp, stridesize, padsize)
f = function([], maxpool_op)
output_val = f()
utt.assert_allclose(output_val, numpy_output_val)
def test_DownsampleFactorMaxPaddingStride_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed())
# maxpool, stride, padding, input sizes
# maxpool, stride, pad, input sizes
examples = (
((10,), (5,), (3,), (2,)),
((10,), (5,), (3,), (2, 2)),
......@@ -518,7 +518,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
# support grad with padding
for mode in ['max', 'sum']:
for example in examples:
(maxpoolshp, stridesize, paddingsize, inputsize) = example
(maxpoolshp, stridesize, padsize, inputsize) = example
imval = rng.rand(*inputsize) * 10.0
def mp(input):
......@@ -526,7 +526,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
ndim=len(maxpoolshp),
ignore_border=True,
mode=mode,
)(input, maxpoolshp, stridesize, paddingsize)
)(input, maxpoolshp, stridesize, padsize)
utt.verify_grad(mp, [imval], rng=rng)
def test_DownsampleFactorMax_grad(self):
......@@ -562,7 +562,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
utt.verify_grad(mp, [imval], rng=rng)
# pool, stride, input sizes
pool_grad_st_examples = (
pool_grad_stride_examples = (
((1,), (1,), (16,)),
((1,), (3,), (1, 16)),
((1,), (5,), (1, 2, 16)),
......@@ -590,14 +590,14 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
((9, 9), (1, 1), (1, 2, 8, 5)),
)
@parameterized.expand(product(pool_grad_st_examples,
@parameterized.expand(product(pool_grad_stride_examples,
[True, False],
['max',
'sum',
'average_inc_pad',
'average_exc_pad']),
testcase_func_name=utt.custom_name_func)
def test_DownsampleFactorMax_grad_st(self, example, ignore_border, mode):
def test_DownsampleFactorMax_grad_stride(self, example, ignore_border, mode):
# checks the gradient for the case that stride is used
rng = numpy.random.RandomState(utt.fetch_seed())
......@@ -685,10 +685,10 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
utt.verify_grad(mp, [imval, grad_val], rng=rng)
@parameterized.expand(product(pool_grad_st_examples,
@parameterized.expand(product(pool_grad_stride_examples,
[True, False]),
testcase_func_name=utt.custom_name_func)
def test_DownsampleFactorMaxGrad_grad_st(self, example, ignore_border):
def test_DownsampleFactorMaxGrad_grad_stride(self, example, ignore_border):
# checks the gradient of the gradient for
# the case that stride is used
rng = numpy.random.RandomState(utt.fetch_seed())
......@@ -696,7 +696,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
imval = rng.rand(*inputsize)
grad_shape = Pool.out_shape(
imval.shape, maxpoolshp, ndim=len(maxpoolshp),
ignore_border=ignore_border, st=stride)
ignore_border=ignore_border, stride=stride)
# skip the grad verification when the output is empty
if numpy.prod(grad_shape) != 0:
......@@ -713,13 +713,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
utt.verify_grad(mp, [imval, grad_val], rng=rng)
@parameterized.expand(product(pool_grad_st_examples,
@parameterized.expand(product(pool_grad_stride_examples,
[True, False],
['sum',
'average_inc_pad',
'average_exc_pad']),
testcase_func_name=utt.custom_name_func)
def test_AveragePoolGrad_grad_st(self, example, ignore_border, mode):
def test_AveragePoolGrad_grad_stride(self, example, ignore_border, mode):
# checks the gradient of the gradient for
# the case that stride is used
rng = numpy.random.RandomState(utt.fetch_seed())
......@@ -728,7 +728,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
grad_shape = Pool.out_shape(
imval.shape, avgpoolshp,
ndim=len(avgpoolshp),
ignore_border=ignore_border, st=stride)
ignore_border=ignore_border, stride=stride)
# skip the grad verification when the output is empty
if numpy.prod(grad_shape) != 0:
......@@ -745,7 +745,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
def test_DownsampleFactorMaxPaddingStride_grad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed())
# maxpool, stride, padding, input sizes
# maxpool, stride, pad, input sizes
examples = (
((3,), (2,), (2,), (10,)),
((3,), (2,), (2,), (2, 10,)),
......@@ -759,30 +759,30 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
((3, 3, 5), (2, 2, 3), (2, 2, 1), (1, 1, 5, 5, 10)),
)
for (maxpoolshp, stridesize, paddingsize, inputsize) in examples:
for (maxpoolshp, stridesize, padsize, inputsize) in examples:
imval = rng.rand(*inputsize) * 10.0
grad_shape = Pool.out_shape(imval.shape,
maxpoolshp,
ndim=len(maxpoolshp),
st=stridesize,
stride=stridesize,
ignore_border=True,
padding=paddingsize)
pad=padsize)
grad_val = rng.rand(*grad_shape) * 10.0
def mp(input, grad):
out = Pool(
ndim=len(maxpoolshp),
ignore_border=True,
)(input, maxpoolshp, stridesize, paddingsize)
)(input, maxpoolshp, stridesize, padsize)
grad_op = MaxPoolGrad(ndim=len(maxpoolshp),
ignore_border=True)
return grad_op(input, out, grad, maxpoolshp, stridesize, paddingsize)
return grad_op(input, out, grad, maxpoolshp, stridesize, padsize)
utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_AveragePoolPaddingStride_grad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed())
# avgpool, stride, padding, input sizes
# avgpool, stride, pad, input sizes
examples = (
((3,), (2,), (2,), (10,)),
((3,), (2,), (2,), (2, 10,)),
......@@ -796,7 +796,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
((3, 3, 5), (2, 2, 3), (2, 2, 1), (1, 1, 5, 5, 10)),
)
for (avgpoolshp, stridesize, paddingsize, inputsize) in examples:
for (avgpoolshp, stridesize, padsize, inputsize) in examples:
imval = rng.rand(*inputsize) * 10.0
# 'average_exc_pad' with non-zero padding is not implemented
......@@ -804,16 +804,16 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
grad_shape = Pool.out_shape(imval.shape,
avgpoolshp,
ndim=len(avgpoolshp),
st=stridesize,
stride=stridesize,
ignore_border=True,
padding=paddingsize)
pad=padsize)
grad_val = rng.rand(*grad_shape) * 10.0
def mp(input, grad):
grad_op = AveragePoolGrad(ndim=len(avgpoolshp),
ignore_border=True,
mode=mode)
return grad_op(input, grad, avgpoolshp, stridesize, paddingsize)
return grad_op(input, grad, avgpoolshp, stridesize, padsize)
utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_DownsampleFactorMax_hessian(self):
......@@ -822,7 +822,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
x_vec = tensor.vector('x')
z = tensor.dot(x_vec.dimshuffle(0, 'x'),
x_vec.dimshuffle('x', 0))
y = pool_2d(input=z, ds=(2, 2), ignore_border=True)
y = pool_2d(input=z, ws=(2, 2), ignore_border=True)
C = tensor.exp(tensor.sum(y))
grad_hess = tensor.hessian(cost=C, wrt=x_vec)
......@@ -835,7 +835,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
def test_DownsampleFactorMaxGradGrad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed())
# maxpool, stride, padding, input sizes
# maxpool, stride, pad, input sizes
examples = (
((3,), (2,), (2,), (10,)),
((3,), (2,), (2,), (2, 10,)),
......@@ -849,17 +849,17 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
((3, 3, 5), (2, 2, 3), (2, 2, 1), (1, 1, 5, 5, 10)),
)
for (maxpoolshp, stridesize, paddingsize, inputsize) in examples:
for (maxpoolshp, stridesize, padsize, inputsize) in examples:
imval1 = rng.rand(*inputsize) * 10.0
imval2 = rng.rand(*inputsize) * 10.0
def mp(input1, input2):
op1 = Pool(ndim=len(maxpoolshp), ignore_border=True)
pooled_out = op1(input1, maxpoolshp, stridesize, paddingsize)
pooled_out = op1(input1, maxpoolshp, stridesize, padsize)
op2 = DownsampleFactorMaxGradGrad(
ndim=len(maxpoolshp),
ignore_border=True)
out = op2(input1, pooled_out, input2, maxpoolshp, stridesize, paddingsize)
out = op2(input1, pooled_out, input2, maxpoolshp, stridesize, padsize)
return out
utt.verify_grad(mp, [imval1, imval2], rng=rng)
......@@ -1015,13 +1015,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for i, maxpoolshp in enumerate(maxpoolshps):
for j, ignore_border in enumerate([True, False]):
for k, padding in enumerate([(0, 0), (1, 1), (1, 2)]):
for k, pad in enumerate([(0, 0), (1, 1), (1, 2)]):
if out_shapes[k][i][j] is None:
continue
# checking shapes generated by Pool
self._compile_and_check([image],
[Pool(ignore_border=ignore_border)
(image, maxpoolshp, pad=padding)],
(image, maxpoolshp, pad=pad)],
[image_val], Pool)
# checking shapes generated by MaxPoolGrad
......@@ -1031,7 +1031,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
[MaxPoolGrad(
ignore_border=ignore_border)
(image, maxout, gz, maxpoolshp,
pad=padding)],
pad=pad)],
[image_val, maxout_val, gz_val],
MaxPoolGrad,
warn=False)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论