提交 ee75b95b authored 作者: Gijs van Tulder's avatar Gijs van Tulder

Deprecate ds, st, padding parameters in pooling.

Closes #4933.
上级 e6a3b009
...@@ -158,9 +158,9 @@ def test_pooling(): ...@@ -158,9 +158,9 @@ def test_pooling():
continue continue
# We will check that the opt introduced it. # We will check that the opt introduced it.
out = pool_2d(x, (ws, ws), out = pool_2d(x, (ws, ws),
st=(stride, stride), stride=(stride, stride),
ignore_border=True, ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
mode_without_gpu2 = mode_without_gpu.including() mode_without_gpu2 = mode_without_gpu.including()
mode_without_gpu2.check_isfinite = False mode_without_gpu2.check_isfinite = False
...@@ -199,7 +199,7 @@ def test_pooling(): ...@@ -199,7 +199,7 @@ def test_pooling():
# This tests the CPU grad + opt + GPU implementation # This tests the CPU grad + opt + GPU implementation
def fn(x): def fn(x):
return pool_2d(x, (ws, ws), ignore_border=True, return pool_2d(x, (ws, ws), ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
utt.verify_grad(fn, [data], mode=mode_with_gpu) utt.verify_grad(fn, [data], mode=mode_with_gpu)
# Confirm that the opt would have inserted it. # Confirm that the opt would have inserted it.
fg = theano.function([x], theano.grad(fn(x).sum(), x), fg = theano.function([x], theano.grad(fn(x).sum(), x),
...@@ -228,14 +228,14 @@ def test_pooling_with_tensor_vars(): ...@@ -228,14 +228,14 @@ def test_pooling_with_tensor_vars():
raise SkipTest(dnn.dnn_available.msg) raise SkipTest(dnn.dnn_available.msg)
x = T.ftensor4() x = T.ftensor4()
ws = theano.shared(numpy.array([2, 2], dtype='int32')) ws = theano.shared(numpy.array([2, 2], dtype='int32'))
st = theano.shared(numpy.array([1, 1], dtype='int32')) stride = theano.shared(numpy.array([1, 1], dtype='int32'))
pad = theano.shared(numpy.array([0, 0], dtype='int32')) pad = theano.shared(numpy.array([0, 0], dtype='int32'))
mode = 'max' mode = 'max'
def fn(x): def fn(x):
dnn_op = dnn.dnn_pool( dnn_op = dnn.dnn_pool(
x, ws=ws, x, ws=ws,
stride=st, stride=stride,
pad=pad, pad=pad,
mode=mode) mode=mode)
return dnn_op return dnn_op
...@@ -255,7 +255,7 @@ def test_pooling_with_tensor_vars(): ...@@ -255,7 +255,7 @@ def test_pooling_with_tensor_vars():
for node in f_gpu.maker.fgraph.apply_nodes]) for node in f_gpu.maker.fgraph.apply_nodes])
# CPU implementation # CPU implementation
out_cpu = pool_2d(x, ws, ignore_border=True, st=st, padding=pad, mode=mode) out_cpu = pool_2d(x, ws, ignore_border=True, stride=stride, pad=pad, mode=mode)
f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2) f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2)
assert not any([isinstance(node.op, dnn.GpuDnnPool) assert not any([isinstance(node.op, dnn.GpuDnnPool)
for node in f_cpu.maker.fgraph.apply_nodes]) for node in f_cpu.maker.fgraph.apply_nodes])
...@@ -307,9 +307,9 @@ def test_pooling3d(): ...@@ -307,9 +307,9 @@ def test_pooling3d():
# Not implemented # Not implemented
continue continue
out = pool_3d(x, (ws, ws, ws), out = pool_3d(x, (ws, ws, ws),
st=(stride, stride, stride), stride=(stride, stride, stride),
ignore_border=True, ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
# GPU implementation # GPU implementation
f_gpu = theano.function([x], out, mode=mode_with_gpu) f_gpu = theano.function([x], out, mode=mode_with_gpu)
...@@ -374,7 +374,7 @@ def test_pooling_opt(): ...@@ -374,7 +374,7 @@ def test_pooling_opt():
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 2), mode='average_inc_pad', pool_2d(x, ws=(2, 2), mode='average_inc_pad',
ignore_border=True), ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
...@@ -386,7 +386,7 @@ def test_pooling_opt(): ...@@ -386,7 +386,7 @@ def test_pooling_opt():
# gradient of 2D pooling # gradient of 2D pooling
f = theano.function( f = theano.function(
[x], [x],
T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad', T.grad(pool_2d(x, ws=(2, 2), mode='average_inc_pad',
ignore_border=True).sum(), ignore_border=True).sum(),
x), x),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
...@@ -399,7 +399,7 @@ def test_pooling_opt(): ...@@ -399,7 +399,7 @@ def test_pooling_opt():
# Test sum pooling # Test sum pooling
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 3), mode='sum', pool_2d(x, ws=(2, 3), mode='sum',
ignore_border=True), ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
...@@ -413,7 +413,7 @@ def test_pooling_opt(): ...@@ -413,7 +413,7 @@ def test_pooling_opt():
f = theano.function( f = theano.function(
[x], [x],
pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad', pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad',
ignore_border=True), ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
...@@ -425,7 +425,7 @@ def test_pooling_opt(): ...@@ -425,7 +425,7 @@ def test_pooling_opt():
# gradient of 3D pooling # gradient of 3D pooling
f = theano.function( f = theano.function(
[x], [x],
T.grad(pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad', T.grad(pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad',
ignore_border=True).sum(), ignore_border=True).sum(),
x), x),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
...@@ -504,7 +504,7 @@ def test_dnn_tag(): ...@@ -504,7 +504,7 @@ def test_dnn_tag():
try: try:
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 2), ignore_border=True), pool_2d(x, ws=(2, 2), ignore_border=True),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
except (AssertionError, RuntimeError): except (AssertionError, RuntimeError):
assert not dnn.dnn_available(test_ctx_name) assert not dnn.dnn_available(test_ctx_name)
......
...@@ -194,9 +194,9 @@ def test_pooling(): ...@@ -194,9 +194,9 @@ def test_pooling():
continue continue
# We will check that the opt introduced it. # We will check that the opt introduced it.
out = pool_2d(x, (ws, ws), out = pool_2d(x, (ws, ws),
st=(stride, stride), stride=(stride, stride),
ignore_border=True, ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
mode_without_gpu2 = mode_without_gpu.including() mode_without_gpu2 = mode_without_gpu.including()
mode_without_gpu2.check_isfinite = False mode_without_gpu2.check_isfinite = False
...@@ -235,7 +235,7 @@ def test_pooling(): ...@@ -235,7 +235,7 @@ def test_pooling():
# This tests the CPU grad + opt + GPU implementation # This tests the CPU grad + opt + GPU implementation
def fn(x): def fn(x):
return pool_2d(x, (ws, ws), ignore_border=True, return pool_2d(x, (ws, ws), ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
utt.verify_grad(fn, [data], mode=mode_with_gpu) utt.verify_grad(fn, [data], mode=mode_with_gpu)
# Confirm that the opt would have inserted it. # Confirm that the opt would have inserted it.
fg = theano.function([x], theano.grad(fn(x).sum(), x), fg = theano.function([x], theano.grad(fn(x).sum(), x),
...@@ -264,14 +264,14 @@ def test_pooling_with_tensor_vars(): ...@@ -264,14 +264,14 @@ def test_pooling_with_tensor_vars():
raise SkipTest(cuda.dnn.dnn_available.msg) raise SkipTest(cuda.dnn.dnn_available.msg)
x = T.ftensor4() x = T.ftensor4()
ws = theano.shared(numpy.array([2, 2], dtype='int32')) ws = theano.shared(numpy.array([2, 2], dtype='int32'))
st = theano.shared(numpy.array([1, 1], dtype='int32')) stride = theano.shared(numpy.array([1, 1], dtype='int32'))
pad = theano.shared(numpy.array([0, 0], dtype='int32')) pad = theano.shared(numpy.array([0, 0], dtype='int32'))
mode = 'max' mode = 'max'
def fn(x): def fn(x):
dnn_op = cuda.dnn.dnn_pool( dnn_op = cuda.dnn.dnn_pool(
x, ws=ws, x, ws=ws,
stride=st, stride=stride,
pad=pad, pad=pad,
mode=mode) mode=mode)
return dnn_op return dnn_op
...@@ -291,7 +291,7 @@ def test_pooling_with_tensor_vars(): ...@@ -291,7 +291,7 @@ def test_pooling_with_tensor_vars():
for node in f_gpu.maker.fgraph.apply_nodes]) for node in f_gpu.maker.fgraph.apply_nodes])
# CPU implementation # CPU implementation
out_cpu = pool_2d(x, ws, ignore_border=True, st=st, padding=pad, mode=mode) out_cpu = pool_2d(x, ws, ignore_border=True, stride=stride, pad=pad, mode=mode)
f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2) f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2)
assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool) assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)
for node in f_cpu.maker.fgraph.apply_nodes]) for node in f_cpu.maker.fgraph.apply_nodes])
...@@ -364,9 +364,9 @@ def test_pooling3d(): ...@@ -364,9 +364,9 @@ def test_pooling3d():
# Not implemented # Not implemented
continue continue
out = pool_3d(x, (ws, ws, ws), out = pool_3d(x, (ws, ws, ws),
st=(stride, stride, stride), stride=(stride, stride, stride),
ignore_border=True, ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
# GPU implementation # GPU implementation
f_gpu = theano.function([x], out, mode=mode_with_gpu) f_gpu = theano.function([x], out, mode=mode_with_gpu)
...@@ -431,7 +431,7 @@ def test_pooling_opt(): ...@@ -431,7 +431,7 @@ def test_pooling_opt():
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 2), mode='average_inc_pad', ignore_border=True), pool_2d(x, ws=(2, 2), mode='average_inc_pad', ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
assert any([isinstance(n.op, cuda.dnn.GpuDnnPool) assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
...@@ -442,7 +442,7 @@ def test_pooling_opt(): ...@@ -442,7 +442,7 @@ def test_pooling_opt():
# gradient of 2D pooling # gradient of 2D pooling
f = theano.function( f = theano.function(
[x], [x],
T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad', T.grad(pool_2d(x, ws=(2, 2), mode='average_inc_pad',
ignore_border=True).sum(), x), ignore_border=True).sum(), x),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
...@@ -454,7 +454,7 @@ def test_pooling_opt(): ...@@ -454,7 +454,7 @@ def test_pooling_opt():
# Test sum pooling # Test sum pooling
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 3), mode='sum', pool_2d(x, ws=(2, 3), mode='sum',
ignore_border=True), ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
...@@ -468,7 +468,7 @@ def test_pooling_opt(): ...@@ -468,7 +468,7 @@ def test_pooling_opt():
f = theano.function( f = theano.function(
[x], [x],
pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad', ignore_border=True), pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad', ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
assert any([isinstance(n.op, cuda.dnn.GpuDnnPool) assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
...@@ -479,7 +479,7 @@ def test_pooling_opt(): ...@@ -479,7 +479,7 @@ def test_pooling_opt():
# gradient of 3D pooling # gradient of 3D pooling
f = theano.function( f = theano.function(
[x], [x],
T.grad(pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad', T.grad(pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad',
ignore_border=True).sum(), x), ignore_border=True).sum(), x),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
...@@ -849,7 +849,7 @@ def test_dnn_tag(): ...@@ -849,7 +849,7 @@ def test_dnn_tag():
try: try:
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 2), ignore_border=True), pool_2d(x, ws=(2, 2), ignore_border=True),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
except (AssertionError, RuntimeError): except (AssertionError, RuntimeError):
assert not cuda.dnn.dnn_available() assert not cuda.dnn.dnn_available()
......
...@@ -38,29 +38,29 @@ def max_pool_2d_same_size(input, patch_size): ...@@ -38,29 +38,29 @@ def max_pool_2d_same_size(input, patch_size):
return outs return outs
def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0), def pool_2d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0),
mode='max'): mode='max', ds=None, st=None, padding=None):
"""Downscale the input by a specified factor """Downscale the input by a specified factor
Takes as input a N-D tensor, where N >= 2. It downscales the input image by Takes as input a N-D tensor, where N >= 2. It downscales the input image by
the specified factor, by keeping only the maximum value of non-overlapping the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1]) patches of size (ws[0],ws[1])
Parameters Parameters
---------- ----------
input : N-D theano tensor of input images input : N-D theano tensor of input images
Input images. Max pooling will be done over the 2 last dimensions. Input images. Max pooling will be done over the 2 last dimensions.
ds : tuple of length 2 or theano vector of ints of size 2. ws : tuple of length 2 or theano vector of ints of size 2.
Factor by which to downscale (vertical ds, horizontal ds). Factor by which to downscale (vertical ws, horizontal ws).
(2,2) will halve the image in each dimension. (2,2) will halve the image in each dimension.
ignore_border : bool (default None, will print a warning and set to False) ignore_border : bool (default None, will print a warning and set to False)
When True, (5,5) input with ds=(2,2) will generate a (2,2) output. When True, (5,5) input with ws=(2,2) will generate a (2,2) output.
(3,3) otherwise. (3,3) otherwise.
st : tuple of two ints or theano vector of ints of size 2. stride : tuple of two ints or theano vector of ints of size 2.
Stride size, which is the number of shifts over rows/cols to get the Stride size, which is the number of shifts over rows/cols to get the
next pool region. If st is None, it is considered equal to ds next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions). (no overlap on pooling regions).
padding : tuple of two ints or theano vector of ints of size 2. pad : tuple of two ints or theano vector of ints of size 2.
(pad_h, pad_w), pad zeros to extend beyond four borders of the (pad_h, pad_w), pad zeros to extend beyond four borders of the
images, pad_h is the size of the top and bottom margins, and images, pad_h is the size of the top and bottom margins, and
pad_w is the size of the left and right margins. pad_w is the size of the left and right margins.
...@@ -68,8 +68,33 @@ def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0), ...@@ -68,8 +68,33 @@ def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
Operation executed on each window. `max` and `sum` always exclude Operation executed on each window. `max` and `sum` always exclude
the padding in the computation. `average` gives you the choice to the padding in the computation. `average` gives you the choice to
include or exclude it. include or exclude it.
ds
*deprecated*, use parameter ws instead.
st
*deprecated*, use parameter st instead.
padding
*deprecated*, use parameter pad instead.
""" """
# check for deprecated parameter names
if ds is not None:
warnings.warn(
"pool_2d() ds parameter is deprecated, please use ws",
stacklevel=2)
ws = ds
if st is not None:
warnings.warn(
"pool_2d() st parameter is deprecated, please use stride",
stacklevel=2)
stride = st
if padding is not None:
warnings.warn(
"pool_2d() padding parameter is deprecated, please use pad",
stacklevel=2)
pad = padding
if ws is None:
raise ValueError('pool_2d() ws parameter can not be None')
if input.ndim < 2: if input.ndim < 2:
raise NotImplementedError('pool_2d requires a dimension >= 2') raise NotImplementedError('pool_2d requires a dimension >= 2')
if ignore_border is None: if ignore_border is None:
...@@ -81,38 +106,38 @@ def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0), ...@@ -81,38 +106,38 @@ def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
" On the GPU, using ignore_border=True is needed to use cuDNN." " On the GPU, using ignore_border=True is needed to use cuDNN."
" When using ignore_border=False and not using cuDNN, the only" " When using ignore_border=False and not using cuDNN, the only"
" GPU combination supported is when" " GPU combination supported is when"
" `ds == st and padding == (0, 0) and mode == 'max'`." " `ws == stride and pad == (0, 0) and mode == 'max'`."
" Otherwise, the convolution will be executed on CPU.", " Otherwise, the convolution will be executed on CPU.",
stacklevel=2) stacklevel=2)
ignore_border = False ignore_border = False
op = Pool(ignore_border, ndim=2, mode=mode) op = Pool(ignore_border, ndim=2, mode=mode)
output = op(input, ds, st, padding) output = op(input, ws, stride, pad)
return output return output
def pool_3d(input, ds, ignore_border=None, st=None, padding=(0, 0, 0), def pool_3d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0, 0),
mode='max'): mode='max', ds=None, st=None, padding=None):
"""Downscale the input by a specified factor """Downscale the input by a specified factor
Takes as input a N-D tensor, where N >= 3. It downscales the input image by Takes as input a N-D tensor, where N >= 3. It downscales the input image by
the specified factor, by keeping only the maximum value of non-overlapping the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1],ds[2]) patches of size (ws[0],ws[1],ws[2])
Parameters Parameters
---------- ----------
input : N-D theano tensor of input images input : N-D theano tensor of input images
Input images. Max pooling will be done over the 3 last dimensions. Input images. Max pooling will be done over the 3 last dimensions.
ds : tuple of length 3 or theano vector of ints of size 3 ws : tuple of length 3 or theano vector of ints of size 3
Factor by which to downscale (vertical ds, horizontal ds, depth ds). Factor by which to downscale (vertical ws, horizontal ws, depth ws).
(2,2,2) will halve the image in each dimension. (2,2,2) will halve the image in each dimension.
ignore_border : bool (default None, will print a warning and set to False) ignore_border : bool (default None, will print a warning and set to False)
When True, (5,5,5) input with ds=(2,2,2) will generate a (2,2,2) output. When True, (5,5,5) input with ws=(2,2,2) will generate a (2,2,2) output.
(3,3,3) otherwise. (3,3,3) otherwise.
st : tuple of three ints or theano vector of ints of size 3 st : tuple of three ints or theano vector of ints of size 3
Stride size, which is the number of shifts over rows/cols/slices to get Stride size, which is the number of shifts over rows/cols/slices to get
the next pool region. If st is None, it is considered equal to ds the next pool region. If st is None, it is considered equal to ws
(no overlap on pooling regions). (no overlap on pooling regions).
padding : tuple of two ints or theano vector of ints of size 3 pad : tuple of two ints or theano vector of ints of size 3
(pad_h, pad_w, pad_d), pad zeros to extend beyond six borders of the (pad_h, pad_w, pad_d), pad zeros to extend beyond six borders of the
images, pad_h is the size of the top and bottom margins, images, pad_h is the size of the top and bottom margins,
pad_w is the size of the left and right margins, and pad_d is the size pad_w is the size of the left and right margins, and pad_d is the size
...@@ -121,8 +146,33 @@ def pool_3d(input, ds, ignore_border=None, st=None, padding=(0, 0, 0), ...@@ -121,8 +146,33 @@ def pool_3d(input, ds, ignore_border=None, st=None, padding=(0, 0, 0),
Operation executed on each window. `max` and `sum` always exclude Operation executed on each window. `max` and `sum` always exclude
the padding in the computation. `average` gives you the choice to the padding in the computation. `average` gives you the choice to
include or exclude it. include or exclude it.
ds
*deprecated*, use parameter ws instead.
st
*deprecated*, use parameter st instead.
padding
*deprecated*, use parameter pad instead.
""" """
# check for deprecated parameter names
if ds is not None:
warnings.warn(
"pool_3d() ds parameter is deprecated, please use ws",
stacklevel=2)
ws = ds
if st is not None:
warnings.warn(
"pool_3d() st parameter is deprecated, please use stride",
stacklevel=2)
stride = st
if padding is not None:
warnings.warn(
"pool_3d() padding parameter is deprecated, please use pad",
stacklevel=2)
pad = padding
if ws is None:
raise ValueError('pool_3d() ws parameter can not be None')
if input.ndim < 3: if input.ndim < 3:
raise NotImplementedError('pool_3d requires a dimension >= 3') raise NotImplementedError('pool_3d requires a dimension >= 3')
if ignore_border is None: if ignore_border is None:
...@@ -134,37 +184,36 @@ def pool_3d(input, ds, ignore_border=None, st=None, padding=(0, 0, 0), ...@@ -134,37 +184,36 @@ def pool_3d(input, ds, ignore_border=None, st=None, padding=(0, 0, 0),
" On the GPU, using ignore_border=True is needed to use cuDNN." " On the GPU, using ignore_border=True is needed to use cuDNN."
" When using ignore_border=False and not using cuDNN, the only" " When using ignore_border=False and not using cuDNN, the only"
" GPU combination supported is when" " GPU combination supported is when"
" `ds == st and padding == (0, 0, 0) and mode == 'max'`." " `ws == stride and pad == (0, 0, 0) and mode == 'max'`."
" Otherwise, the convolution will be executed on CPU.", " Otherwise, the convolution will be executed on CPU.",
stacklevel=2) stacklevel=2)
ignore_border = False ignore_border = False
op = Pool(ignore_border, ndim=3, mode=mode) op = Pool(ignore_border, ndim=3, mode=mode)
output = op(input, ds, st, padding) output = op(input, ws, stride, pad)
return output return output
class Pool(OpenMPOp): class Pool(OpenMPOp):
""" """
This Op downsamples the last N dimensions of the input by taking the max,
sum or average over different patches. sum or average over different patches.
Parameters Parameters
---------- ----------
ds : list or tuple of N ints ws : list or tuple of N ints
Downsample factor over rows, columns etc. Downsample factor over rows, columns etc.
ds indicates the size of the pooling region. ws indicates the size of the pooling region.
ignore_border : bool ignore_border : bool
If ds doesn't divide imgshape, do we include an extra row/col/slice If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True). of partial downsampling (False) or ignore it (True).
st : list or tuple of N ints or None stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If st is None, it is considered equal to ds next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions). (no overlap on pooling regions).
padding : tuple of N ints or None pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if padding is None. right margins. No padding is added if pad is None.
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'} mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
('average_inc_pad' excludes the padding from the count, ('average_inc_pad' excludes the padding from the count,
'average_exc_pad' include it) 'average_exc_pad' include it)
...@@ -177,7 +226,8 @@ class Pool(OpenMPOp): ...@@ -177,7 +226,8 @@ class Pool(OpenMPOp):
__props__ = ('ignore_border', 'mode', 'ndim') __props__ = ('ignore_border', 'mode', 'ndim')
@staticmethod @staticmethod
def out_shape(imgshape, ds, ignore_border=False, st=None, padding=None, ndim=2): def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None, ndim=2,
ds=None, st=None, padding=None):
""" """
Return the shape of the output from this op, for input of given Return the shape of the output from this op, for input of given
shape and flags. shape and flags.
...@@ -187,21 +237,21 @@ class Pool(OpenMPOp): ...@@ -187,21 +237,21 @@ class Pool(OpenMPOp):
imgshape : tuple, list, or similar of integer or scalar Theano variable imgshape : tuple, list, or similar of integer or scalar Theano variable
The shape of a tensor of images. The last N elements are The shape of a tensor of images. The last N elements are
interpreted as the number of rows, and the number of cols. interpreted as the number of rows, and the number of cols.
ds : list or tuple of N ints ws : list or tuple of N ints
Downsample factor over rows and column. Downsample factor over rows and column.
ds indicates the pool region size. ws indicates the pool region size.
ignore_border : bool ignore_border : bool
If ds doesn't divide imgshape, do we include an extra row/col/slice If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True). of partial downsampling (False) or ignore it (True).
st : list or tuple of N ints or None stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If st is None, it is considered equal to ds next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions). (no overlap on pooling regions).
padding : tuple of N ints or None pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if padding is None. right margins. No padding is added if pad is None.
ndim : int ndim : int
The number of pooling dimensions N. The number of pooling dimensions N.
The default is 2. The default is 2.
...@@ -214,17 +264,36 @@ class Pool(OpenMPOp): ...@@ -214,17 +264,36 @@ class Pool(OpenMPOp):
elements reduced as per the downsampling & ignore_border flags. elements reduced as per the downsampling & ignore_border flags.
""" """
# check for deprecated parameter names
if ds is not None:
warnings.warn(
"Pool ds parameter is deprecated, please use ws",
stacklevel=2)
ws = ds
if st is not None:
warnings.warn(
"Pool st parameter is deprecated, please use stride",
stacklevel=2)
stride = st
if padding is not None:
warnings.warn(
"Pool padding parameter is deprecated, please use pad",
stacklevel=2)
pad = padding
if ws is None:
raise ValueError('Pool ws parameter can not be None')
if ndim is None: if ndim is None:
ndim = 2 ndim = 2
assert ndim > 0 assert ndim > 0
if len(imgshape) < ndim: if len(imgshape) < ndim:
raise TypeError('imgshape must have at least {} dimensions'.format(ndim)) raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
if st is None: if stride is None:
st = ds stride = ws
if padding is None: if pad is None:
padding = (0,) * ndim pad = (0,) * ndim
patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + padding[i] * 2 patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2
for i in xrange(ndim)) for i in xrange(ndim))
def compute_out(v, downsample, stride): def compute_out(v, downsample, stride):
...@@ -248,7 +317,7 @@ class Pool(OpenMPOp): ...@@ -248,7 +317,7 @@ class Pool(OpenMPOp):
else: else:
return max(0, (v - 1 - downsample + stride) // stride) + 1 return max(0, (v - 1 - downsample + stride) // stride) + 1
out_shape = [compute_out(patch_shape[i], ds[i], st[i]) for i in xrange(ndim)] out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]
rval = list(imgshape[:-ndim]) + out_shape rval = list(imgshape[:-ndim]) + out_shape
return rval return rval
...@@ -308,7 +377,7 @@ class Pool(OpenMPOp): ...@@ -308,7 +377,7 @@ class Pool(OpenMPOp):
if isinstance(ws, (tuple, list)): if isinstance(ws, (tuple, list)):
if any(pad[i] >= ws[i] for i in range(nd)): if any(pad[i] >= ws[i] for i in range(nd)):
raise NotImplementedError( raise NotImplementedError(
'padding_h and padding_w must be smaller than strides') 'padding must be smaller than strides')
ws = tensor.as_tensor_variable(ws) ws = tensor.as_tensor_variable(ws)
stride = tensor.as_tensor_variable(stride) stride = tensor.as_tensor_variable(stride)
pad = tensor.as_tensor_variable(pad) pad = tensor.as_tensor_variable(pad)
...@@ -715,7 +784,8 @@ class PoolGrad(OpenMPOp): ...@@ -715,7 +784,8 @@ class PoolGrad(OpenMPOp):
__props__ = ('ignore_border', 'mode', 'ndim') __props__ = ('ignore_border', 'mode', 'ndim')
@staticmethod @staticmethod
def out_shape(imgshape, ds, ignore_border=False, st=None, padding=None, ndim=2): def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None, ndim=2,
ds=None, st=None, padding=None):
"""Return the shape of the output from this op, for input of given """Return the shape of the output from this op, for input of given
shape and flags. shape and flags.
...@@ -724,21 +794,21 @@ class PoolGrad(OpenMPOp): ...@@ -724,21 +794,21 @@ class PoolGrad(OpenMPOp):
imgshape : tuple of integers or scalar Theano variables imgshape : tuple of integers or scalar Theano variables
the shape of a tensor of images. The last N elements are the shape of a tensor of images. The last N elements are
interpreted as the downsampling dimensions. interpreted as the downsampling dimensions.
ds : tuple of N ints ws : tuple of N ints
downsample factor over rows and columns this parameter downsample factor over rows and columns this parameter
indicates the size of the pooling region indicates the size of the pooling region
ignore_border : bool ignore_border : bool
If ds doesn't divide imgshape, do we include an extra row/col/slice If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True). of partial downsampling (False) or ignore it (True).
st : list or tuple of N ints or None stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If st is None, it is considered equal to ds next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions). (no overlap on pooling regions).
padding : tuple of N ints or None pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if padding is None. right margins. No padding is added if pad is None.
ndim : int ndim : int
The number of pooling dimensions N. The number of pooling dimensions N.
The default is 2. The default is 2.
...@@ -752,15 +822,33 @@ class PoolGrad(OpenMPOp): ...@@ -752,15 +822,33 @@ class PoolGrad(OpenMPOp):
ignore_border flags. ignore_border flags.
""" """
# check for deprecated parameter names
if ds is not None:
warnings.warn(
"PoolGrad ds parameter is deprecated, please use ws",
stacklevel=2)
ws = ds
if st is not None:
warnings.warn(
"PoolGrad st parameter is deprecated, please use stride",
stacklevel=2)
stride = st
if padding is not None:
warnings.warn(
"PoolGrad padding parameter is deprecated, please use pad",
stacklevel=2)
pad = padding
if ws is None:
raise ValueError('PoolGrad ws parameter can not be None')
if len(imgshape) < ndim: if len(imgshape) < ndim:
raise TypeError('imgshape must have at least {} dimensions'.format(ndim)) raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
if st is None: if stride is None:
st = ds stride = ws
if padding is None: if pad is None:
padding = (0,) * ndim pad = (0,) * ndim
patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + padding[i] * 2 patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2
for i in xrange(ndim)) for i in xrange(ndim))
def compute_out(v, downsample, stride): def compute_out(v, downsample, stride):
...@@ -781,7 +869,7 @@ class PoolGrad(OpenMPOp): ...@@ -781,7 +869,7 @@ class PoolGrad(OpenMPOp):
else: else:
return max(0, (v - 1 - downsample) // stride + 1) + 1 return max(0, (v - 1 - downsample) // stride + 1) + 1
out_shape = [compute_out(patch_shape[i], ds[i], st[i]) for i in xrange(ndim)] out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]
rval = list(imgshape[:-ndim]) + out_shape rval = list(imgshape[:-ndim]) + out_shape
return rval return rval
...@@ -1506,7 +1594,7 @@ class DownsampleFactorMaxGradGrad(OpenMPOp): ...@@ -1506,7 +1594,7 @@ class DownsampleFactorMaxGradGrad(OpenMPOp):
if isinstance(ws, (tuple, list)): if isinstance(ws, (tuple, list)):
if any(pad[i] >= ws[i] for i in range(nd)): if any(pad[i] >= ws[i] for i in range(nd)):
raise NotImplementedError( raise NotImplementedError(
'padding_h and padding_w must be smaller than strides') 'padding must be smaller than strides')
ws = tensor.as_tensor_variable(ws) ws = tensor.as_tensor_variable(ws)
stride = tensor.as_tensor_variable(stride) stride = tensor.as_tensor_variable(stride)
pad = tensor.as_tensor_variable(pad) pad = tensor.as_tensor_variable(pad)
......
...@@ -30,7 +30,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -30,7 +30,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
assert Pool.out_shape((8, 6), (2, 2)) == [4, 3] assert Pool.out_shape((8, 6), (2, 2)) == [4, 3]
@staticmethod @staticmethod
def numpy_max_pool_2d(input, ds, ignore_border=False, mode='max'): def numpy_max_pool_2d(input, ws, ignore_border=False, mode='max'):
'''Helper function, implementing pool_2d in pure numpy''' '''Helper function, implementing pool_2d in pure numpy'''
if len(input.shape) < 2: if len(input.shape) < 2:
raise NotImplementedError('input should have at least 2 dim,' raise NotImplementedError('input should have at least 2 dim,'
...@@ -39,13 +39,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -39,13 +39,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
xi = 0 xi = 0
yi = 0 yi = 0
if not ignore_border: if not ignore_border:
if input.shape[-2] % ds[0]: if input.shape[-2] % ws[0]:
xi += 1 xi += 1
if input.shape[-1] % ds[1]: if input.shape[-1] % ws[1]:
yi += 1 yi += 1
out_shp = list(input.shape[:-2]) out_shp = list(input.shape[:-2])
out_shp.append(input.shape[-2] // ds[0] + xi) out_shp.append(input.shape[-2] // ws[0] + xi)
out_shp.append(input.shape[-1] // ds[1] + yi) out_shp.append(input.shape[-1] // ws[1] + yi)
output_val = numpy.zeros(out_shp) output_val = numpy.zeros(out_shp)
func = numpy.max func = numpy.max
if mode == 'sum': if mode == 'sum':
...@@ -55,29 +55,29 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -55,29 +55,29 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for k in numpy.ndindex(*input.shape[:-2]): for k in numpy.ndindex(*input.shape[:-2]):
for i in range(output_val.shape[-2]): for i in range(output_val.shape[-2]):
ii = i * ds[0] ii = i * ws[0]
for j in range(output_val.shape[-1]): for j in range(output_val.shape[-1]):
jj = j * ds[1] jj = j * ws[1]
patch = input[k][ii:ii + ds[0], jj:jj + ds[1]] patch = input[k][ii:ii + ws[0], jj:jj + ws[1]]
output_val[k][i, j] = func(patch) output_val[k][i, j] = func(patch)
return output_val return output_val
@staticmethod @staticmethod
def numpy_max_pool_nd(input, ds, ignore_border=False, mode='max'): def numpy_max_pool_nd(input, ws, ignore_border=False, mode='max'):
'''Helper function, implementing pool_nd in pure numpy''' '''Helper function, implementing pool_nd in pure numpy'''
if len(input.shape) < len(ds): if len(input.shape) < len(ws):
raise NotImplementedError('input should have at least %s dim,' raise NotImplementedError('input should have at least %s dim,'
' shape is %s' ' shape is %s'
% (str(ds), str(input.shape))) % (str(ws), str(input.shape)))
nd = len(ds) nd = len(ws)
si = [0] * nd si = [0] * nd
if not ignore_border: if not ignore_border:
for i in range(nd): for i in range(nd):
if input.shape[-nd + i] % ds[i]: if input.shape[-nd + i] % ws[i]:
si[i] += 1 si[i] += 1
out_shp = list(input.shape[:-nd]) out_shp = list(input.shape[:-nd])
for i in range(nd): for i in range(nd):
out_shp.append(input.shape[-nd + i] // ds[i] + si[i]) out_shp.append(input.shape[-nd + i] // ws[i] + si[i])
output_val = numpy.zeros(out_shp) output_val = numpy.zeros(out_shp)
func = numpy.max func = numpy.max
if mode == 'sum': if mode == 'sum':
...@@ -87,21 +87,21 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -87,21 +87,21 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for l in numpy.ndindex(*input.shape[:-nd]): for l in numpy.ndindex(*input.shape[:-nd]):
for r in numpy.ndindex(*output_val.shape[-nd:]): for r in numpy.ndindex(*output_val.shape[-nd:]):
patch = input[l][tuple(slice(r[i] * ds[i], (r[i] + 1) * ds[i]) patch = input[l][tuple(slice(r[i] * ws[i], (r[i] + 1) * ws[i])
for i in range(nd))] for i in range(nd))]
output_val[l][r] = func(patch) output_val[l][r] = func(patch)
return output_val return output_val
@staticmethod @staticmethod
def numpy_max_pool_2d_stride_padding( def numpy_max_pool_2d_stride_pad(
x, ds, ignore_border=True, st=None, padding=(0, 0), mode='max'): x, ws, ignore_border=True, stride=None, pad=(0, 0), mode='max'):
assert ignore_border assert ignore_border
pad_h = padding[0] pad_h = pad[0]
pad_w = padding[1] pad_w = pad[1]
h = x.shape[-2] h = x.shape[-2]
w = x.shape[-1] w = x.shape[-1]
assert ds[0] > pad_h assert ws[0] > pad_h
assert ds[1] > pad_w assert ws[1] > pad_w
def pad_img(x): def pad_img(x):
y = numpy.zeros( y = numpy.zeros(
...@@ -113,13 +113,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -113,13 +113,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
return y return y
img_rows = h + 2 * pad_h img_rows = h + 2 * pad_h
img_cols = w + 2 * pad_w img_cols = w + 2 * pad_w
out_r = (img_rows - ds[0]) // st[0] + 1 out_r = (img_rows - ws[0]) // stride[0] + 1
out_c = (img_cols - ds[1]) // st[1] + 1 out_c = (img_cols - ws[1]) // stride[1] + 1
out_shp = list(x.shape[:-2]) out_shp = list(x.shape[:-2])
out_shp.append(out_r) out_shp.append(out_r)
out_shp.append(out_c) out_shp.append(out_c)
ds0, ds1 = ds ws0, ws1 = ws
st0, st1 = st stride0, stride1 = stride
output_val = numpy.zeros(out_shp) output_val = numpy.zeros(out_shp)
y = pad_img(x) y = pad_img(x)
func = numpy.max func = numpy.max
...@@ -131,42 +131,42 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -131,42 +131,42 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for k in numpy.ndindex(*x.shape[:-2]): for k in numpy.ndindex(*x.shape[:-2]):
for i in range(output_val.shape[-2]): for i in range(output_val.shape[-2]):
ii_st = i * st[0] ii_stride = i * stride[0]
ii_end = builtins.min(ii_st + ds[0], img_rows) ii_end = builtins.min(ii_stride + ws[0], img_rows)
if not inc_pad: if not inc_pad:
ii_st = builtins.max(ii_st, pad_h) ii_stride = builtins.max(ii_stride, pad_h)
ii_end = builtins.min(ii_end, h + pad_h) ii_end = builtins.min(ii_end, h + pad_h)
for j in range(output_val.shape[-1]): for j in range(output_val.shape[-1]):
jj_st = j * st[1] jj_stride = j * stride[1]
jj_end = builtins.min(jj_st + ds[1], img_cols) jj_end = builtins.min(jj_stride + ws[1], img_cols)
if not inc_pad: if not inc_pad:
jj_st = builtins.max(jj_st, pad_w) jj_stride = builtins.max(jj_stride, pad_w)
jj_end = builtins.min(jj_end, w + pad_w) jj_end = builtins.min(jj_end, w + pad_w)
patch = y[k][ii_st:ii_end, jj_st:jj_end] patch = y[k][ii_stride:ii_end, jj_stride:jj_end]
output_val[k][i, j] = func(patch) output_val[k][i, j] = func(patch)
return output_val return output_val
@staticmethod @staticmethod
def numpy_max_pool_nd_stride_padding( def numpy_max_pool_nd_stride_pad(
input, ds, ignore_border=True, st=None, padding=None, mode='max'): input, ws, ignore_border=True, stride=None, pad=None, mode='max'):
assert ignore_border assert ignore_border
nd = len(ds) nd = len(ws)
if padding is None: if pad is None:
padding = (0,) * nd pad = (0,) * nd
if st is None: if stride is None:
st = (0,) * nd stride = (0,) * nd
assert len(padding) == len(ds) == len(st) assert len(pad) == len(ws) == len(stride)
assert all(ds[i] > padding[i] for i in range(nd)) assert all(ws[i] > pad[i] for i in range(nd))
def pad_img(x): def pad_img(x):
# initialize padded input # initialize padded input
y = numpy.zeros( y = numpy.zeros(
x.shape[0:-nd] + x.shape[0:-nd] +
tuple(x.shape[-nd + i] + padding[i] * 2 for i in range(nd)), tuple(x.shape[-nd + i] + pad[i] * 2 for i in range(nd)),
dtype=x.dtype) dtype=x.dtype)
# place the unpadded input in the center # place the unpadded input in the center
block = ((slice(None),) * (len(x.shape) - nd) + block = ((slice(None),) * (len(x.shape) - nd) +
tuple(slice(padding[i], x.shape[-nd + i] + padding[i]) tuple(slice(pad[i], x.shape[-nd + i] + pad[i])
for i in range(nd))) for i in range(nd)))
y[block] = x y[block] = x
return y return y
...@@ -174,9 +174,9 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -174,9 +174,9 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
pad_img_shp = list(input.shape[:-nd]) pad_img_shp = list(input.shape[:-nd])
out_shp = list(input.shape[:-nd]) out_shp = list(input.shape[:-nd])
for i in range(nd): for i in range(nd):
padded_size = input.shape[-nd + i] + 2 * padding[i] padded_size = input.shape[-nd + i] + 2 * pad[i]
pad_img_shp.append(padded_size) pad_img_shp.append(padded_size)
out_shp.append((padded_size - ds[i]) // st[i] + 1) out_shp.append((padded_size - ws[i]) // stride[i] + 1)
output_val = numpy.zeros(out_shp) output_val = numpy.zeros(out_shp)
padded_input = pad_img(input) padded_input = pad_img(input)
func = numpy.max func = numpy.max
...@@ -190,51 +190,51 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -190,51 +190,51 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for r in numpy.ndindex(*output_val.shape[-nd:]): for r in numpy.ndindex(*output_val.shape[-nd:]):
region = [] region = []
for i in range(nd): for i in range(nd):
r_st = r[i] * st[i] r_stride = r[i] * stride[i]
r_end = builtins.min(r_st + ds[i], pad_img_shp[-nd + i]) r_end = builtins.min(r_stride + ws[i], pad_img_shp[-nd + i])
if not inc_pad: if not inc_pad:
r_st = builtins.max(r_st, padding[i]) r_stride = builtins.max(r_stride, pad[i])
r_end = builtins.min(r_end, input.shape[-nd + i] + padding[i]) r_end = builtins.min(r_end, input.shape[-nd + i] + pad[i])
region.append(slice(r_st, r_end)) region.append(slice(r_stride, r_end))
patch = padded_input[l][region] patch = padded_input[l][region]
output_val[l][r] = func(patch) output_val[l][r] = func(patch)
return output_val return output_val
@staticmethod @staticmethod
def numpy_max_pool_2d_stride(input, ds, ignore_border=False, st=None, def numpy_max_pool_2d_stride(input, ws, ignore_border=False, stride=None,
mode='max'): mode='max'):
'''Helper function, implementing pool_2d in pure numpy '''Helper function, implementing pool_2d in pure numpy
this function provides st input to indicate the stride size this function provides stride input to indicate the stride size
for the pooling regions. if not indicated, st == ds.' for the pooling regions. if not indicated, stride == ws.'
if len(input.shape) < 2: if len(input.shape) < 2:
raise NotImplementedError('input should have at least 2 dim,' raise NotImplementedError('input should have at least 2 dim,'
' shape is %s' ' shape is %s'
% str(input.shape)) % str(input.shape))
if st is None: if stride is None:
st = ds stride = ws
img_rows = input.shape[-2] img_rows = input.shape[-2]
img_cols = input.shape[-1] img_cols = input.shape[-1]
out_r = 0 out_r = 0
out_c = 0 out_c = 0
if img_rows - ds[0] >= 0: if img_rows - ws[0] >= 0:
out_r = (img_rows - ds[0]) // st[0] + 1 out_r = (img_rows - ws[0]) // stride[0] + 1
if img_cols - ds[1] >= 0: if img_cols - ws[1] >= 0:
out_c = (img_cols - ds[1]) // st[1] + 1 out_c = (img_cols - ws[1]) // stride[1] + 1
if not ignore_border: if not ignore_border:
if out_r > 0: if out_r > 0:
if img_rows - ((out_r - 1) * st[0] + ds[0]) > 0: if img_rows - ((out_r - 1) * stride[0] + ws[0]) > 0:
rr = img_rows - out_r * st[0] rr = img_rows - out_r * stride[0]
if rr > 0: if rr > 0:
out_r += 1 out_r += 1
else: else:
if img_rows > 0: if img_rows > 0:
out_r += 1 out_r += 1
if out_c > 0: if out_c > 0:
if img_cols - ((out_c - 1) * st[1] + ds[1]) > 0: if img_cols - ((out_c - 1) * stride[1] + ws[1]) > 0:
cr = img_cols - out_c * st[1] cr = img_cols - out_c * stride[1]
if cr > 0: if cr > 0:
out_c += 1 out_c += 1
else: else:
...@@ -254,35 +254,35 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -254,35 +254,35 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
output_val = numpy.zeros(out_shp) output_val = numpy.zeros(out_shp)
for k in numpy.ndindex(*input.shape[:-2]): for k in numpy.ndindex(*input.shape[:-2]):
for i in range(output_val.shape[-2]): for i in range(output_val.shape[-2]):
ii_st = i * st[0] ii_stride = i * stride[0]
ii_end = builtins.min(ii_st + ds[0], img_rows) ii_end = builtins.min(ii_stride + ws[0], img_rows)
for j in range(output_val.shape[-1]): for j in range(output_val.shape[-1]):
jj_st = j * st[1] jj_stride = j * stride[1]
jj_end = builtins.min(jj_st + ds[1], img_cols) jj_end = builtins.min(jj_stride + ws[1], img_cols)
patch = input[k][ii_st:ii_end, jj_st:jj_end] patch = input[k][ii_stride:ii_end, jj_stride:jj_end]
output_val[k][i, j] = func(patch) output_val[k][i, j] = func(patch)
return output_val return output_val
@staticmethod @staticmethod
def numpy_max_pool_nd_stride(input, ds, ignore_border=False, st=None, def numpy_max_pool_nd_stride(input, ws, ignore_border=False, stride=None,
mode='max'): mode='max'):
'''Helper function, implementing pooling in pure numpy '''Helper function, implementing pooling in pure numpy
this function provides st input to indicate the stride size this function provides stride input to indicate the stride size
for the pooling regions. if not indicated, st == ds.' for the pooling regions. if not indicated, stride == ws.'
nd = len(ds) nd = len(ws)
if st is None: if stride is None:
st = ds stride = ws
assert len(st) == len(ds) assert len(stride) == len(ws)
out_shp = list(input.shape[:-nd]) out_shp = list(input.shape[:-nd])
for i in range(nd): for i in range(nd):
out = 0 out = 0
if input.shape[-nd + i] - ds[i] >= 0: if input.shape[-nd + i] - ws[i] >= 0:
out = (input.shape[-nd + i] - ds[i]) // st[i] + 1 out = (input.shape[-nd + i] - ws[i]) // stride[i] + 1
if not ignore_border: if not ignore_border:
if out > 0: if out > 0:
if input.shape[-nd + i] - ((out - 1) * st[i] + ds[i]) > 0: if input.shape[-nd + i] - ((out - 1) * stride[i] + ws[i]) > 0:
if input.shape[-nd + i] - out * st[i] > 0: if input.shape[-nd + i] - out * stride[i] > 0:
out += 1 out += 1
else: else:
if input.shape[-nd + i] > 0: if input.shape[-nd + i] > 0:
...@@ -300,9 +300,9 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -300,9 +300,9 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for r in numpy.ndindex(*output_val.shape[-nd:]): for r in numpy.ndindex(*output_val.shape[-nd:]):
region = [] region = []
for i in range(nd): for i in range(nd):
r_st = r[i] * st[i] r_stride = r[i] * stride[i]
r_end = builtins.min(r_st + ds[i], input.shape[-nd + i]) r_end = builtins.min(r_stride + ws[i], input.shape[-nd + i])
region.append(slice(r_st, r_end)) region.append(slice(r_stride, r_end))
patch = input[l][region] patch = input[l][region]
output_val[l][r] = func(patch) output_val[l][r] = func(patch)
return output_val return output_val
...@@ -469,7 +469,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -469,7 +469,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
def test_DownsampleFactorMaxPaddingStride(self): def test_DownsampleFactorMaxPaddingStride(self):
ignore_border = True # padding does not support ignore_border=False ignore_border = True # padding does not support ignore_border=False
rng = numpy.random.RandomState(utt.fetch_seed()) rng = numpy.random.RandomState(utt.fetch_seed())
# maxpool, stride, padding, input sizes # maxpool, stride, pad, input sizes
examples = ( examples = (
((3,), (2,), (2,), (5,)), ((3,), (2,), (2,), (5,)),
((3,), (2,), (2,), (4, 5)), ((3,), (2,), (2,), (4, 5)),
...@@ -486,25 +486,25 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -486,25 +486,25 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for example, mode in product(examples, for example, mode in product(examples,
['max', 'sum', 'average_inc_pad', ['max', 'sum', 'average_inc_pad',
'average_exc_pad']): 'average_exc_pad']):
(maxpoolshp, stridesize, paddingsize, inputsize) = example (maxpoolshp, stridesize, padsize, inputsize) = example
imval = rng.rand(*inputsize) - 0.5 imval = rng.rand(*inputsize) - 0.5
images = theano.shared(imval) images = theano.shared(imval)
numpy_output_val = self.numpy_max_pool_nd_stride_padding( numpy_output_val = self.numpy_max_pool_nd_stride_pad(
imval, maxpoolshp, ignore_border, imval, maxpoolshp, ignore_border,
stridesize, paddingsize, mode) stridesize, padsize, mode)
maxpool_op = Pool( maxpool_op = Pool(
ndim=len(maxpoolshp), ndim=len(maxpoolshp),
ignore_border=ignore_border, ignore_border=ignore_border,
mode=mode mode=mode
)(images, maxpoolshp, stridesize, paddingsize) )(images, maxpoolshp, stridesize, padsize)
f = function([], maxpool_op) f = function([], maxpool_op)
output_val = f() output_val = f()
utt.assert_allclose(output_val, numpy_output_val) utt.assert_allclose(output_val, numpy_output_val)
def test_DownsampleFactorMaxPaddingStride_grad(self): def test_DownsampleFactorMaxPaddingStride_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed()) rng = numpy.random.RandomState(utt.fetch_seed())
# maxpool, stride, padding, input sizes # maxpool, stride, pad, input sizes
examples = ( examples = (
((10,), (5,), (3,), (2,)), ((10,), (5,), (3,), (2,)),
((10,), (5,), (3,), (2, 2)), ((10,), (5,), (3,), (2, 2)),
...@@ -518,7 +518,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -518,7 +518,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
# support grad with padding # support grad with padding
for mode in ['max', 'sum']: for mode in ['max', 'sum']:
for example in examples: for example in examples:
(maxpoolshp, stridesize, paddingsize, inputsize) = example (maxpoolshp, stridesize, padsize, inputsize) = example
imval = rng.rand(*inputsize) * 10.0 imval = rng.rand(*inputsize) * 10.0
def mp(input): def mp(input):
...@@ -526,7 +526,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -526,7 +526,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
ndim=len(maxpoolshp), ndim=len(maxpoolshp),
ignore_border=True, ignore_border=True,
mode=mode, mode=mode,
)(input, maxpoolshp, stridesize, paddingsize) )(input, maxpoolshp, stridesize, padsize)
utt.verify_grad(mp, [imval], rng=rng) utt.verify_grad(mp, [imval], rng=rng)
def test_DownsampleFactorMax_grad(self): def test_DownsampleFactorMax_grad(self):
...@@ -562,7 +562,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -562,7 +562,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
utt.verify_grad(mp, [imval], rng=rng) utt.verify_grad(mp, [imval], rng=rng)
# pool, stride, input sizes # pool, stride, input sizes
pool_grad_st_examples = ( pool_grad_stride_examples = (
((1,), (1,), (16,)), ((1,), (1,), (16,)),
((1,), (3,), (1, 16)), ((1,), (3,), (1, 16)),
((1,), (5,), (1, 2, 16)), ((1,), (5,), (1, 2, 16)),
...@@ -590,14 +590,14 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -590,14 +590,14 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
((9, 9), (1, 1), (1, 2, 8, 5)), ((9, 9), (1, 1), (1, 2, 8, 5)),
) )
@parameterized.expand(product(pool_grad_st_examples, @parameterized.expand(product(pool_grad_stride_examples,
[True, False], [True, False],
['max', ['max',
'sum', 'sum',
'average_inc_pad', 'average_inc_pad',
'average_exc_pad']), 'average_exc_pad']),
testcase_func_name=utt.custom_name_func) testcase_func_name=utt.custom_name_func)
def test_DownsampleFactorMax_grad_st(self, example, ignore_border, mode): def test_DownsampleFactorMax_grad_stride(self, example, ignore_border, mode):
# checks the gradient for the case that stride is used # checks the gradient for the case that stride is used
rng = numpy.random.RandomState(utt.fetch_seed()) rng = numpy.random.RandomState(utt.fetch_seed())
...@@ -685,10 +685,10 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -685,10 +685,10 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
utt.verify_grad(mp, [imval, grad_val], rng=rng) utt.verify_grad(mp, [imval, grad_val], rng=rng)
@parameterized.expand(product(pool_grad_st_examples, @parameterized.expand(product(pool_grad_stride_examples,
[True, False]), [True, False]),
testcase_func_name=utt.custom_name_func) testcase_func_name=utt.custom_name_func)
def test_DownsampleFactorMaxGrad_grad_st(self, example, ignore_border): def test_DownsampleFactorMaxGrad_grad_stride(self, example, ignore_border):
# checks the gradient of the gradient for # checks the gradient of the gradient for
# the case that stride is used # the case that stride is used
rng = numpy.random.RandomState(utt.fetch_seed()) rng = numpy.random.RandomState(utt.fetch_seed())
...@@ -696,7 +696,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -696,7 +696,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
imval = rng.rand(*inputsize) imval = rng.rand(*inputsize)
grad_shape = Pool.out_shape( grad_shape = Pool.out_shape(
imval.shape, maxpoolshp, ndim=len(maxpoolshp), imval.shape, maxpoolshp, ndim=len(maxpoolshp),
ignore_border=ignore_border, st=stride) ignore_border=ignore_border, stride=stride)
# skip the grad verification when the output is empty # skip the grad verification when the output is empty
if numpy.prod(grad_shape) != 0: if numpy.prod(grad_shape) != 0:
...@@ -713,13 +713,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -713,13 +713,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
utt.verify_grad(mp, [imval, grad_val], rng=rng) utt.verify_grad(mp, [imval, grad_val], rng=rng)
@parameterized.expand(product(pool_grad_st_examples, @parameterized.expand(product(pool_grad_stride_examples,
[True, False], [True, False],
['sum', ['sum',
'average_inc_pad', 'average_inc_pad',
'average_exc_pad']), 'average_exc_pad']),
testcase_func_name=utt.custom_name_func) testcase_func_name=utt.custom_name_func)
def test_AveragePoolGrad_grad_st(self, example, ignore_border, mode): def test_AveragePoolGrad_grad_stride(self, example, ignore_border, mode):
# checks the gradient of the gradient for # checks the gradient of the gradient for
# the case that stride is used # the case that stride is used
rng = numpy.random.RandomState(utt.fetch_seed()) rng = numpy.random.RandomState(utt.fetch_seed())
...@@ -728,7 +728,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -728,7 +728,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
grad_shape = Pool.out_shape( grad_shape = Pool.out_shape(
imval.shape, avgpoolshp, imval.shape, avgpoolshp,
ndim=len(avgpoolshp), ndim=len(avgpoolshp),
ignore_border=ignore_border, st=stride) ignore_border=ignore_border, stride=stride)
# skip the grad verification when the output is empty # skip the grad verification when the output is empty
if numpy.prod(grad_shape) != 0: if numpy.prod(grad_shape) != 0:
...@@ -745,7 +745,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -745,7 +745,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
def test_DownsampleFactorMaxPaddingStride_grad_grad(self): def test_DownsampleFactorMaxPaddingStride_grad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed()) rng = numpy.random.RandomState(utt.fetch_seed())
# maxpool, stride, padding, input sizes # maxpool, stride, pad, input sizes
examples = ( examples = (
((3,), (2,), (2,), (10,)), ((3,), (2,), (2,), (10,)),
((3,), (2,), (2,), (2, 10,)), ((3,), (2,), (2,), (2, 10,)),
...@@ -759,30 +759,30 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -759,30 +759,30 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
((3, 3, 5), (2, 2, 3), (2, 2, 1), (1, 1, 5, 5, 10)), ((3, 3, 5), (2, 2, 3), (2, 2, 1), (1, 1, 5, 5, 10)),
) )
for (maxpoolshp, stridesize, paddingsize, inputsize) in examples: for (maxpoolshp, stridesize, padsize, inputsize) in examples:
imval = rng.rand(*inputsize) * 10.0 imval = rng.rand(*inputsize) * 10.0
grad_shape = Pool.out_shape(imval.shape, grad_shape = Pool.out_shape(imval.shape,
maxpoolshp, maxpoolshp,
ndim=len(maxpoolshp), ndim=len(maxpoolshp),
st=stridesize, stride=stridesize,
ignore_border=True, ignore_border=True,
padding=paddingsize) pad=padsize)
grad_val = rng.rand(*grad_shape) * 10.0 grad_val = rng.rand(*grad_shape) * 10.0
def mp(input, grad): def mp(input, grad):
out = Pool( out = Pool(
ndim=len(maxpoolshp), ndim=len(maxpoolshp),
ignore_border=True, ignore_border=True,
)(input, maxpoolshp, stridesize, paddingsize) )(input, maxpoolshp, stridesize, padsize)
grad_op = MaxPoolGrad(ndim=len(maxpoolshp), grad_op = MaxPoolGrad(ndim=len(maxpoolshp),
ignore_border=True) ignore_border=True)
return grad_op(input, out, grad, maxpoolshp, stridesize, paddingsize) return grad_op(input, out, grad, maxpoolshp, stridesize, padsize)
utt.verify_grad(mp, [imval, grad_val], rng=rng) utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_AveragePoolPaddingStride_grad_grad(self): def test_AveragePoolPaddingStride_grad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed()) rng = numpy.random.RandomState(utt.fetch_seed())
# avgpool, stride, padding, input sizes # avgpool, stride, pad, input sizes
examples = ( examples = (
((3,), (2,), (2,), (10,)), ((3,), (2,), (2,), (10,)),
((3,), (2,), (2,), (2, 10,)), ((3,), (2,), (2,), (2, 10,)),
...@@ -796,7 +796,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -796,7 +796,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
((3, 3, 5), (2, 2, 3), (2, 2, 1), (1, 1, 5, 5, 10)), ((3, 3, 5), (2, 2, 3), (2, 2, 1), (1, 1, 5, 5, 10)),
) )
for (avgpoolshp, stridesize, paddingsize, inputsize) in examples: for (avgpoolshp, stridesize, padsize, inputsize) in examples:
imval = rng.rand(*inputsize) * 10.0 imval = rng.rand(*inputsize) * 10.0
# 'average_exc_pad' with non-zero padding is not implemented # 'average_exc_pad' with non-zero padding is not implemented
...@@ -804,16 +804,16 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -804,16 +804,16 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
grad_shape = Pool.out_shape(imval.shape, grad_shape = Pool.out_shape(imval.shape,
avgpoolshp, avgpoolshp,
ndim=len(avgpoolshp), ndim=len(avgpoolshp),
st=stridesize, stride=stridesize,
ignore_border=True, ignore_border=True,
padding=paddingsize) pad=padsize)
grad_val = rng.rand(*grad_shape) * 10.0 grad_val = rng.rand(*grad_shape) * 10.0
def mp(input, grad): def mp(input, grad):
grad_op = AveragePoolGrad(ndim=len(avgpoolshp), grad_op = AveragePoolGrad(ndim=len(avgpoolshp),
ignore_border=True, ignore_border=True,
mode=mode) mode=mode)
return grad_op(input, grad, avgpoolshp, stridesize, paddingsize) return grad_op(input, grad, avgpoolshp, stridesize, padsize)
utt.verify_grad(mp, [imval, grad_val], rng=rng) utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_DownsampleFactorMax_hessian(self): def test_DownsampleFactorMax_hessian(self):
...@@ -822,7 +822,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -822,7 +822,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
x_vec = tensor.vector('x') x_vec = tensor.vector('x')
z = tensor.dot(x_vec.dimshuffle(0, 'x'), z = tensor.dot(x_vec.dimshuffle(0, 'x'),
x_vec.dimshuffle('x', 0)) x_vec.dimshuffle('x', 0))
y = pool_2d(input=z, ds=(2, 2), ignore_border=True) y = pool_2d(input=z, ws=(2, 2), ignore_border=True)
C = tensor.exp(tensor.sum(y)) C = tensor.exp(tensor.sum(y))
grad_hess = tensor.hessian(cost=C, wrt=x_vec) grad_hess = tensor.hessian(cost=C, wrt=x_vec)
...@@ -835,7 +835,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -835,7 +835,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
def test_DownsampleFactorMaxGradGrad_grad(self): def test_DownsampleFactorMaxGradGrad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed()) rng = numpy.random.RandomState(utt.fetch_seed())
# maxpool, stride, padding, input sizes # maxpool, stride, pad, input sizes
examples = ( examples = (
((3,), (2,), (2,), (10,)), ((3,), (2,), (2,), (10,)),
((3,), (2,), (2,), (2, 10,)), ((3,), (2,), (2,), (2, 10,)),
...@@ -849,17 +849,17 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -849,17 +849,17 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
((3, 3, 5), (2, 2, 3), (2, 2, 1), (1, 1, 5, 5, 10)), ((3, 3, 5), (2, 2, 3), (2, 2, 1), (1, 1, 5, 5, 10)),
) )
for (maxpoolshp, stridesize, paddingsize, inputsize) in examples: for (maxpoolshp, stridesize, padsize, inputsize) in examples:
imval1 = rng.rand(*inputsize) * 10.0 imval1 = rng.rand(*inputsize) * 10.0
imval2 = rng.rand(*inputsize) * 10.0 imval2 = rng.rand(*inputsize) * 10.0
def mp(input1, input2): def mp(input1, input2):
op1 = Pool(ndim=len(maxpoolshp), ignore_border=True) op1 = Pool(ndim=len(maxpoolshp), ignore_border=True)
pooled_out = op1(input1, maxpoolshp, stridesize, paddingsize) pooled_out = op1(input1, maxpoolshp, stridesize, padsize)
op2 = DownsampleFactorMaxGradGrad( op2 = DownsampleFactorMaxGradGrad(
ndim=len(maxpoolshp), ndim=len(maxpoolshp),
ignore_border=True) ignore_border=True)
out = op2(input1, pooled_out, input2, maxpoolshp, stridesize, paddingsize) out = op2(input1, pooled_out, input2, maxpoolshp, stridesize, padsize)
return out return out
utt.verify_grad(mp, [imval1, imval2], rng=rng) utt.verify_grad(mp, [imval1, imval2], rng=rng)
...@@ -1015,13 +1015,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -1015,13 +1015,13 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for i, maxpoolshp in enumerate(maxpoolshps): for i, maxpoolshp in enumerate(maxpoolshps):
for j, ignore_border in enumerate([True, False]): for j, ignore_border in enumerate([True, False]):
for k, padding in enumerate([(0, 0), (1, 1), (1, 2)]): for k, pad in enumerate([(0, 0), (1, 1), (1, 2)]):
if out_shapes[k][i][j] is None: if out_shapes[k][i][j] is None:
continue continue
# checking shapes generated by Pool # checking shapes generated by Pool
self._compile_and_check([image], self._compile_and_check([image],
[Pool(ignore_border=ignore_border) [Pool(ignore_border=ignore_border)
(image, maxpoolshp, pad=padding)], (image, maxpoolshp, pad=pad)],
[image_val], Pool) [image_val], Pool)
# checking shapes generated by MaxPoolGrad # checking shapes generated by MaxPoolGrad
...@@ -1031,7 +1031,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -1031,7 +1031,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
[MaxPoolGrad( [MaxPoolGrad(
ignore_border=ignore_border) ignore_border=ignore_border)
(image, maxout, gz, maxpoolshp, (image, maxout, gz, maxpoolshp,
pad=padding)], pad=pad)],
[image_val, maxout_val, gz_val], [image_val, maxout_val, gz_val],
MaxPoolGrad, MaxPoolGrad,
warn=False) warn=False)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论