Commit 8d8f527f authored by vdumoulin

Merge pull request #3621 from shabanian/tensor_signal_pep8

pep8 tensor signal list directory
......@@ -3,8 +3,6 @@ Contains a wrapper function for tensor.nnet.ConvOp, which can be used to perform
generic 2D convolution.
"""
__docformat__ = "restructuredtext en"
import warnings
import theano
......@@ -12,6 +10,10 @@ import theano.tensor as tensor
from theano.tensor.nnet import conv
import logging
__docformat__ = "restructuredtext en"
_logger = logging.getLogger("theano.tensor.signal.conv")
......@@ -52,7 +54,7 @@ def conv2d(input, filters, image_shape=None, filter_shape=None,
assert input.ndim in (2, 3)
assert filters.ndim in (2, 3)
### use shape information if it is given to us ###
# use shape information if it is given to us ###
if filter_shape and image_shape:
if input.ndim == 3:
bsize = image_shape[0]
......@@ -69,7 +71,7 @@ def conv2d(input, filters, image_shape=None, filter_shape=None,
nkern, kshp = None, None
bsize, imshp = None, None
### reshape tensors to 4D, for compatibility with ConvOp ###
# reshape tensors to 4D, for compatibility with ConvOp ###
if input.ndim == 3:
sym_bsize = input.shape[0]
else:
......@@ -86,10 +88,10 @@ def conv2d(input, filters, image_shape=None, filter_shape=None,
new_filter_shape = tensor.join(0, tensor.stack([sym_nkern, 1]), filters.shape[-2:])
filters4D = tensor.reshape(filters, new_filter_shape, ndim=4)
### perform actual convolution ###
# perform actual convolution ###
op = conv.ConvOp(output_mode=border_mode,
dx=subsample[0], dy=subsample[1],
imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize, **kargs)
dx=subsample[0], dy=subsample[1],
imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize, **kargs)
output = op(input4D, filters4D)
......
"""
"""
Ops for downsampling images.
Planned:
......@@ -18,6 +18,7 @@ from theano import gof, Op, tensor, Variable, Apply
from theano.tensor.opt import register_canonicalize
def max_pool2D(*args, **kwargs):
import sys
print("DEPRECATION: max_pool2D renamed to max_pool_2d", file=sys.stderr)
......@@ -206,8 +207,8 @@ class DownsampleFactorMax(Op):
if isinstance(r, theano.Variable):
nr = tensor.switch(tensor.ge(st[0], ds[0]),
(r - 1) // st[0] + 1,
tensor.maximum(0, (r - 1 - ds[0])
// st[0] + 1) + 1)
tensor.maximum(0, (r - 1 - ds[0]) //
st[0] + 1) + 1)
elif st[0] >= ds[0]:
nr = (r - 1) // st[0] + 1
else:
......@@ -216,8 +217,8 @@ class DownsampleFactorMax(Op):
if isinstance(c, theano.Variable):
nc = tensor.switch(tensor.ge(st[1], ds[1]),
(c - 1) // st[1] + 1,
tensor.maximum(0, (c - 1 - ds[1])
// st[1] + 1) + 1)
tensor.maximum(0, (c - 1 - ds[1]) //
st[1] + 1) + 1)
elif st[1] >= ds[1]:
nc = (c - 1) // st[1] + 1
else:
......@@ -289,7 +290,7 @@ class DownsampleFactorMax(Op):
y = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype)
y[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)] = x
y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
else:
y = x
func = numpy.max
......@@ -312,7 +313,7 @@ class DownsampleFactorMax(Op):
if not inc_pad:
col_st = builtins.max(col_st, self.padding[1])
col_end = builtins.min(col_end,
x.shape[-1] + pad_w)
x.shape[-1] + pad_w)
zz[n, k, r, c] = func(y[
n, k, row_st:row_end, col_st:col_end])
......@@ -336,6 +337,7 @@ class DownsampleFactorMax(Op):
st=self.st, padding=self.padding,
mode=self.mode)(
x, gz)]
def c_headers(self):
    """Return the C header files needed by this Op's generated C code."""
    # <algorithm> is presumably needed for std::max/std::min in the
    # pooling C implementation — confirm against c_code().
    required_headers = ['<algorithm>']
    return required_headers
......@@ -522,6 +524,7 @@ class DownsampleFactorMax(Op):
def c_code_cache_version(self):
    """Version tag for Theano's C-code cache.

    Bumping this tuple invalidates previously compiled C code for this
    Op and forces recompilation.
    """
    version = (0, 6, 8, 3)
    return version
class PoolGrad(Op):
__props__ = ('ds', 'ignore_border', 'st', 'padding', 'mode')
......@@ -582,8 +585,8 @@ class PoolGrad(Op):
if isinstance(r, theano.Variable):
nr = tensor.switch(tensor.ge(st[0], ds[0]),
(r - 1) // st[0] + 1,
tensor.maximum(0, (r - 1 - ds[0])
// st[0] + 1) + 1)
tensor.maximum(0, (r - 1 - ds[0]) //
st[0] + 1) + 1)
elif st[0] >= ds[0]:
nr = (r - 1) // st[0] + 1
else:
......@@ -592,8 +595,8 @@ class PoolGrad(Op):
if isinstance(c, theano.Variable):
nc = tensor.switch(tensor.ge(st[1], ds[1]),
(c - 1) // st[1] + 1,
tensor.maximum(0, (c - 1 - ds[1])
// st[1] + 1) + 1)
tensor.maximum(0, (c - 1 - ds[1]) //
st[1] + 1) + 1)
elif st[1] >= ds[1]:
nc = (c - 1) // st[1] + 1
else:
......@@ -656,7 +659,7 @@ class MaxPoolGrad(PoolGrad):
y = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype)
y[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)] = x
y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
else:
y = x
gx = numpy.zeros_like(y)
......@@ -673,7 +676,7 @@ class MaxPoolGrad(PoolGrad):
if (maxout[n, k, r, c] == y[n, k, row_ind, col_ind]):
gx[n, k, row_ind, col_ind] += gz[n, k, r, c]
# unpad the image
gx = gx[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)]
gx = gx[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)]
gx_stg[0] = gx
def grad(self, inp, grads):
......@@ -804,6 +807,7 @@ class MaxPoolGrad(PoolGrad):
DownsampleFactorMaxGrad = MaxPoolGrad
class AveragePoolGrad(PoolGrad):
def __init__(self, ds, ignore_border, st=None, padding=(0, 0), mode='average_inc_pad'):
......@@ -848,7 +852,7 @@ class AveragePoolGrad(PoolGrad):
y = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype)
y[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)] = x
y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
else:
y = x
gx = numpy.zeros_like(y)
......@@ -865,16 +869,16 @@ class AveragePoolGrad(PoolGrad):
col_st = c * st1
else:
col_st = builtins.max(c * st1,
self.padding[1])
self.padding[1])
col_end = builtins.min(col_st + ds1, img_cols)
if sum_mode:
val = gz[n, k, r, c]
val = gz[n, k, r, c]
else:
val = gz[n, k, r, c] / ((row_end - row_st) *
(col_end - col_st))
val = gz[n, k, r, c] / ((row_end - row_st) *
(col_end - col_st))
gx[n, k, row_st:row_end, col_st:col_end] += val
# unpad the image
gx = gx[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)]
gx = gx[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)]
gx_stg[0] = gx
def grad(self, inp, grads):
......@@ -885,6 +889,7 @@ class AveragePoolGrad(PoolGrad):
self.ds, ignore_border=self.ignore_border,
st=self.st, padding=self.padding, mode=self.mode)(ggx)]
class DownsampleFactorMaxGradGrad(Op):
__props__ = ('ds', 'ignore_border', 'st', 'padding', 'mode')
......@@ -893,7 +898,7 @@ class DownsampleFactorMaxGradGrad(Op):
"""
Return the shape of the output from this op, for input of given
shape and flags.
Parameters
----------
imgshape : tuple, list, or similar of integer or scalar Theano variable
......@@ -946,8 +951,8 @@ class DownsampleFactorMaxGradGrad(Op):
if isinstance(r, theano.Variable):
nr = tensor.switch(tensor.ge(st[0], ds[0]),
(r - 1) // st[0] + 1,
tensor.maximum(0, (r - 1 - ds[0])
// st[0] + 1) + 1)
tensor.maximum(0, (r - 1 - ds[0]) //
st[0] + 1) + 1)
elif st[0] >= ds[0]:
nr = (r - 1) // st[0] + 1
else:
......@@ -956,8 +961,8 @@ class DownsampleFactorMaxGradGrad(Op):
if isinstance(c, theano.Variable):
nc = tensor.switch(tensor.ge(st[1], ds[1]),
(c - 1) // st[1] + 1,
tensor.maximum(0, (c - 1 - ds[1])
// st[1] + 1) + 1)
tensor.maximum(0, (c - 1 - ds[1]) //
st[1] + 1) + 1)
elif st[1] >= ds[1]:
nc = (c - 1) // st[1] + 1
else:
......@@ -966,7 +971,7 @@ class DownsampleFactorMaxGradGrad(Op):
rval = list(imgshape[:-2]) + [nr, nc]
return rval
def __init__(self, ds, ignore_border, st=None, padding=(0,0), mode='max'):
def __init__(self, ds, ignore_border, st=None, padding=(0, 0), mode='max'):
self.ds = tuple(ds)
if not all([isinstance(d, int) for d in ds]):
raise ValueError(
......@@ -1009,7 +1014,7 @@ class DownsampleFactorMaxGradGrad(Op):
self.st, self.padding)
if (z[0] is None) or (z[0].shape != z_shape):
z[0] = numpy.zeros(z_shape, dtype=x.dtype)
ggz = z[0] # grad wrt maxout_grad has the same shape as maxout
ggz = z[0] # grad wrt maxout_grad has the same shape as maxout
# number of pooling output rows
pr = ggz.shape[-2]
# number of pooling output cols
......@@ -1025,11 +1030,11 @@ class DownsampleFactorMaxGradGrad(Op):
y_padded = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype) + x.min() - 1
y_padded[:, :, pd0:(img_rows-pd0), pd1:(img_cols-pd1)] = x
y_padded[:, :, pd0:(img_rows - pd0), pd1:(img_cols - pd1)] = x
ggx_padded = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype)
ggx_padded[:, :, pd0:(img_rows-pd0), pd1:(img_cols-pd1)] = ggx
ggx_padded[:, :, pd0:(img_rows - pd0), pd1:(img_cols - pd1)] = ggx
else:
y_padded = x
......@@ -1054,7 +1059,7 @@ class DownsampleFactorMaxGradGrad(Op):
if self.mode != 'max':
raise theano.gof.utils.MethodNotDefined()
x, maxout, ggx = inp
z, = out # the grad of grad
z, = out # the grad of grad
fail = sub['fail']
ignore_border = int(self.ignore_border)
ds0, ds1 = self.ds
......@@ -1130,10 +1135,11 @@ class DownsampleFactorMaxGradGrad(Op):
}
}
}
"""%locals()
""" % locals()
def c_code_cache_version(self):
return (0,1)
return (0, 1)
@register_canonicalize('fast_compile')
@gof.local_optimizer([MaxPoolGrad])
......@@ -1141,7 +1147,7 @@ def local_average_pool_grad(node):
# To assure backward compatibility with
# DownsampleFactorMaxGrad
if (not isinstance(node.op, MaxPoolGrad) or node.op.mode not in
['sum','average_exc_pad', 'average_inc_pad']):
['sum', 'average_exc_pad', 'average_inc_pad']):
return False
return [AveragePoolGrad(ds=node.op.ds,
ignore_border=node.op.ignore_border,
......
......@@ -31,7 +31,7 @@ class TestSignalConv2D(unittest.TestCase):
if filter_dim != 3:
nkern = 1
############# THEANO IMPLEMENTATION ############
# THEANO IMPLEMENTATION ############
# we create a symbolic function so that verify_grad can work
def sym_conv2d(input, filters):
return conv.conv2d(input, filters)
......@@ -44,9 +44,8 @@ class TestSignalConv2D(unittest.TestCase):
filter_data = numpy.random.random(filter_shape)
theano_output = theano_conv(image_data, filter_data)
############# REFERENCE IMPLEMENTATION ############
out_shape2d = numpy.array(image_shape[-2:]) -\
numpy.array(filter_shape[-2:]) + 1
# REFERENCE IMPLEMENTATION ############
out_shape2d = numpy.array(image_shape[-2:]) - numpy.array(filter_shape[-2:]) + 1
ref_output = numpy.zeros(tuple(out_shape2d))
# reshape as 3D input tensors to make life easier
......@@ -76,7 +75,7 @@ class TestSignalConv2D(unittest.TestCase):
self.assertTrue(_allclose(theano_output4d[b, k, :, :],
output2d))
############# TEST GRADIENT ############
# TEST GRADIENT ############
if verify_grad:
utt.verify_grad(sym_conv2d, [image_data, filter_data])
......@@ -87,8 +86,8 @@ class TestSignalConv2D(unittest.TestCase):
signal.conv.conv2d can support inputs and filters of type
matrix or tensor3.
"""
if (not theano.tensor.nnet.conv.imported_scipy_signal and
theano.config.cxx == ""):
if(not theano.tensor.nnet.conv.imported_scipy_signal and
theano.config.cxx == ""):
raise SkipTest("conv2d tests need SciPy or a c++ compiler")
self.validate((1, 4, 5), (2, 2, 3), out_dim=4, verify_grad=True)
......
......@@ -63,9 +63,9 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
def pad_img(x):
y = numpy.zeros(
(x.shape[0], x.shape[1],
x.shape[2]+pad_h*2, x.shape[3]+pad_w*2),
x.shape[2] + pad_h * 2, x.shape[3] + pad_w * 2),
dtype=x.dtype)
y[:, :, pad_h:(x.shape[2]+pad_h), pad_w:(x.shape[3]+pad_w)] = x
y[:, :, pad_h:(x.shape[2] + pad_h), pad_w:(x.shape[3] + pad_w)] = x
return y
img_rows = h + 2 * pad_h
......@@ -78,7 +78,6 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
ds0, ds1 = ds
st0, st1 = st
output_val = numpy.zeros(out_shp)
tt = []
y = pad_img(x)
func = numpy.max
if mode == 'sum':
......@@ -117,8 +116,6 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
if st is None:
st = ds
xi = 0
yi = 0
img_rows = input.shape[-2]
img_cols = input.shape[-1]
......@@ -364,12 +361,12 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
imval = rng.rand(1, 2, 16, 16)
for maxpoolshp, ignore_border, mode, stride in product(maxpoolshps,
[True, False],
['max',
'sum',
'average_inc_pad',
'average_exc_pad'],
stridesizes):
[True, False],
['max',
'sum',
'average_inc_pad',
'average_exc_pad'],
stridesizes):
def mp(input):
return DownsampleFactorMax(maxpoolshp,
ignore_border=ignore_border,
......@@ -581,10 +578,12 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
stridesize = stridesizes[i]
paddingsize = paddingsizes[i]
grad_shape = DownsampleFactorMaxGradGrad.out_shape(
imval.shape, maxpoolsize, st=stridesize,
ignore_border=True, padding=paddingsize)
grad_shape = DownsampleFactorMaxGradGrad.out_shape(imval.shape,
maxpoolsize, st=stridesize,
ignore_border=True,
padding=paddingsize)
grad_val = rng.rand(*grad_shape) * 10.0
def mp(input, grad):
out = DownsampleFactorMax(
maxpoolsize, ignore_border=True,
......@@ -592,7 +591,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
padding=paddingsize,
)(input)
grad_op = MaxPoolGrad(maxpoolsize, ignore_border=True,
st=stridesize, padding=paddingsize)
st=stridesize, padding=paddingsize)
return grad_op(input, out, grad)
utt.verify_grad(mp, [imval, grad_val], rng=rng)
......@@ -610,16 +609,17 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
stridesize = stridesizes[i]
paddingsize = paddingsizes[i]
#'average_exc_pad' with non-zero padding is not implemented
# 'average_exc_pad' with non-zero padding is not implemented
for mode in ['sum', 'average_inc_pad']:
grad_shape = DownsampleFactorMax.out_shape(
imval.shape, avgpoolsize, st=stridesize,
ignore_border=True, padding=paddingsize)
grad_shape = DownsampleFactorMax.out_shape(imval.shape,
avgpoolsize, st=stridesize,
ignore_border=True, padding=paddingsize)
grad_val = rng.rand(*grad_shape) * 10.0
def mp(input, grad):
grad_op = AveragePoolGrad(avgpoolsize, ignore_border=True,
st=stridesize, padding=paddingsize,
mode=mode)
st=stridesize, padding=paddingsize,
mode=mode)
return grad_op(input, grad)
utt.verify_grad(mp, [imval, grad_val], rng=rng)
......@@ -637,7 +637,8 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
# The value has been manually computed from the theoretical gradient,
# and confirmed by the implementation.
assert numpy.allclose(fn_hess([1, 2]), [[0., 0.], [0., 982.7667]])
assert numpy.allclose(fn_hess( [1, 2]), [[0., 0.], [0., 982.7667]])
def test_max_pool_2d_2D(self):
rng = numpy.random.RandomState(utt.fetch_seed())
......@@ -683,9 +684,11 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
op_output = function([input], op)(test_input_array)
assert numpy.all(op_output == test_answer_array), (
"op_output is %s, test_answer_array is %s" % (
op_output, numpy_output_val
op_output, test_answer_array
)
)
def mp(input):
return max_pool_2d_same_size(input, patch_size)
utt.verify_grad(mp, [test_input_array], rng=rng)
......@@ -712,14 +715,6 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
assert numpy.all(output_val == numpy_output_val), (
"output_val is %s, numpy_output_val is %s"
% (output_val, numpy_output_val))
c = tensor.sum(output)
c_val = function([images], c)(imval)
g = tensor.grad(c, images)
g_val = function([images],
[g.shape,
tensor.min(g, axis=(0, 1, 2)),
tensor.max(g, axis=(0, 1, 2))]
)(imval)
# removed as already tested in test_max_pool_2d_2D
# This make test in debug mode too slow.
......@@ -780,14 +775,14 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
for i, maxpoolshp in enumerate(maxpoolshps):
for j, ignore_border in enumerate([True, False]):
for k, padding in enumerate([(0,0), (1,1), (1,2)]):
for k, padding in enumerate([(0, 0), (1, 1), (1, 2)]):
if out_shapes[k][i][j] is None:
continue
# checking shapes generated by DownsampleFactorMax
self._compile_and_check([image],
[DownsampleFactorMax(maxpoolshp,
ignore_border=ignore_border,
padding=padding)(image)],
ignore_border=ignore_border,
padding=padding)(image)],
[image_val], DownsampleFactorMax)
# checking shapes generated by MaxPoolGrad
......@@ -795,8 +790,8 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
gz_val = rng.rand(*out_shapes[k][i][j])
self._compile_and_check([image, maxout, gz],
[MaxPoolGrad(maxpoolshp,
ignore_border=ignore_border,
padding=padding)
ignore_border=ignore_border,
padding=padding)
(image, maxout, gz)],
[image_val, maxout_val, gz_val],
MaxPoolGrad,
......@@ -817,26 +812,26 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
maxout = theano.tensor.tensor4()
grad = theano.tensor.tensor4()
compilation_mode=theano.compile.get_default_mode().including(
compilation_mode = theano.compile.get_default_mode().including(
'local_average_pool_grad')
for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
f = theano.function([im, maxout, grad],
DownsampleFactorMaxGrad(ds=(3,3),
ignore_border=False,
mode=mode)(im, maxout, grad),
mode=compilation_mode)
DownsampleFactorMaxGrad(ds=(3, 3),
ignore_border=False,
mode=mode)(im, maxout, grad),
mode=compilation_mode)
if mode == 'max':
assert any(isinstance(n.op, MaxPoolGrad)
for n in f.maker.fgraph.toposort())
for n in f.maker.fgraph.toposort())
assert not any(isinstance(n.op, AveragePoolGrad)
for n in f.maker.fgraph.toposort())
for n in f.maker.fgraph.toposort())
else:
assert not any(isinstance(n.op, MaxPoolGrad)
for n in f.maker.fgraph.toposort())
for n in f.maker.fgraph.toposort())
assert any(isinstance(n.op, AveragePoolGrad)
for n in f.maker.fgraph.toposort())
for n in f.maker.fgraph.toposort())
# Run this module's unittest test cases when executed as a script.
if __name__ == '__main__':
    unittest.main()
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论