提交 b92c918d authored 作者: Iban Harlouchet's avatar Iban Harlouchet 提交者: Frederic

flake8 for theano/tensor/nnet/conv.py

上级 1fef9423
from __future__ import print_function
"""
Contains an Op for convolving input images with a set of filters. This was
developed especially for Convolutional Neural Networks.
......@@ -9,7 +8,7 @@ tensor.signal and tensor.signal.downsample.
See especially conv2d().
"""
__docformat__ = "restructuredtext en"
from __future__ import print_function
import logging
......@@ -17,12 +16,11 @@ import numpy
from six.moves import xrange
import theano
from theano import OpenMPOp
from theano.tensor import (as_tensor_variable, blas, get_scalar_constant_value,
patternbroadcast, NotScalarConstantError)
from theano import OpenMPOp, config
from theano.gof import Apply
imported_scipy_signal = False
try:
# TODO: move these back out to global scope when they no longer
# cause an atexit error
......@@ -32,6 +30,8 @@ try:
except ImportError:
pass
imported_scipy_signal = False
__docformat__ = "restructuredtext en"
_logger = logging.getLogger("theano.tensor.nnet.conv")
......@@ -103,7 +103,7 @@ def conv2d(input, filters, image_shape=None, filter_shape=None,
try:
image_shape[i] = get_scalar_constant_value(
as_tensor_variable(image_shape[i]))
except NotScalarConstantError as e:
except NotScalarConstantError:
raise NotScalarConstantError(
"The convolution need that the shape"
" information are constant values. We got"
......@@ -118,7 +118,7 @@ def conv2d(input, filters, image_shape=None, filter_shape=None,
try:
filter_shape[i] = get_scalar_constant_value(
as_tensor_variable(filter_shape[i]))
except NotScalarConstantError as e:
except NotScalarConstantError:
raise NotScalarConstantError(
"The convolution need that the shape"
" information are constant values. We got"
......@@ -509,7 +509,7 @@ class ConvOp(OpenMPOp):
self.out_mode = output_mode
if not self.out_mode in ["valid", "full"]:
if self.out_mode not in ["valid", "full"]:
raise Exception("Mode %s not implemented" % self.out_mode)
if any((shp is not None) and (shp <= 0) for shp in self.outshp):
......@@ -522,7 +522,6 @@ class ConvOp(OpenMPOp):
if (self.unroll_kern is None and
self.unroll_batch is None and
self.unroll_patch is None):
# no version specified. Find the faster we have
if self.bsize is None and self.nkern is None:
self.unroll_patch = True
......@@ -613,7 +612,6 @@ class ConvOp(OpenMPOp):
inputs - 4 dim: batches x stacksize x rows x cols
kerns - 4 dim: nkern x stackidx x rows x cols
"""
outdim = kerns.ndim
_inputs = as_tensor_variable(inputs)
_kerns = as_tensor_variable(kerns)
# TODO: lift this restriction by upcasting either inputs or kerns
......@@ -778,7 +776,7 @@ class ConvOp(OpenMPOp):
img2d2[:, :, kshp[0] - 1:kshp[0] - 1 + imshp[1],
kshp[1] - 1:kshp[1] - 1 + imshp[2]] = img2d
img2d = img2d2
#N_image_shape = image_data.shape
# N_image_shape = image_data.shape
for b in xrange(bsize):
for n in xrange(nkern):
......@@ -786,7 +784,9 @@ class ConvOp(OpenMPOp):
for im0 in xrange(stacklen):
for row in xrange(0, zz.shape[2], self.dx):
for col in xrange(0, zz.shape[3], self.dy):
zz[b, n, row, col] += (img2d[b, im0, row:row + kshp[0], col:col + kshp[1]] *
zz[b, n, row, col] += (
img2d[b, im0, row:row + kshp[0],
col:col + kshp[1]] *
filtersflipped[n, im0, ::-1, ::-1]).sum()
# We copy it to remove the Stride mismatch warning from DEBUG_MODE.
......@@ -843,8 +843,8 @@ class ConvOp(OpenMPOp):
# mimic what happens inside theano.grad: get the input gradient
# of the final cost wrt all variables involved.
return theano.gradient.grad(cost=None,
known_grads={node: gz}, wrt=[inputs, kerns])
return theano.gradient.grad(cost=None, known_grads={node: gz},
wrt=[inputs, kerns])
if self.dx not in (1, 2) or self.dy not in (1, 2):
raise NotImplementedError(
......@@ -858,7 +858,7 @@ class ConvOp(OpenMPOp):
raise Exception("ConvOp.grad when dx!=1 or dy!=1 we must have all "
"the optional shape information")
####### Determine gradient on kernels ########
# Determine gradient on kernels ########
assert inputs.ndim == 4 and kerns.ndim == 4
newin = inputs.dimshuffle((1, 0, 2, 3))
......@@ -943,7 +943,7 @@ class ConvOp(OpenMPOp):
dw = dw.dimshuffle((1, 0, 2, 3))
dw = dw[:, :, ::-1, ::-1]
####### Determine gradient on inputs ########
# Determine gradient on inputs ########
mode = 'valid'
if not self.out_mode == 'full':
mode = 'full'
......@@ -1015,7 +1015,6 @@ using namespace std;
self.unroll_patch or
self.unroll_batch > 0 or
self.unroll_kern > 0):
return False
return True
return False
......@@ -1030,7 +1029,6 @@ using namespace std;
# compilation with -O3. This don't happen at -O2
if (theano.gof.cmodule.gcc_version() in ['4.3.0'] and
self.kshp == (1, 1)):
return ['-O3']
else:
return []
......
......@@ -89,7 +89,6 @@ whitelist_flake8 = [
"tensor/signal/tests/test_conv.py",
"tensor/signal/tests/test_downsample.py",
"tensor/nnet/__init__.py",
"tensor/nnet/conv.py",
"tensor/nnet/neighbours.py",
"tensor/nnet/tests/test_conv.py",
"tensor/nnet/tests/test_neighbours.py",
......
Markdown 格式
0%
您将把 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论