Commit 9d804c53 authored by Frederic

pep8

Parent e521b20e
@@ -103,10 +103,10 @@ def conv2d(input,
            border_mode='valid',
            subsample=(1, 1),
            filter_flip=True):
-    """
-    This function will build the symbolic graph for convolving a mini-batch of a
-    stack of 2D inputs with a set of 2D filters. The implementation is modelled
-    after Convolutional Neural Networks (CNN).
+    """This function will build the symbolic graph for convolving a
+    mini-batch of a stack of 2D inputs with a set of 2D filters. The
+    implementation is modelled after Convolutional Neural Networks
+    (CNN).
 
     :type input: symbolic 4D tensor
     :param input: mini-batch of feature map stacks, of shape
@@ -153,7 +153,8 @@ def conv2d(input,
     :param filter_flip: If ``True``, will flip the filter rows and columns
         before sliding them over the input. This operation is normally referred
         to as a convolution, and this is the default. If ``False``, the filters
-        are not flipped and the operation is referred to as a cross-correlation.
+        are not flipped and the operation is referred to as a
+        cross-correlation.
 
     :rtype: symbolic 4D tensor
     :return: set of feature maps generated by convolutional layer. Tensor is
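The two hunks above only reflow the docstring of the public `conv2d` wrapper. As a rough illustration of the parameters visible in this diff, a call might look like the sketch below; the `filters` positional argument and the `theano.tensor.nnet.conv2d` import path are assumed from the surrounding Theano API rather than taken from the lines shown here.

```python
# Minimal sketch of a conv2d call, assuming the standard Theano 4D layouts:
# input is (batch, channels, rows, cols) and
# filters is (n_filters, channels, filter_rows, filter_cols).
import numpy
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d  # assumed import path

images = T.tensor4('images')
filters = T.tensor4('filters')

# Only the keyword arguments visible in this diff are used here.
out = conv2d(images, filters,
             border_mode='valid',
             subsample=(1, 1),
             filter_flip=True)

f = theano.function([images, filters], out)
result = f(numpy.random.randn(2, 3, 8, 8).astype('float32'),
           numpy.random.randn(4, 3, 3, 3).astype('float32'))
print(result.shape)  # 'valid' mode: (2, 4, 8 - 3 + 1, 8 - 3 + 1) = (2, 4, 6, 6)
```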
@@ -169,9 +170,10 @@ def conv2d(input,
 class BaseAbstractConv2d(Op):
-    """
-    Base class for AbstractConv
-    Define an abstract convolution op that will be replaced with the appropriate implementation
+    """Base class for AbstractConv
+
+    Define an abstract convolution op that will be replaced with the
+    appropriate implementation
 
     :type imshp: None, tuple/list of len 4 of int or Constant variable
     :param imshp: The shape of the input parameter.
@@ -211,7 +213,9 @@ class BaseAbstractConv2d(Op):
     :param filter_flip: If ``True``, will flip the filter rows and columns
         before sliding them over the input. This operation is normally referred
         to as a convolution, and this is the default. If ``False``, the filters
-        are not flipped and the operation is referred to as a cross-correlation.
+        are not flipped and the operation is referred to as a
+        cross-correlation.
     """
     check_broadcast = False
     __props__ = ('border_mode', 'subsample', 'filter_flip', 'imshp', 'kshp')
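Both reflowed `filter_flip` descriptions make the same point: the only difference between convolution and cross-correlation is whether the kernel is flipped along its spatial axes before being slid over the input. A small plain-numpy sketch of that relationship (illustrative only, not code from this commit):

```python
import numpy as np

def correlate2d_valid(img, kern):
    """Naive 'valid'-mode 2D cross-correlation: slide kern over img as-is."""
    out_r = img.shape[0] - kern.shape[0] + 1
    out_c = img.shape[1] - kern.shape[1] + 1
    out = np.empty((out_r, out_c))
    for i in range(out_r):
        for j in range(out_c):
            out[i, j] = np.sum(img[i:i + kern.shape[0],
                                   j:j + kern.shape[1]] * kern)
    return out

img = np.arange(16.).reshape(4, 4)
kern = np.arange(4.).reshape(2, 2)

# filter_flip=True: flip the kernel on both axes, i.e. a true convolution.
convolved = correlate2d_valid(img, kern[::-1, ::-1])
# filter_flip=False: use the kernel as-is, i.e. a cross-correlation.
correlated = correlate2d_valid(img, kern)
```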
@@ -270,7 +274,8 @@ class AbstractConv2d(BaseAbstractConv2d):
                  subsample=(1, 1),
                  filter_flip=True):
         super(AbstractConv2d, self).__init__(imshp, kshp,
-                                             border_mode, subsample, filter_flip)
+                                             border_mode, subsample,
+                                             filter_flip)
 
     def make_node(self, img, kern):
         if img.type.ndim != 4:
@@ -319,7 +324,9 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
                  subsample=(1, 1),
                  filter_flip=True):
         super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp,
-                                                          border_mode, subsample, filter_flip)
+                                                          border_mode,
+                                                          subsample,
+                                                          filter_flip)
 
     # Update shape/height_width
     def make_node(self, img, topgrad, shape):
...@@ -336,7 +343,8 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d): ...@@ -336,7 +343,8 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
return Apply(self, [img, topgrad, shape], [output]) return Apply(self, [img, topgrad, shape], [output])
def perform(self, node, inp, out_): def perform(self, node, inp, out_):
raise NotImplementedError('AbstractConv2d_gradWeight theano optimization failed') raise NotImplementedError(
'AbstractConv2d_gradWeight theano optimization failed')
def grad(self, inp, grads): def grad(self, inp, grads):
bottom, top = inp[:2] bottom, top = inp[:2]
@@ -344,7 +352,10 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
         d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
                                              self.border_mode,
                                              self.subsample,
-                                             self.filter_flip)(weights, top, bottom.shape[-2:])
+                                             self.filter_flip)(
+                                                 weights,
+                                                 top,
+                                                 bottom.shape[-2:])
         d_top = AbstractConv2d(self.imshp,
                                self.kshp,
                                self.border_mode,
@@ -373,7 +384,9 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d):
                  subsample=(1, 1),
                  filter_flip=True):
         super(AbstractConv2d_gradInputs, self).__init__(imshp, kshp,
-                                                         border_mode, subsample, filter_flip)
+                                                         border_mode,
+                                                         subsample,
+                                                         filter_flip)
 
     # Update shape/height_width
     def make_node(self, kern, topgrad, shape):
def make_node(self, kern, topgrad, shape): def make_node(self, kern, topgrad, shape):
...@@ -390,16 +403,20 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d): ...@@ -390,16 +403,20 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d):
return Apply(self, [kern, topgrad, shape], [output]) return Apply(self, [kern, topgrad, shape], [output])
def perform(self, node, inp, out_): def perform(self, node, inp, out_):
raise NotImplementedError('AbstractConv2d_gradWeight theano optimization failed') raise NotImplementedError(
'AbstractConv2d_gradWeight theano optimization failed')
def grad(self, inp, grads): def grad(self, inp, grads):
weights, top = inp[:2] weights, top = inp[:2]
bottom, = grads bottom, = grads
d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp, d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,
self.border_mode, self.border_mode,
self.subsample)(bottom, top, weights.shape[-2:]) self.subsample)(
bottom, top,
weights.shape[-2:])
d_top = AbstractConv2d(self.imshp, self.kshp, d_top = AbstractConv2d(self.imshp, self.kshp,
self.border_mode, self.subsample)(bottom, weights) self.border_mode, self.subsample)(
bottom, weights)
d_height_width = (theano.gradient.DisconnectedType()(),) d_height_width = (theano.gradient.DisconnectedType()(),)
return (d_weights, d_top) + d_height_width return (d_weights, d_top) + d_height_width
...
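Context for the `perform` methods reformatted above: these abstract ops are placeholders, as the base-class docstring says ("replaced with the appropriate implementation"). Taking the gradient of a `conv2d` output builds `AbstractConv2d_gradWeights` / `AbstractConv2d_gradInputs` nodes, and a later graph optimization is expected to substitute a concrete convolution; if that never happens, `perform` raises the "theano optimization failed" error seen in the diff. A hedged sketch of how such a graph might be built, reusing the assumed `conv2d` wrapper from the earlier example:

```python
# Illustrative sketch only: build a conv graph and take its gradients,
# which is what introduces the abstract gradient ops shown in this diff.
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d  # assumed import path

images = T.tensor4('images')
filters = T.tensor4('filters')

out = conv2d(images, filters, border_mode='valid',
             subsample=(1, 1), filter_flip=True)
cost = out.sum()

# Gradients w.r.t. the filters and the input image; in the unoptimized
# graph these correspond to the gradWeights / gradInputs abstract ops.
g_filters, g_images = theano.grad(cost, [filters, images])

# Compiling triggers the optimizations that are expected to replace the
# abstract ops with a concrete implementation (CPU or GPU convolution).
f = theano.function([images, filters], [g_filters, g_images])
```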