Commit f27a3981 authored by Nicolas Ballas

change filters_flip to filter_flip

Parent 06bc1277
......@@ -2697,7 +2697,7 @@ def local_abstractconv_gemm(node):
border_mode = node.op.border_mode
subsample = node.op.subsample
if (border_mode == 'full') and (subsample == (1, 1)):
if not node.op.filters_flip:
if not node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1]
# need to dimshuffle the kernel for full convolution
kern = kern.dimshuffle(1, 0, 2, 3)
......@@ -2706,7 +2706,7 @@ def local_abstractconv_gemm(node):
gpu_contiguous(kern), gpu_contiguous(img))
else:
# need to flip the kernel if necessary
if node.op.filters_flip:
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1]
# By default use GpuCorrMM
rval = GpuCorrMM(border_mode, subsample)(gpu_contiguous(img),
......@@ -2754,7 +2754,7 @@ def local_abstractconv_gradweight_gemm(node):
rval = GpuCorrMM_gradWeights(border_mode=node.op.border_mode,
subsample=node.op.subsample)(
gpu_contiguous(img), gpu_contiguous(topgrad), shape)
if node.op.filters_flip:
if node.op.filter_flip:
rval = rval[:, :, ::-1, ::-1]
rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
rval = as_cuda_ndarray_variable(rval)
......@@ -2769,7 +2769,7 @@ def local_abstractconv_gradinputs_gemm(node):
not isinstance(topgrad.type, CudaNdarrayType):
return None
if node.op.filters_flip:
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1]
rval = GpuCorrMM_gradInputs(border_mode=node.op.border_mode,
......
......@@ -34,7 +34,7 @@ class TestConv2d(unittest.TestCase):
(1, 1, 2, 5), (4, 1, 2, 2), (4, 5, 2, 2)]
self.subsamples = [(1, 1), (2, 2), (2, 4)]
self.border_modes = ["valid", "full", (0, 0), (1, 1), (5, 5), (5, 2)]
self.filters_flip = [True, False]
self.filter_flip = [True, False]
def get_output_shape(self, inputs_shape, filters_shape, subsample, border_mode):
......@@ -52,7 +52,7 @@ class TestConv2d(unittest.TestCase):
def run_fwd(self, inputs_shape, filters_shape, ref=dnn_conv,
subsample=(1, 1), verify_grad=True, mode=mode_without_gpu,
border_mode='valid', filters_flip=True, device='cpu', provide_shape=False):
border_mode='valid', filter_flip=True, device='cpu', provide_shape=False):
inputs_val = numpy.random.random(inputs_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
......@@ -68,7 +68,7 @@ class TestConv2d(unittest.TestCase):
else:
imshp = None
kshp = None
if filters_flip:
if filter_flip:
conv_mode = 'conv'
else:
conv_mode = 'cross'
......@@ -80,7 +80,7 @@ class TestConv2d(unittest.TestCase):
c = conv.conv2d(inputs, filters,
border_mode=border_mode,
subsample=subsample,
filter_flip=filters_flip,
filter_flip=filter_flip,
input_shape=imshp,
filter_shape=kshp)
f_ref = theano.function([], c_ref, mode=mode)
......@@ -95,7 +95,7 @@ class TestConv2d(unittest.TestCase):
mode=mode)
def run_gradweight(self, inputs_shape, filters_shape, output_shape,
ref=dnn_gradweight, subsample=(1, 1), filters_flip=True,
ref=dnn_gradweight, subsample=(1, 1), filter_flip=True,
verify_grad=True, mode=mode_without_gpu, border_mode='valid',
device='cpu', provide_shape = False):
......@@ -113,12 +113,12 @@ class TestConv2d(unittest.TestCase):
else:
imshp = None
kshp = None
if filters_flip:
if filter_flip:
conv_mode = 'conv'
else:
conv_mode = 'cross'
c = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
filters_flip=filters_flip,
filter_flip=filter_flip,
subsample=subsample,
imshp=imshp, kshp=kshp)
c = c(inputs, output, filters_shape[-2:])
......@@ -142,7 +142,7 @@ class TestConv2d(unittest.TestCase):
mode=mode, eps=1)
def run_gradinput(self, inputs_shape, filters_shape, output_shape, ref=dnn_gradinput,
subsample=(1, 1), filters_flip=True, verify_grad=True, mode=mode_without_gpu,
subsample=(1, 1), filter_flip=True, verify_grad=True, mode=mode_without_gpu,
border_mode='valid', device='cpu', provide_shape = False):
output_val = numpy.random.random(output_shape).astype('float32')
......@@ -159,13 +159,13 @@ class TestConv2d(unittest.TestCase):
else:
imshp = None
kshp = None
if filters_flip:
if filter_flip:
conv_mode = 'conv'
else:
conv_mode = 'cross'
c = conv.AbstractConv2d_gradInputs(border_mode=border_mode,
subsample=subsample,
filters_flip=filters_flip,
filter_flip=filter_flip,
imshp=imshp, kshp=kshp)
c = c(filters, output, inputs_shape[-2:])
c_ref = ref(filters, output, inputs_shape,
......@@ -195,22 +195,22 @@ class TestConv2d(unittest.TestCase):
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filters_flip):
self.filter_flip):
o = self.get_output_shape(i, f, s, b)
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filters_flip=flip)
filter_flip=flip)
self.run_gradweight(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filters_flip=flip)
filter_flip=flip)
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filters_flip=flip)
filter_flip=flip)
def test_cormm_conv(self):
if not dnn_available():
......@@ -221,24 +221,24 @@ class TestConv2d(unittest.TestCase):
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filters_flip,
self.filter_flip,
[False, True]):
o = self.get_output_shape(i, f, s, b)
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filters_flip=flip)
filter_flip=flip)
self.run_gradweight(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filters_flip=flip)
filter_flip=flip)
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filters_flip=flip)
filter_flip=flip)
def test_cpu_conv(self):
if not dnn_available():
......@@ -249,7 +249,7 @@ class TestConv2d(unittest.TestCase):
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filters_flip,
self.filter_flip,
[False, True]):
o = self.get_output_shape(i, f, s, b)
......@@ -279,7 +279,7 @@ class TestConv2d(unittest.TestCase):
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, mode=mode, device='cpu',
provide_shape=provide_shape, border_mode=b,
filters_flip=flip)
filter_flip=flip)
else:
self.assertRaises(NotImplementedError,
self.run_fwd,
......@@ -291,14 +291,14 @@ class TestConv2d(unittest.TestCase):
device='cpu',
provide_shape=provide_shape,
border_mode=b,
filters_flip=flip)
filter_flip=flip)
if gradweight_OK:
self.run_gradweight(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode, device='cpu',
provide_shape=provide_shape, border_mode=b,
filters_flip=flip)
filter_flip=flip)
else:
self.assertRaises(NotImplementedError,
self.run_gradweight,
......@@ -311,14 +311,14 @@ class TestConv2d(unittest.TestCase):
device='cpu',
provide_shape=provide_shape,
border_mode=b,
filters_flip=flip)
filter_flip=flip)
if gradinput_OK:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode, device='cpu',
provide_shape=provide_shape, border_mode=b,
filters_flip=flip)
filter_flip=flip)
else:
self.assertRaises(NotImplementedError,
self.run_gradinput,
......@@ -331,4 +331,4 @@ class TestConv2d(unittest.TestCase):
device='cpu',
provide_shape=provide_shape,
border_mode=b,
filters_flip=flip)
filter_flip=flip)
......@@ -76,7 +76,7 @@ def conv2d(input,
Also called strides elsewhere.
:type filter_flip: bool
:param filters_flip: If ``True``, will flip the filter rows and columns
:param filter_flip: If ``True``, will flip the filter rows and columns
before sliding them over the input. This operation is normally referred
to as a convolution, and this is the default. If ``False``, the filters
are not flipped and the operation is referred to as a cross-correlation.
......@@ -132,19 +132,19 @@ class BaseAbstractConv2d(Op):
:param subsample: factor by which to subsample the output.
Also called strides elsewhere.
:type filters_flip: bool
:param filters_flip: If ``True``, will flip the filter rows and columns
:type filter_flip: bool
:param filter_flip: If ``True``, will flip the filter rows and columns
before sliding them over the input. This operation is normally referred
to as a convolution, and this is the default. If ``False``, the filters
are not flipped and the operation is referred to as a cross-correlation.
"""
check_broadcast = False
__props__ = ('border_mode', 'subsample', 'filters_flip', 'imshp', 'kshp')
__props__ = ('border_mode', 'subsample', 'filter_flip', 'imshp', 'kshp')
def __init__(self,
imshp=None, kshp=None,
border_mode="valid", subsample=(1, 1),
filters_flip = True):
filter_flip = True):
if isinstance(border_mode, int):
border_mode = (border_mode, border_mode)
if isinstance(border_mode, tuple):
......@@ -160,7 +160,7 @@ class BaseAbstractConv2d(Op):
self.imshp = imshp
self.kshp = kshp
self.border_mode = border_mode
self.filters_flip = filters_flip
self.filter_flip = filter_flip
if len(subsample) != 2:
raise ValueError("subsample must have two elements")
......@@ -192,9 +192,9 @@ class AbstractConv2d(BaseAbstractConv2d):
kshp=None,
border_mode="valid",
subsample=(1, 1),
filters_flip = True):
filter_flip = True):
super(AbstractConv2d, self).__init__(imshp, kshp,
border_mode, subsample, filters_flip)
border_mode, subsample, filter_flip)
def make_node(self, img, kern):
if img.type.ndim != 4:
......@@ -217,12 +217,12 @@ class AbstractConv2d(BaseAbstractConv2d):
d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
self.border_mode,
self.subsample,
self.filters_flip)(
self.filter_flip)(
weights, top, bottom.shape[-2:])
d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,
self.border_mode,
self.subsample,
self.filters_flip)(
self.filter_flip)(
bottom, top, weights.shape[-2:])
return d_bottom, d_weights
......@@ -240,9 +240,9 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
kshp=None,
border_mode="valid",
subsample=(1, 1),
filters_flip=True):
filter_flip=True):
super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp,
border_mode, subsample, filters_flip)
border_mode, subsample, filter_flip)
# Update shape/height_width
def make_node(self, img, topgrad, shape):
......@@ -267,12 +267,12 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
self.border_mode,
self.subsample,
self.filters_flip)(weights, top, bottom.shape[-2:])
self.filter_flip)(weights, top, bottom.shape[-2:])
d_top = AbstractConv2d(self.imshp,
self.kshp,
self.border_mode,
self.subsample,
self.filters_flip)(bottom, weights)
self.filter_flip)(bottom, weights)
d_height_width = (theano.gradient.DisconnectedType()(),)
return (d_bottom, d_top) + d_height_width
......@@ -294,9 +294,9 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d):
kshp=None,
border_mode="valid",
subsample=(1, 1),
filters_flip=True):
filter_flip=True):
super(AbstractConv2d_gradInputs, self).__init__(imshp, kshp,
border_mode, subsample, filters_flip)
border_mode, subsample, filter_flip)
# Update shape/height_width
def make_node(self, kern, topgrad, shape):
......@@ -343,7 +343,7 @@ def local_conv2d_cpu(node):
return None
if node.op.border_mode not in ['full', 'valid']:
return None
if not node.op.filters_flip:
if not node.op.filter_flip:
# Not tested yet
return None
......@@ -365,7 +365,7 @@ def local_conv2d_gradweight_cpu(node):
return None
if node.op.border_mode not in ['full', 'valid']:
return None
if not node.op.filters_flip:
if not node.op.filter_flip:
# Not tested yet
return
......@@ -474,7 +474,7 @@ def local_conv2d_gradinputs_cpu(node):
return None
if node.op.border_mode not in ['full', 'valid']:
return None
if not node.op.filters_flip:
if not node.op.filter_flip:
# Not tested yet
return None
......
Markdown is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment