Commit 0da931e9 authored by Gijs van Tulder

Test for inconsistency in AbstractConv_gradInput.

For some inputs with a non-standard size, different implementations of AbstractConv_gradInput produce a different-sized output.
Parent c9565520
...@@ -49,6 +49,18 @@ class TestDnnConv2d(test_abstract_conv.BaseTestConv2d): ...@@ -49,6 +49,18 @@ class TestDnnConv2d(test_abstract_conv.BaseTestConv2d):
provide_shape=provide_shape, border_mode=b, provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI) filter_flip=flip, target_op=GpuDnnConvGradI)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
if not dnn_available(test_ctx_name):
raise SkipTest(dnn_available.msg)
if fd != (1, 1):
raise SkipTest("Doesn't have CUDNN implementation")
mode = mode_with_gpu
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI)
class TestDnnConv3d(test_abstract_conv.BaseTestConv3d): class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
@classmethod @classmethod
...@@ -82,6 +94,18 @@ class TestDnnConv3d(test_abstract_conv.BaseTestConv3d): ...@@ -82,6 +94,18 @@ class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
provide_shape=provide_shape, border_mode=b, provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI) filter_flip=flip, target_op=GpuDnnConvGradI)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
if not dnn_available(test_ctx_name):
raise SkipTest(dnn_available.msg)
if fd != (1, 1, 1):
raise SkipTest("Doesn't have CUDNN implementation")
mode = mode_with_gpu
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI)
class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d): class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d):
@classmethod @classmethod
...@@ -115,6 +139,16 @@ class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d): ...@@ -115,6 +139,16 @@ class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d):
target_op=GpuCorrMM_gradInputs, target_op=GpuCorrMM_gradInputs,
filter_dilation=fd) filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
mode = self.mode
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorrMM_gradInputs,
filter_dilation=fd)
class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d): class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
@classmethod @classmethod
...@@ -148,6 +182,16 @@ class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d): ...@@ -148,6 +182,16 @@ class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
target_op=GpuCorr3dMM_gradInputs, target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd) filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
mode = self.mode
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd)
class TestDnnConvTypes(test_abstract_conv.TestConvTypes): class TestDnnConvTypes(test_abstract_conv.TestConvTypes):
def setUp(self): def setUp(self):
......
...@@ -57,6 +57,19 @@ class TestDnnConv2d(test_abstract_conv.BaseTestConv2d): ...@@ -57,6 +57,19 @@ class TestDnnConv2d(test_abstract_conv.BaseTestConv2d):
filter_flip=flip, target_op=GpuDnnConvGradI, filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd) filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
if fd != (1, 1):
raise SkipTest("No dilation implementation for cuDNN ConvOp.")
if not dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
mode = mode_with_gpu
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
class TestDnnConv3d(test_abstract_conv.BaseTestConv3d): class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
@classmethod @classmethod
...@@ -91,6 +104,19 @@ class TestDnnConv3d(test_abstract_conv.BaseTestConv3d): ...@@ -91,6 +104,19 @@ class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
filter_flip=flip, target_op=GpuDnnConv3dGradI, filter_flip=flip, target_op=GpuDnnConv3dGradI,
filter_dilation=fd) filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
if fd != (1, 1, 1):
raise SkipTest("No dilation implementation for cuDNN ConvOp.")
if not dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
mode = mode_with_gpu
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d): class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d):
@classmethod @classmethod
...@@ -124,6 +150,16 @@ class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d): ...@@ -124,6 +150,16 @@ class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d):
target_op=GpuCorrMM_gradInputs, target_op=GpuCorrMM_gradInputs,
filter_dilation=fd) filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
mode = self.mode
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorrMM_gradInputs,
filter_dilation=fd)
class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d): class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
@classmethod @classmethod
...@@ -157,6 +193,16 @@ class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d): ...@@ -157,6 +193,16 @@ class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
target_op=GpuCorr3dMM_gradInputs, target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd) filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
mode = self.mode
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd)
class TestDnnConvTypes(test_abstract_conv.TestConvTypes): class TestDnnConvTypes(test_abstract_conv.TestConvTypes):
def setUp(self): def setUp(self):
......
...@@ -371,6 +371,7 @@ class BaseTestConv2d(BaseTestConv): ...@@ -371,6 +371,7 @@ class BaseTestConv2d(BaseTestConv):
cls.subsamples = [(1, 1), (2, 2), (2, 4)] cls.subsamples = [(1, 1), (2, 2), (2, 4)]
cls.default_subsamples = (1, 1) cls.default_subsamples = (1, 1)
cls.filters_dilations = [(1, 1), (1, 2), (2, 1)] cls.filters_dilations = [(1, 1), (1, 2), (2, 1)]
cls.default_filters_dilations = (1, 1)
cls.border_modes = ["valid", "half", "full", (0, 0), (1, 1), (5, 5), (5, 2)] cls.border_modes = ["valid", "half", "full", (0, 0), (1, 1), (5, 5), (5, 2)]
cls.default_border_mode = (0, 0) cls.default_border_mode = (0, 0)
cls.filter_flip = [True, False] cls.filter_flip = [True, False]
...@@ -379,6 +380,30 @@ class BaseTestConv2d(BaseTestConv): ...@@ -379,6 +380,30 @@ class BaseTestConv2d(BaseTestConv):
cls.default_provide_shape = True cls.default_provide_shape = True
cls.shared = staticmethod(theano.compile.shared) cls.shared = staticmethod(theano.compile.shared)
def test_gradinput_arbitrary_output_shapes(self):
# this computes the grad wrt inputs for an output shape
# that the forward convolution would not produce
input_shape = (2, 1, 7, 7)
filter_shape = (2, 1, 3, 3)
for output_shape in [(2, 2, 8, 8), (2, 2, 9, 9), (2, 2, 12, 12)]:
for border_mode in ["valid", "half", "full"]:
# is this output shape large enough?
min_output_shape = self.get_output_shape(
input_shape, filter_shape, self.default_subsamples,
border_mode, self.default_filters_dilations)
if not all(o >= min_o for (o, min_o) in zip(output_shape, min_output_shape)):
continue
for provide_shape in self.provide_shape:
yield (self.tcase_gi,
input_shape,
filter_shape,
output_shape,
self.default_subsamples,
border_mode,
True,
provide_shape,
self.default_filters_dilations)
def run_fwd(self, inputs_shape, filters_shape, def run_fwd(self, inputs_shape, filters_shape,
conv_fn=conv.conv2d, conv_op=conv.AbstractConv2d, conv_fn=conv.conv2d, conv_op=conv.AbstractConv2d,
ref=conv2d_corr, **kwargs): ref=conv2d_corr, **kwargs):
...@@ -438,6 +463,17 @@ class TestCorrConv2d(BaseTestConv2d): ...@@ -438,6 +463,17 @@ class TestCorrConv2d(BaseTestConv2d):
filter_flip=flip, target_op=CorrMM_gradInputs, filter_flip=flip, target_op=CorrMM_gradInputs,
check_trace=True, filter_dilation=fd) check_trace=True, filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
# This tests can run even when theano.config.blas.ldflags is empty.
if (not theano.config.cxx or
theano.config.mode == "FAST_COMPILE"):
raise SkipTest("Need blas to test conv2d")
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=True,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=CorrMM_gradInputs,
check_trace=True, filter_dilation=fd)
class TestAbstractConvNoOptim(BaseTestConv2d): class TestAbstractConvNoOptim(BaseTestConv2d):
@classmethod @classmethod
...@@ -477,6 +513,15 @@ class TestAbstractConvNoOptim(BaseTestConv2d): ...@@ -477,6 +513,15 @@ class TestAbstractConvNoOptim(BaseTestConv2d):
check_trace=True, filter_dilation=fd, check_trace=True, filter_dilation=fd,
mode=mode) mode=mode)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
mode = theano.Mode(optimizer=None)
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=True,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=None,
check_trace=True, filter_dilation=fd,
mode=mode)
class TestCpuConv2d(BaseTestConv2d): class TestCpuConv2d(BaseTestConv2d):
@classmethod @classmethod
...@@ -592,6 +637,29 @@ class TestCpuConv2d(BaseTestConv2d): ...@@ -592,6 +637,29 @@ class TestCpuConv2d(BaseTestConv2d):
check_trace=True, check_trace=True,
filter_dilation=fd) filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
if fd != (1, 1):
raise SkipTest("No dilation implementation for basic cpu ConvOp.")
mode = self.mode
if not flip:
return
if b not in ((0, 0), 'valid', 'full'):
return
if (not provide_shape) and (s != (1, 1)) and (b == 'full'):
return
if ((s[0] not in (1, 2)) or (s[1] not in (1, 2))) and (b == 'full'):
return
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=(ConvOp, ConvTransp3D),
check_trace=True,
filter_dilation=fd)
class BaseTestConv3d(BaseTestConv): class BaseTestConv3d(BaseTestConv):
@classmethod @classmethod
...@@ -602,6 +670,7 @@ class BaseTestConv3d(BaseTestConv): ...@@ -602,6 +670,7 @@ class BaseTestConv3d(BaseTestConv):
cls.subsamples = [(1, 1, 1), (2, 2, 2), (1, 2, 3)] cls.subsamples = [(1, 1, 1), (2, 2, 2), (1, 2, 3)]
cls.default_subsamples = (1, 1, 1) cls.default_subsamples = (1, 1, 1)
cls.filters_dilations = [(1, 1, 1), (1, 2, 1), (2, 1, 2)] cls.filters_dilations = [(1, 1, 1), (1, 2, 1), (2, 1, 2)]
cls.default_filters_dilations = (1, 1, 1)
cls.border_modes = ["valid", "half", "full", (0, 0, 0), (2, 2, 3)] cls.border_modes = ["valid", "half", "full", (0, 0, 0), (2, 2, 3)]
cls.default_border_mode = (0, 0, 0) cls.default_border_mode = (0, 0, 0)
cls.filter_flip = [True, False] cls.filter_flip = [True, False]
...@@ -610,6 +679,30 @@ class BaseTestConv3d(BaseTestConv): ...@@ -610,6 +679,30 @@ class BaseTestConv3d(BaseTestConv):
cls.default_provide_shape = True cls.default_provide_shape = True
cls.shared = staticmethod(theano.compile.shared) cls.shared = staticmethod(theano.compile.shared)
def test_gradinput_arbitrary_output_shapes(self):
# this computes the grad wrt inputs for an output shape
# that the forward convolution would not produce
input_shape = (2, 1, 7, 7, 7)
filter_shape = (1, 1, 3, 3, 3)
for output_shape in [(2, 1, 8, 8, 8), (2, 1, 9, 9, 9), (2, 1, 12, 12, 12)]:
for border_mode in ["valid", "half", "full"]:
# is this output shape large enough?
min_output_shape = self.get_output_shape(
input_shape, filter_shape, self.default_subsamples,
border_mode, self.default_filters_dilations)
if not all(o >= min_o for (o, min_o) in zip(output_shape, min_output_shape)):
continue
for provide_shape in self.provide_shape:
yield (self.tcase_gi,
input_shape,
filter_shape,
output_shape,
self.default_subsamples,
border_mode,
True,
provide_shape,
self.default_filters_dilations)
def run_fwd(self, inputs_shape, filters_shape, def run_fwd(self, inputs_shape, filters_shape,
conv_fn=conv.conv3d, conv_op=conv.AbstractConv3d, conv_fn=conv.conv3d, conv_op=conv.AbstractConv3d,
ref=conv3d_corr, **kwargs): ref=conv3d_corr, **kwargs):
...@@ -669,6 +762,17 @@ class TestCorrConv3d(BaseTestConv3d): ...@@ -669,6 +762,17 @@ class TestCorrConv3d(BaseTestConv3d):
filter_flip=flip, target_op=Corr3dMM_gradInputs, filter_flip=flip, target_op=Corr3dMM_gradInputs,
check_trace=True, filter_dilation=fd) check_trace=True, filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
# This test can run even when theano.config.blas.ldflags is empty.
if (not theano.config.cxx or
theano.config.mode == "FAST_COMPILE"):
raise SkipTest("Need blas to test conv3d")
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=True,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=Corr3dMM_gradInputs,
check_trace=True, filter_dilation=fd)
class TestCpuConv3d(BaseTestConv3d): class TestCpuConv3d(BaseTestConv3d):
@classmethod @classmethod
...@@ -770,6 +874,23 @@ class TestCpuConv3d(BaseTestConv3d): ...@@ -770,6 +874,23 @@ class TestCpuConv3d(BaseTestConv3d):
check_trace=True, check_trace=True,
filter_dilation=fd) filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
if fd != (1, 1, 1):
raise SkipTest("No dilation implementation for basic cpu Conv3D.")
mode = self.mode
if b not in ((0, 0, 0), 'valid'):
return
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=ConvTransp3D,
check_trace=True,
filter_dilation=fd)
def test_constant_shapes(): def test_constant_shapes():
# Check that the `imshp` and `kshp` parameters of the AbstractConv Ops # Check that the `imshp` and `kshp` parameters of the AbstractConv Ops
......
Markdown is supported
0%
You are about to add 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment