提交 59a17284 authored 作者: Gijs van Tulder's avatar Gijs van Tulder

Brute-force test for inconsistent conv shapes.

上级 0e40d582
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
import numpy
......@@ -49,17 +50,30 @@ class TestDnnConv2d(test_abstract_conv.BaseTestConv2d):
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1), expect_error=False):
if not dnn_available(test_ctx_name):
raise SkipTest(dnn_available.msg)
if fd != (1, 1):
raise SkipTest("Doesn't have CUDNN implementation")
mode = mode_with_gpu
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
else:
assert_raises((RuntimeError, ValueError),
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
ref=None,
filter_dilation=fd)
class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
......@@ -94,17 +108,30 @@ class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1), expect_error=False):
if not dnn_available(test_ctx_name):
raise SkipTest(dnn_available.msg)
if fd != (1, 1, 1):
raise SkipTest("Doesn't have CUDNN implementation")
mode = mode_with_gpu
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
else:
assert_raises((RuntimeError, ValueError),
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
ref=None,
filter_dilation=fd)
class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d):
......@@ -139,15 +166,27 @@ class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d):
target_op=GpuCorrMM_gradInputs,
filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1), expect_error=False):
mode = self.mode
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorrMM_gradInputs,
filter_dilation=fd)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorrMM_gradInputs,
filter_dilation=fd)
else:
assert_raises(ValueError,
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorrMM_gradInputs,
ref=None,
filter_dilation=fd)
class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
......@@ -182,15 +221,27 @@ class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1), expect_error=False):
mode = self.mode
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd)
else:
assert_raises(ValueError,
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorr3dMM_gradInputs,
ref=None,
filter_dilation=fd)
class TestDnnConvTypes(test_abstract_conv.TestConvTypes):
......
......@@ -13,6 +13,7 @@ from theano.sandbox.cuda.blas import (
GpuCorrMM, GpuCorrMM_gradWeights, GpuCorrMM_gradInputs,
GpuCorr3dMM, GpuCorr3dMM_gradWeights, GpuCorr3dMM_gradInputs)
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
import theano.sandbox.cuda as cuda
if not cuda.cuda_available:
......@@ -57,18 +58,30 @@ class TestDnnConv2d(test_abstract_conv.BaseTestConv2d):
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1), expect_error=False):
if fd != (1, 1):
raise SkipTest("No dilation implementation for cuDNN ConvOp.")
if not dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
mode = mode_with_gpu
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
else:
assert_raises((RuntimeError, ValueError),
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
ref=None,
filter_dilation=fd)
class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
......@@ -104,18 +117,30 @@ class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
filter_flip=flip, target_op=GpuDnnConv3dGradI,
filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1), expect_error=False):
if fd != (1, 1, 1):
raise SkipTest("No dilation implementation for cuDNN ConvOp.")
if not dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
mode = mode_with_gpu
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
else:
assert_raises((RuntimeError, ValueError),
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=GpuDnnConvGradI,
ref=None,
filter_dilation=fd)
class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d):
......@@ -150,15 +175,27 @@ class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d):
target_op=GpuCorrMM_gradInputs,
filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1), expect_error=False):
mode = self.mode
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorrMM_gradInputs,
filter_dilation=fd)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorrMM_gradInputs,
filter_dilation=fd)
else:
assert_raises(ValueError,
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorrMM_gradInputs,
ref=None,
filter_dilation=fd)
class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
......@@ -193,15 +230,27 @@ class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1), expect_error=False):
mode = self.mode
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd)
else:
assert_raises(ValueError,
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorr3dMM_gradInputs,
ref=None,
filter_dilation=fd)
class TestDnnConvTypes(test_abstract_conv.TestConvTypes):
......
......@@ -392,11 +392,16 @@ class BaseTestConv(object):
imshp=imshp, kshp=kshp,
filter_dilation=filter_dilation)
c = c(filters, output, inputs_shape[2:])
c_ref = ref(filters, output, inputs_shape,
border_mode=border_mode, subsample=subsample,
conv_mode=conv_mode, filter_dilation=filter_dilation)
f = theano.function([], c, mode=mode)
f_ref = theano.function([], c_ref, mode='FAST_RUN')
# ref is set to None for the inconsistent-shape tests.
# The reference function also raises an exception, which would
# mask the exception generated by the target implementation.
if ref is not None:
c_ref = ref(filters, output, inputs_shape,
border_mode=border_mode, subsample=subsample,
conv_mode=conv_mode, filter_dilation=filter_dilation)
f_ref = theano.function([], c_ref, mode='FAST_RUN')
if target_op is not None:
assert any([isinstance(n.op, target_op) for n
......@@ -404,9 +409,11 @@ class BaseTestConv(object):
if check_trace:
assert_true(check_stack_trace(f, ops_to_check=target_op))
res_ref = numpy.array(f_ref())
res = numpy.array(f())
utt.assert_allclose(res_ref, res)
if ref is not None:
res_ref = numpy.array(f_ref())
utt.assert_allclose(res_ref, res)
def abstract_conv_gradinputs(filters_val, output_val):
conv_op = gradInputs_fn(border_mode=border_mode,
......@@ -482,6 +489,33 @@ class BaseTestConv2d(BaseTestConv):
provide_shape,
self.default_filters_dilations)
def test_gradinput_impossible_output_shapes(self):
# Brute-force generator: for a sweep of image sizes (i), kernel sizes (k),
# border modes, strides (s) and dilations (d), compute the valid conv
# output shape, then perturb it by a small nonzero offset (o) so the
# requested output shape is inconsistent with the other parameters.
# Each yielded case calls self.tcase_gi with expect_error=True (the
# final True), so the gradinput implementation must raise instead of
# silently computing with mismatched shapes.
for i in range(1, 20):
for k in range(1, 10):
for border_mode in ('valid', 'half', 'full', (0, 2)):
for s in (1, 2, 3):
for d in (1, 2, 3):
image_shape = (1, 1, i, i)
kernel_shape = (1, 1, k, k)
# compute the output that these inputs and parameters would produce
computed_shape = get_conv_output_shape(
image_shape, kernel_shape, border_mode, (s, s), (d, d))
# outputs that are too large or too small should be rejected
for o in (-3, -2, -1, 1, 2, 3):
output_shape = (1, 1, computed_shape[2] + o, computed_shape[3] + o)
yield (self.tcase_gi,
image_shape,
kernel_shape,
output_shape,
(s, s),
border_mode,
True,
True,
(d, d),
True)
def run_fwd(self, inputs_shape, filters_shape,
conv_fn=conv.conv2d, conv_op=conv.AbstractConv2d,
ref=conv2d_corr, **kwargs):
......@@ -541,16 +575,25 @@ class TestCorrConv2d(BaseTestConv2d):
filter_flip=flip, target_op=CorrMM_gradInputs,
check_trace=True, filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1), expect_error=False):
# This test can run even when theano.config.blas.ldflags is empty.
if (not theano.config.cxx or
theano.config.mode == "FAST_COMPILE"):
raise SkipTest("Need blas to test conv2d")
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=True,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=CorrMM_gradInputs,
check_trace=True, filter_dilation=fd)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=True,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=CorrMM_gradInputs,
check_trace=True, filter_dilation=fd)
else:
assert_raises(ValueError,
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=False,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=CorrMM_gradInputs,
ref=None, check_trace=True, filter_dilation=fd)
class TestAbstractConvNoOptim(BaseTestConv2d):
......@@ -591,14 +634,24 @@ class TestAbstractConvNoOptim(BaseTestConv2d):
check_trace=True, filter_dilation=fd,
mode=mode)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1), expect_error=False):
mode = theano.Mode(optimizer=None)
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=True,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=None,
check_trace=True, filter_dilation=fd,
mode=mode)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=True,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=None,
check_trace=True, filter_dilation=fd,
mode=mode)
else:
assert_raises(ValueError,
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=False,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=None,
check_trace=True, filter_dilation=fd,
ref=None, mode=mode)
class TestCpuConv2d(BaseTestConv2d):
......@@ -715,7 +768,7 @@ class TestCpuConv2d(BaseTestConv2d):
check_trace=True,
filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1), expect_error=False):
if fd != (1, 1):
raise SkipTest("No dilation implementation for basic cpu ConvOp.")
mode = self.mode
......@@ -729,14 +782,19 @@ class TestCpuConv2d(BaseTestConv2d):
if ((s[0] not in (1, 2)) or (s[1] not in (1, 2))) and (b == 'full'):
return
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=(ConvOp, ConvTransp3D),
check_trace=True,
filter_dilation=fd)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=(ConvOp, ConvTransp3D),
check_trace=True,
filter_dilation=fd)
else:
# we do not check for inconsistent shapes,
# because this older implementation does not check that
raise SkipTest('Inconsistent shapes are not tested for old cpu ConvOp.')
class BaseTestConv3d(BaseTestConv):
......@@ -781,6 +839,34 @@ class BaseTestConv3d(BaseTestConv):
provide_shape,
self.default_filters_dilations)
def test_gradinput_impossible_output_shapes(self):
# 3d analogue of the 2d brute-force generator: sweep cubic image (i)
# and kernel (k) sizes, border modes, strides (s) and dilations (d);
# compute the consistent output shape, then offset every spatial
# dimension by a nonzero amount (o) to make it impossible. Each case
# calls self.tcase_gi with expect_error=True (the final True) so the
# implementation under test must reject the inconsistent shapes.
for i in range(1, 20):
for k in range(1, 10):
for border_mode in ('valid', 'half', 'full', (0, 2, 1)):
for s in (1, 2, 3):
for d in (1, 2, 3):
image_shape = (1, 1, i, i, i)
kernel_shape = (1, 1, k, k, k)
# compute the output that these inputs and parameters would produce
computed_shape = get_conv_output_shape(
image_shape, kernel_shape, border_mode, (s, s, s), (d, d, d))
# outputs that are too large or too small should be rejected
for o in (-3, -2, -1, 1, 2, 3):
output_shape = (1, 1, computed_shape[2] + o,
computed_shape[3] + o, computed_shape[4] + o)
yield (self.tcase_gi,
image_shape,
kernel_shape,
output_shape,
(s, s, s),
border_mode,
True,
True,
(d, d, d),
True)
def run_fwd(self, inputs_shape, filters_shape,
conv_fn=conv.conv3d, conv_op=conv.AbstractConv3d,
ref=conv3d_corr, **kwargs):
......@@ -840,16 +926,25 @@ class TestCorrConv3d(BaseTestConv3d):
filter_flip=flip, target_op=Corr3dMM_gradInputs,
check_trace=True, filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1), expect_error=False):
# This test can run even when theano.config.blas.ldflags is empty.
if (not theano.config.cxx or
theano.config.mode == "FAST_COMPILE"):
raise SkipTest("Need blas to test conv3d")
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=True,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=Corr3dMM_gradInputs,
check_trace=True, filter_dilation=fd)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=True,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=Corr3dMM_gradInputs,
check_trace=True, filter_dilation=fd)
else:
assert_raises(ValueError,
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s, verify_grad=False,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=Corr3dMM_gradInputs,
ref=None, check_trace=True, filter_dilation=fd)
class TestCpuConv3d(BaseTestConv3d):
......@@ -952,7 +1047,7 @@ class TestCpuConv3d(BaseTestConv3d):
check_trace=True,
filter_dilation=fd)
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1)):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1), expect_error=False):
if fd != (1, 1, 1):
raise SkipTest("No dilation implementation for basic cpu Conv3D.")
mode = self.mode
......@@ -960,14 +1055,19 @@ class TestCpuConv3d(BaseTestConv3d):
if b not in ((0, 0, 0), 'valid'):
return
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=ConvTransp3D,
check_trace=True,
filter_dilation=fd)
if not expect_error:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=ConvTransp3D,
check_trace=True,
filter_dilation=fd)
else:
# we do not check for inconsistent shapes,
# because this older implementation does not check that
raise SkipTest('Inconsistent shapes are not tested for old cpu Conv3D.')
def test_constant_shapes():
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论