提交 6c8f1a15 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Refactor how the parameters are iterated over to reduce the total time of the…

Refactor how the parameters are iterated over to reduce the total time of the test. (This covers fewer cases, but should be almost equivalent.)
上级 6f4a125d
......@@ -24,19 +24,14 @@ else:
class TestDnnConv2d(test_abstract_conv.TestConv2d):
def setUp(self):
super(TestDnnConv2d, self).setUp()
# provide_shape is not used by the CuDNN impementation
self.provide_shape = [False]
self.shared = gpu_shared
def test_dnn_conv(self):
def tcase(self, i, f, s, b, flip, provide_shape):
if not dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
mode = mode_with_gpu
# provide_shape is not used by the CuDNN impementation
provide_shape = False
for (i, f), s, b, flip in itertools.product(
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filter_flip):
o = self.get_output_shape(i, f, s, b)
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, mode=mode,
......@@ -58,16 +53,10 @@ class TestCorrMMConv2d(test_abstract_conv.TestConv2d):
def setUp(self):
super(TestCorrMMConv2d, self).setUp()
self.shared = gpu_shared
self.mode = mode_with_gpu.excluding('cudnn')
def test_gpucorrmm_conv(self):
mode = mode_with_gpu.excluding('cudnn')
for (i, f), s, b, flip, provide_shape in itertools.product(
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filter_flip,
[False, True]):
def test_gpucorrmm_conv(self, i, f, s, b, flip, provide_shape):
mode = self.mode
o = self.get_output_shape(i, f, s, b)
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, mode=mode,
......
......@@ -15,19 +15,14 @@ class TestDnnConv2d(test_abstract_conv.TestConv2d):
def setUp(self):
super(TestDnnConv2d, self).setUp()
self.shared = gpuarray_shared_constructor
# provide_shape is not used by the CuDNN impementation
self.provide_shape = [False]
def test_dnn_conv(self):
def tcase(self, i, f, s, b, flip, provide_shape):
if not dnn_available(test_ctx_name):
raise SkipTest(dnn_available.msg)
mode = mode_with_gpu
# provide_shape is not used by the CuDNN impementation
provide_shape = False
for (i, f), s, b, flip in itertools.product(
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filter_flip):
o = self.get_output_shape(i, f, s, b)
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, mode=mode,
......
......@@ -67,6 +67,7 @@ class TestConv2d(unittest.TestCase):
self.subsamples = [(1, 1), (2, 2), (2, 4)]
self.border_modes = ["valid", "full", (0, 0), (1, 1), (5, 5), (5, 2)]
self.filter_flip = [True, False]
self.provide_shape = [True, False]
self.shared = theano.compile.shared
def get_output_shape(self, inputs_shape, filters_shape, subsample,
......@@ -237,16 +238,24 @@ class TestConv2d(unittest.TestCase):
[filters_val, output_val],
mode=mode, eps=1)
def test_all(self):
ds = [0, 0]
db = (0, 0)
dflip = True in self.filter_flip
dprovide_shape = True in self.provide_shapes
for (i, f) in zip(self.inputs_shapes, self.filters_shapes):
for provide_shape in self.provide_shape:
self.tcase(i, f, ds, db, dflip, provide_shape)
for s in self.subsamples:
self.tcase(i, f, s, db, dflip, dprovide_shape)
for b in self.border_modes:
self.tcase(i, f, ds, b, dflip, dprovide_shape)
for flip in self.filter_flip:
self.tcase(i, f, ds, db, flip, dprovide_shape)
class TestCorrConv2d(TestConv2d):
def test_corrmm_conv(self):
for (i, f), s, b, flip, provide_shape in itertools.product(
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filter_flip,
[False, True]):
class TestCorrConv2d(TestConv2d):
def tcase(self, i, f, s, b, flip, provide_shape):
o = self.get_output_shape(i, f, s, b)
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, provide_shape=provide_shape,
......@@ -262,15 +271,11 @@ class TestCorrConv2d(TestConv2d):
class TestCpuConv2d(TestConv2d):
def test_cpu_conv(self):
mode = theano.compile.mode.get_default_mode().excluding('conv_gemm')
for (i, f), s, b, flip, provide_shape in itertools.product(
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filter_flip,
[False, True]):
def setUp(self):
super(TestCpuConv2d, self).setUp()
self.mode = theano.compile.mode.get_default_mode().excluding('conv_gemm')
def tcase(self, i, f, s, b, flip, provide_shape):
mode = self.mode
o = self.get_output_shape(i, f, s, b)
fwd_OK = True
gradweight_OK = True
......@@ -296,9 +301,9 @@ class TestCpuConv2d(TestConv2d):
if fwd_OK:
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip, target_op=ConvOp)
verify_grad=(gradweights_ok and gradinput_ok),
mode=mode, provide_shape=provide_shape,
border_mode=b, filter_flip=flip, target_op=ConvOp)
else:
self.assertRaises(NotImplementedError,
self.run_fwd,
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论