Commit e97223be, authored by Gabe Schwartz

Updated cudnn tests to always handle dilation.

Parent commit: 6217e848
...@@ -641,7 +641,7 @@ class GpuDnnConv(DnnBase): ...@@ -641,7 +641,7 @@ class GpuDnnConv(DnnBase):
return [[1], [1], [1], [0], [1], [1]] return [[1], [1], [1], [0], [1], [1]]
@staticmethod @staticmethod
def get_out_shape(ishape, kshape, border_mode, subsample): def get_out_shape(ishape, kshape, border_mode, subsample, dilation):
""" """
This function computes the output shape for a convolution with This function computes the output shape for a convolution with
the specified parameters. `ishape` and `kshape` can be symbolic the specified parameters. `ishape` and `kshape` can be symbolic
...@@ -660,7 +660,8 @@ class GpuDnnConv(DnnBase): ...@@ -660,7 +660,8 @@ class GpuDnnConv(DnnBase):
ishape, ishape,
kshape, kshape,
border_mode, border_mode,
subsample) subsample,
dilation)
def infer_shape(self, node, shape): def infer_shape(self, node, shape):
return [shape[2]] return [shape[2]]
......
...@@ -13,7 +13,7 @@ import theano.tensor as T ...@@ -13,7 +13,7 @@ import theano.tensor as T
import theano.tests.unittest_tools as utt import theano.tests.unittest_tools as utt
from theano.tensor.signal.pool import pool_2d, pool_3d from theano.tensor.signal.pool import pool_2d, pool_3d
from theano.tensor.signal.pool import Pool, MaxPoolGrad, AveragePoolGrad from theano.tensor.signal.pool import Pool, MaxPoolGrad, AveragePoolGrad
from theano.tensor.nnet.abstract_conv import get_conv_output_shape from theano.tensor.nnet.abstract_conv import get_conv_output_shape, get_conv_gradinputs_shape
from theano.tensor.nnet import bn from theano.tensor.nnet import bn
from .. import dnn from .. import dnn
...@@ -45,9 +45,9 @@ def test_dnn_conv_desc_merge(): ...@@ -45,9 +45,9 @@ def test_dnn_conv_desc_merge():
raise SkipTest(dnn.dnn_available.msg) raise SkipTest(dnn.dnn_available.msg)
kern_shp = T.as_tensor_variable( kern_shp = T.as_tensor_variable(
np.asarray([3, 1, 2, 2]).astype('int64')) np.asarray([3, 1, 2, 2]).astype('int64'))
desc1 = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(2, 2), desc1 = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(2, 2), dilation=(1, 1),
conv_mode='conv')(kern_shp) conv_mode='conv')(kern_shp)
desc2 = dnn.GpuDnnConvDesc(border_mode='full', subsample=(1, 1), desc2 = dnn.GpuDnnConvDesc(border_mode='full', subsample=(1, 1), dilation=(1, 1),
conv_mode='cross')(kern_shp) conv_mode='cross')(kern_shp)
# CDataType is not DeepCopyable so this will crash if we don't use # CDataType is not DeepCopyable so this will crash if we don't use
# borrow=True # borrow=True
...@@ -601,32 +601,35 @@ class TestDnnInferShapes(utt.InferShapeTester): ...@@ -601,32 +601,35 @@ class TestDnnInferShapes(utt.InferShapeTester):
dnn.GpuDnnSoftmaxGrad dnn.GpuDnnSoftmaxGrad
) )
def _test_conv(self, img, kerns, out, img_val, kern_vals, border_mode, conv_mode, subsamples, algo): def _test_conv(self, img, kerns, out, img_val, kern_vals, border_mode, conv_mode, subsamples, dilations, algo):
if not dnn.dnn_available(test_ctx_name): if not dnn.dnn_available(test_ctx_name):
raise SkipTest(dnn.dnn_available.msg) raise SkipTest(dnn.dnn_available.msg)
img_val = np.asarray(img_val, dtype=theano.config.floatX) img_val = np.asarray(img_val, dtype=theano.config.floatX)
kern_vals = np.asarray(kern_vals, dtype=theano.config.floatX) kern_vals = np.asarray(kern_vals, dtype=theano.config.floatX)
for subsample in subsamples: for dilation in dilations:
out_vals = np.zeros( for subsample in subsamples:
dnn.GpuDnnConv.get_out_shape(img_val.shape, kern_vals.shape, out_vals = np.zeros(
border_mode=border_mode, dnn.GpuDnnConv.get_out_shape(img_val.shape, kern_vals.shape,
subsample=subsample), border_mode=border_mode,
dtype=theano.config.floatX) subsample=subsample,
desc = dnn.GpuDnnConvDesc( dilation=dilation),
border_mode=border_mode, dtype=theano.config.floatX)
subsample=subsample, desc = dnn.GpuDnnConvDesc(
conv_mode=conv_mode, border_mode=border_mode,
precision=set_precision(theano.config.floatX) subsample=subsample,
)(kerns.shape) dilation=dilation,
conv = dnn.GpuDnnConv(algo=algo)(img, kerns, out, desc) conv_mode=conv_mode,
self._compile_and_check( precision=set_precision(theano.config.floatX)
[img, kerns, out], )(kerns.shape)
[conv], conv = dnn.GpuDnnConv(algo=algo)(img, kerns, out, desc)
[img_val, kern_vals, out_vals], self._compile_and_check(
dnn.GpuDnnConv [img, kerns, out],
) [conv],
[img_val, kern_vals, out_vals],
dnn.GpuDnnConv
)
@parameterized.expand(chain(product([SUPPORTED_DNN_CONV_ALGO_FWD[0]], @parameterized.expand(chain(product([SUPPORTED_DNN_CONV_ALGO_FWD[0]],
border_modes, border_modes,
...@@ -636,18 +639,25 @@ class TestDnnInferShapes(utt.InferShapeTester): ...@@ -636,18 +639,25 @@ class TestDnnInferShapes(utt.InferShapeTester):
[conv_modes[0]])), [conv_modes[0]])),
testcase_func_name=utt.custom_name_func) testcase_func_name=utt.custom_name_func)
def test_conv(self, algo, border_mode, conv_mode): def test_conv(self, algo, border_mode, conv_mode):
# Currently only CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM (algo 'none')
# supports dilation > 1.
dilations = [(1, 1), (2, 2)] if algo == "none" else [(1, 1)]
self._test_conv(T.tensor4('img'), self._test_conv(T.tensor4('img'),
T.tensor4('kerns'), T.tensor4('kerns'),
T.tensor4('out'), T.tensor4('out'),
np.random.rand(7, 2, 8, 4), np.random.rand(7, 2, 12, 16),
np.random.rand(8, 2, 4, 3), np.random.rand(8, 2, 4, 3),
border_mode, border_mode,
conv_mode, conv_mode,
[(1, 1), (2, 2)], [(1, 1), (2, 2)],
dilations,
algo) algo)
@parameterized.expand(product(border_modes, conv_modes), utt.custom_name_func) @parameterized.expand(product(border_modes, conv_modes), utt.custom_name_func)
def test_conv3d_none(self, border_mode, conv_mode): def test_conv3d_none(self, border_mode, conv_mode):
# CUDNN docs don't say that 3D conv can't handle dilation, but it returns
# CUDNN_STATUS_NOT_SUPPORTED if you try it.
self._test_conv(T.tensor5('img'), self._test_conv(T.tensor5('img'),
T.tensor5('kerns'), T.tensor5('kerns'),
T.tensor5('out'), T.tensor5('out'),
...@@ -656,44 +666,49 @@ class TestDnnInferShapes(utt.InferShapeTester): ...@@ -656,44 +666,49 @@ class TestDnnInferShapes(utt.InferShapeTester):
border_mode, border_mode,
conv_mode, conv_mode,
[(1, 1, 1), (2, 2, 2)], [(1, 1, 1), (2, 2, 2)],
[(1, 1, 1)],
'none') 'none')
def _test_conv_gradw(self, img, topgrad, kerns, img_shape, kerns_shape, border_mode, conv_mode, subsample): def _test_conv_gradw(self, img, topgrad, kerns, img_shape, kerns_shape, border_mode, conv_mode, subsamples, dilations):
if not dnn.dnn_available(test_ctx_name): if not dnn.dnn_available(test_ctx_name):
raise SkipTest(dnn.dnn_available.msg) raise SkipTest(dnn.dnn_available.msg)
topgrad_shape = get_conv_output_shape(img_shape, kerns_shape, kerns_vals = np.zeros(kerns_shape, dtype=theano.config.floatX)
border_mode, subsample) kerns_shape_shared = theano.shared(np.asarray(kerns_shape))
img_val = np.asarray( for dilation in dilations:
np.random.rand(*img_shape), for subsample in subsamples:
dtype=theano.config.floatX topgrad_shape = get_conv_output_shape(img_shape, kerns_shape,
) border_mode, subsample, dilation)
topgrad_vals = np.asarray(
np.random.rand(*topgrad_shape),
dtype=theano.config.floatX
)
kerns_vals = np.zeros(kerns_shape, dtype=theano.config.floatX) img_val = np.asarray(
kerns_shape = theano.shared(np.asarray(kerns_shape)) np.random.rand(*img_shape),
desc = dnn.GpuDnnConvDesc( dtype=theano.config.floatX
border_mode=border_mode, )
subsample=subsample, topgrad_vals = np.asarray(
conv_mode=conv_mode, np.random.rand(*topgrad_shape),
precision=set_precision(theano.config.floatX) dtype=theano.config.floatX
)(kerns_shape) )
conv_grad_w = dnn.GpuDnnConvGradW()(
img, desc = dnn.GpuDnnConvDesc(
topgrad, border_mode=border_mode,
kerns, subsample=subsample,
desc, dilation=dilation,
) conv_mode=conv_mode,
self._compile_and_check( precision=set_precision(theano.config.floatX)
[img, topgrad, kerns], )(kerns_shape_shared)
[conv_grad_w], conv_grad_w = dnn.GpuDnnConvGradW()(
[img_val, topgrad_vals, kerns_vals], img,
dnn.GpuDnnConvGradW topgrad,
) kerns,
desc,
)
self._compile_and_check(
[img, topgrad, kerns],
[conv_grad_w],
[img_val, topgrad_vals, kerns_vals],
dnn.GpuDnnConvGradW
)
@parameterized.expand(product(border_modes, conv_modes), utt.custom_name_func) @parameterized.expand(product(border_modes, conv_modes), utt.custom_name_func)
def test_conv_gradw(self, border_mode, conv_mode): def test_conv_gradw(self, border_mode, conv_mode):
...@@ -704,7 +719,8 @@ class TestDnnInferShapes(utt.InferShapeTester): ...@@ -704,7 +719,8 @@ class TestDnnInferShapes(utt.InferShapeTester):
(1, 2, 3, 7), (1, 2, 3, 7),
border_mode, border_mode,
conv_mode, conv_mode,
(1, 1)) [(1, 1)],
[(1, 1), (2, 2)])
def test_conv_gradi(self): def test_conv_gradi(self):
if not dnn.dnn_available(test_ctx_name): if not dnn.dnn_available(test_ctx_name):
...@@ -713,29 +729,27 @@ class TestDnnInferShapes(utt.InferShapeTester): ...@@ -713,29 +729,27 @@ class TestDnnInferShapes(utt.InferShapeTester):
kerns = T.tensor4('kerns') kerns = T.tensor4('kerns')
out = T.tensor4('out') out = T.tensor4('out')
kern_vals = np.asarray( kern_vals = np.asarray(
np.random.rand(13, 14, 15, 16), np.random.rand(13, 4, 5, 6),
dtype=theano.config.floatX dtype=theano.config.floatX
) )
out_vals = np.asarray( out_vals = np.asarray(
np.random.rand(3, 13, 5, 6), np.random.rand(3, 13, 9, 11),
dtype=theano.config.floatX dtype=theano.config.floatX
) )
for params in product( for border_mode, subsample, dilation, conv_mode in product(
['valid'], # Should this work for 'full'? ['valid', 'full'],
[(1, 1)], [(1, 1)],
[(1, 1), (2, 2)],
['conv', 'cross'] ['conv', 'cross']
): ):
shape = ( shape = get_conv_gradinputs_shape(kern_vals.shape, out_vals.shape, border_mode, subsample, dilation)
out_vals.shape[0], kern_vals.shape[1],
out_vals.shape[2] + kern_vals.shape[2] - 1,
out_vals.shape[3] + kern_vals.shape[3] - 1
)
img_vals = np.zeros(shape, dtype=theano.config.floatX) img_vals = np.zeros(shape, dtype=theano.config.floatX)
desc = dnn.GpuDnnConvDesc( desc = dnn.GpuDnnConvDesc(
border_mode=params[0], border_mode=border_mode,
subsample=params[1], subsample=subsample,
conv_mode=params[2], dilation=dilation,
conv_mode=conv_mode,
precision=set_precision(theano.config.floatX) precision=set_precision(theano.config.floatX)
)(kerns.shape) )(kerns.shape)
conv_grad_i = dnn.GpuDnnConvGradI()( conv_grad_i = dnn.GpuDnnConvGradI()(
...@@ -981,18 +995,18 @@ def test_dnn_conv_grad(): ...@@ -981,18 +995,18 @@ def test_dnn_conv_grad():
iw - kw + 1)).astype(theano.config.floatX) iw - kw + 1)).astype(theano.config.floatX)
def dconv(img, kern, out): def dconv(img, kern, out):
desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1), desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1), dilation=(1, 1),
conv_mode='conv', precision=set_precision(theano.config.floatX))(kern.shape) conv_mode='conv', precision=set_precision(theano.config.floatX))(kern.shape)
return dnn.GpuDnnConv()(img, kern, out, desc, alpha=0.5, beta=0.75) return dnn.GpuDnnConv()(img, kern, out, desc, alpha=0.5, beta=0.75)
def dconvi(img, kern, out): def dconvi(img, kern, out):
desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1), desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1), dilation=(1, 1),
conv_mode='conv', precision=set_precision(theano.config.floatX))(kern.shape) conv_mode='conv', precision=set_precision(theano.config.floatX))(kern.shape)
return dnn.GpuDnnConvGradI()(kern, out, img, desc, alpha=-1.0, return dnn.GpuDnnConvGradI()(kern, out, img, desc, alpha=-1.0,
beta=0.0) beta=0.0)
def dconvw(img, kern, out): def dconvw(img, kern, out):
desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1), desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1), dilation=(1, 1),
conv_mode='conv', precision=set_precision(theano.config.floatX))(kern.shape) conv_mode='conv', precision=set_precision(theano.config.floatX))(kern.shape)
return dnn.GpuDnnConvGradW()(img, out, kern, desc, alpha=0.75, return dnn.GpuDnnConvGradW()(img, out, kern, desc, alpha=0.75,
beta=-1.0) beta=-1.0)
...@@ -1004,29 +1018,29 @@ def test_dnn_conv_grad(): ...@@ -1004,29 +1018,29 @@ def test_dnn_conv_grad():
def get_conv3d_test_cases(): def get_conv3d_test_cases():
# Every element of test_shapes follows the format # Every element of test_shapes follows the format
# [input_shape, filter_shape, subsample] # [input_shape, filter_shape, subsample, dilation]
test_shapes = [[(128, 3, 5, 5, 5), (64, 3, 1, 2, 4), (1, 1, 1)], test_shapes = [[(128, 3, 5, 5, 5), (64, 3, 1, 2, 4), (1, 1, 1), (1, 1, 1)],
[(8, 4, 20, 12, 15), (5, 4, 6, 12, 4), (2, 2, 2)], [(8, 4, 20, 12, 15), (5, 4, 6, 12, 4), (2, 2, 2), (1, 1, 1)],
[(8, 1, 20, 12, 15), (5, 1, 6, 12, 4), (3, 3, 3)], [(8, 1, 20, 12, 15), (5, 1, 6, 12, 4), (3, 3, 3), (1, 1, 1)],
[(8, 1, 20, 12, 15), (5, 1, 6, 12, 4), (3, 2, 1)], [(8, 1, 20, 12, 15), (5, 1, 6, 12, 4), (3, 2, 1), (1, 1, 1)],
# Test with 1x1x1 filters # Test with 1x1x1 filters
[(8, 1, 10, 10, 10), (10, 1, 1, 1, 1), (1, 1, 1)], [(8, 1, 10, 10, 10), (10, 1, 1, 1, 1), (1, 1, 1), (1, 1, 1)],
# Test with dimensions larger than 1024 (thread block dim) # Test with dimensions larger than 1024 (thread block dim)
[(1025, 1, 2, 3, 4), (5, 1, 1, 2, 3), (1, 1, 1)], [(1025, 1, 2, 3, 4), (5, 1, 1, 2, 3), (1, 1, 1), (1, 1, 1)],
[(8, 1, 2, 3, 4), (1025, 1, 1, 2, 3), (1, 1, 1)], [(8, 1, 2, 3, 4), (1025, 1, 1, 2, 3), (1, 1, 1), (1, 1, 1)],
[(8, 1025, 2, 3, 4), (5, 1025, 1, 1, 2), (1, 1, 1)], [(8, 1025, 2, 3, 4), (5, 1025, 1, 1, 2), (1, 1, 1), (1, 1, 1)],
[(8, 1, 1030, 3, 4), (5, 1, 1025, 1, 1), (1, 1, 1)], [(8, 1, 1030, 3, 4), (5, 1, 1025, 1, 1), (1, 1, 1), (1, 1, 1)],
[(8, 1, 2, 1030, 4), (5, 1, 2, 1025, 1), (1, 1, 1)], [(8, 1, 2, 1030, 4), (5, 1, 2, 1025, 1), (1, 1, 1), (1, 1, 1)],
[(8, 1, 2, 3, 1030), (5, 1, 1, 2, 1025), (1, 1, 1)], [(8, 1, 2, 3, 1030), (5, 1, 1, 2, 1025), (1, 1, 1), (1, 1, 1)],
# The equivalent of this caused a crash with conv2d # The equivalent of this caused a crash with conv2d
[(1, 1, 1, 44800, 1), (6, 1, 1, 1, 1), (1, 1, 1)]] [(1, 1, 1, 44800, 1), (6, 1, 1, 1, 1), (1, 1, 1), (1, 1, 1)]]
# With border mode 'full', test with kernel bigger than image in some/all # With border mode 'full', test with kernel bigger than image in some/all
# dimensions # dimensions
test_shapes_full = [[(6, 2, 2, 2, 2), (4, 2, 3, 1, 1), (1, 1, 1)], test_shapes_full = [[(6, 2, 2, 2, 2), (4, 2, 3, 1, 1), (1, 1, 1), (1, 1, 1)],
[(6, 2, 2, 2, 2), (4, 2, 1, 3, 1), (1, 1, 1)], [(6, 2, 2, 2, 2), (4, 2, 1, 3, 1), (1, 1, 1), (1, 1, 1)],
[(6, 2, 2, 2, 2), (4, 2, 1, 1, 3), (1, 1, 1)], [(6, 2, 2, 2, 2), (4, 2, 1, 1, 3), (1, 1, 1), (1, 1, 1)],
[(6, 2, 2, 2, 2), (4, 2, 5, 5, 5), (1, 1, 1)]] [(6, 2, 2, 2, 2), (4, 2, 5, 5, 5), (1, 1, 1), (1, 1, 1)]]
border_modes = ['valid', 'full', 'half', (1, 2, 3), (3, 2, 1), 1, 2] border_modes = ['valid', 'full', 'half', (1, 2, 3), (3, 2, 1), 1, 2]
conv_modes = ['conv', 'cross'] conv_modes = ['conv', 'cross']
...@@ -1043,7 +1057,7 @@ def test_conv3d_fwd(): ...@@ -1043,7 +1057,7 @@ def test_conv3d_fwd():
utt.seed_rng() utt.seed_rng()
def run_conv3d_fwd(inputs_shape, filters_shape, subsample, def run_conv3d_fwd(inputs_shape, filters_shape, subsample,
border_mode, conv_mode): dilation, border_mode, conv_mode):
inputs_val = np.random.random(inputs_shape).astype(theano.config.floatX) inputs_val = np.random.random(inputs_shape).astype(theano.config.floatX)
filters_val = np.random.random(filters_shape).astype(theano.config.floatX) filters_val = np.random.random(filters_shape).astype(theano.config.floatX)
...@@ -1059,6 +1073,7 @@ def test_conv3d_fwd(): ...@@ -1059,6 +1073,7 @@ def test_conv3d_fwd():
# Compile a theano function for the cuDNN implementation # Compile a theano function for the cuDNN implementation
conv = dnn.dnn_conv3d(img=inputs, kerns=filters, conv = dnn.dnn_conv3d(img=inputs, kerns=filters,
border_mode=border_mode, subsample=subsample, border_mode=border_mode, subsample=subsample,
dilation=dilation,
conv_mode=conv_mode) conv_mode=conv_mode)
f = theano.function([], conv, mode=mode_with_gpu) f = theano.function([], conv, mode=mode_with_gpu)
...@@ -1071,7 +1086,8 @@ def test_conv3d_fwd(): ...@@ -1071,7 +1086,8 @@ def test_conv3d_fwd():
# Compile a theano function for the reference implementation # Compile a theano function for the reference implementation
conv_ref = theano.tensor.nnet.corr3d.Corr3dMM(border_mode=border_mode, conv_ref = theano.tensor.nnet.corr3d.Corr3dMM(border_mode=border_mode,
subsample=subsample subsample=subsample,
filter_dilation=dilation,
)(ref_cast(inputs), flipped_filters) )(ref_cast(inputs), flipped_filters)
f_ref = theano.function([], conv_ref, mode="FAST_RUN") f_ref = theano.function([], conv_ref, mode="FAST_RUN")
...@@ -1086,8 +1102,8 @@ def test_conv3d_fwd(): ...@@ -1086,8 +1102,8 @@ def test_conv3d_fwd():
utt.assert_allclose(res_ref, res, rtol=rtol) utt.assert_allclose(res_ref, res, rtol=rtol)
test_cases = get_conv3d_test_cases() test_cases = get_conv3d_test_cases()
for (i_shape, f_shape, subsample), border_mode, conv_mode in test_cases: for (i_shape, f_shape, subsample, dilation), border_mode, conv_mode in test_cases:
yield (run_conv3d_fwd, i_shape, f_shape, subsample, border_mode, yield (run_conv3d_fwd, i_shape, f_shape, subsample, dilation, border_mode,
conv_mode) conv_mode)
...@@ -1098,7 +1114,7 @@ def test_conv3d_bwd(): ...@@ -1098,7 +1114,7 @@ def test_conv3d_bwd():
utt.seed_rng() utt.seed_rng()
def run_conv3d_bwd(inputs_shape, filters_shape, subsample, def run_conv3d_bwd(inputs_shape, filters_shape, subsample,
border_mode, conv_mode): dilation, border_mode, conv_mode):
inputs_val = np.random.random(inputs_shape).astype(theano.config.floatX) inputs_val = np.random.random(inputs_shape).astype(theano.config.floatX)
filters_val = np.random.random(filters_shape).astype(theano.config.floatX) filters_val = np.random.random(filters_shape).astype(theano.config.floatX)
...@@ -1108,7 +1124,9 @@ def test_conv3d_bwd(): ...@@ -1108,7 +1124,9 @@ def test_conv3d_bwd():
# Compile a theano function for the cuDNN implementation # Compile a theano function for the cuDNN implementation
conv = dnn.dnn_conv3d(img=inputs, kerns=filters, conv = dnn.dnn_conv3d(img=inputs, kerns=filters,
border_mode=border_mode, subsample=subsample, border_mode=border_mode,
subsample=subsample,
dilation=dilation,
conv_mode=conv_mode) conv_mode=conv_mode)
grad_i, grad_w = theano.tensor.grad(conv.sum(), [inputs, filters]) grad_i, grad_w = theano.tensor.grad(conv.sum(), [inputs, filters])
...@@ -1124,7 +1142,8 @@ def test_conv3d_bwd(): ...@@ -1124,7 +1142,8 @@ def test_conv3d_bwd():
# Compile a theano function for the reference implementation # Compile a theano function for the reference implementation
conv_ref = theano.tensor.nnet.corr3d.Corr3dMM(border_mode=border_mode, conv_ref = theano.tensor.nnet.corr3d.Corr3dMM(border_mode=border_mode,
subsample=subsample subsample=subsample,
filter_dilation=dilation,
)(ref_cast(inputs), flipped_filters) )(ref_cast(inputs), flipped_filters)
(grad_i_ref, (grad_i_ref,
grad_w_ref) = theano.tensor.grad(conv_ref.sum(), grad_w_ref) = theano.tensor.grad(conv_ref.sum(),
...@@ -1144,8 +1163,8 @@ def test_conv3d_bwd(): ...@@ -1144,8 +1163,8 @@ def test_conv3d_bwd():
utt.assert_allclose(res_ref[1], res[1], rtol=rtol) utt.assert_allclose(res_ref[1], res[1], rtol=rtol)
test_cases = get_conv3d_test_cases() test_cases = get_conv3d_test_cases()
for (i_shape, f_shape, subsample), border_mode, conv_mode in test_cases: for (i_shape, f_shape, subsample, dilation), border_mode, conv_mode in test_cases:
yield (run_conv3d_bwd, i_shape, f_shape, subsample, border_mode, yield (run_conv3d_bwd, i_shape, f_shape, subsample, dilation, border_mode,
conv_mode) conv_mode)
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment