提交 c776e6fa authored 作者: affanv14's avatar affanv14

make all grouped-convolution tests compatible with the shared superclass

上级 de38fdfc
...@@ -2290,11 +2290,11 @@ def dconv2di(border_mode, subsample, filter_dilation, num_groups): ...@@ -2290,11 +2290,11 @@ def dconv2di(border_mode, subsample, filter_dilation, num_groups):
class Cudnn_grouped_conv(Grouped_conv_noOptim): class Cudnn_grouped_conv(Grouped_conv_noOptim):
mode = mode_with_gpu mode = mode_with_gpu
conv2d = staticmethod(dconv2d) conv = staticmethod(dconv2d)
conv2d_gradw = staticmethod(dconv2dw) conv_gradw = staticmethod(dconv2dw)
conv2d_gradi = staticmethod(dconv2di) conv_gradi = staticmethod(dconv2di)
conv2d_op = dnn.GpuDnnConv conv_op = dnn.GpuDnnConv
conv2d_gradw_op = dnn.GpuDnnConvGradW conv_gradw_op = dnn.GpuDnnConvGradW
conv2d_gradi_op = dnn.GpuDnnConvGradI conv_gradi_op = dnn.GpuDnnConvGradI
flip_filter = False flip_filter = False
is_dnn = True is_dnn = True
...@@ -224,11 +224,11 @@ class TestCorrMM(unittest.TestCase): ...@@ -224,11 +224,11 @@ class TestCorrMM(unittest.TestCase):
class TestGroupGpuCorr2d(Grouped_conv_noOptim): class TestGroupGpuCorr2d(Grouped_conv_noOptim):
mode = theano.compile.get_mode("FAST_RUN") mode = theano.compile.get_mode("FAST_RUN")
conv2d = GpuCorrMM conv = GpuCorrMM
conv2d_gradw = GpuCorrMM_gradWeights conv_gradw = GpuCorrMM_gradWeights
conv2d_gradi = GpuCorrMM_gradInputs conv_gradi = GpuCorrMM_gradInputs
conv2d_op = GpuCorrMM conv_op = GpuCorrMM
conv2d_gradw_op = GpuCorrMM_gradWeights conv_gradw_op = GpuCorrMM_gradWeights
conv2d_gradi_op = GpuCorrMM_gradInputs conv_gradi_op = GpuCorrMM_gradInputs
flip_filter = True flip_filter = True
is_dnn = False is_dnn = False
...@@ -422,12 +422,12 @@ class TestGroupCorr2d(Grouped_conv_noOptim): ...@@ -422,12 +422,12 @@ class TestGroupCorr2d(Grouped_conv_noOptim):
mode = theano.compile.get_mode("FAST_RUN") mode = theano.compile.get_mode("FAST_RUN")
else: else:
mode = None mode = None
conv2d = corr.CorrMM conv = corr.CorrMM
conv2d_gradw = corr.CorrMM_gradWeights conv_gradw = corr.CorrMM_gradWeights
conv2d_gradi = corr.CorrMM_gradInputs conv_gradi = corr.CorrMM_gradInputs
conv2d_op = corr.CorrMM conv_op = corr.CorrMM
conv2d_gradw_op = corr.CorrMM_gradWeights conv_gradw_op = corr.CorrMM_gradWeights
conv2d_gradi_op = corr.CorrMM_gradInputs conv_gradi_op = corr.CorrMM_gradInputs
flip_filter = True flip_filter = True
is_dnn = False is_dnn = False
...@@ -440,13 +440,13 @@ class TestGroupCorr2d(Grouped_conv_noOptim): ...@@ -440,13 +440,13 @@ class TestGroupCorr2d(Grouped_conv_noOptim):
kern_sym = T.tensor4('kern') kern_sym = T.tensor4('kern')
# grouped convolution graph # grouped convolution graph
conv_group = self.conv2d(num_groups=groups)(bottom_sym, kern_sym) conv_group = self.conv(num_groups=groups)(bottom_sym, kern_sym)
gconv_func = theano.function([bottom_sym, kern_sym], conv_group, mode=self.mode) gconv_func = theano.function([bottom_sym, kern_sym], conv_group, mode=self.mode)
# Graph for the normal hard way # Graph for the normal hard way
kern_offset = kern_sym.shape[0] // groups kern_offset = kern_sym.shape[0] // groups
bottom_offset = bottom_sym.shape[1] // groups bottom_offset = bottom_sym.shape[1] // groups
split_conv_output = [self.conv2d()(bottom_sym[:, i * bottom_offset:(i + 1) * bottom_offset, :, :], split_conv_output = [self.conv()(bottom_sym[:, i * bottom_offset:(i + 1) * bottom_offset, :, :],
kern_sym[i * kern_offset:(i + 1) * kern_offset, :, :, :]) kern_sym[i * kern_offset:(i + 1) * kern_offset, :, :, :])
for i in range(groups)] for i in range(groups)]
concatenated_output = T.concatenate(split_conv_output, axis=1) concatenated_output = T.concatenate(split_conv_output, axis=1)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论