提交 d15656b7 authored 作者: Nicolas Ballas's avatar Nicolas Ballas 提交者: Pascal Lamblin

update

上级 77ecf5c6
...@@ -275,11 +275,11 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d): ...@@ -275,11 +275,11 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
self.border_mode, self.border_mode,
self.subsample, self.subsample,
self.filter_flip)(bottom, weights) self.filter_flip)(bottom, weights)
d_height_width = (theano.gradient.DisconnectedType()(),) * 2 d_height_width = (theano.gradient.DisconnectedType()(),)
return (d_bottom, d_top) + d_height_width return (d_bottom, d_top) + d_height_width
def connection_pattern(self, node): def connection_pattern(self, node):
return [[1], [1], [0], [0]] # no connection to height, width return [[1], [1], [0]] # no connection to height, width
class AbstractConv2d_gradInputs(BaseAbstractConv2d): class AbstractConv2d_gradInputs(BaseAbstractConv2d):
...@@ -331,13 +331,13 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d): ...@@ -331,13 +331,13 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d):
self.bsize, self.bsize,
self.border_mode, self.border_mode,
self.subsample)(bottom, top, weights.shape[-2:]) self.subsample)(bottom, top, weights.shape[-2:])
d_top = AbstractConv2d(self.imshp, self.filter_shape, self.bsize, d_top = AbstractConv2d(self.imshp, self.kshp, self.bsize,
self.border_mode, self.subsample)(bottom, weights) self.border_mode, self.subsample)(bottom, weights)
d_height_width = (theano.gradient.DisconnectedType()(),) * 2 d_height_width = (theano.gradient.DisconnectedType()(),)
return (d_weights, d_top) + d_height_width return (d_weights, d_top) + d_height_width
def connection_pattern(self, node): def connection_pattern(self, node):
return [[1], [1], [0], [0]] # no connection to height, width return [[1], [1], [0]] # no connection to height, width
### Optimizations should be move in their appropriate files ### Optimizations should be move in their appropriate files
......
...@@ -114,10 +114,14 @@ class TestConv2d(unittest.TestCase): ...@@ -114,10 +114,14 @@ class TestConv2d(unittest.TestCase):
print res_ref.shape, res.shape print res_ref.shape, res.shape
utt.assert_allclose(res_ref, res) utt.assert_allclose(res_ref, res)
def abstract_conv2d_gradweight(inputs_val, output_val):
conv_op = conv.AbstractConv2d_gradInputs(border_mode=border_mode,
subsample=subsample)
return conv_op(inputs_val, output_val, filters_shape[-2:])
if verify_grad: if verify_grad:
utt.verify_grad(conv.AbstractConv2d(border_mode="valid", utt.verify_grad(abstract_conv2d_gradweight,
subsample=subsample), [inputs_val, output_val])
[inputs_val, filters_val])
def run_gradinput(self, def run_gradinput(self,
...@@ -173,18 +177,18 @@ class TestConv2d(unittest.TestCase): ...@@ -173,18 +177,18 @@ class TestConv2d(unittest.TestCase):
#def test_corrmm(self): def test_corrmm(self):
# mode = mode_with_gpu mode = mode_with_gpu
# mode = mode.excluding('cudnn') mode = mode.excluding('cudnn')
# self.run_fwd(inputs_shape=(16, 1, 2, 2), self.run_fwd(inputs_shape=(16, 1, 2, 2),
# filters_shape=(10, 1, 2, 2), filters_shape=(10, 1, 2, 2),
# verify_grad=False, mode=mode) verify_grad=False, mode=mode)
# self.run_gradweight(inputs_shape=(16, 1, 2, 2), self.run_gradweight(inputs_shape=(16, 1, 2, 2),
# filters_shape=(10, 1, 2, 2), filters_shape=(10, 1, 2, 2),
# verify_grad=False, mode=mode) verify_grad=False, mode=mode)
# self.run_gradinput(inputs_shape=(1, 1, 2, 2), self.run_gradinput(inputs_shape=(1, 1, 2, 2),
# filters_shape=(10, 1, 2, 2), filters_shape=(10, 1, 2, 2),
# verify_grad=False, mode=mode) verify_grad=False, mode=mode)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论