提交 8244819a authored 作者: Tegan Maharaj's avatar Tegan Maharaj

using tensor.nnet.conv2d, which accepts filter_flip instead of .conv.conv2d,…

Use tensor.nnet.conv2d (which accepts filter_flip) instead of tensor.nnet.conv.conv2d; also removed unused inputs.
上级 30c18aa4
...@@ -1624,7 +1624,6 @@ class TestConv2dGrads(unittest.TestCase): ...@@ -1624,7 +1624,6 @@ class TestConv2dGrads(unittest.TestCase):
fltr_shape = fltr_shape[::1] # conv2d doesn't seem to have filter_flip fltr_shape = fltr_shape[::1] # conv2d doesn't seem to have filter_flip
# get random values of the right shapes # get random values of the right shapes
input_val = self.random_stream.random_sample(in_shape).astype(theano.config.floatX)
filter_val = self.random_stream.random_sample(fltr_shape).astype(theano.config.floatX) filter_val = self.random_stream.random_sample(fltr_shape).astype(theano.config.floatX)
out_grad_shape = theano.tensor.nnet.abstract_conv.get_conv_output_shape(image_shape=in_shape, out_grad_shape = theano.tensor.nnet.abstract_conv.get_conv_output_shape(image_shape=in_shape,
kernel_shape=fltr_shape, kernel_shape=fltr_shape,
...@@ -1633,16 +1632,17 @@ class TestConv2dGrads(unittest.TestCase): ...@@ -1633,16 +1632,17 @@ class TestConv2dGrads(unittest.TestCase):
out_grad_val = self.random_stream.random_sample(out_grad_shape).astype(theano.config.floatX) out_grad_val = self.random_stream.random_sample(out_grad_shape).astype(theano.config.floatX)
# old conv # old conv
conv_out = theano.tensor.nnet.conv.conv2d(self.x, conv_out = theano.tensor.nnet.conv2d(self.x,
filters=self.w, filters=self.w,
border_mode=bm, border_mode=bm,
subsample=ss, subsample=ss,
image_shape=in_shape, input_shape=in_shape,
filter_shape=fltr_shape filter_shape=fltr_shape,
filter_flip=ff
) )
# grad of old conv # grad of old conv
conv_grad = theano.grad(conv_out.sum(), wrt=[self.x], known_grads={conv_out: self.output_grad}) conv_grad = theano.grad(conv_out.sum(), wrt=self.x, known_grads={conv_out: self.output_grad})
f_old = theano.function([self.x, self.w, self.output_grad], conv_grad) f_old = theano.function([self.w, self.output_grad], conv_grad)
# new conv + grad (wrt i) # new conv + grad (wrt i)
conv_wrt_i_out = theano.tensor.nnet.abstract_conv.conv2d_grad_wrt_inputs(output_grad=self.output_grad_wrt, conv_wrt_i_out = theano.tensor.nnet.abstract_conv.conv2d_grad_wrt_inputs(output_grad=self.output_grad_wrt,
...@@ -1656,7 +1656,7 @@ class TestConv2dGrads(unittest.TestCase): ...@@ -1656,7 +1656,7 @@ class TestConv2dGrads(unittest.TestCase):
f_new = theano.function([self.w, self.output_grad_wrt], conv_wrt_i_out) f_new = theano.function([self.w, self.output_grad_wrt], conv_wrt_i_out)
# check that they're equal # check that they're equal
utt.assert_allclose(f_new(filter_val, out_grad_val), f_old(input_val, filter_val, out_grad_val)) utt.assert_allclose(f_new(filter_val, out_grad_val), f_old(filter_val, out_grad_val))
def test_conv2d_grad_wrt_weights(self): def test_conv2d_grad_wrt_weights(self):
"""Compares calculated abstract grads wrt weights with the fwd grads """Compares calculated abstract grads wrt weights with the fwd grads
...@@ -1669,24 +1669,22 @@ class TestConv2dGrads(unittest.TestCase): ...@@ -1669,24 +1669,22 @@ class TestConv2dGrads(unittest.TestCase):
for bm in self.border_modes: for bm in self.border_modes:
for ss in self.subsamples: for ss in self.subsamples:
for ff in self.filter_flip: for ff in self.filter_flip:
if self.filter_flip:
fltr_shape = fltr_shape[::1] # conv2d doesn't seem to have filter_flip
input_val = self.random_stream.random_sample(in_shape).astype(theano.config.floatX) input_val = self.random_stream.random_sample(in_shape).astype(theano.config.floatX)
filter_val = self.random_stream.random_sample(fltr_shape).astype(theano.config.floatX)
out_grad_shape = theano.tensor.nnet.abstract_conv.get_conv_output_shape(image_shape=in_shape, out_grad_shape = theano.tensor.nnet.abstract_conv.get_conv_output_shape(image_shape=in_shape,
kernel_shape=fltr_shape, kernel_shape=fltr_shape,
border_mode=bm, border_mode=bm,
subsample=ss) subsample=ss)
out_grad_val = self.random_stream.random_sample(out_grad_shape).astype(theano.config.floatX) out_grad_val = self.random_stream.random_sample(out_grad_shape).astype(theano.config.floatX)
conv_out = theano.tensor.nnet.conv.conv2d(self.x, conv_out = theano.tensor.nnet.conv2d(self.x,
filters=self.w, filters=self.w,
border_mode=bm, border_mode=bm,
subsample=ss, subsample=ss,
image_shape=in_shape, input_shape=in_shape,
filter_shape=fltr_shape filter_shape=fltr_shape,
filter_flip=ff
) )
conv_grad = theano.grad(conv_out.sum(), wrt=[self.w], known_grads={conv_out: self.output_grad}) conv_grad = theano.grad(conv_out.sum(), wrt=self.w, known_grads={conv_out: self.output_grad})
f_old = theano.function([self.x, self.w, self.output_grad], conv_grad) f_old = theano.function([self.x, self.output_grad], conv_grad)
conv_wrt_w_out = theano.tensor.nnet.abstract_conv.conv2d_grad_wrt_weights(self.x, conv_wrt_w_out = theano.tensor.nnet.abstract_conv.conv2d_grad_wrt_weights(self.x,
output_grad=self.output_grad_wrt, output_grad=self.output_grad_wrt,
...@@ -1697,4 +1695,4 @@ class TestConv2dGrads(unittest.TestCase): ...@@ -1697,4 +1695,4 @@ class TestConv2dGrads(unittest.TestCase):
filter_flip=ff filter_flip=ff
) )
f_new = theano.function([self.x, self.output_grad_wrt], conv_wrt_w_out) f_new = theano.function([self.x, self.output_grad_wrt], conv_wrt_w_out)
utt.assert_allclose(f_new(input_val, out_grad_val), f_old(input_val, filter_val, out_grad_val)) utt.assert_allclose(f_new(input_val, out_grad_val), f_old(input_val, out_grad_val))
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论