Commit f8ab3cf1 authored by Arnaud Bergeron

Fix the tests for the gradient case.

Parent 3bdd3ce0
......@@ -26,7 +26,7 @@ from theano.sandbox import cuda
if cuda.cuda_available == False:
raise SkipTest('Optional package cuda disabled')
from theano.sandbox.cuda.dnn import GpuDnnConv, GpuDnnConvBase
from theano.sandbox.cuda.dnn import GpuDnnConv, GpuDnnConvBase, dnn_conv
#needed as the gpu conv don't have a perform implementation.
if theano.config.mode == 'FAST_COMPILE':
......@@ -845,6 +845,13 @@ def gemm_op(mode, subsample):
return theano.sandbox.cuda.blas.GpuCorrMM('valid', subsample, pad)
def dnn_op(mode, subsample):
    """Build a cuDNN convolution callable with a fixed configuration.

    Returns a function ``(img, kern) -> output`` that applies ``dnn_conv``
    in cross-correlation mode using the given ``border_mode`` and
    ``subsample``, so it can be passed around like the other ``*_op``
    factories in this test module.
    """
    def conv_fn(img, kern):
        # conv_mode='cross' matches the reference implementations used
        # by the gradient tests.
        return dnn_conv(img, kern, border_mode=mode,
                        conv_mode='cross', subsample=subsample)
    return conv_fn
def conv_grad(mode, bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsx, subsy, op):
ishape = (bs, ch, rImg1, rImg2)
kshape = (nf, ch, rFlt1, rFlt2)
......@@ -879,14 +886,13 @@ def conv_grad(mode, bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsx, subsy, op):
try:
conv_op_dik = theano.grad(conv_op_di.sum(), k)
conv_op_dki = theano.grad(conv_op_dk.sum(), i)
except Exception:
# skip if the reference implementation can't do it
return
corr_op_dik = theano.grad(corr_op_di.sum(), k)
corr_op_dki = theano.grad(corr_op_dk.sum(), i)
outputs.extend([corr_op_dik, conv_op_dik,
corr_op_dki, conv_op_dki])
except Exception:
# skip if the reference implementation can't do it
pass
f = theano.function([i, k], outputs, mode=theano_mode)
......@@ -907,7 +913,7 @@ def test_conv_grads():
for rImg2 in [2, 8]:
for rFlt1 in [1, 2]:
for rFlt2 in [1, 2]:
for op in [gemm_op, GpuDnnConv]:
for op in [gemm_op, dnn_op]:
yield (conv_grad, mode, bs, ch, nf,
rImg1, rImg2, rFlt1, rFlt2,
1, 1, op)
......
Markdown format supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment