提交 2aadc8ee authored 作者: sebastien-j's avatar sebastien-j

Fix import and gpuarray/dnn

上级 a6388954
...@@ -13,7 +13,7 @@ from theano.compile.ops import shape_i ...@@ -13,7 +13,7 @@ from theano.compile.ops import shape_i
from theano.configparser import AddConfigVar, EnumStr, StrParam from theano.configparser import AddConfigVar, EnumStr, StrParam
from theano.tensor.nnet import SoftmaxGrad from theano.tensor.nnet import SoftmaxGrad
from theano.tensor.signal.downsample import ( from theano.tensor.signal.downsample import (
DownsampleFactorMax, DownsampleFactorMaxGrad) DownsampleFactorMax, MaxPoolGrad, AveragePoolGrad)
from . import pygpu, init_dev from . import pygpu, init_dev
from .basic_ops import (as_gpuarray_variable, from .basic_ops import (as_gpuarray_variable,
...@@ -1654,7 +1654,7 @@ def local_pool_dnn_alternative(node): ...@@ -1654,7 +1654,7 @@ def local_pool_dnn_alternative(node):
@register_opt('cudnn') @register_opt('cudnn')
@op_lifter([DownsampleFactorMaxGrad]) @op_lifter([MaxPoolGrad])
def local_pool_dnn_grad_stride(node): def local_pool_dnn_grad_stride(node):
if not dnn_available(): if not dnn_available():
return return
...@@ -1672,6 +1672,24 @@ def local_pool_dnn_grad_stride(node): ...@@ -1672,6 +1672,24 @@ def local_pool_dnn_grad_stride(node):
gpu_contiguous(inp_grad), gpu_contiguous(inp_grad),
desc) desc)
@register_opt('cudnn')
@op_lifter([AveragePoolGrad])
def local_avg_pool_dnn_grad_stride(node):
    """Rewrite an ``AveragePoolGrad`` node into its cuDNN equivalent.

    Bails out (returns ``None``) when cuDNN is unavailable or when the
    op does not ignore borders, since the cuDNN pooling path handles
    only ``ignore_border=True``.
    """
    if not (dnn_available() and node.op.ignore_border):
        return
    inp, inp_grad = node.inputs
    op = node.op
    pool_desc = GpuDnnPoolDesc(ws=op.ds, stride=op.st,
                               mode=op.mode, pad=op.padding)()
    # NOTE(review): a tiny uninitialized buffer stands in for the forward
    # output — presumably cuDNN's average-pooling gradient never reads it;
    # confirm against the cuDNN pooling-backward contract.
    dummy_out = gpu_contiguous(
        numpy.empty((1, 1, 1, 1), dtype=numpy.float32))
    return GpuDnnPoolGrad()(gpu_contiguous(inp),
                            dummy_out,
                            gpu_contiguous(inp_grad),
                            pool_desc)
@register_opt('cudnn') @register_opt('cudnn')
@local_optimizer([GpuSoftmax]) @local_optimizer([GpuSoftmax])
......
...@@ -10,7 +10,7 @@ import theano.tensor as T ...@@ -10,7 +10,7 @@ import theano.tensor as T
import theano.tests.unittest_tools as utt import theano.tests.unittest_tools as utt
from theano.sandbox.neighbours import images2neibs from theano.sandbox.neighbours import images2neibs
from theano.tensor.signal.downsample import max_pool_2d from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.signal.downsample import DownsampleFactorMaxGrad from theano.tensor.signal.downsample import MaxPoolGrad, AveragePoolGrad
from .. import dnn from .. import dnn
from ..basic_ops import GpuAllocEmpty from ..basic_ops import GpuAllocEmpty
...@@ -264,8 +264,12 @@ def test_pooling(): ...@@ -264,8 +264,12 @@ def test_pooling():
ignore_border=True, mode=mode) ignore_border=True, mode=mode)
fc = theano.function([x], theano.grad(out.sum(), x), fc = theano.function([x], theano.grad(out.sum(), x),
mode=mode_without_gpu) mode=mode_without_gpu)
assert any([isinstance(node.op, DownsampleFactorMaxGrad) if mode == 'max':
for node in fc.maker.fgraph.toposort()]) assert any([isinstance(node.op, MaxPoolGrad)
for node in fc.maker.fgraph.toposort()])
else:
assert any([isinstance(node.op, AveragePoolGrad)
for node in fc.maker.fgraph.toposort()])
c_out = fc(data) c_out = fc(data)
assert numpy.allclose(c_out, g_out) assert numpy.allclose(c_out, g_out)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论