提交 ea45d835 authored 作者: Alexander Matyasko's avatar Alexander Matyasko

Add gpu pooling local optimization

上级 c65fa17f
...@@ -48,7 +48,7 @@ from .blas import (gpu_dot22, GpuGemm, GpuGer, GpuGemmBatch, ...@@ -48,7 +48,7 @@ from .blas import (gpu_dot22, GpuGemm, GpuGer, GpuGemmBatch,
gpugemv_no_inplace, gpugemv_inplace, gpugemv_no_inplace, gpugemv_inplace,
GpuCorrMM, GpuCorrMM_gradInputs, GpuCorrMM_gradWeights, GpuCorrMM, GpuCorrMM_gradInputs, GpuCorrMM_gradWeights,
GpuCorr3dMM, GpuCorr3dMM_gradInputs, GpuCorr3dMM_gradWeights, GpuCorr3dMM, GpuCorr3dMM_gradInputs, GpuCorr3dMM_gradWeights,
GpuDownsampleFactorMaxGradGrad) GpuPool, GpuDownsampleFactorMaxGradGrad)
from .blocksparse import (GpuSparseBlockGemv, GpuSparseBlockOuter, from .blocksparse import (GpuSparseBlockGemv, GpuSparseBlockOuter,
gpu_sparse_block_outer, gpu_sparse_block_outer,
gpu_sparse_block_outer_inplace, gpu_sparse_block_outer_inplace,
...@@ -1591,6 +1591,27 @@ def local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs): ...@@ -1591,6 +1591,27 @@ def local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs):
return [op(*inps)] return [op(*inps)]
@register_opt()
@op_lifter([pool.Pool])
@register_opt2([pool.Pool])
def local_gpu_pool(op, ctx_name, inputs, outputs):
    """Lift a CPU ``pool.Pool`` node to its ``GpuPool`` counterpart.

    Only 2D and 3D pooling is handled; other dimensionalities fall
    through (return ``None``) so the original node is kept.
    """
    # Guard against a Pool op whose props layout differs from what we
    # unpack below.
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    gpu_op = GpuPool(op.ignore_border, op.mode, op.ndim)
    # GpuPool expects exactly 2 leading non-pooling dimensions.
    if inp.ndim == nd + 2:
        return gpu_op(inp, ws, stride, pad)
    # reshape to 4D or 5D with 2 non-pooling dimensions
    padded = pad_dims(inp, 2, nd)
    return unpad_dims(gpu_op(padded, ws, stride, pad), inp, 2, nd)
@register_opt() @register_opt()
@op_lifter([pool.DownsampleFactorMaxGradGrad]) @op_lifter([pool.DownsampleFactorMaxGradGrad])
@register_opt2([pool.DownsampleFactorMaxGradGrad]) @register_opt2([pool.DownsampleFactorMaxGradGrad])
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论