提交 830e5c51 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron 提交者: Pascal Lamblin

Regroup optimizations and add an error for ConvOp.

上级 25bce5d7
......@@ -780,36 +780,37 @@ def local_assert(node, context_name):
*node.inputs[1:]))]
# These two deal with any abstract convs that have a transfer somewhere
@register_opt()
@op_lifter([AbstractConv2d])
def local_lift_abstractconv2d(node, context_name):
    """Lift an AbstractConv2d that has a host->GPU transfer on an input.

    Rebuilds the op with its image (``inputs[0]``) and kernel
    (``inputs[1]``) moved to the GPU context ``context_name``.
    """
    # BUG FIX: the original transferred node.inputs[0] twice; the second
    # argument to the conv op must be the kernel, node.inputs[1].
    return [node.op(as_gpuarray_variable(node.inputs[0],
                                         context_name=context_name),
                    as_gpuarray_variable(node.inputs[1],
                                         context_name=context_name))]
@register_opt('fast_compile')
@op_lifter([ConvOp])
def local_error_convop(node, context_name):
    """Reject legacy ConvOp nodes: they do not work on the gpuarray backend.

    Raise AssertionError explicitly instead of ``assert False`` so the
    error is still reported when Python runs with ``-O`` (which strips
    assert statements).  The exception type callers see is unchanged.
    """
    raise AssertionError("""
ConvOp does not work with the gpuarray backend.
Use the new convolution interface to have GPU convolution working:
theano.tensor.nnet.abstract_conv2d.conv2d()
""")
@register_opt()
@op_lifter([AbstractConv2d_gradWeights,
# This deals with any abstract convs that have a transfer somewhere
@register_opt('fast_compile')
@op_lifter([AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs])
def local_lift_abstractconv2dgrad(node, context_name):
    """Lift an abstract conv gradient op with a transfer on an input.

    Transfers the first two tensor inputs to the GPU context
    ``context_name``; the third input (shape information) is passed
    through unchanged.
    """
    # BUG FIX: the original transferred node.inputs[0] twice; the second
    # argument must be node.inputs[1].
    return [node.op(as_gpuarray_variable(node.inputs[0],
                                         context_name=context_name),
                    as_gpuarray_variable(node.inputs[1],
                                         context_name=context_name),
                    node.inputs[2])]
# Register this here so that it goes after the abstract lifting
register_opt()(conv_groupopt)
def local_lift_abstractconv2d(node, context_name):
    """Rebuild the abstract conv op with its first two inputs moved to
    the GPU context ``context_name``; any remaining inputs are passed
    through untouched."""
    gpu_image = as_gpuarray_variable(node.inputs[0],
                                     context_name=context_name)
    gpu_kernel = as_gpuarray_variable(node.inputs[1],
                                      context_name=context_name)
    return [node.op(gpu_image, gpu_kernel, *node.inputs[2:])]
# This will deal with ops that don't have an explicit transfer but
# have one of their inputs on the GPU already and the other not on the
# GPU (to avoid endlessly replacing things).
@register_opt()
@local_optimizer([AbstractConv2d])
@register_opt('fast_compile')
@local_optimizer([AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs])
def local_gpu_abstractconv2d(node):
if isinstance(node.op, BaseAbstractConv2d):
if ((isinstance(node.inputs[0].type, GpuArrayType) or
......@@ -822,6 +823,9 @@ def local_gpu_abstractconv2d(node):
inps[1] = as_gpuarray_variable(inps[1], context_name=ctx_name)
return as_tensor_variable(node.op(*inps))
# Register this here so that it goes after the abstract lifting
register_opt()(conv_groupopt)
@register_opt("low_memory")
@local_optimizer([GpuCAReduceCuda])
......
......@@ -31,7 +31,7 @@ def test_local_remove_all_assert():
a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())
# By default `unsafe` should not be there
f = theano.function([x], a, mode=mode_with_gpu)
f = theano.function([x], a, mode=mode_with_gpu.excluding('unsafe'))
topo = f.maker.fgraph.toposort()
a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
assert len(a_op) == 1
......
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论