Commit d0dfb0be authored by sentient07

Cleaned up and fixed pep8

Parent c3e8f153
@@ -25,7 +25,7 @@ from theano.tensor.signal.pool import (
 from . import pygpu
 from .type import get_context, gpu_context_type, list_contexts, GpuArrayType
 from .basic_ops import (as_gpuarray_variable, infer_context_name,
-                        gpu_contiguous, GpuAllocEmpty, gpu_alloc_empty,
+                        gpu_contiguous, gpu_alloc_empty,
                         empty_like)
 from .elemwise import GpuElemwise
@@ -1462,8 +1462,8 @@ def local_abstractconv_cudnn_graph(op, context_name, inputs):
 @local_optimizer([AbstractConv2d, AbstractConv2d_gradWeights,
                   AbstractConv2d_gradInputs])
 def local_abstractconv_cudnn(node):
-    ctx = infer_context(*node.inputs)
-    return local_abstractconv_dnn_graph(node.op, ctx, node.inputs)
+    ctx = infer_context_name(*node.inputs)
+    return local_abstractconv_cudnn_graph(node.op, ctx, node.inputs)

 conv_groupopt.register('local_abstractconv_cudnn_graph',
                        local_abstractconv_cudnn_graph, 20,
...
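Besides the import cleanup, the hunk above fixes two wrong names in the registered optimizer: infer_context becomes infer_context_name and local_abstractconv_dnn_graph becomes local_abstractconv_cudnn_graph. A minimal sketch of how the corrected function reads with its imports spelled out; the module paths are assumptions inferred from the relative imports above, and local_abstractconv_cudnn_graph / conv_groupopt are defined earlier in the same file rather than imported:

# Sketch only: module paths are assumptions based on the hunks above.
from theano.gof.opt import local_optimizer
from theano.tensor.nnet.abstract_conv import (AbstractConv2d,
                                              AbstractConv2d_gradWeights,
                                              AbstractConv2d_gradInputs)
from theano.gpuarray.basic_ops import infer_context_name


@local_optimizer([AbstractConv2d, AbstractConv2d_gradWeights,
                  AbstractConv2d_gradInputs])
def local_abstractconv_cudnn(node):
    # Pick the GPU context name shared by the node's inputs; the pre-fix code
    # called infer_context(), which is not a defined name here.
    ctx = infer_context_name(*node.inputs)
    # Hand off to the cuDNN graph builder defined earlier in the same file;
    # the pre-fix code referenced the misspelled local_abstractconv_dnn_graph.
    return local_abstractconv_cudnn_graph(node.op, ctx, node.inputs)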
@@ -9,7 +9,7 @@ except ImportError:
     pass
 from .basic_ops import (as_gpuarray_variable, GpuKernelBase, Kernel,
-                        infer_context_name, GpuFromHost)
+                        infer_context_name)
 from .opt import register_opt, op_lifter, register_opt2
@@ -450,10 +450,11 @@ class GpuCumsum(GpuKernelBase, Op):
         """ % locals()
         return super(GpuCumsum, self).c_support_code_struct(node, nodename) + code

 @register_opt('fast_compile')
 @op_lifter([CumsumOp])
 @register_opt2([CumsumOp], 'fast_compile')
-def use_gpu_cumsumop(op, ctx_name, inputs, ):
+def use_gpu_cumsumop(op, ctx_name, inputs):
     if inputs[0].dtype == 'float32':
         axis = op.axis
         x = inputs[0]
...
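For context, the optimizer touched in this hunk lifts the host CumsumOp onto the GPU; its body is truncated above, and only the float32 check is visible. Below is a small usage sketch of the host-side op it targets, runnable without a GPU; the swap to a GPU implementation under the gpuarray backend is the behavior implied by the hunk, not shown in full here:

# Usage sketch of the host op that use_gpu_cumsumop targets. With the gpuarray
# backend enabled, the registered optimizer may replace the CumsumOp node with
# a GPU implementation for float32 inputs; on CPU this runs as-is.
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.extra_ops import cumsum

x = T.fmatrix('x')
y = cumsum(x, axis=0)   # builds a CumsumOp(axis=0) node in the graph
f = theano.function([x], y)

print(f(np.ones((3, 2), dtype='float32')))
# cumulative sums down axis 0: [[1, 1], [2, 2], [3, 3]]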
@@ -10,7 +10,7 @@ from theano.scalar import as_scalar, constant
 from . import opt
 from .basic_ops import (as_gpuarray_variable, GpuAllocEmpty,
-                        infer_context_name)
+                        infer_context_name, gpu_alloc_empty)
 from .type import gpu_context_type
 from .opt_util import alpha_merge, output_merge
...
This diff is collapsed.