提交 567b2db8 authored 作者: Frederic's avatar Frederic

lower the number of GPU op compiled

上级 22471e39
......@@ -298,6 +298,8 @@ class GpuDimShuffle(GpuOp):
"""
Implement DimShuffle on the gpu.
"""
check_broadcast = False
def __init__(self, input_broadcastable, new_order):
input_broadcastable = tuple(input_broadcastable)
self.input_broadcastable = input_broadcastable
......@@ -2355,6 +2357,8 @@ class GpuSubtensor(GpuOp, tensor.Subtensor):
"""
Implement subtensor on the gpu.
"""
check_broadcast = False
# __hash__, __eq__, __str__ come from tensor.Subtensor
def make_node(self, x, *inputs):
assert isinstance(x.type, CudaNdarrayType)
......
......@@ -513,8 +513,9 @@ class BaseGpuCorrMM(GpuOp):
integers
:param subsample: perform subsampling of the output (default: (1, 1))
:param pad: *deprecated*, now you should always use border_mode
"""
check_broadcast = False
def __init__(self, border_mode="valid", subsample=(1, 1), pad=(0, 0)):
if pad != (0, 0):
......@@ -1498,6 +1499,8 @@ class GpuConv(GpuOp):
"""
Implement the batched and stacked 2d convolution on the gpu.
"""
check_broadcast = False
@staticmethod
def logical_output_shape_2d(imshp, kshp, mode):
if mode == 'valid':
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册或者登录后发表评论