提交 c260ecf4 authored 作者: sentient07's avatar sentient07

Cleanup #2

上级 486faa29
......@@ -121,14 +121,7 @@ class GpuFromHost(GpuOp):
check_input = False
def __eq__(self, other):
    """Two ops compare equal iff they are the same type.

    GpuFromHost carries no parameters, so type identity is the whole
    equality contract (kept consistent with ``__hash__`` below).
    """
    return type(other) == type(self)
def __hash__(self):
    """Hash consistently with ``__eq__``: every instance hashes alike."""
    op_type = type(self)
    return hash(op_type)
def __str__(self):
    # All instances are interchangeable, so a constant label suffices.
    return 'GpuFromHost'
__props__ = ()
def make_node(self, x):
if not isinstance(x.type, tensor.TensorType):
......@@ -320,18 +313,13 @@ class GpuDimShuffle(GpuOp):
check_broadcast = False
__props__ = ("input_broadcastable", "new_order", "inplace")
__props__ = ("input_broadcastable", "new_order")
def __init__(self, input_broadcastable, new_order, inplace=True):
def __init__(self, input_broadcastable, new_order):
input_broadcastable = tuple(input_broadcastable)
self.input_broadcastable = input_broadcastable
self.new_order = tuple(new_order)
self.inplace = int(inplace)
if inplace is True:
self.inplace = inplace
self._props_dict().pop('inplace')
else:
raise ValueError("DimShuffle is inplace by default and hence the inplace for DimShuffle must be true")
self.inplace = True
for i, b in enumerate(input_broadcastable):
if i not in new_order:
......@@ -393,6 +381,9 @@ class GpuDimShuffle(GpuOp):
hash(self.new_order) ^
hash(self.input_broadcastable))
def __str__(self):
    """Render as ``GpuDimShuffle{i,j,...}`` from the target axis order."""
    order_repr = ",".join(map(str, self.new_order))
    return "GpuDimShuffle{%s}" % order_repr
def c_code(self, node, name, inp, out, sub):
input, = inp
res, = out
......
......@@ -398,14 +398,24 @@ def local_gpu_dimshuffle_0(node):
input, = node.inputs
if input.owner and isinstance(input.owner.op, HostFromGpu):
# move the add to a GpuAdd
new_op = GpuDimShuffle(**node.op._props_dict())
p_dict = node.op._props_dict()
try:
p_dict.pop('inplace')
except KeyError:
pass
new_op = GpuDimShuffle(**p_dict)
return [host_from_gpu(new_op(as_cuda_ndarray_variable(input)))]
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op,
tensor.DimShuffle):
dimshuffle_node = host_input.owner
new_op = GpuDimShuffle(**dimshuffle_node.op._props_dict())
p_dict = dimshuffle_node.op._props_dict()
try:
p_dict.pop('inplace')
except KeyError:
pass
new_op = GpuDimShuffle(**p_dict)
return [new_op(
as_cuda_ndarray_variable(dimshuffle_node.inputs[0]))]
return False
......@@ -1929,7 +1939,7 @@ def local_gpu_downsample_factor_max_grad(node):
if pad != (0, 0) or node.op.mode != 'max' or stride != ws:
return
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_ds_grad = GpuDownsampleFactorMaxGrad(**node.op._props_dict())
gpu_ds_grad = GpuDownsampleFactorMaxGrad(node.op.ds, node.op.ignore_border)
return [host_from_gpu(gpu_ds_grad(x.owner.inputs[0],
as_cuda_ndarray_variable(z),
as_cuda_ndarray_variable(gz)))]
......
......@@ -740,9 +740,6 @@ def test_scan_debugprint5():
>Elemwise{mul,no_inplace} [id CS] ''"""
for truth, out in zip(expected_output.split("\n"), lines):
if truth.strip() != out.strip():
import pdb
pdb.set_trace()
assert truth.strip() == out.strip()
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论