Commit 7885227c, authored by sentient07

Made the suggested changes

Parent commit: 0792f335
......@@ -121,7 +121,6 @@ class GpuFromHost(GpuOp):
check_input = False
__props__ = ()
def make_node(self, x):
if not isinstance(x.type, tensor.TensorType):
......@@ -178,8 +177,6 @@ class GpuElemwise(GpuOp):
"""
__props__ = ("scalar_op", "inplace_pattern", "sync", )
nin = property(lambda self: self.scalar_op.nin)
nout = property(lambda self: self.scalar_op.nout)
......@@ -231,6 +228,12 @@ class GpuElemwise(GpuOp):
assert h == getattr(self, '_hashval', h)
self._hashval = h
def __eq__(self, other):
    """Two GpuElemwise ops are equal iff they are the same class and
    agree on scalar_op, inplace_pattern and sync (the op's __props__)."""
    if type(self) != type(other):
        return False
    return (self.scalar_op == other.scalar_op and
            self.inplace_pattern == other.inplace_pattern and
            self.sync == other.sync)
def __hash__(self):
    """Return the hash cached in self._hashval (precomputed elsewhere,
    so equal ops hash equally without recomputing each call)."""
    return self._hashval
......@@ -309,9 +312,9 @@ class GpuDimShuffle(GpuOp):
check_broadcast = False
__props__ = ("input_broadcastable", "inplace", "new_order")
__props__ = ("input_broadcastable", "new_order")
def __init__(self, input_broadcastable, inplace, new_order):
def __init__(self, input_broadcastable, new_order):
input_broadcastable = tuple(input_broadcastable)
self.input_broadcastable = input_broadcastable
self.new_order = tuple(new_order)
......
......@@ -2207,6 +2207,9 @@ class GpuDownsampleFactorMax(GpuOp):
Implement downsample with max on the gpu.
"""
__props__ = ('ds', 'ignore_border')
def __init__(self, ds, ignore_border=False):
    """Store the downsample factors and border mode.

    ds is normalized to a tuple so the op compares and hashes by value
    (it is listed in __props__); ignore_border is kept as given.
    """
    self.ignore_border = ignore_border
    self.ds = tuple(ds)
......
......@@ -288,7 +288,7 @@ def local_gpu_elemwise_0(node):
new_op = GpuElemwise(erfcx_gpu)
else:
try:
new_op = GpuElemwise(**node.op._props_dict())
new_op = GpuElemwise(node.op.scalar_op)
except SupportCodeError:
# This happens when scalar_op requires support code
return False
......@@ -398,9 +398,6 @@ def local_gpu_dimshuffle_0(node):
input, = node.inputs
if input.owner and isinstance(input.owner.op, HostFromGpu):
# move the add to a GpuAdd
if 'inplace' in node.op._props_dict():
import pdb
pdb.set_trace()
new_op = GpuDimShuffle(**node.op._props_dict())
return [host_from_gpu(new_op(as_cuda_ndarray_variable(input)))]
if isinstance(node.op, GpuFromHost):
......@@ -1130,7 +1127,7 @@ def local_gpu_advanced_incsubtensor1(node):
compute_capability = device_properties(active_device_no)['major']
if (compute_capability < 2 or y.ndim != 2 or x.ndim != 2):
gpu_op = tensor.AdvancedIncSubtensor1(**node.op._props_dict())
gpu_op = GpuAdvancedIncSubtensor1(set_instead_of_inc=set_instead_of_inc)
else:
gpu_op = theano.sandbox.cuda.basic_ops.GPUAdvancedIncSubtensor1_dev20(**node.op._props_dict())
return [gpu_op(as_cuda_ndarray_variable(x),
......@@ -1915,7 +1912,7 @@ def local_gpu_downsample_factor_max(node):
if (pad) != (0, 0) or node.op.mode != 'max' or stride != ws:
return
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_ds = GpuDownsampleFactorMax(node.op.ds, node.op.ignore_border)
gpu_ds = GpuDownsampleFactorMax(**node.op._props_dict())
return [host_from_gpu(gpu_ds(x.owner.inputs[0]))]
......
......@@ -140,7 +140,11 @@ class DimShuffle(Op):
self.input_broadcastable = input_broadcastable
new_order = tuple(new_order)
self.new_order = new_order
self.inplace = inplace
if inplace is True:
self.inplace = inplace
self._props_dict().pop('inplace')
else:
raise ValueError("DimShuffle is inplace by default and hence the inplace for DimShuffle must be true")
for i, j in enumerate(new_order):
if j != 'x':
......@@ -503,8 +507,6 @@ second dimension
"""
__props__ = ("scalar_op", "inplace_pattern")
def __init__(self, scalar_op, inplace_pattern=None, name=None,
nfunc_spec=None, openmp=None):
if inplace_pattern is None:
......@@ -800,7 +802,7 @@ second dimension
# dimensions
res = theano.tensor.constant(numpy.asarray(r.data),
dtype=r.type.dtype)
return DimShuffle((), ['x'] * nd, inplace=False)(res)
return DimShuffle((), ['x'] * nd)(res)
new_r = Elemwise(node.op, {})(
*[transform(ipt) for ipt in node.inputs])
......
......@@ -561,8 +561,7 @@ def local_dimshuffle_lift(node):
new_inputs = []
for inp in inode.inputs:
new_inp = op.__class__(inp.type.broadcastable,
op.new_order,
op.inplace)(inp)
op.new_order)(inp)
new_inputs.append(apply_local_dimshuffle_lift(new_inp))
copy_stack_trace(node.outputs[0], new_inputs)
ret = inode.op(*new_inputs, **dict(return_list=True))
......@@ -570,14 +569,12 @@ def local_dimshuffle_lift(node):
if inode and isinstance(inode.op, DimShuffle):
new_order = [x == 'x' and 'x' or inode.op.new_order[x] for x in
new_order]
inplace = op.inplace and inode.op.inplace
input = inode.inputs[0]
if is_dimshuffle_useless(new_order, input):
return [input]
elif inode and isinstance(inode.op, DimShuffle):
ret = op.__class__(input.type.broadcastable, new_order,
inplace)(input)
ret = op.__class__(input.type.broadcastable, new_order)(input)
ret = apply_local_dimshuffle_lift(ret)
copy_stack_trace(node.outputs[0], ret)
return [ret]
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment