提交 0792f335 authored 作者: sentient07's avatar sentient07

Changed a few more ground truths to inplace

上级 c1fdb3bb
......@@ -309,10 +309,13 @@ class GpuDimShuffle(GpuOp):
check_broadcast = False
def __init__(self, input_broadcastable, new_order):
__props__ = ("input_broadcastable", "inplace", "new_order")
def __init__(self, input_broadcastable, inplace, new_order):
input_broadcastable = tuple(input_broadcastable)
self.input_broadcastable = input_broadcastable
self.new_order = tuple(new_order)
self.inplace = int(inplace)
for i, b in enumerate(input_broadcastable):
if i not in new_order:
......
......@@ -398,6 +398,9 @@ def local_gpu_dimshuffle_0(node):
input, = node.inputs
if input.owner and isinstance(input.owner.op, HostFromGpu):
# move the add to a GpuAdd
if 'inplace' in node.op._props_dict():
import pdb
pdb.set_trace()
new_op = GpuDimShuffle(**node.op._props_dict())
return [host_from_gpu(new_op(as_cuda_ndarray_variable(input)))]
if isinstance(node.op, GpuFromHost):
......@@ -994,8 +997,7 @@ def local_gpu_reshape(node):
if host_input.owner and \
isinstance(host_input.owner.op, tensor.Reshape):
x, shp = host_input.owner.inputs
gpu_reshape = GpuReshape(**host_input.owner.op._props_dict())(as_cuda_ndarray_variable(x),
shp)
gpu_reshape = GpuReshape(**host_input.owner.op._props_dict())(as_cuda_ndarray_variable(x), shp)
if gpu_reshape.broadcastable != node.outputs[0].broadcastable:
# this can happen as we always return False for all broadcast
# dim in GpuReshape but not for Reshape
......@@ -1130,7 +1132,7 @@ def local_gpu_advanced_incsubtensor1(node):
gpu_op = tensor.AdvancedIncSubtensor1(**node.op._props_dict())
else:
gpu_op = GPUAdvancedIncSubtensor1_dev20(**node.op._props_dict())
gpu_op = theano.sandbox.cuda.basic_ops.GPUAdvancedIncSubtensor1_dev20(**node.op._props_dict())
return [gpu_op(as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y), *coords)]
......@@ -1180,7 +1182,6 @@ def local_gpu_incsubtensor(node):
host_output = node.inputs[0]
if host_output.owner and \
type(host_output.owner.op) == tensor.IncSubtensor:
incsubt = host_output.owner.op
x, y = host_output.owner.inputs[0:2]
coords = host_output.owner.inputs[2:]
if x.dtype != "float32":
......@@ -1189,10 +1190,9 @@ def local_gpu_incsubtensor(node):
# The IncSubtensor upcast to float32 y, so we do it
# explicitly to move it to the GPU.
y = y.astype('float32')
ret = GpuIncSubtensor(**node.op._props_dict())(
as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y),
*coords)
ret = GpuIncSubtensor(**node.op._props_dict())(as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y),
*coords)
ret.tag.nan_guard_mode_check = getattr(
host_output.tag, 'nan_guard_mode_check', True)
return [ret]
......@@ -1219,8 +1219,7 @@ def local_gpu_incsubtensor(node):
y = tensor.cast(y, 'float32')
gpu_y = as_cuda_ndarray_variable(y)
if go_gpu:
ret = GpuIncSubtensor(**node.op._props_dict())(
gpu_x, gpu_y, *coords)
ret = GpuIncSubtensor(**node.op._props_dict())(gpu_x, gpu_y, *coords)
val = getattr(node.outputs[0].tag, 'nan_guard_mode_check', True)
ret.tag.nan_guard_mode_check = val
......
......@@ -201,7 +201,7 @@ class test_dimshuffle_lift(unittest.TestCase):
x, _, _ = inputs()
e = ds(x, (0, 1))
g = FunctionGraph([x], [e])
self.assertTrue(str(g) == "[DimShuffle{0,1}(x)]")
self.assertTrue(str(g) == "[InplaceDimShuffle{0,1}(x)]")
dimshuffle_lift.optimize(g)
self.assertTrue(str(g) == "[x]")
# Check stacktrace was copied over correctly after opt was applied
......@@ -215,9 +215,9 @@ class test_dimshuffle_lift(unittest.TestCase):
ds_z = ds(z, (2, 1, 0)) # usefull
ds_u = ds(u, ('x')) # usefull
g = FunctionGraph([x, y, z, u], [ds_x, ds_y, ds_z, ds_u])
self.assertTrue(str(g) == "[DimShuffle{0,x}(x), DimShuffle{2,1,0}(y), DimShuffle{2,1,0}(z), DimShuffle{x}(TensorConstant{1})]")
self.assertTrue(str(g) == "[InplaceDimShuffle{0,x}(x), InplaceDimShuffle{2,1,0}(y), InplaceDimShuffle{2,1,0}(z), InplaceDimShuffle{x}(TensorConstant{1})]")
dimshuffle_lift.optimize(g)
self.assertTrue(str(g) == "[x, y, DimShuffle{2,1,0}(z), DimShuffle{x}(TensorConstant{1})]")
self.assertTrue(str(g) == "[x, y, InplaceDimShuffle{2,1,0}(z), InplaceDimShuffle{x}(TensorConstant{1})]")
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))
......@@ -237,10 +237,11 @@ def test_local_useless_dimshuffle_in_reshape():
[reshape_dimshuffle_vector, reshape_dimshuffle_mat,
reshape_dimshuffle_row, reshape_dimshuffle_col])
assert_true(str(g) == "[Reshape{1}(DimShuffle{x,0}(vector), Shape(vector)), "
"Reshape{2}(DimShuffle{x,0,x,1}(mat), Shape(mat)), "
"Reshape{2}(DimShuffle{1,x}(row), Shape(row)), "
"Reshape{2}(DimShuffle{0}(col), Shape(col))]")
print(str(g))
assert_true(str(g) == "[Reshape{1}(InplaceDimShuffle{x,0}(vector), Shape(vector)), "
"Reshape{2}(InplaceDimShuffle{x,0,x,1}(mat), Shape(mat)), "
"Reshape{2}(InplaceDimShuffle{1,x}(row), Shape(row)), "
"Reshape{2}(InplaceDimShuffle{0}(col), Shape(col))]")
useless_dimshuffle_in_reshape = out2in(local_useless_dimshuffle_in_reshape)
useless_dimshuffle_in_reshape.optimize(g)
assert_true(str(g) == "[Reshape{1}(vector, Shape(vector)), "
......@@ -3762,15 +3763,15 @@ class Test_local_canonicalize_alloc(unittest.TestCase):
"TensorConstant{2})]"))
alloc_lift.optimize(g)
self.assertTrue(str(g) == "[DimShuffle{x,0,1}"
self.assertTrue(str(g) == "[InplaceDimShuffle{x,0,1}"
"(Alloc(<TensorType(float64, vector)>, "
"TensorConstant{3}, "
"TensorConstant{2})), "
"DimShuffle{x,x}"
"InplaceDimShuffle{x,x}"
"(<TensorType(float64, scalar)>), "
"DimShuffle{x,0,1}"
"InplaceDimShuffle{x,0,1}"
"(Alloc(<TensorType(float64, matrix)>, "
"TensorConstant{1}, "
"TensorConstant{2})), "
......@@ -6264,9 +6265,9 @@ class Test_local_reshape_to_dimshuffle(unittest.TestCase):
reshape_lift.optimize(g)
useless_reshape.optimize(g)
self.assertTrue(str(g) == "[DimShuffle{x,0}"
self.assertTrue(str(g) == "[InplaceDimShuffle{x,0}"
"(<TensorType(float64, vector)>), "
"DimShuffle{x,0,x,1,x,x}"
"InplaceDimShuffle{x,0,x,1,x,x}"
"(Reshape{2}(<TensorType(float64, matrix)>, "
"TensorConstant{[5 6]}))]")
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论