提交 0039353e authored 作者: James Bergstra's avatar James Bergstra

test_elemwise4 just passed

上级 ff24c985
...@@ -18,17 +18,33 @@ tensor.opt.register_specialize(local_host_gpu_host, 'gpu') ...@@ -18,17 +18,33 @@ tensor.opt.register_specialize(local_host_gpu_host, 'gpu')
@gof.local_optimizer([])
def local_gpu_elemwise_0(node):
    """
    Elemwise(..., host_from_gpu(x), ...) ->
        host_from_gpu(GpuElemwise(gpu_from_host(...)))

    If any input of an Elemwise already lives on the GPU (it arrives via a
    HostFromGpu transfer), move the whole elemwise computation to the GPU
    and transfer the result back to the host.
    """
    if isinstance(node.op, tensor.Elemwise):
        # Trigger only when at least one input was copied down from the GPU.
        if any(hasattr(i.owner, 'op') and isinstance(i.owner.op, HostFromGpu)
               for i in node.inputs):
            # Rebuild the same scalar operation as a GpuElemwise, keeping the
            # inplace pattern of the host op.
            new_op = GpuElemwise(node.op.scalar_op, node.op.inplace_pattern)
            # All inputs are pushed to the GPU; gpu_from_host(host_from_gpu(x))
            # pairs are expected to be removed by other optimizations.
            return [host_from_gpu(new_op(*(gpu_from_host(i)
                                           for i in node.inputs)))]
    return False
tensor.opt.register_specialize(local_gpu_elemwise_0, 'gpu')
@gof.local_optimizer([])
def local_gpu_elemwise_1(node):
    """
    gpu_from_host(Elemwise(...)) -> GpuElemwise(gpu_from_host(...))

    When an Elemwise result is immediately transferred to the GPU and has no
    other consumer, compute it directly on the GPU instead.
    """
    if node.op != gpu_from_host:
        return False
    host_output, = node.inputs
    producer = host_output.owner
    if producer is None or not isinstance(producer.op, tensor.Elemwise):
        return False
    # Only rewrite when this transfer is the sole client of the host result,
    # so the host-side Elemwise is not needed for anything else.
    if len(host_output.clients) != 1:
        return False
    gpu_op = GpuElemwise(producer.op.scalar_op, producer.op.inplace_pattern)
    gpu_inputs = [gpu_from_host(i) for i in producer.inputs]
    return [gpu_op(*gpu_inputs)]
tensor.opt.register_specialize(local_gpu_elemwise_1, 'gpu')
@gof.local_optimizer([]) @gof.local_optimizer([])
def local_gpu_dimshuffle(node): def local_gpu_dimshuffle_0(node):
"""
dimshuffle(host_from_gpu()) -> host_from_gpu(gpu_dimshuffle)
"""
if isinstance(node.op, tensor.DimShuffle): if isinstance(node.op, tensor.DimShuffle):
input, = node.inputs input, = node.inputs
if input.owner and isinstance(input.owner.op, HostFromGpu): if input.owner and isinstance(input.owner.op, HostFromGpu):
...@@ -40,4 +56,20 @@ def local_gpu_dimshuffle(node): ...@@ -40,4 +56,20 @@ def local_gpu_dimshuffle(node):
else: else:
return [host_from_gpu(new_op(gpu_from_host(tensor.tensor_copy(input))))] return [host_from_gpu(new_op(gpu_from_host(tensor.tensor_copy(input))))]
return False return False
tensor.opt.register_specialize(local_gpu_dimshuffle, 'gpu') tensor.opt.register_specialize(local_gpu_dimshuffle_0, 'gpu')
@gof.local_optimizer([])
def local_gpu_dimshuffle_1(node):
    """
    gpu_from_host(dimshuffle) -> gpu_dimshuffle(gpu_from_host)

    When a DimShuffle result is transferred to the GPU, perform the
    dimshuffle on the GPU side of the transfer instead.
    """
    if node.op != gpu_from_host:
        return False
    transferred = node.inputs[0]
    producer = transferred.owner
    if producer is None or not isinstance(producer.op, tensor.DimShuffle):
        return False
    gpu_shuffle = GpuDimShuffle(producer.op.input_broadcastable,
                                producer.op.new_order)
    return [gpu_shuffle(gpu_from_host(producer.inputs[0]))]
tensor.opt.register_specialize(local_gpu_dimshuffle_1, 'gpu')
...@@ -110,7 +110,7 @@ def test_elemwise4(): ...@@ -110,7 +110,7 @@ def test_elemwise4():
a = tcn.shared_constructor(numpy.random.rand(*shape), 'a') a = tcn.shared_constructor(numpy.random.rand(*shape), 'a')
b = tensor.fvector() b = tensor.fvector()
c = tensor.fvector() c = tensor.fvector()
f = pfunc([b,c], [], updates=[(a, (a+b.dimshuffle('x', 0)*x.dimshuffle(0, 'x')))]) f = pfunc([b,c], [], updates=[(a, (a+b.dimshuffle('x', 0)*c.dimshuffle(0, 'x')))])
has_elemwise = False has_elemwise = False
for i, node in enumerate(f.maker.env.toposort()): for i, node in enumerate(f.maker.env.toposort()):
print >> sys.stderr, i, node print >> sys.stderr, i, node
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论