提交 047c8913 authored 作者: Frederic's avatar Frederic

Remove GpuShape and its associated optimization, since we now reuse the Shape Op with specific C code.

上级 b68d4a87
...@@ -654,15 +654,6 @@ class GpuAlloc(HideC, Alloc): ...@@ -654,15 +654,6 @@ class GpuAlloc(HideC, Alloc):
gpu_alloc = GpuAlloc() gpu_alloc = GpuAlloc()
class GpuShape(HideC, tensor.Shape):
    """
    Shape Op for variables living on the GPU.

    Reuses the scalar-indexing semantics of ``tensor.Shape``; the only
    difference is that the input may be a GPU variable, while the output
    is always a host-side lvector.
    """
    def make_node(self, x):
        # The shape itself always lives on the host as an int64 vector.
        shape_out = tensor.lvector()
        return Apply(self, [x], [shape_out])
gpu_shape = GpuShape()
class GpuReshape(HideC, tensor.Reshape): class GpuReshape(HideC, tensor.Reshape):
""" """
Implement Reshape on the gpu. Implement Reshape on the gpu.
......
...@@ -14,9 +14,7 @@ from theano.sandbox.gpuarray.type import GpuArrayType ...@@ -14,9 +14,7 @@ from theano.sandbox.gpuarray.type import GpuArrayType
from theano.sandbox.gpuarray.basic_ops import (host_from_gpu, from theano.sandbox.gpuarray.basic_ops import (host_from_gpu,
gpu_from_host, gpu_from_host,
gpu_alloc, gpu_alloc,
gpu_shape,
GpuAlloc, GpuAlloc,
GpuShape,
GpuReshape, GpuReshape,
GpuEye) GpuEye)
from theano.sandbox.gpuarray.blas import gpu_dot22, GpuGemv, GpuGemm from theano.sandbox.gpuarray.blas import gpu_dot22, GpuGemv, GpuGemm
...@@ -339,20 +337,6 @@ def local_gpua_crossentropysoftmax1hotwithbiasdx(node): ...@@ -339,20 +337,6 @@ def local_gpua_crossentropysoftmax1hotwithbiasdx(node):
return GpuCrossentropySoftmax1HotWithBiasDx() return GpuCrossentropySoftmax1HotWithBiasDx()
@register_opt()
@local_optimizer([tensor.Shape])
def local_gpua_shape(node):
    """
    Lift ``Shape(host_from_gpu(x))`` to ``gpu_shape(x)``.

    Written by hand rather than with ``op_lifter`` because the
    replacement Op's output stays tied to the GPU variable.
    """
    if not isinstance(node.op, tensor.Shape):
        return False
    inp, = node.inputs
    # Only rewrite when the input is a transfer from the GPU; in that
    # case we can read the shape directly from the GPU variable.
    if inp.owner and inp.owner.op == host_from_gpu:
        gpu_inp, = inp.owner.inputs
        return [gpu_shape(gpu_inp)]
    return False
@register_opt() @register_opt()
@op_lifter([gpu_from_host, ConvOp]) @op_lifter([gpu_from_host, ConvOp])
def local_gpu_conv(node): def local_gpu_conv(node):
......
...@@ -36,7 +36,7 @@ from theano.sandbox.gpuarray.basic_ops import (host_from_gpu, gpu_from_host, ...@@ -36,7 +36,7 @@ from theano.sandbox.gpuarray.basic_ops import (host_from_gpu, gpu_from_host,
gpu_alloc, gpu_from_cuda, gpu_alloc, gpu_from_cuda,
cuda_from_gpu, HostFromGpu, cuda_from_gpu, HostFromGpu,
GpuFromHost, GpuReshape, GpuFromHost, GpuReshape,
GpuEye, GpuShape) GpuEye)
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
utt.seed_rng() utt.seed_rng()
...@@ -307,7 +307,7 @@ def test_shape(): ...@@ -307,7 +307,7 @@ def test_shape():
topo = f.maker.fgraph.toposort() topo = f.maker.fgraph.toposort()
assert numpy.all(f(v) == (3, 4, 5)) assert numpy.all(f(v) == (3, 4, 5))
assert len(topo) == 1 assert len(topo) == 1
assert isinstance(topo[0].op, GpuShape) assert isinstance(topo[0].op, T.Shape)
class G_reshape(T_reshape): class G_reshape(T_reshape):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论