提交 461302bb 作者: Frederic

Do the same opt in gpuarray then old backend

上级 47fcb406
...@@ -24,15 +24,13 @@ from .fp16_help import write_w ...@@ -24,15 +24,13 @@ from .fp16_help import write_w
def as_gpuarray_variable(x): def as_gpuarray_variable(x):
# This is needed to lower the number of useless transfer if getattr(x, 'owner', None):
# introduced during optimization. This speed up optimization and if isinstance(x.owner.op, HostFromGpu):
# "canonicalize" the graph, so it make easier making some return x.owner.inputs[0]
# optimization. elif (isinstance(x.owner.op, GpuFromHost) and
if (hasattr(x, 'fgraph') and x.owner.inputs[0].owner and
len(x.clients) == 1 and isinstance(x.owner.inputs[0].owner.op, HostFromGpu)):
x.owner and return x.owner.inputs[0].owner.inputs[0]
isinstance(x.owner.op, HostFromGpu)):
return x.owner.inputs[0]
if hasattr(x, '_as_GpuArrayVariable'): if hasattr(x, '_as_GpuArrayVariable'):
return x._as_GpuArrayVariable() return x._as_GpuArrayVariable()
# TODO we need to have the cuda -> gpu path taken care of. # TODO we need to have the cuda -> gpu path taken care of.
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论