Commit 9499b886, authored by Frédéric Bastien

Merge pull request #1865 from caglar/fix_typos

fixed a few typos and removed unnecessary code.
@@ -88,7 +88,8 @@ register_opt(name='gpu_constant_folding')(
 class InputToGpuOptimizer(Optimizer):
-    """Transfert the input of a graph to the gpu if needed
+    """
+    Transfer the input of a graph to the gpu if it is necessary.
     It should make this part of the optimizer faster we will will need only 1
     pass on the fgraph.
     """
@@ -505,7 +506,6 @@ def local_gpu_gemv(node):
     if isinstance(node.op, GpuFromHost):
         host_input = node.inputs[0]
         if host_input.owner and isinstance(host_input.owner.op, gemvs):
-            op = host_input.owner.op
             z, a, x, y, b = host_input.owner.inputs
             return [gpu_gemv_no_inplace(
                 gpu_from_host(z),
@@ -546,7 +546,6 @@ def local_gpu_ger(node):
     if isinstance(node.op, GpuFromHost):
         host_input = node.inputs[0]
         if host_input.owner and isinstance(host_input.owner.op, gers):
-            op = host_input.owner.op
             z, a, x, y = host_input.owner.inputs
             return [gpu_ger_no_inplace(
                 gpu_from_host(z),
@@ -582,7 +581,6 @@ def local_gpu_gemm(node):
         host_input = node.inputs[0]
         if host_input.owner and isinstance(host_input.owner.op,
                                            tensor.blas.Gemm):
-            op = host_input.owner.op
             z, a, x, y, b = host_input.owner.inputs
             return [gpu_gemm_no_inplace(gpu_from_host(z),
                                         a,
...
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论