提交 7936ca43 authored 作者: Frederic Bastien's avatar Frederic Bastien

Remove useless comment

上级 8c607e8f
@@ -1943,30 +1943,20 @@ def local_gpu_join(node):
     # optimizing this case:
     # join(host_from_gpu) -> host_from_gpu(gpu_join)
-    # print "OPT: we've got a Join instance"
     axis_and_tensors = node.inputs
-    # print "OPT: axis_and_tensors=", axis_and_tensors
     matches = [t.dtype == 'float32' and
                ((t.owner is not None and
                  isinstance(t.owner.op, HostFromGpu)) or
                 isinstance(t, gof.Constant)) for t in axis_and_tensors[1:]]
-    # print "OPT: matches =", matches
-    # if all input tensors are host_from_gpu'ified
     if all(matches):
-        # the extra gpu_from_host introduced here will
-        # be removed by further optimizations
         new_tensors = [as_cuda_ndarray_variable(t)
                        for t in axis_and_tensors[1:]]
         new_a_and_t = [axis_and_tensors[0]] + new_tensors
         replacement_node = host_from_gpu(gpu_join(*new_a_and_t))
-        # print "OPT: replacement_node", replacement_node
         return [replacement_node]
 # This is a copy of the same opt in tensor to make the tests happy,
 ...
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论