提交 abc6ffb9 作者: goodfeli

Merge pull request #88 from nouiz/fix

Fix
@@ -413,8 +413,8 @@ class ProfileMode(Mode):
             print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, nb_call, nb_op, nb_apply, a)
         print ' ... (remaining %i single Op account for %.2f%%(%.2fs) of the runtime)'\
             %(max(0, len(sotimes)-n_ops_to_print),
-              sum(f for f, t, a, ci, nb_call, nb_op in sotimes[n_ops_to_print:]),
-              sum(t for f, t, a, ci, nb_call, nb_op in sotimes[n_ops_to_print:]))
+              sum(soinfo[0] for soinfo in sotimes[n_ops_to_print:]),
+              sum(soinfo[1] for soinfo in sotimes[n_ops_to_print:]))
         print '(*) Op is running a c implementation'
@@ -288,7 +288,7 @@ def handle_shared_float32(tf):
         raise NotImplementedError('removing our handler')
     def reduce_tensor_variable(var):
-        if isinstance(var.owner.op, HostFromGpu) and len(var.owner.inputs) == 1 \
+        if var.owner and isinstance(var.owner.op, HostFromGpu) and len(var.owner.inputs) == 1 \
                 and isinstance(var.owner.inputs[0], CudaNdarraySharedVariable):
             return load_shared_variable, (var.owner.inputs[0].get_value(),)
         else:
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论