提交 b44b0d7a · 作者: Frederic

Don't precompute stuff if it isn't used.

上级 df819993
...@@ -1109,18 +1109,21 @@ def _get_preallocated_maps(node, thunk, prealloc_modes, def_val, ...@@ -1109,18 +1109,21 @@ def _get_preallocated_maps(node, thunk, prealloc_modes, def_val,
# is less relevant. # is less relevant.
# Dimensions should be align by the innermost index, so we iterate # Dimensions should be align by the innermost index, so we iterate
# from the end of shapes. # from the end of shapes.
max_ndim = 0 if ('strided' in prealloc_modes or
rev_out_broadcastable = [] 'wrong_size' in prealloc_modes or
for r in considered_outputs: 'ALL' in prealloc_modes):
if isinstance(r.type, (TensorType, CudaNdarrayType)): max_ndim = 0
if max_ndim < r.ndim: rev_out_broadcastable = []
rev_out_broadcastable += [True] * (r.ndim - max_ndim) for r in considered_outputs:
max_ndim = r.ndim if isinstance(r.type, (TensorType, CudaNdarrayType)):
assert len(rev_out_broadcastable) == max_ndim if max_ndim < r.ndim:
rev_out_broadcastable += [True] * (r.ndim - max_ndim)
for i, b in enumerate(r.broadcastable[::-1]): max_ndim = r.ndim
rev_out_broadcastable[i] = rev_out_broadcastable[i] and b assert len(rev_out_broadcastable) == max_ndim
out_broadcastable = rev_out_broadcastable[::-1]
for i, b in enumerate(r.broadcastable[::-1]):
rev_out_broadcastable[i] = rev_out_broadcastable[i] and b
out_broadcastable = rev_out_broadcastable[::-1]
if 'strided' in prealloc_modes or 'ALL' in prealloc_modes: if 'strided' in prealloc_modes or 'ALL' in prealloc_modes:
check_ndim = config.DebugMode.check_preallocated_output_ndim check_ndim = config.DebugMode.check_preallocated_output_ndim
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论