提交 b44e2bbf authored 作者: Frederic's avatar Frederic

[bug] Fix the broadcastable pattern of the output of AdvancedIncSubtensor1.

This caused the infer_shape to think the shape is one. This bad info could be reused in many places. This was introduced on 11 August in commit 94ec1975 and merged on 12 August in gh-2029.
上级 89a77322
......@@ -2516,10 +2516,7 @@ class GpuAdvancedIncSubtensor1(tensor.AdvancedIncSubtensor1, GpuOp):
if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar')
bcast = (ilist_.broadcastable[0],) + x_.broadcastable[1:]
return Apply(self, [x_, y_, ilist_],
[CudaNdarrayType(dtype=x_.dtype,
broadcastable=bcast)()])
return Apply(self, [x_, y_, ilist_], [x_.type()])
# CudaNdarray_Subscript() doesn't support Advanced slicing.
# But we can't use the parent version that loops on each index
......@@ -2685,10 +2682,7 @@ class GpuAdvancedIncSubtensor1_dev20(GpuAdvancedIncSubtensor1):
if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar')
bcast = (ilist_.broadcastable[0],) + x_.broadcastable[1:]
return Apply(self, [x_, y_, ilist_],
[CudaNdarrayType(dtype=x_.dtype,
broadcastable=bcast)()])
return Apply(self, [x_, y_, ilist_], [x_.type()])
def c_code_cache_version(self):
return (2,)
......
......@@ -1737,9 +1737,8 @@ class AdvancedIncSubtensor1(Op):
'cannot %s x subtensor with ndim=%s'
' by y with ndim=%s to x subtensor with ndim=%s ' % (
opname, x_.type.ndim, y_.type.ndim))
bcast = (ilist_.broadcastable[0],) + x_.broadcastable[1:]
return Apply(self, [x_, y_, ilist_], [TensorType(dtype=x.dtype,
broadcastable=bcast)()])
return Apply(self, [x_, y_, ilist_], [x_.type()])
def perform(self, node, inp, out_):
# TODO opt to make this inplace
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论