提交 3f4080b9 authored 作者: Frederic's avatar Frederic

Allow AdvancedSubtensor1 on GPU to also work on broadcasted tensors. Add test.

上级 f4384395
...@@ -1751,9 +1751,6 @@ class GpuAdvancedSubtensor1(tensor.AdvancedSubtensor1): ...@@ -1751,9 +1751,6 @@ class GpuAdvancedSubtensor1(tensor.AdvancedSubtensor1):
raise TypeError('index must be vector') raise TypeError('index must be vector')
if x_.type.ndim == 0: if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar') raise TypeError('cannot index into a scalar')
if x_.type.broadcastable[0]:
# the caller should have made a copy of x len(ilist) times
raise TypeError('cannot index into a broadcastable dimension')
return Apply(self, [x_, ilist_], [x_.type()]) return Apply(self, [x_, ilist_], [x_.type()])
......
...@@ -2101,6 +2101,21 @@ class T_subtensor(unittest.TestCase): ...@@ -2101,6 +2101,21 @@ class T_subtensor(unittest.TestCase):
self.assertTrue(isinstance(topo_[0].op, self.adv_sub1)) self.assertTrue(isinstance(topo_[0].op, self.adv_sub1))
self.assertRaises(IndexError, f) self.assertRaises(IndexError, f)
def test_adv_sub1_broadcast(self):
    # Regression test: AdvancedSubtensor1 must accept indexing into a
    # broadcastable (length-1) leading dimension rather than rejecting it,
    # and must still raise IndexError for out-of-range indices at runtime.
    base = numpy.ones((1, 3), dtype=self.dtype) * 5
    shared_x = self.shared(base, broadcastable=(True, False))
    indices = tensor.lvector()
    taken = shared_x[indices]
    # The graph node produced by fancy-indexing should be AdvancedSubtensor1.
    self.assertTrue(isinstance(taken.owner.op,
                               theano.tensor.basic.AdvancedSubtensor1))
    f = function([indices], taken, mode=self.mode)
    # After compilation, exactly one relevant node (the backend-specific
    # AdvancedSubtensor1) should remain once bookkeeping ops are ignored.
    nodes = [node for node in f.maker.env.toposort()
             if not isinstance(node.op, self.ignore_topo)]
    assert len(nodes) == 1
    self.assertTrue(isinstance(nodes[0].op, self.adv_sub1))
    # Index 0 is valid (the broadcastable dim has length 1) ...
    self.assertTrue(numpy.allclose(f([0]), base[0]))
    # ... but index 1 is out of range and must raise at call time.
    self.assertRaises(IndexError, f, [0, 1])
def test_shape_i_const(self): def test_shape_i_const(self):
# Each axis is treated independently by shape_i/shape operators # Each axis is treated independently by shape_i/shape operators
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论