Commit ba408e6b authored by Frederic

Fix failing tests due to change in join/subtensor or inconsistency between version.

Parent: d304bb64
@@ -3043,13 +3043,8 @@ class GpuJoin(tensor.Join, GpuOp):
axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
if not tensors:
raise ValueError('Cannot join an empty list of tensors')
are_instances = [isinstance(x.type, CudaNdarrayType) \
for x in tensors]
assert numpy.all(are_instances)
# no conversion needed, we just checked everything was
# a CNDA var
as_tensor_variable_args = tensors
as_tensor_variable_args = [as_cuda_ndarray_variable(x)
for x in tensors]
output_maker = \
lambda bcast: CudaNdarrayType(broadcastable=bcast)()
......
@@ -1000,6 +1000,7 @@ class T_subtensor(theano.tensor.tests.test_subtensor.T_subtensor):
adv_incsub1 = cuda.GpuAdvancedIncSubtensor1
mode = mode_with_gpu
dtype = 'float32'
type = tcn.CudaNdarrayType
ignore_topo = (B.HostFromGpu, B.GpuFromHost, theano.compile.DeepCopyOp)
fast_compile = False
ops = (cuda.GpuSubtensor, cuda.GpuIncSubtensor,
......
@@ -368,13 +368,19 @@ class GpuAdvancedIncSubtensor1(HideC, tensor.AdvancedIncSubtensor1):
if ilist_.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
if ilist_.type.broadcastable != (False,):
if ilist_.type.ndim != 1:
raise TypeError('index must be vector')
if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar')
if x_.type.broadcastable[0]:
# the caller should have made a copy of x len(ilist) times
raise TypeError('cannot index into a broadcastable dimension')
if y_.type.ndim > x_.type.ndim:
if self.set_instead_of_inc:
opname = 'set'
else:
opname = 'increment'
raise TypeError(
'cannot %s x subtensor with ndim=%s'
' by y with ndim=%s to x subtensor with ndim=%s ' % (
opname, x_.type.ndim, y_.type.ndim))
return gof.Apply(self, [x_, y_, ilist_], [x_.type()])
@@ -459,13 +465,19 @@ class GpuAdvancedIncSubtensor1_dev20(GpuAdvancedIncSubtensor1):
if ilist_.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
if ilist_.type.broadcastable != (False,):
if ilist_.type.ndim != 1:
raise TypeError('index must be vector')
if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar')
if x_.type.broadcastable[0]:
# the caller should have made a copy of x len(ilist) times
raise TypeError('cannot index into a broadcastable dimension')
if y_.type.ndim > x_.type.ndim:
if self.set_instead_of_inc:
opname = 'set'
else:
opname = 'increment'
raise TypeError(
'cannot %s x subtensor with ndim=%s'
' by y with ndim=%s to x subtensor with ndim=%s ' % (
opname, x_.type.ndim, y_.type.ndim))
return gof.Apply(self, [x_, y_, ilist_], [x_.type()])
......
@@ -3713,20 +3713,20 @@ class T_Join_and_Split(unittest.TestCase):
self.assertRaises(ValueError, f, a_val, b_val, c_val, d_val, bad_e_val)
def test_infer_shape_join(self):
x1 = matrix()
x2 = matrix()
x3 = matrix()
def get_mat(s1, s2):
return numpy.asarray(numpy.random.uniform(size=(s1, s2)),
dtype=self.floatX)
x1 = self.shared(get_mat(3, 4))
x2 = self.shared(get_mat(2, 4))
x3 = self.shared(get_mat(1, 4))
# Test dim 0
z = self.join_op(0, x1, x2, x3)
f = theano.function([x1, x2, x3], z.shape, mode=self.mode)
f = theano.function([], z.shape, mode=self.mode)
topo = f.maker.fgraph.toposort()
out = f(get_mat(3, 4), get_mat(2, 4), get_mat(1, 4))
out = f()
assert (out == [6, 4]).all()
if theano.config.mode != 'FAST_COMPILE':
@@ -3735,10 +3735,12 @@ class T_Join_and_Split(unittest.TestCase):
# Test dim 1
z = self.join_op(1, x1, x2, x3)
f = theano.function([x1, x2, x3], z.shape, mode=self.mode)
f = theano.function([], z.shape, mode=self.mode)
topo = f.maker.fgraph.toposort()
out = f(get_mat(3, 4), get_mat(3, 4), get_mat(3, 5))
x1.set_value(get_mat(3, 4))
x2.set_value(get_mat(3, 4))
x3.set_value(get_mat(3, 5))
out = f()
assert (out == [3, 13]).all()
if theano.config.mode != 'FAST_COMPILE':
@@ -3750,7 +3752,10 @@ class T_Join_and_Split(unittest.TestCase):
self.assertRaises(ValueError, f, get_mat(3, 4), get_mat(3, 4),
get_mat(2, 5))
else:
f(get_mat(3, 4), get_mat(3, 4), get_mat(2, 5))
x1.set_value(get_mat(3, 4))
x2.set_value(get_mat(3, 4))
x3.set_value(get_mat(2, 5))
f()
def test_rebroadcast(self):
# Regression test for a crash that used to happen when rebroadcasting.
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment