提交 83303f27 authored 作者: abergeron's avatar abergeron

Merge pull request #2726 from nouiz/fix_tests

Fix tests related to join and subtensor operations
......@@ -3043,13 +3043,8 @@ class GpuJoin(tensor.Join, GpuOp):
axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
if not tensors:
raise ValueError('Cannot join an empty list of tensors')
are_instances = [isinstance(x.type, CudaNdarrayType) \
for x in tensors]
assert numpy.all(are_instances)
# no conversion needed, we just checked everything was
# a CNDA var
as_tensor_variable_args = tensors
as_tensor_variable_args = [as_cuda_ndarray_variable(x)
for x in tensors]
output_maker = \
lambda bcast: CudaNdarrayType(broadcastable=bcast)()
......
......@@ -3679,7 +3679,11 @@ CudaNdarray_CopyFromArray(CudaNdarray * self, PyArrayObject*obj)
CNDA_END_ALLOW_THREADS
if (CUBLAS_STATUS_SUCCESS != cerr)
{
PyErr_SetString(PyExc_RuntimeError, "error copying data to device memory");
PyErr_Format(PyExc_RuntimeError,
"CUBLAS error '%s' while copying %lli data element"
" to device memory",
cublasGetErrorString(cerr),
(long long)py_src_size);
Py_DECREF(py_src);
return -1;
}
......
......@@ -1000,6 +1000,7 @@ class T_subtensor(theano.tensor.tests.test_subtensor.T_subtensor):
adv_incsub1 = cuda.GpuAdvancedIncSubtensor1
mode = mode_with_gpu
dtype = 'float32'
type = tcn.CudaNdarrayType
ignore_topo = (B.HostFromGpu, B.GpuFromHost, theano.compile.DeepCopyOp)
fast_compile = False
ops = (cuda.GpuSubtensor, cuda.GpuIncSubtensor,
......
......@@ -856,7 +856,8 @@ class GpuJoin(HideC, Join):
for i, inp in enumerate(inputs[1:]):
copy_to_list.append("als[%s] = &%s->ga;" % (i, inp))
return """
GpuArray **als = (GpuArray **)PyMem_Malloc(sizeof(GpuArray *) * %(n)s);
const GpuArray **als = (const GpuArray **)PyMem_Malloc(sizeof(GpuArray *) *
%(n)s);
if (als == NULL) {
PyErr_NoMemory();
%(fail)s
......
......@@ -368,13 +368,19 @@ class GpuAdvancedIncSubtensor1(HideC, tensor.AdvancedIncSubtensor1):
if ilist_.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
if ilist_.type.broadcastable != (False,):
if ilist_.type.ndim != 1:
raise TypeError('index must be vector')
if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar')
if x_.type.broadcastable[0]:
# the caller should have made a copy of x len(ilist) times
raise TypeError('cannot index into a broadcastable dimension')
if y_.type.ndim > x_.type.ndim:
if self.set_instead_of_inc:
opname = 'set'
else:
opname = 'increment'
raise TypeError(
'cannot %s x subtensor with ndim=%s'
' by y with ndim=%s to x subtensor with ndim=%s ' % (
opname, x_.type.ndim, y_.type.ndim))
return gof.Apply(self, [x_, y_, ilist_], [x_.type()])
......@@ -459,13 +465,19 @@ class GpuAdvancedIncSubtensor1_dev20(GpuAdvancedIncSubtensor1):
if ilist_.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
if ilist_.type.broadcastable != (False,):
if ilist_.type.ndim != 1:
raise TypeError('index must be vector')
if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar')
if x_.type.broadcastable[0]:
# the caller should have made a copy of x len(ilist) times
raise TypeError('cannot index into a broadcastable dimension')
if y_.type.ndim > x_.type.ndim:
if self.set_instead_of_inc:
opname = 'set'
else:
opname = 'increment'
raise TypeError(
'cannot %s x subtensor with ndim=%s'
' by y with ndim=%s to x subtensor with ndim=%s ' % (
opname, x_.type.ndim, y_.type.ndim))
return gof.Apply(self, [x_, y_, ilist_], [x_.type()])
......
......@@ -377,7 +377,7 @@ class G_Join_and_Split(test_basic.T_Join_and_Split):
m = self.shared(rng.rand(4, 6).astype(self.floatX))
o = T.Split(2)(m, 0, [2, 2])
f = theano.function([], o, mode=self.mode)
assert any([isinstance(node.op, self.split_op)
assert any([isinstance(node.op, self.split_op_class)
for node in f.maker.fgraph.toposort()])
o1, o2 = f()
assert numpy.allclose(o1, m.get_value(borrow=True)[:2])
......
......@@ -3713,20 +3713,20 @@ class T_Join_and_Split(unittest.TestCase):
self.assertRaises(ValueError, f, a_val, b_val, c_val, d_val, bad_e_val)
def test_infer_shape_join(self):
x1 = matrix()
x2 = matrix()
x3 = matrix()
def get_mat(s1, s2):
return numpy.asarray(numpy.random.uniform(size=(s1, s2)),
dtype=self.floatX)
x1 = self.shared(get_mat(3, 4))
x2 = self.shared(get_mat(2, 4))
x3 = self.shared(get_mat(1, 4))
# Test dim 0
z = self.join_op(0, x1, x2, x3)
f = theano.function([x1, x2, x3], z.shape, mode=self.mode)
f = theano.function([], z.shape, mode=self.mode)
topo = f.maker.fgraph.toposort()
out = f(get_mat(3, 4), get_mat(2, 4), get_mat(1, 4))
out = f()
assert (out == [6, 4]).all()
if theano.config.mode != 'FAST_COMPILE':
......@@ -3735,10 +3735,12 @@ class T_Join_and_Split(unittest.TestCase):
# Test dim 1
z = self.join_op(1, x1, x2, x3)
f = theano.function([x1, x2, x3], z.shape, mode=self.mode)
f = theano.function([], z.shape, mode=self.mode)
topo = f.maker.fgraph.toposort()
out = f(get_mat(3, 4), get_mat(3, 4), get_mat(3, 5))
x1.set_value(get_mat(3, 4))
x2.set_value(get_mat(3, 4))
x3.set_value(get_mat(3, 5))
out = f()
assert (out == [3, 13]).all()
if theano.config.mode != 'FAST_COMPILE':
......@@ -3746,11 +3748,13 @@ class T_Join_and_Split(unittest.TestCase):
assert not isinstance(node.op, type(self.join_op))
# Test hide error
x1.set_value(get_mat(3, 4))
x2.set_value(get_mat(3, 4))
x3.set_value(get_mat(2, 5))
if not self.hide_error:
self.assertRaises(ValueError, f, get_mat(3, 4), get_mat(3, 4),
get_mat(2, 5))
self.assertRaises(ValueError, f)
else:
f(get_mat(3, 4), get_mat(3, 4), get_mat(2, 5))
f()
def test_rebroadcast(self):
# Regression test for a crash that used to happen when rebroadcasting.
......
......@@ -127,7 +127,14 @@ class TestBinCountOp(utt.InferShapeTester):
x = T.vector('x', dtype=dtype)
# uint64 always fails
if dtype in ('uint64',):
# int64 and uint32 also fail if python int are 32-bit
int_bitwidth = theano.gof.python_int_bitwidth()
if int_bitwidth == 64:
numpy_unsupported_dtypes = ('uint64',)
if int_bitwidth == 32:
numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
# uint64 always fails
if dtype in numpy_unsupported_dtypes:
self.assertRaises(TypeError, bincount, x)
else:
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论