Commit 30ff90be authored by Frederic Bastien

modified CudaNdarray_Reshape to accept int as a shape object. Test this case and…

modified CudaNdarray_Reshape to accept an int as a shape object. Test this case, and test that GpuFlatten works correctly, as it uses this code.
Parent 37013a1d
......@@ -516,29 +516,40 @@ __global__ void k_copy_reshape_rowmajor(unsigned int numEls,
PyObject * CudaNdarray_Reshape(CudaNdarray * self, PyObject * shape)
{
// check shape tuple
if (!PyTuple_Check(shape))
{
PyErr_SetString(PyExc_TypeError, "shape must be tuple of integers");
unsigned int rval_nd;
unsigned int * rval_dims;
unsigned int rval_size = 1;
if (PyTuple_Check(shape)){
// copy shape to integer array
rval_nd = PyTuple_Size(shape);
}else if (PyInt_Check(shape)){
rval_nd = 1;
}else{
PyErr_SetString(PyExc_TypeError, "shape must be tuple of integers or an integer");
return NULL;
}
// copy shape to integer array
unsigned int rval_nd = PyTuple_Size(shape);
unsigned int * rval_dims = (unsigned int*)malloc(rval_nd * sizeof(int));
unsigned int rval_size = 1;
for (int i = 0; i < rval_nd; ++i)
{
rval_dims[i] = PyInt_AsLong(PyTuple_GetItem(shape, i)); //GetItem returns borrowed reference
if (PyErr_Occurred()) //error in AsLong
{
free(rval_dims);
rval_dims = (unsigned int*)malloc(rval_nd * sizeof(int));
if(PyTuple_Check(shape)){
for (int i = 0; i < rval_nd; ++i)
{
rval_dims[i] = PyInt_AsLong(PyTuple_GetItem(shape, i)); //GetItem returns borrowed reference
if (PyErr_Occurred()) //error in AsLong
{
free(rval_dims);
return NULL;
}
if(rval_dims[i]<=0){
PyErr_Format(PyExc_ValueError, "Reshape has invalid dimension %i (must be >0)",rval_dims[i]);
free(rval_dims);
return NULL;
}
if(rval_dims[i]<=0){
PyErr_Format(PyExc_ValueError, "Reshape has invalid dimension %i (must be >0)",rval_dims[i]);
free(rval_dims);
return NULL;
}
rval_size = rval_size * rval_dims[i];
}
rval_size = rval_size * rval_dims[i];
}else{
rval_size = PyInt_AsLong(shape);
rval_dims[0] = rval_size;
}
// calculate new size, assert same as old size
if (rval_size != CudaNdarray_SIZE(self))
......
......@@ -111,6 +111,10 @@ def test_sum():
assert T.Sum in [x.op.__class__ for x in f.maker.env.toposort()]
assert numpy.allclose(f2(val2),f(val))
def test_flatten():
    # GpuFlatten must collapse a 2-d input into a one-dimensional result;
    # flatten() on a GPU variable routes through CudaNdarray_Reshape.
    var = cuda.fmatrix('x')
    flatten_fn = theano.function([var], var.flatten())
    out = flatten_fn([[0., 0.], [0., 0.]])
    assert len(out.shape) == 1
def test_reshape():
......
......@@ -176,12 +176,13 @@ def test_reshape():
((1,2,3), (3,2,1)),
((1,2,3), (6,)),
((1,2,3,2), (6,2)),
((2,3,2), (6,2))
((2,3,2), (6,2)),
((2,3,2), 12)
]
def subtest(shape_1, shape_2):
#print >> sys.stdout, "INFO: shapes", shape_1, shape_2
a = theano._asarray(numpy.random.rand(*shape_1), dtype='float32')
a = theano._asarray(numpy.random.random(shape_1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
aa = a.reshape(shape_2)
......
Markdown is supported
0%
You are about to add 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment