提交 007540a5 authored 作者: lamblin's avatar lamblin

Merge pull request #919 from nouiz/np17

Np17
......@@ -63,7 +63,7 @@ you should check the strides and alignment.
return """
Py_XDECREF(%(y)s);
%(y)s = (PyArrayObject*)PyArray_FromArray(
%(x)s, 0, NPY_ENSURECOPY);
%(x)s, 0, NPY_ARRAY_ENSURECOPY);
if (!(%y)s) %(fail)s;
dtype_%(y)s * y = (dtype_%(y)s*)%(y)s->data;
dtype_%(x)s * x = (dtype_%(x)s*)%(x)s->data;
......@@ -147,7 +147,7 @@ the correct size for the output. This is essentially simulating the line
Py_XDECREF(%(y)s);
%(y)s = (PyArrayObject*)PyArray_FromArray(
%(x)s, 0, NPY_ENSURECOPY);
%(x)s, 0, NPY_ARRAY_ENSURECOPY);
The first line reduces the reference count of the data that y originally
pointed to. The second line allocates the new data and makes y point to it.
......
......@@ -56,30 +56,30 @@ class BROKEN_ON_PURPOSE_Add(gof.Op):
a, b = inp
z, = out
return """
if (%(a)s->nd != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a) != 1"); %(fail)s;}
if (%(b)s->nd != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 1"); %(fail)s;}
if (PyArray_NDIM(%(a)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a) != 1"); %(fail)s;}
if (PyArray_NDIM(%(b)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 1"); %(fail)s;}
if (%(a)s->descr->type_num != PyArray_DOUBLE)
if (PyArray_DESCR(%(a)s)->type_num != NPY_DOUBLE)
{PyErr_SetString(PyExc_NotImplementedError, "a dtype not NPY_DOUBLE"); %(fail)s;}
if (%(b)s->descr->type_num != PyArray_DOUBLE)
if (PyArray_DESCR(%(b)s)->type_num != NPY_DOUBLE)
{PyErr_SetString(PyExc_NotImplementedError, "b's dtype not NPY_DOUBLE"); %(fail)s;}
if (%(a)s->dimensions[0] != %(b)s->dimensions[0])
if (PyArray_DIMS(%(a)s)[0] != PyArray_DIMS(%(b)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "a and b have different lengths"); %(fail)s;}
if ((!%(z)s)
|| (%(z)s->dimensions[0] != %(b)s->dimensions[0])
|| (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(b)s)[0])
)
{
{Py_XDECREF(%(z)s);}
npy_intp dims[] = {0};
dims[0] = %(b)s->dimensions[0];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(1, dims, %(b)s->descr->type_num);
dims[0] = PyArray_DIMS(%(b)s)[0];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(1, dims, PyArray_DESCR(%(b)s)->type_num);
}
{
for (npy_intp m = 0; m < %(z)s->dimensions[0]; ++m)
for (npy_intp m = 0; m < PyArray_DIMS(%(z)s)[0]; ++m)
{
((double*)PyArray_GETPTR1(%(z)s, m))[0]
= 0.5
......@@ -150,13 +150,13 @@ class WeirdBrokenOp(gof.Op):
else:
z_code = """
{Py_XDECREF(%(z)s);}
%(z)s = (PyArrayObject*) PyArray_SimpleNew(1, %(a)s->dimensions, %(a)s->descr->type_num);
%(z)s = (PyArrayObject*) PyArray_SimpleNew(1, PyArray_DIMS(%(a)s), PyArray_DESCR(%(a)s)->type_num);
"""
prep_vars = """
//the output array has size M x N
npy_intp M = %(a)s->dimensions[0];
npy_intp Sa = %(a)s->strides[0] / %(a)s->descr->elsize;
npy_intp Sz = %(z)s->strides[0] / %(z)s->descr->elsize;
npy_intp M = PyArray_DIMS(%(a)s)[0];
npy_intp Sa = %(a)s->strides[0] / PyArray_DESCR(%(a)s)->elsize;
npy_intp Sz = %(z)s->strides[0] / PyArray_DESCR(%(z)s)->elsize;
npy_double * Da = (npy_double*)%(a)s->data;
npy_double * Dz = (npy_double*)%(z)s->data;
......@@ -603,22 +603,22 @@ class BrokenCImplementationAdd(gof.Op):
debug = 0
return """
//printf("executing c_code\\n");
if (%(a)s->nd != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(a) != 2"); %(fail)s;}
if (%(b)s->nd != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 2"); %(fail)s;}
if (PyArray_NDIM(%(a)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(a) != 2"); %(fail)s;}
if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 2"); %(fail)s;}
if (%(a)s->descr->type_num != PyArray_FLOAT)
if (PyArray_DESCR(%(a)s)->type_num != NPY_FLOAT)
{PyErr_SetString(PyExc_NotImplementedError, "a dtype not NPY_FLOAT"); %(fail)s;}
if (%(b)s->descr->type_num != PyArray_FLOAT)
if (PyArray_DESCR(%(b)s)->type_num != NPY_FLOAT)
{PyErr_SetString(PyExc_NotImplementedError, "b's dtype not NPY_FLOAT"); %(fail)s;}
if (%(a)s->dimensions[0] != %(a)s->dimensions[1])
if (PyArray_DIMS(%(a)s)[0] != PyArray_DIMS(%(a)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "a is not square"); %(fail)s;}
if (%(b)s->dimensions[0] != %(b)s->dimensions[1])
if (PyArray_DIMS(%(b)s)[0] != PyArray_DIMS(%(b)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "b is not square"); %(fail)s;}
if (%(a)s->dimensions[0] != %(b)s->dimensions[0])
if (PyArray_DIMS(%(a)s)[0] != PyArray_DIMS(%(b)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "a and b have different dimensions"); %(fail)s;}
// We do not check for c_contiguous property here
......@@ -626,32 +626,32 @@ class BrokenCImplementationAdd(gof.Op):
{
if (!%(z)s)
printf("%(z)s is not there, %%p \\n", %(z)s);
else if (%(z)s->dimensions[0] != %(b)s->dimensions[0])
else if (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(b)s)[0])
printf("Dimension 0 mismatch for %(z)s and %(b)s\\n");
else if (%(z)s->dimensions[1] != %(b)s->dimensions[1])
else if (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1])
printf("Dimension 1 mismatch for %(z)s and %(b)s\\n");
else
printf("Reusing %(z)s\\n");
}
if ((!%(z)s)
|| (%(z)s->dimensions[0] != %(b)s->dimensions[0])
|| (%(z)s->dimensions[1] != %(b)s->dimensions[1])
|| (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(b)s)[0])
|| (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1])
)
{
Py_XDECREF(%(z)s);
npy_intp dims[] = {0, 0};
dims[0] = %(b)s->dimensions[0];
dims[1] = %(b)s->dimensions[1];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(b)s->descr->type_num);
dims[0] = PyArray_DIMS(%(b)s)[0];
dims[1] = PyArray_DIMS(%(b)s)[1];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, PyArray_DESCR(%(b)s)->type_num);
}
// Let us assume that %(z)s is c_contiguous
{
dtype_%(z)s * z = ((dtype_%(z)s*)(PyArray_GETPTR2(%(z)s,0,0)));
for (int i=0; i<%(b)s->dimensions[0]; i++)
for (int i=0; i<PyArray_DIMS(%(b)s)[0]; i++)
{
for (int j=0; j<%(b)s->dimensions[1]; j++)
for (int j=0; j<PyArray_DIMS(%(b)s)[1]; j++)
{
*z = ((float*)PyArray_GETPTR2(%(a)s, i, j))[0] +
((float*)PyArray_GETPTR2(%(b)s, i, j))[0] ;
......
......@@ -1453,6 +1453,22 @@ class GCC_compiler(object):
@staticmethod
def compile_args():
cxxflags = [flag for flag in config.gcc.cxxflags.split(' ') if flag]
#NumPy 1.7 deprecates the old C-API. Most places have been updated
#to use the new API, but not all of them yet. Once the migration is
#finished, enable the following macro to assert that no new code
#using the old API gets introduced.
#cxxflags.append("-D NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION")
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
# NumPy 1.7 introduced the NPY_ARRAY_* names for the following
# macros; they do not exist in older NumPy versions, so define
# them as aliases of the old names there.
if bool(numpy_ver < [1, 7]):
cxxflags.append("-D NPY_ARRAY_ENSURECOPY=NPY_ENSURECOPY")
cxxflags.append("-D NPY_ARRAY_ALIGNED=NPY_ALIGNED")
cxxflags.append("-D NPY_ARRAY_WRITEABLE=NPY_WRITEABLE")
cxxflags.append("-D NPY_ARRAY_UPDATE_ALL=NPY_UPDATE_ALL")
cxxflags.append("-D NPY_ARRAY_C_CONTIGUOUS=NPY_C_CONTIGUOUS")
cxxflags.append("-D NPY_ARRAY_F_CONTIGUOUS=NPY_F_CONTIGUOUS")
return cxxflags
@staticmethod
......
......@@ -288,7 +288,8 @@ class TestComputeTestValue(unittest.TestCase):
# The second is a new message in numpy 1.6.
assert (str(e).startswith("shape mismatch") or
str(e).startswith("operands could not be broadcast "
"together with shapes"))
"together with shapes") or
str(e).startswith("could not broadcast input"))
finally:
theano.config.compute_test_value = orig_compute_test_value
......
......@@ -123,7 +123,7 @@ class GpuDot22Scalar(GpuOp):
fail = sub['fail']
return """
#define REAL float
float %(name)s_a = (%(a)s->descr->type_num == PyArray_FLOAT)
float %(name)s_a = (PyArray_TYPE(%(a)s) == NPY_FLOAT)
? (REAL)(((float*)%(a)s->data)[0])
: (REAL)(((double*)%(a)s->data)[0]);
#undef REAL
......@@ -231,11 +231,11 @@ class GpuGemm(GpuOp):
print >> sio, """
#define REAL float
float %(name)s_a = (%(a)s->descr->type_num == PyArray_FLOAT)
float %(name)s_a = (PyArray_TYPE(%(a)s) == NPY_FLOAT)
? (REAL)(((float*)%(a)s->data)[0])
: (REAL)(((double*)%(a)s->data)[0]);
float %(name)s_b = (%(b)s->descr->type_num == PyArray_FLOAT) ?
float %(name)s_b = (PyArray_TYPE(%(b)s) == NPY_FLOAT) ?
(REAL)(((float*)%(b)s->data)[0])
: (REAL)(((double*)%(b)s->data)[0]);
#undef REAL
......
......@@ -788,7 +788,7 @@ CudaNdarray_TakeFrom(CudaNdarray * self, PyObject *args){
PyErr_SetString(PyExc_TypeError, "CudaNdarray_TakeFrom: need a ndarray for indices with dtype int32");
return NULL;
}
if (((PyArrayObject*)indices_obj)->nd != 1) {
if (PyArray_NDIM(((PyArrayObject*)indices_obj)) != 1) {
PyErr_SetString(PyExc_TypeError, "CudaNdarray_TakeFrom: need a CudaNdarray of indices with only 1 dimensions");
return NULL;
}
......@@ -2921,7 +2921,7 @@ filter(PyObject* __unsed_self, PyObject *args) // args = (data, broadcastable, s
Py_DECREF(broadcastable);
return NULL;
}
for (int i = 0; i < data->nd; ++i)
for (int i = 0; i < PyArray_NDIM(data); ++i)
{
if ((data->dimensions[i] > 1) && PyInt_AsLong(PyTuple_GetItem(broadcastable, Py_ssize_t(i))))
{
......@@ -3102,7 +3102,7 @@ cublas_shutdown()
int
CudaNdarray_CopyFromArray(CudaNdarray * self, PyArrayObject*obj)
{
int err = CudaNdarray_alloc_contiguous(self, obj->nd, obj->dimensions);
int err = CudaNdarray_alloc_contiguous(self, PyArray_NDIM(obj), obj->dimensions);
if (err) {
return err;
}
......
......@@ -214,7 +214,7 @@ class GpuImages2Neibs(Images2Neibs, GpuOp):
%(fail)s;
}
if (%(neib_shape)s->dimensions[0] != 2)
if (PyArray_DIMS(%(neib_shape)s)[0] != 2)
{
PyErr_Format(PyExc_ValueError,
"neib_shape has to contain two elements");
......
......@@ -153,7 +153,7 @@ class CURAND_Base(GpuOp):
%(ndim)s, %(size)s->dimensions[0]);
%(fail)s
}
if (%(size)s->descr->type_num != PyArray_INT32)
if (PyArray_DESCR(%(size)s)->type_num != NPY_INT32)
{
PyErr_SetString(PyExc_ValueError, "size must be int32");
%(fail)s
......
......@@ -55,31 +55,31 @@ class MultinomialFromUniform(Op):
fail = sub['fail']
return """
if (%(pvals)s->nd != 2)
if (PyArray_NDIM(%(pvals)s) != 2)
{
PyErr_Format(PyExc_TypeError, "pvals wrong rank");
%(fail)s;
}
if (%(unis)s->nd != 1)
if (PyArray_NDIM(%(unis)s) != 1)
{
PyErr_Format(PyExc_TypeError, "unis wrong rank");
%(fail)s;
}
if (%(unis)s->dimensions[0] != %(pvals)s->dimensions[0])
if (PyArray_DIMS(%(unis)s)[0] != PyArray_DIMS(%(pvals)s)[0])
{
PyErr_Format(PyExc_ValueError, "unis.shape[0] != pvals.shape[0]");
%(fail)s;
}
if ((NULL == %(z)s)
|| ((%(z)s->dimensions)[0] != (%(pvals)s->dimensions)[0])
|| ((%(z)s->dimensions)[1] != (%(pvals)s->dimensions)[1])
|| ((PyArray_DIMS(%(z)s))[0] != (PyArray_DIMS(%(pvals)s))[0])
|| ((PyArray_DIMS(%(z)s))[1] != (PyArray_DIMS(%(pvals)s))[1])
)
{
Py_XDECREF(%(z)s);
%(z)s = (PyArrayObject*) PyArray_ZEROS(2,
%(pvals)s->dimensions,
PyArray_DIMS(%(pvals)s),
type_num_%(z)s,
0);
if (!%(z)s)
......@@ -91,8 +91,8 @@ class MultinomialFromUniform(Op):
{ // NESTED SCOPE
const int nb_multi = %(pvals)s->dimensions[0];
const int nb_outcomes = %(pvals)s->dimensions[1];
const int nb_multi = PyArray_DIMS(%(pvals)s)[0];
const int nb_outcomes = PyArray_DIMS(%(pvals)s)[1];
//
// For each multinomial, loop over each possible outcome
......@@ -233,12 +233,12 @@ class GpuMultinomialFromUniform(MultinomialFromUniform, GpuOp):
fail = sub['fail']
return """
if (%(pvals)s->nd != 2)
if (PyArray_NDIM(%(pvals)s) != 2)
{
PyErr_Format(PyExc_TypeError, "pvals wrong rank");
%(fail)s;
}
if (%(unis)s->nd != 1)
if (PyArray_NDIM(%(unis)s) != 1)
{
PyErr_Format(PyExc_TypeError, "unis wrong rank");
%(fail)s;
......
......@@ -114,28 +114,28 @@ class Images2Neibs(Op):
int grid_c = -1; //number of patch in height
int grid_d = -1; //number of patch in width
{
if (%(ten4)s->nd != 4)
if (PyArray_NDIM(%(ten4)s) != 4)
{
PyErr_Format(PyExc_TypeError, "ten4 wrong rank");
%(fail)s;
}
if (%(neib_shape)s->nd != 1)
if (PyArray_NDIM(%(neib_shape)s) != 1)
{
PyErr_Format(PyExc_TypeError, "neib_shape wrong rank");
%(fail)s;
}
if ( (%(neib_shape)s->dimensions)[0] != 2)
if ( (PyArray_DIMS(%(neib_shape)s))[0] != 2)
{
PyErr_Format(PyExc_TypeError, "neib_shape wrong shape ; has to"
" contain 2 elements");
%(fail)s;
}
if (%(neib_step)s->nd != 1)
if (PyArray_NDIM(%(neib_step)s) != 1)
{
PyErr_Format(PyExc_TypeError, "neib_step wrong rank");
%(fail)s;
}
if ( (%(neib_step)s->dimensions)[0] != 2)
if ( (PyArray_DIMS(%(neib_step)s))[0] != 2)
{
PyErr_Format(PyExc_TypeError,
"neib_step wrong step ; has to contain 2 elements");
......@@ -154,33 +154,33 @@ class Images2Neibs(Op):
PyErr_Format(PyExc_TypeError, "Images2Neibs: in mode wrap_centered need patch with odd shapes");
%(fail)s;
}
if ( (%(ten4)s->dimensions)[2] < c || (%(ten4)s->dimensions)[3] < d)
if ( (PyArray_DIMS(%(ten4)s))[2] < c || (PyArray_DIMS(%(ten4)s))[3] < d)
{
PyErr_Format(PyExc_TypeError, "Images2Neibs: in wrap_centered mode, don't support image shapes smaller then the patch shapes: neib_shape=(%%ld,%%ld), ten4[2:]=[%%ld,%%ld]",
(long int)c, (long int)d, (long int)(%(ten4)s->dimensions[2]), (long int)(%(ten4)s->dimensions[3]));
(long int)c, (long int)d, (long int)(PyArray_DIMS(%(ten4)s)[2]), (long int)(PyArray_DIMS(%(ten4)s)[3]));
%(fail)s;
}
grid_c = CEIL_INTDIV(((%(ten4)s->dimensions)[2]),step_x);
grid_d = CEIL_INTDIV(((%(ten4)s->dimensions)[3]),step_y);
grid_c = CEIL_INTDIV(((PyArray_DIMS(%(ten4)s))[2]),step_x);
grid_d = CEIL_INTDIV(((PyArray_DIMS(%(ten4)s))[3]),step_y);
}else if ( "%(mode)s" == "valid") {
if ( ((%(ten4)s->dimensions)[2] < c) ||( (((%(ten4)s->dimensions)[2]-c) %% step_x)!=0))
if ( ((PyArray_DIMS(%(ten4)s))[2] < c) ||( (((PyArray_DIMS(%(ten4)s))[2]-c) %% step_x)!=0))
{
PyErr_Format(PyExc_TypeError, "neib_shape[0]=%%ld, neib_step[0]=%%ld and ten4.shape[2]=%%ld not consistent",
(long int)c, (long int)step_x, (long int)(%(ten4)s->dimensions[2]));
(long int)c, (long int)step_x, (long int)(PyArray_DIMS(%(ten4)s)[2]));
%(fail)s;
}
if ( ((%(ten4)s->dimensions)[3] < d) ||( (((%(ten4)s->dimensions)[3]-d) %% step_y)!=0))
if ( ((PyArray_DIMS(%(ten4)s))[3] < d) ||( (((PyArray_DIMS(%(ten4)s))[3]-d) %% step_y)!=0))
{
PyErr_Format(PyExc_TypeError, "neib_shape[1]=%%ld, neib_step[1]=%%ld and ten4.shape[3]=%%ld not consistent",
(long int)d, (long int)step_y, (long int)(%(ten4)s->dimensions[3]));
(long int)d, (long int)step_y, (long int)(PyArray_DIMS(%(ten4)s)[3]));
%(fail)s;
}
grid_c = 1+(((%(ten4)s->dimensions)[2]-c)/step_x); //number of patch in height
grid_d = 1+(((%(ten4)s->dimensions)[3]-d)/step_y); //number of patch in width
grid_c = 1+(((PyArray_DIMS(%(ten4)s))[2]-c)/step_x); //number of patch in height
grid_d = 1+(((PyArray_DIMS(%(ten4)s))[3]-d)/step_y); //number of patch in width
}else if ( "%(mode)s" == "ignore_borders") {
grid_c = 1+(((%(ten4)s->dimensions)[2]-c)/step_x); //number of patch in height
grid_d = 1+(((%(ten4)s->dimensions)[3]-d)/step_y); //number of patch in width
grid_c = 1+(((PyArray_DIMS(%(ten4)s))[2]-c)/step_x); //number of patch in height
grid_d = 1+(((PyArray_DIMS(%(ten4)s))[3]-d)/step_y); //number of patch in width
}else{
PyErr_Format(PyExc_TypeError, "Images2Neibs: unknow mode '%(mode)s'");
%(fail)s;
......@@ -190,12 +190,12 @@ class Images2Neibs(Op):
const npy_intp z_dim1 = c * d;
const npy_intp z_dim0 = grid_c
* grid_d
* (%(ten4)s->dimensions)[1]
* (%(ten4)s->dimensions)[0];
* (PyArray_DIMS(%(ten4)s))[1]
* (PyArray_DIMS(%(ten4)s))[0];
if ((NULL == %(z)s)
|| ((%(z)s->dimensions)[0] != z_dim0 )
|| ((%(z)s->dimensions)[1] != z_dim1 )
|| ((PyArray_DIMS(%(z)s))[0] != z_dim0 )
|| ((PyArray_DIMS(%(z)s))[1] != z_dim1 )
)
{
Py_XDECREF(%(z)s);
......@@ -218,10 +218,10 @@ class Images2Neibs(Op):
{ // NESTED SCOPE
const int nb_batch = (%(ten4)s->dimensions)[0];
const int nb_stack = (%(ten4)s->dimensions)[1];
const int height = (%(ten4)s->dimensions)[2];
const int width = (%(ten4)s->dimensions)[3];
const int nb_batch = (PyArray_DIMS(%(ten4)s))[0];
const int nb_stack = (PyArray_DIMS(%(ten4)s))[1];
const int height = (PyArray_DIMS(%(ten4)s))[2];
const int width = (PyArray_DIMS(%(ten4)s))[3];
// (c,d) = neib_shape
const npy_intp c = (npy_intp) *(dtype_%(neib_shape)s*) PyArray_GETPTR1(%(neib_shape)s, 0);
......
......@@ -220,9 +220,9 @@ class mrg_uniform(mrg_uniform_base):
rstate, size = inp
o_rstate, o_sample = out
if self.inplace:
o_rstate_requirement = 'NPY_C_CONTIGUOUS|NPY_ALIGNED'
o_rstate_requirement = 'NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_ALIGNED'
else:
o_rstate_requirement = 'NPY_ENSURECOPY|NPY_C_CONTIGUOUS|NPY_ALIGNED'
o_rstate_requirement = 'NPY_ARRAY_ENSURECOPY|NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_ALIGNED'
ndim = self.output_type.ndim
o_type_num = numpy.asarray(0, dtype=self.output_type.dtype).dtype.num
fail = sub['fail']
......@@ -241,7 +241,7 @@ class mrg_uniform(mrg_uniform_base):
int n_elements = 1;
int n_streams = 0;
int must_alloc_sample = ((NULL == %(o_sample)s)
|| (%(o_sample)s->nd != %(ndim)s)
|| (PyArray_NDIM(%(o_sample)s) != %(ndim)s)
|| !(PyArray_ISCONTIGUOUS(%(o_sample)s)));
%(otype)s * sample_data;
npy_int32 * state_data;
......@@ -261,18 +261,18 @@ class mrg_uniform(mrg_uniform_base):
const npy_int32 MASK2 = 65535; //2^16 - 1
const npy_int32 MULT2 = 21069;
if (%(size)s->nd != 1)
if (PyArray_NDIM(%(size)s) != 1)
{
PyErr_SetString(PyExc_ValueError, "size must be vector");
%(fail)s
}
if (%(size)s->dimensions[0] != %(ndim)s)
if (PyArray_DIMS(%(size)s)[0] != %(ndim)s)
{
PyErr_Format(PyExc_ValueError, "size must have length %%i (not %%i)",
%(ndim)s, int(%(size)s->dimensions[0]));
%(ndim)s, int(PyArray_DIMS(%(size)s)[0]));
%(fail)s
}
if (%(size)s->descr->type_num != PyArray_INT32)
if (PyArray_DESCR(%(size)s)->type_num != NPY_INT32)
{
PyErr_SetString(PyExc_ValueError, "size must be int32");
%(fail)s
......@@ -281,7 +281,7 @@ class mrg_uniform(mrg_uniform_base):
{
odims[i] = ((npy_int32*)(%(size)s->data + %(size)s->strides[0] * i))[0];
n_elements *= odims[i];
must_alloc_sample = must_alloc_sample || (%(o_sample)s->dimensions[i] != odims[i]);
must_alloc_sample = must_alloc_sample || (PyArray_DIMS(%(o_sample)s)[i] != odims[i]);
//fprintf(stderr, "size %%i %%i\\n", i, (int)odims[i]);
}
if (must_alloc_sample)
......@@ -296,22 +296,22 @@ class mrg_uniform(mrg_uniform_base):
Py_XDECREF(%(o_rstate)s);
%(o_rstate)s = (PyArrayObject*)PyArray_FromAny(py_%(rstate)s, NULL, 0, 0, %(o_rstate_requirement)s,NULL);
if (%(o_rstate)s->nd != 2)
if (PyArray_NDIM(%(o_rstate)s) != 2)
{
PyErr_SetString(PyExc_ValueError, "rstate must be matrix");
%(fail)s
}
if (%(o_rstate)s->dimensions[1] != 6)
if (PyArray_DIMS(%(o_rstate)s)[1] != 6)
{
PyErr_Format(PyExc_ValueError, "rstate must have 6 columns");
%(fail)s
}
if (%(o_rstate)s->descr->type_num != PyArray_INT32)
if (PyArray_DESCR(%(o_rstate)s)->type_num != NPY_INT32)
{
PyErr_SetString(PyExc_ValueError, "rstate must be int32");
%(fail)s
}
n_streams = %(o_rstate)s->dimensions[0];
n_streams = PyArray_DIMS(%(o_rstate)s)[0];
sample_data = (%(otype)s *) %(o_sample)s->data;
state_data = (npy_int32 *) %(o_rstate)s->data;
......@@ -501,20 +501,20 @@ class GPU_mrg_uniform(mrg_uniform_base, GpuOp):
int must_alloc_sample = ((NULL == %(o_sample)s)
|| !CudaNdarray_Check(py_%(o_sample)s)
|| !CudaNdarray_is_c_contiguous(%(o_sample)s)
|| (%(o_sample)s->nd != %(ndim)s));
|| (PyArray_NDIM(%(o_sample)s) != %(ndim)s));
if (%(size)s->nd != 1)
if (PyArray_NDIM(%(size)s) != 1)
{
PyErr_SetString(PyExc_ValueError, "size must be vector");
%(fail)s
}
if (%(size)s->dimensions[0] != %(ndim)s)
if (PyArray_DIMS(%(size)s)[0] != %(ndim)s)
{
PyErr_Format(PyExc_ValueError, "size must have length %%i (not %%i)",
%(ndim)s, %(size)s->dimensions[0]);
%(ndim)s, PyArray_DIMS(%(size)s)[0]);
%(fail)s
}
if (%(size)s->descr->type_num != PyArray_INT32)
if (PyArray_DESCR(%(size)s)->type_num != NPY_INT32)
{
PyErr_SetString(PyExc_ValueError, "size must be int32");
%(fail)s
......@@ -552,7 +552,7 @@ class GPU_mrg_uniform(mrg_uniform_base, GpuOp):
%(o_rstate)s = (CudaNdarray*)CudaNdarray_Copy(%(rstate)s);
}
if (%(o_rstate)s->nd != 1)
if (PyArray_NDIM(%(o_rstate)s) != 1)
{
PyErr_SetString(PyExc_ValueError, "rstate must be vector");
%(fail)s;
......
......@@ -3006,38 +3006,38 @@ class StructuredDotGradCSC(gof.Op):
'g_ab')
return """
if (%(_d)s->nd != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(d) != 2"); %(fail)s;}
if (%(_g)s->nd != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(g) != 2"); %(fail)s;}
if (%(_indices)s->nd != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indices) != 1"); %(fail)s;}
if (%(_indptr)s->nd != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indptr) != 1"); %(fail)s;}
if (PyArray_NDIM(%(_d)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(d) != 2"); %(fail)s;}
if (PyArray_NDIM(%(_g)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(g) != 2"); %(fail)s;}
if (PyArray_NDIM(%(_indices)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indices) != 1"); %(fail)s;}
if (PyArray_NDIM(%(_indptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indptr) != 1"); %(fail)s;}
if( %(_indices)s->descr->type_num != PyArray_INT32) {
if( PyArray_DESCR(%(_indices)s)->type_num != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "C"); %(fail)s;}
if( %(_indptr)s->descr->type_num != PyArray_INT32)
if( PyArray_DESCR(%(_indptr)s)->type_num != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "D"); %(fail)s;}
if( %(_d)s->dimensions[1] != %(_g)s->dimensions[1])
if( PyArray_DIMS(%(_d)s)[1] != PyArray_DIMS(%(_g)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "d and g have different numbers of columns"); %(fail)s;}
if (!%(_zout)s
|| (%(_zout)s->dimensions[0] != %(_indices)s->dimensions[0]))
|| (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]))
{
Py_XDECREF(%(_zout)s);
%(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1, %(_indices)s->dimensions, %(_g)s->descr->type_num);
%(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1, PyArray_DIMS(%(_indices)s), PyArray_DESCR(%(_g)s)->type_num);
}
{ //makes it compile even though labels jump over variable definitions.
npy_intp nnz = %(_indices)s->dimensions[0];
npy_intp N = %(_indptr)s->dimensions[0]-1; //TODO: error checking with this
npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];
npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1; //TODO: error checking with this
npy_intp Sindices = %(_indices)s->strides[0]/%(_indices)s->descr->elsize;
npy_intp Sindptr = %(_indptr)s->strides[0]/%(_indptr)s->descr->elsize;
npy_intp Sindices = %(_indices)s->strides[0]/PyArray_DESCR(%(_indices)s)->elsize;
npy_intp Sindptr = %(_indptr)s->strides[0]/PyArray_DESCR(%(_indptr)s)->elsize;
const npy_intp Sd1 = %(_d)s->strides[1]/%(_d)s->descr->elsize;
const npy_intp Sg1 = %(_g)s->strides[1]/%(_g)s->descr->elsize;
const npy_intp Sd1 = %(_d)s->strides[1]/PyArray_DESCR(%(_d)s)->elsize;
const npy_intp Sg1 = %(_g)s->strides[1]/PyArray_DESCR(%(_g)s)->elsize;
const npy_intp K = %(_d)s->dimensions[1];
const npy_intp K = PyArray_DIMS(%(_d)s)[1];
const npy_int32 * __restrict__ indptr = (npy_int32 *)%(_indptr)s->data;
const npy_int32 * __restrict__ indices = (npy_int32 *)%(_indices)s->data;
......@@ -3047,7 +3047,7 @@ class StructuredDotGradCSC(gof.Op):
{
// extract j-th row of dense matrix
const dtype_%(_d)s* __restrict__ d_row = (dtype_%(_d)s*)(%(_d)s->data + %(_d)s->strides[0] * j);
if(j >= %(_d)s->dimensions[0]) {PyErr_SetString(PyExc_NotImplementedError, "G"); %(fail)s;}
if(j >= PyArray_DIMS(%(_d)s)[0]) {PyErr_SetString(PyExc_NotImplementedError, "G"); %(fail)s;}
// for each non-null value in the sparse column
for (npy_int32 i_idx = indptr[j * Sindptr]; i_idx < indptr[(j+1) * Sindptr]; ++i_idx)
......@@ -3062,7 +3062,7 @@ class StructuredDotGradCSC(gof.Op):
// make sure that row index is not bigger than actual number of rows
// Note: wouldn't the above operation fail if that were the case ?
// when would this ever be true anyway ?
if (i >= %(_g)s->dimensions[0])
if (i >= PyArray_DIMS(%(_g)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "H"); %(fail)s;}
// perform dot product of dense and sparse rows
......@@ -3142,39 +3142,39 @@ class StructuredDotGradCSR(gof.Op):
'g_ab')
return """
if (%(_d)s->nd != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(d) != 2"); %(fail)s;}
if (%(_g)s->nd != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(g) != 2"); %(fail)s;}
if (%(_indices)s->nd != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indices) != 1"); %(fail)s;}
if (%(_indptr)s->nd != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indptr) != 1"); %(fail)s;}
if (PyArray_NDIM(%(_d)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(d) != 2"); %(fail)s;}
if (PyArray_NDIM(%(_g)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(g) != 2"); %(fail)s;}
if (PyArray_NDIM(%(_indices)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indices) != 1"); %(fail)s;}
if (PyArray_NDIM(%(_indptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indptr) != 1"); %(fail)s;}
if( %(_indices)s->descr->type_num != PyArray_INT32) {
if( PyArray_DESCR(%(_indices)s)->type_num != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "C"); %(fail)s;}
if( %(_indptr)s->descr->type_num != PyArray_INT32)
if( PyArray_DESCR(%(_indptr)s)->type_num != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "D"); %(fail)s;}
if( %(_d)s->dimensions[1] != %(_g)s->dimensions[1])
if( PyArray_DIMS(%(_d)s)[1] != PyArray_DIMS(%(_g)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "d and g have different numbers of columns"); %(fail)s;}
if (!%(_zout)s
|| (%(_zout)s->dimensions[0] != %(_indices)s->dimensions[0]))
|| (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]))
{
Py_XDECREF(%(_zout)s);
%(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1, %(_indices)s->dimensions, %(_g)s->descr->type_num);
%(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1, PyArray_DIMS(%(_indices)s), PyArray_DESCR(%(_g)s)->type_num);
}
{ //makes it compile even though labels jump over variable definitions.
npy_intp nnz = %(_indices)s->dimensions[0];
npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];
// extract number of rows
npy_intp N = %(_indptr)s->dimensions[0]-1; //TODO: error checking with this
npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1; //TODO: error checking with this
npy_intp Sindices = %(_indices)s->strides[0]/%(_indices)s->descr->elsize;
npy_intp Sindptr = %(_indptr)s->strides[0]/%(_indptr)s->descr->elsize;
npy_intp Sindices = %(_indices)s->strides[0]/PyArray_DESCR(%(_indices)s)->elsize;
npy_intp Sindptr = %(_indptr)s->strides[0]/PyArray_DESCR(%(_indptr)s)->elsize;
const npy_intp Sd1 = %(_d)s->strides[1]/%(_d)s->descr->elsize;
const npy_intp Sg1 = %(_g)s->strides[1]/%(_g)s->descr->elsize;
const npy_intp Sd1 = %(_d)s->strides[1]/PyArray_DESCR(%(_d)s)->elsize;
const npy_intp Sg1 = %(_g)s->strides[1]/PyArray_DESCR(%(_g)s)->elsize;
const npy_intp K = %(_d)s->dimensions[1];
const npy_intp K = PyArray_DIMS(%(_d)s)[1];
const npy_int32 * __restrict__ indptr = (npy_int32 *)%(_indptr)s->data;
const npy_int32 * __restrict__ indices = (npy_int32 *)%(_indices)s->data;
......@@ -3190,7 +3190,7 @@ class StructuredDotGradCSR(gof.Op):
// extract j-th row of dense matrix
const dtype_%(_d)s* __restrict__ d_row = (dtype_%(_d)s*)(%(_d)s->data + %(_d)s->strides[0] * j);
if(j >= %(_d)s->dimensions[0]) {PyErr_SetString(PyExc_NotImplementedError, "G"); %(fail)s;}
if(j >= PyArray_DIMS(%(_d)s)[0]) {PyErr_SetString(PyExc_NotImplementedError, "G"); %(fail)s;}
// extract corresponding row in gradient
const dtype_%(_g)s* __restrict__ g_row = (dtype_%(_g)s*)(%(_g)s->data + %(_g)s->strides[0] * i);
......@@ -3199,7 +3199,7 @@ class StructuredDotGradCSR(gof.Op):
// make sure that row index is not bigger than actual number of rows
// Note: wouldn't the above operation fail if that were the case ?
// when would this ever be true anyway ?
if (i >= %(_g)s->dimensions[0])
if (i >= PyArray_DIMS(%(_g)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "H"); %(fail)s;}
// perform dot product of dense and sparse rows
......
差异被折叠。
......@@ -1374,7 +1374,7 @@ def test_sparse_shared_memory():
theano.In(y, mutable=True)], z, mode='FAST_RUN')
def f_(x, y, m1=m1, m2=m2):
return numpy.dot(x * 3, m1) + numpy.dot(y * 2, m2)
return ((x * 3) * m1) + ((y * 2) * m2)
assert SparseType.may_share_memory(a, a) # This is trivial
result = f(a, a)
......
......@@ -1972,7 +1972,7 @@ class ScalarFromTensor(Op):
z, = outputs
fail = sub['fail']
return """
%(z)s = ((dtype_%(x)s*)(%(x)s->data))[0];
%(z)s = ((dtype_%(x)s*)(PyArray_DATA(%(x)s)))[0];
""" % locals()
def c_code_cache_version(self):
......@@ -3087,14 +3087,14 @@ class Alloc(gof.Op):
# Initialize shape
for i, shp_i in enumerate(inp[1:]):
code += """
shape[%(i)s] = ((dtype_%(shp_i)s*) %(shp_i)s->data)[0];
shape[%(i)s] = ((dtype_%(shp_i)s*) PyArray_DATA(%(shp_i)s))[0];
""" % dict(i=i, shp_i=shp_i)
code += """
int need_new_out = (NULL == %(zz)s);
for (int i = 0; i < %(ndim)s; i++)
need_new_out = (need_new_out
|| (%(zz)s->dimensions[i] != shape[i]));
|| (PyArray_DIMS(%(zz)s)[i] != shape[i]));
if (need_new_out)
{
......@@ -4042,14 +4042,14 @@ class Subtensor(Op):
//TODO: give this Op a second output so that this view can be cached
//TODO: alternatively, fix the memory leak on failure
Py_INCREF(%(x)s->descr);
Py_INCREF(PyArray_DESCR(%(x)s));
PyArrayObject * xview = (PyArrayObject*)PyArray_NewFromDescr(
&PyArray_Type,
%(x)s->descr,
PyArray_DESCR(%(x)s),
%(view_ndim)s,
%(x)s->dimensions,
%(x)s->strides,
%(x)s->data,
PyArray_DIMS(%(x)s),
PyArray_STRIDES(%(x)s),
PyArray_DATA(%(x)s),
%(x)s->flags,
NULL);
if (!xview)
......@@ -4057,22 +4057,22 @@ class Subtensor(Op):
%(fail)s;
}
if ((xview->dimensions == %(x)s->dimensions)
&& (%(x)s->dimensions != NULL))
if ((PyArray_DIMS(xview) == PyArray_DIMS(%(x)s))
&& (PyArray_DIMS(%(x)s) != NULL))
{
PyErr_Format(PyExc_ValueError, "x and xview"
"(with %%d dims) have the same dimensions"
" pointers: %%p and %%p",
%(x)s->nd, xview->dimensions, %(x)s->dimensions);
PyArray_NDIM(%(x)s), PyArray_DIMS(xview), PyArray_DIMS(%(x)s));
%(fail)s;
}
if (xview->strides == %(x)s->strides
&& (%(x)s->dimensions != NULL))
if (PyArray_STRIDES(xview) == PyArray_STRIDES(%(x)s)
&& (PyArray_DIMS(%(x)s) != NULL))
{
PyErr_Format(PyExc_ValueError, "x and xview"
"(with %%d dims) have the same strides"
" pointers: %%p and %%p",
%(x)s->nd, xview->strides, %(x)s->strides);
PyArray_NDIM(%(x)s), PyArray_STRIDES(xview), PyArray_STRIDES(%(x)s));
%(fail)s;
}
......@@ -4080,7 +4080,7 @@ class Subtensor(Op):
{
if (is_slice[outer_ii])
{
npy_intp length = %(x)s->dimensions[outer_ii];
npy_intp length = PyArray_DIMS(%(x)s)[outer_ii];
npy_intp slicelength;
npy_intp start = subtensor_spec[spec_pos+0];
npy_intp stop = subtensor_spec[spec_pos+1];
......@@ -4144,9 +4144,9 @@ class Subtensor(Op):
}
assert (slicelength <= length);
xview->data += %(x)s->strides[outer_ii] * start;
xview->dimensions[inner_ii] = slicelength;
xview->strides[inner_ii] = %(x)s->strides[outer_ii] * step;
xview->data += PyArray_STRIDES(%(x)s)[outer_ii] * start;
PyArray_DIMS(xview)[inner_ii] = slicelength;
PyArray_STRIDES(xview)[inner_ii] = PyArray_STRIDES(%(x)s)[outer_ii] * step;
inner_ii += 1;
spec_pos += 3;
......@@ -4154,12 +4154,12 @@ class Subtensor(Op):
else // tuple coord `outer_ii` is an int
{
int idx = subtensor_spec[spec_pos];
if (idx < 0) idx += %(x)s->dimensions[outer_ii];
if (idx < 0) idx += PyArray_DIMS(%(x)s)[outer_ii];
if (idx >= 0)
{
if (idx < %(x)s->dimensions[outer_ii])
if (idx < PyArray_DIMS(%(x)s)[outer_ii])
{
xview->data += %(x)s->strides[outer_ii] * idx;
xview->data += PyArray_STRIDES(%(x)s)[outer_ii] * idx;
}
else
{
......@@ -4176,16 +4176,16 @@ class Subtensor(Op):
spec_pos += 1;
}
}
assert (inner_ii <= xview->nd);
while (inner_ii < xview->nd)
assert (inner_ii <= PyArray_NDIM(xview));
while (inner_ii < PyArray_NDIM(xview))
{
assert (outer_ii < %(x)s->nd);
xview->dimensions[inner_ii] = %(x)s->dimensions[outer_ii];
xview->strides[inner_ii] = %(x)s->strides[outer_ii];
assert (outer_ii < PyArray_NDIM(%(x)s));
PyArray_DIMS(xview)[inner_ii] = PyArray_DIMS(%(x)s)[outer_ii];
PyArray_STRIDES(xview)[inner_ii] = PyArray_STRIDES(%(x)s)[outer_ii];
inner_ii += 1;
outer_ii += 1;
}
PyArray_UpdateFlags(xview, NPY_C_CONTIGUOUS|NPY_F_CONTIGUOUS);
                PyArray_UpdateFlags(xview, NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_F_CONTIGUOUS);
""" % locals()
# print rval
return rval
......@@ -4203,7 +4203,7 @@ class Subtensor(Op):
part1 = """
if (%(z)s) Py_DECREF(%(z)s);
Py_INCREF(py_%(x)s);
xview->base = py_%(x)s;
PyArray_BASE(xview) = py_%(x)s;
assert(py_%(x)s == (PyObject*)%(x)s);
%(z)s = xview;
""" % locals()
......@@ -4504,7 +4504,7 @@ class IncSubtensor(Op):
{
if (%(z)s) Py_DECREF(%(z)s);
%(z)s = (PyArrayObject*)PyArray_FromAny(py_%(x)s, NULL, 0, 0,
NPY_ENSURECOPY, NULL);
NPY_ARRAY_ENSURECOPY, NULL);
}
""" % locals()
......@@ -4529,7 +4529,7 @@ class IncSubtensor(Op):
if (add_rval)
{
assert (PyArray_Check((PyObject*)add_rval));
assert (add_rval->data == xview->data);
assert (PyArray_DATA(add_rval) == PyArray_DATA(xview));
Py_DECREF(add_rval);
}
else
......@@ -5373,7 +5373,7 @@ class Reshape(Op):
new_ndim = self.ndim
fail = sub['fail']
return """
assert (%(shp)s->nd == 1);
assert (PyArray_NDIM(%(shp)s) == 1);
npy_intp new_dims[%(new_ndim)s];
PyArray_Dims newshape;
newshape.ptr = new_dims;
......@@ -5385,7 +5385,7 @@ class Reshape(Op):
// -- will err if this will downcast. This could happen if the
// -- user pass an int64 dtype, but npy_intp endup being int32.
new_dims[ii] = ((dtype_%(shp)s*)(
%(shp)s->data + ii * %(shp)s->strides[0]))[0];
PyArray_DATA(%(shp)s) + ii * PyArray_STRIDES(%(shp)s)[0]))[0];
}
Py_XDECREF(%(z)s);
%(z)s = (PyArrayObject *) PyArray_Newshape(%(x)s, &newshape,
......
差异被折叠。
差异被折叠。
......@@ -794,41 +794,41 @@ def ____gemm_code(check_ab, a_init, b_init):
return """
const char * error_string = NULL;
int type_num = _x->descr->type_num;
int type_size = _x->descr->elsize; // in bytes
int type_num = PyArray_DESCR(_x)->type_num;
int type_size = PyArray_DESCR(_x)->elsize; // in bytes
npy_intp* Nx = _x->dimensions;
npy_intp* Ny = _y->dimensions;
npy_intp* Nz = _z->dimensions;
npy_intp* Nx = PyArray_DIMS(_x);
npy_intp* Ny = PyArray_DIMS(_y);
npy_intp* Nz = PyArray_DIMS(_z);
npy_intp* Sx = _x->strides;
npy_intp* Sy = _y->strides;
npy_intp* Sz = _z->strides;
npy_intp* Sx = PyArray_STRIDES(_x);
npy_intp* Sy = PyArray_STRIDES(_y);
npy_intp* Sz = PyArray_STRIDES(_z);
size_t sx_0, sx_1, sy_0, sy_1, sz_0, sz_1;
int unit = 0;
if (_x->nd != 2) goto _dot_execute_fallback;
if (_y->nd != 2) goto _dot_execute_fallback;
if (_z->nd != 2) goto _dot_execute_fallback;
if (PyArray_NDIM(_x) != 2) goto _dot_execute_fallback;
if (PyArray_NDIM(_y) != 2) goto _dot_execute_fallback;
if (PyArray_NDIM(_z) != 2) goto _dot_execute_fallback;
%(check_ab)s
if ((_x->descr->type_num != PyArray_DOUBLE)
&& (_x->descr->type_num != PyArray_FLOAT))
if ((PyArray_DESCR(_x)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(_x)->type_num != NPY_FLOAT))
goto _dot_execute_fallback;
if ((_y->descr->type_num != PyArray_DOUBLE)
&& (_y->descr->type_num != PyArray_FLOAT))
if ((PyArray_DESCR(_y)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(_y)->type_num != NPY_FLOAT))
goto _dot_execute_fallback;
if ((_y->descr->type_num != PyArray_DOUBLE)
&& (_y->descr->type_num != PyArray_FLOAT))
if ((PyArray_DESCR(_y)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(_y)->type_num != NPY_FLOAT))
goto _dot_execute_fallback;
if ((_x->descr->type_num != _y->descr->type_num)
||(_x->descr->type_num != _z->descr->type_num))
if ((PyArray_DESCR(_x)->type_num != PyArray_DESCR(_y)->type_num)
||(PyArray_DESCR(_x)->type_num != PyArray_DESCR(_z)->type_num))
goto _dot_execute_fallback;
......@@ -863,7 +863,7 @@ def ____gemm_code(check_ab, a_init, b_init):
switch (type_num)
{
case PyArray_FLOAT:
case NPY_FLOAT:
{
#define REAL float
float a = %(a_init)s;
......@@ -888,7 +888,7 @@ def ____gemm_code(check_ab, a_init, b_init):
#undef REAL
}
break;
case PyArray_DOUBLE:
case NPY_DOUBLE:
{
#define REAL double
double a = %(a_init)s;
......
......@@ -270,7 +270,7 @@ class DimShuffle(Op):
nd_in = len(self.input_broadcastable)
nd_out = len(self.new_order)
check_input_nd = [('if (%(input)s->nd != ' + str(nd_in) + ')'
check_input_nd = [('if (PyArray_NDIM(%(input)s) != ' + str(nd_in) + ')'
'{PyErr_SetString(PyExc_NotImplementedError, "input nd"); %(fail)s;}')]
clear_output = ['if (%(res)s) {Py_XDECREF(%(res)s);}']
......@@ -282,13 +282,13 @@ class DimShuffle(Op):
'{ PyArrayObject * %(basename)s = %(input)s', 'Py_INCREF((PyObject*)%(basename)s)']
else:
get_base = [('{ PyArrayObject * %(basename)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(input)s, NULL,'
'0, 0, NPY_ALIGNED|NPY_ENSURECOPY, NULL)')]
'0, 0, NPY_ARRAY_ALIGNED|NPY_ARRAY_ENSURECOPY, NULL)')]
shape_statements = ['npy_intp dimensions[%i]' % nd_out]
for i, o in enumerate(self.new_order):
if o != 'x':
shape_statements += [('dimensions[' + str(
i) + '] = %(basename)s->dimensions[' + str(o) + ']')]
i) + '] = PyArray_DIMS(%(basename)s)[' + str(o) + ']')]
else:
shape_statements += [('dimensions[' + str(i) + '] = 1')]
......@@ -298,7 +298,7 @@ class DimShuffle(Op):
for i, o in enumerate(self.new_order):
if o != 'x':
strides_statements += [('strides[' + str(i)
+ '] = %(basename)s->strides[' + str(o) + ']')]
+ '] = PyArray_STRIDES(%(basename)s)[' + str(o) + ']')]
else:
strides_statements += [('strides[' + str(i) + '] = 0')]
......@@ -311,7 +311,7 @@ class DimShuffle(Op):
str(nd_out) +
'-1] == 0) strides[' +
str(nd_out) +
'-1] = %(basename)s->descr->elsize'
'-1] = PyArray_DESCR(%(basename)s)->elsize'
)
for i in xrange(nd_out - 2, -1, -1):
strides_statements.append(
......@@ -326,14 +326,20 @@ class DimShuffle(Op):
('%(res)s = (PyArrayObject*)PyArray_New(&PyArray_Type, '
'' + str(nd_out) + ', dimensions, '
'PyArray_TYPE(%(basename)s), strides, '
'%(basename)s->data, PyArray_ITEMSIZE(%(basename)s), '
'PyArray_DATA(%(basename)s), PyArray_ITEMSIZE(%(basename)s), '
#borrow only the writable flag from the base
# the NPY_OWNDATA flag will default to 0.
'(NPY_WRITEABLE*PyArray_ISWRITEABLE(%(basename)s)), NULL)'),
'(NPY_ARRAY_WRITEABLE*PyArray_ISWRITEABLE(%(basename)s)), NULL)'),
#recalculate flags: CONTIGUOUS, FORTRAN, ALIGNED
'PyArray_UpdateFlags(%(res)s, NPY_UPDATE_ALL)',
'PyArray_UpdateFlags(%(res)s, NPY_ARRAY_UPDATE_ALL)',
#we are making a view in both inplace and non-inplace cases
'%(res)s->base = (PyObject*)%(basename)s',
"""
#if NPY_VERSION <= 0x01000009
PyArray_BASE(%(res)s) = (PyObject*)%(basename)s;
#else
PyArray_SetBaseObject(%(res)s, (PyObject*)%(basename)s);
#endif
"""
'}']
full_code = statements(check_input_nd
......@@ -1341,7 +1347,7 @@ class CAReduce(Op):
pattern_ = str(pattern)[1:-1]
decl += """int tosum[]={%(pattern_)s};""" % locals()
alloc += """
for(int i=0;i<%(iname)s->nd;i++){
for(int i=0;i<PyArray_NDIM(%(iname)s);i++){
if(PyArray_DIMS(%(iname)s)[i]==0 && tosum[i]){
PyErr_Format(PyExc_ValueError,
"Input of CAReduce{%(scal_name)s} has zero-size on axis %%d",i);
......
......@@ -47,7 +47,7 @@ def make_checks(loop_orders, dtypes, sub):
# tensor is as expected.
min_nd = max(nonx) + 1
init += """
if (%(var)s->nd < %(min_nd)s) {
if (PyArray_NDIM(%(var)s) < %(min_nd)s) {
PyErr_SetString(PyExc_ValueError, "Not enough dimensions on input.");
%%(fail)s
}
......@@ -67,8 +67,8 @@ def make_checks(loop_orders, dtypes, sub):
# jump = stride - adjust
jump = "(%s) - (%s)" % ("%(var)s_stride%(index)s" % locals(), adjust)
init += """
%(var)s_n%(index)s = %(var)s->dimensions[%(index)s];
%(var)s_stride%(index)s = %(var)s->strides[%(index)s] / sizeof(%(dtype)s);
%(var)s_n%(index)s = PyArray_DIMS(%(var)s)[%(index)s];
%(var)s_stride%(index)s = PyArray_STRIDES(%(var)s)[%(index)s] / sizeof(%(dtype)s);
%(var)s_jump%(index)s_%(j)s = %(jump)s;
//printf("%(var)s_jump%(index)s_%(j)s is:");
//std::cout << %(var)s_jump%(index)s_%(j)s << std::endl;
......@@ -152,7 +152,7 @@ def make_alloc(loop_orders, dtype, sub):
PyArray_Dims new_dims;
new_dims.len = %(nd)s;
new_dims.ptr = dims;
PyObject* success = PyArray_Resize(%(olv)s, &new_dims, 0, PyArray_CORDER);
PyObject* success = PyArray_Resize(%(olv)s, &new_dims, 0, NPY_CORDER);
if (!success) {
// If we can't resize the ndarray we have we can allocate a new one.
PyErr_Clear();
......@@ -215,11 +215,11 @@ def make_loop(loop_orders, dtypes, loop_tasks, sub):
for j, index in enumerate(loop_order):
if index != 'x':
preloops.setdefault(j, "")
preloops[j] += ("%%(lv%(i)s)s_iter = (%(dtype)s*)(%%(lv%(i)s)s->data);\n" % locals()) % sub
preloops[j] += ("%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\n" % locals()) % sub
break
else: # all broadcastable
preloops.setdefault(0, "")
preloops[0] += ("%%(lv%(i)s)s_iter = (%(dtype)s*)(%%(lv%(i)s)s->data);\n" % locals()) % sub
preloops[0] += ("%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\n" % locals()) % sub
if len(loop_tasks) == 1:
s = preloops.get(0, "")
......@@ -263,7 +263,7 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub):
for i, index in enumerate(init_loop_orders[olv_index]):
if index != 'x':
order_loops += """
%(ovar)s_loops_it->first = abs(%(ovar)s->strides[%(index)i]);
%(ovar)s_loops_it->first = abs(PyArray_STRIDES(%(ovar)s)[%(index)i]);
""" % locals()
else:
# Stride is 0 when dimension is broadcastable
......@@ -375,7 +375,7 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub):
declare_iter = ""
for i, dtype in enumerate(dtypes):
var = sub["lv%i" % i]
declare_iter += "%(var)s_iter = (%(dtype)s*)(%(var)s->data);\n" % locals()
declare_iter += "%(var)s_iter = (%(dtype)s*)(PyArray_DATA(%(var)s));\n" % locals()
loop = inner_task
for i in reversed(range(nnested)):
......
......@@ -85,10 +85,6 @@ class ConvGrad3D(theano.Op):
output_storage[0][0] = dCdW
def c_compile_args(self):
flags = ['-Werror']
return flags
def c_code(self, node, nodename, inputs, outputs, sub):
V, d, WShape, dCdH = inputs
fail = sub['fail']
......@@ -101,40 +97,40 @@ class ConvGrad3D(theano.Op):
//printf("\t\t\t\tConvGradW3D c code\\n");
//Check dimensionality of inputs
if (%(dCdH)s->nd != 5)
if (PyArray_NDIM(%(dCdH)s) != 5)
{
PyErr_Format(PyExc_ValueError, "ConvGrad3D: dCdH must be a 5 dimensional tensor");
%(fail)s
}
if (%(V)s->nd != 5)
if (PyArray_NDIM(%(V)s) != 5)
{
PyErr_Format(PyExc_ValueError, "ConvGrad3D: V must be a 5 dimensional tensor");
%(fail)s
}
if (%(WShape)s->nd != 1)
if (PyArray_NDIM(%(WShape)s) != 1)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: WShape must be a vector.");
%(fail)s
}
if (%(d)s->nd != 1)
if (PyArray_NDIM(%(d)s) != 1)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: d must be a vector.");
%(fail)s
}
if (%(d)s->dimensions[0] != 3)
if (PyArray_DIMS(%(d)s)[0] != 3)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: 3 stride length arguments expected (row, col, time) but %%li were given", (long)%(d)s->dimensions[0]);
PyErr_Format(PyExc_ValueError,"ConvGrad3D: 3 stride length arguments expected (row, col, time) but %%li were given", (long)PyArray_DIMS(%(d)s)[0]);
%(fail)s
}
{ //extra scope so that fail will not jump over declarations
//Read and check sizes of inputs
const int batchSize = %(V)s->dimensions[0];
if (%(WShape)s->dimensions[0] != 5)
const int batchSize = PyArray_DIMS(%(V)s)[0];
if (PyArray_DIMS(%(WShape)s)[0] != 5)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: WShape must specify a 5D shape");
%(fail)s
......@@ -146,9 +142,9 @@ class ConvGrad3D(theano.Op):
}
{ //extra scope so that fail will not jump over declarations
dtype_%(WShape)s * WShape = (dtype_%(WShape)s *) %(WShape)s->data;
dtype_%(WShape)s * WShape = (dtype_%(WShape)s *) PyArray_DATA(%(WShape)s);
const int outputChannels = WShape[0];
const int inputChannels = %(V)s->dimensions[4];
const int inputChannels = PyArray_DIMS(%(V)s)[4];
if (WShape[4] != inputChannels)
{
PyErr_Format(PyExc_ValueError, "ConvGrad3D: W operates on a %%i channel image but the image has %%i channels",(int) WShape[1],inputChannels);
......@@ -159,9 +155,9 @@ class ConvGrad3D(theano.Op):
const int filterHeight = WShape[1];
const int filterWidth = WShape[2];
const int filterDur = WShape[3];
const int vidHeight = %(V)s->dimensions[1];
const int vidWidth = %(V)s->dimensions[2];
const int vidDur = %(V)s->dimensions[3];
const int vidHeight = PyArray_DIMS(%(V)s)[1];
const int vidWidth = PyArray_DIMS(%(V)s)[2];
const int vidDur = PyArray_DIMS(%(V)s)[3];
if (vidHeight < filterHeight)
{
PyErr_Format(PyExc_ValueError, "ConvGrad3D: W has a height of %%i but V is only %%i pixels tall", filterHeight, vidHeight);
......@@ -197,13 +193,13 @@ class ConvGrad3D(theano.Op):
if (%(dCdH)s->dimensions[0] != batchSize ||
%(dCdH)s->dimensions[4] != outputChannels ||
%(dCdH)s->dimensions[1] != outputHeight ||
%(dCdH)s->dimensions[2] != outputWidth ||
%(dCdH)s->dimensions[3] != outputDur)
if (PyArray_DIMS(%(dCdH)s)[0] != batchSize ||
PyArray_DIMS(%(dCdH)s)[4] != outputChannels ||
PyArray_DIMS(%(dCdH)s)[1] != outputHeight ||
PyArray_DIMS(%(dCdH)s)[2] != outputWidth ||
PyArray_DIMS(%(dCdH)s)[3] != outputDur)
{
PyErr_Format(PyExc_ValueError, "dCdH is the wrong size, expected (%%i,%%i,%%i,%%i,%%i), got (%%li,%%li,%%li,%%li,%%li)", batchSize, outputHeight, outputWidth, outputDur, outputChannels, (long)%(dCdH)s->dimensions[0], (long)%(dCdH)s->dimensions[1], (long)%(dCdH)s->dimensions[2], (long)%(dCdH)s->dimensions[3], (long)%(dCdH)s->dimensions[4]);
PyErr_Format(PyExc_ValueError, "dCdH is the wrong size, expected (%%i,%%i,%%i,%%i,%%i), got (%%li,%%li,%%li,%%li,%%li)", batchSize, outputHeight, outputWidth, outputDur, outputChannels, (long)PyArray_DIMS(%(dCdH)s)[0], (long)PyArray_DIMS(%(dCdH)s)[1], (long)PyArray_DIMS(%(dCdH)s)[2], (long)PyArray_DIMS(%(dCdH)s)[3], (long)PyArray_DIMS(%(dCdH)s)[4]);
%(fail)s
}
{ // extra scope for fail
......@@ -215,13 +211,13 @@ class ConvGrad3D(theano.Op):
dims[2] = filterWidth;
dims[3] = filterDur;
if(!(%(dCdW)s) || %(dCdW)s->dimensions[0]!=dims[0] ||
%(dCdW)s->dimensions[1]!=dims[1] ||
%(dCdW)s->dimensions[2]!=dims[2] ||
%(dCdW)s->dimensions[3]!=dims[3] ||
%(dCdW)s->dimensions[4]!=dims[4] ){
if(!(%(dCdW)s) || PyArray_DIMS(%(dCdW)s)[0]!=dims[0] ||
PyArray_DIMS(%(dCdW)s)[1]!=dims[1] ||
PyArray_DIMS(%(dCdW)s)[2]!=dims[2] ||
PyArray_DIMS(%(dCdW)s)[3]!=dims[3] ||
PyArray_DIMS(%(dCdW)s)[4]!=dims[4] ){
Py_XDECREF(%(dCdW)s);
%(dCdW)s = (PyArrayObject *) PyArray_SimpleNew(5, dims, %(V)s->descr->type_num);
%(dCdW)s = (PyArrayObject *) PyArray_SimpleNew(5, dims, PyArray_DESCR(%(V)s)->type_num);
if (!(%(dCdW)s)) {
PyErr_Format(PyExc_MemoryError,"ConvGrad3D: Could not allocate dCdW");
......@@ -230,12 +226,12 @@ class ConvGrad3D(theano.Op):
}
{ //extra scope so fail works
#define ELEM5(x, i,j,k,l,m) * ( dtype_ ## x *) ( x->data + (i)*x->strides[0]+(j)*x->strides[1]+(k)*x->strides[2]+(l)*x->strides[3]+(m)*x->strides[4] )
#define ELEM5(x, i,j,k,l,m) * ( dtype_ ## x *) ( PyArray_DATA(x) + (i)*PyArray_STRIDES(x)[0]+(j)*PyArray_STRIDES(x)[1]+(k)*PyArray_STRIDES(x)[2]+(l)*PyArray_STRIDES(x)[3]+(m)*PyArray_STRIDES(x)[4] )
#define ELEM_AT(x, i) * ( dtype_ ## x *) ( x->data + (i) )
#define ELEM_AT(x, i) * ( dtype_ ## x *) ( PyArray_BYTES(x) + (i) )
const int dhs3 = %(dCdH)s->strides[3];
const int dtvs3 = dt * %(V)s->strides[3];
const int dhs3 = PyArray_STRIDES(%(dCdH)s)[3];
const int dtvs3 = dt * PyArray_STRIDES(%(V)s)[3];
// Compute dCdW
//TODO-- see if this can be made faster by using ELEM_AT instead of ELEM5
......@@ -253,8 +249,8 @@ class ConvGrad3D(theano.Op):
for (int i = 0; i < batchSize; i++) {
for (int p = 0; p < outputHeight; p++) {
for (int q = 0; q < outputWidth; q++) {
int Hpos = i * %(dCdH)s->strides[0] + j * %(dCdH)s->strides[4] + p * %(dCdH)s->strides[1] + q * %(dCdH)s->strides[2] ;
int Vpos = i * %(V)s->strides[0] + z * %(V)s->strides[4] + (dr * p+k) * %(V)s->strides[1] + (dc*q+l) * %(V)s->strides[2] + m * %(V)s->strides[3];
int Hpos = i * PyArray_STRIDES(%(dCdH)s)[0] + j * PyArray_STRIDES(%(dCdH)s)[4] + p * PyArray_STRIDES(%(dCdH)s)[1] + q * PyArray_STRIDES(%(dCdH)s)[2] ;
int Vpos = i * PyArray_STRIDES(%(V)s)[0] + z * PyArray_STRIDES(%(V)s)[4] + (dr * p+k) * PyArray_STRIDES(%(V)s)[1] + (dc*q+l) * PyArray_STRIDES(%(V)s)[2] + m * PyArray_STRIDES(%(V)s)[3];
for (int r = 0; r < outputDur; r++) {
writePos += ELEM5(%(dCdH)s,i,p,q,r,j) * ELEM5(%(V)s,i,dr*p+k,dc*q+l,dt*r+m,z);
......
差异被折叠。
差异被折叠。
......@@ -619,15 +619,15 @@ class Shape_i(T.Op):
if isinstance(node.inputs[0].type, T.TensorType):
return """
if(!%(out)s)
%(out)s=(PyArrayObject*)PyArray_ZEROS(0, NULL, PyArray_INT64, 0);
((npy_int64*)PyArray_DATA(%(out)s))[0]=%(x)s->dimensions[%(i)s];
%(out)s=(PyArrayObject*)PyArray_ZEROS(0, NULL, NPY_INT64, 0);
((npy_int64*)PyArray_DATA(%(out)s))[0]=PyArray_DIMS(%(x)s)[%(i)s];
""" % locals()
elif node.inputs[0].type.__class__.__name__ == "CudaNdarrayType":
#Don't want to import cuda stuff here.
return """
if(!%(out)s)
%(out)s=(PyArrayObject*)PyArray_ZEROS(0, NULL, PyArray_INT64, 0);
%(out)s=(PyArrayObject*)PyArray_ZEROS(0, NULL, NPY_INT64, 0);
((npy_int64*)PyArray_DATA(%(out)s))[0]=
CudaNdarray_HOST_DIMS(%(x)s)[%(i)s];
""" % locals()
......
......@@ -176,13 +176,13 @@ class DownsampleFactorMax(Op):
int x_shp0_usable;
int x_shp1_usable;
int z_shp0, z_shp1;
if(%(x)s->nd!=4)
if(PyArray_NDIM(%(x)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "x must be a 4d ndarray");
%(fail)s;
}
z_shp0 = %(x)s->dimensions[2] / %(ds0)s;
z_shp1 = %(x)s->dimensions[3] / %(ds1)s;
z_shp0 = PyArray_DIMS(%(x)s)[2] / %(ds0)s;
z_shp1 = PyArray_DIMS(%(x)s)[3] / %(ds1)s;
if (%(ignore_border)s)
{
x_shp0_usable = z_shp0 * %(ds0)s;
......@@ -190,23 +190,23 @@ class DownsampleFactorMax(Op):
}
else
{
z_shp0 += (%(x)s->dimensions[2] %% %(ds0)s) ? 1 : 0;
z_shp1 += (%(x)s->dimensions[3] %% %(ds1)s) ? 1 : 0;
x_shp0_usable = %(x)s->dimensions[2];
x_shp1_usable = %(x)s->dimensions[3];
z_shp0 += (PyArray_DIMS(%(x)s)[2] %% %(ds0)s) ? 1 : 0;
z_shp1 += (PyArray_DIMS(%(x)s)[3] %% %(ds1)s) ? 1 : 0;
x_shp0_usable = PyArray_DIMS(%(x)s)[2];
x_shp1_usable = PyArray_DIMS(%(x)s)[3];
}
if ((!%(z)s)
|| *PyArray_DIMS(%(z)s)!=4
||(%(z)s->dimensions[0] != %(x)s->dimensions[0])
||(%(z)s->dimensions[1] != %(x)s->dimensions[1])
||(%(z)s->dimensions[2] != z_shp0)
||(%(z)s->dimensions[3] != z_shp1)
||(PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(x)s)[0])
||(PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(x)s)[1])
||(PyArray_DIMS(%(z)s)[2] != z_shp0)
||(PyArray_DIMS(%(z)s)[3] != z_shp1)
)
{
if (%(z)s) Py_XDECREF(%(z)s);
npy_intp dims[4] = {0,0,0,0};
dims[0]=%(x)s->dimensions[0];
dims[1]=%(x)s->dimensions[1];
dims[0]=PyArray_DIMS(%(x)s)[0];
dims[1]=PyArray_DIMS(%(x)s)[1];
dims[2]=z_shp0;
dims[3]=z_shp1;
%(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0); //TODO: zeros not necessary
......@@ -214,8 +214,8 @@ class DownsampleFactorMax(Op):
if (z_shp0 && z_shp1)
{
for(int b=0;b<%(x)s->dimensions[0];b++){
for(int k=0;k<%(x)s->dimensions[1];k++){
for(int b=0;b<PyArray_DIMS(%(x)s)[0];b++){
for(int k=0;k<PyArray_DIMS(%(x)s)[1];k++){
int mini_i = 0;
int zi = 0;
for(int i=0;i< x_shp0_usable; i++){
......@@ -306,23 +306,23 @@ class DownsampleFactorMaxGrad(Op):
PyErr_SetString(PyExc_ValueError, "input types must all match");
%(fail)s;
}
if(%(x)s->nd!=4)
if(PyArray_NDIM(%(x)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "x must be a 4d ndarray");
%(fail)s;
}
if(%(z)s->nd!=4)
if(PyArray_NDIM(%(z)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "z must be a 4d ndarray");
%(fail)s;
}
if(%(gz)s->nd!=4)
if(PyArray_NDIM(%(gz)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "gz must be a 4d ndarray");
%(fail)s;
}
z_shp0 = %(z)s->dimensions[2];
z_shp1 = %(z)s->dimensions[3];
z_shp0 = PyArray_DIMS(%(z)s)[2];
z_shp1 = PyArray_DIMS(%(z)s)[3];
if (%(ignore_border)s)
{
x_shp0_usable = z_shp0 * %(ds0)s;
......@@ -330,23 +330,23 @@ class DownsampleFactorMaxGrad(Op):
}
else
{
x_shp0_usable = %(x)s->dimensions[2];
x_shp1_usable = %(x)s->dimensions[3];
x_shp0_usable = PyArray_DIMS(%(x)s)[2];
x_shp1_usable = PyArray_DIMS(%(x)s)[3];
}
if ((!%(gx)s)
|| *PyArray_DIMS(%(gx)s)!=4
||(%(gx)s->dimensions[0] != %(x)s->dimensions[0])
||(%(gx)s->dimensions[1] != %(x)s->dimensions[1])
||(%(gx)s->dimensions[2] != %(x)s->dimensions[2])
||(%(gx)s->dimensions[3] != %(x)s->dimensions[3])
||(PyArray_DIMS(%(gx)s)[0] != PyArray_DIMS(%(x)s)[0])
||(PyArray_DIMS(%(gx)s)[1] != PyArray_DIMS(%(x)s)[1])
||(PyArray_DIMS(%(gx)s)[2] != PyArray_DIMS(%(x)s)[2])
||(PyArray_DIMS(%(gx)s)[3] != PyArray_DIMS(%(x)s)[3])
)
{
Py_XDECREF(%(gx)s);
%(gx)s = (PyArrayObject*) PyArray_ZEROS(4, %(x)s->dimensions, x_typenum,0);
%(gx)s = (PyArrayObject*) PyArray_ZEROS(4, PyArray_DIMS(%(x)s), x_typenum,0);
}
for(int b=0;b<%(x)s->dimensions[0];b++){
for(int k=0;k<%(x)s->dimensions[1];k++){
for(int b=0;b<PyArray_DIMS(%(x)s)[0];b++){
for(int k=0;k<PyArray_DIMS(%(x)s)[1];k++){
int mini_i = 0;
int zi = 0;
for(int i=0;i< x_shp0_usable; i++){
......@@ -364,14 +364,14 @@ class DownsampleFactorMaxGrad(Op):
mini_i = (mini_i + 1 == %(ds0)s) ? 0 : mini_i+1;
zi += (mini_i == 0);
for (int j = x_shp1_usable; j < %(x)s->dimensions[3]; ++j) {
for (int j = x_shp1_usable; j < PyArray_DIMS(%(x)s)[3]; ++j) {
dtype_%(gx)s * gxp = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s,b,k,i,j)));
gxp[0] = 0;
}
}//for i
for(int i = x_shp0_usable; i < %(x)s->dimensions[2]; i++){
for (int j = 0; j < %(x)s->dimensions[3]; ++j) {
for(int i = x_shp0_usable; i < PyArray_DIMS(%(x)s)[2]; i++){
for (int j = 0; j < PyArray_DIMS(%(x)s)[3]; ++j) {
dtype_%(gx)s * gxp = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s,b,k,i,j)));
gxp[0] = 0;
}
......
......@@ -5846,11 +5846,12 @@ class test_arithmetic_cast(unittest.TestCase):
config.int_division == 'floatX'):
assert theano_dtype == config.floatX
continue
                        numpy_version = [int(v) for v in numpy.__version__.split('.')[:2]]
if (cfg == 'numpy+floatX' and
a_type == 'complex128' and
b_type == 'float32' and
combo == ('scalar', 'array') and
numpy.__version__.startswith('1.6.') and
bool(numpy_version >= [1, 6]) and
theano_dtype == 'complex128' and
numpy_dtypes == ['complex64',
'complex64']):
......@@ -5860,7 +5861,7 @@ class test_arithmetic_cast(unittest.TestCase):
# in progress), so in the meantime we just
# mark this test as a known failure.
raise KnownFailureTest('Known issue with '
'numpy 1.6.x, see #761')
                                               'numpy >= 1.6.x, see #761')
# In any other situation: something wrong is
# going on!
......
......@@ -918,7 +918,7 @@ class T_fibby(unittest.TestCase):
return """
Py_XDECREF(%(y)s);
%(y)s = (PyArrayObject*)PyArray_FromArray(
%(x)s, 0, NPY_ENSURECOPY);
%(x)s, 0, NPY_ARRAY_ENSURECOPY);
        if (!%(y)s) %(fail)s;
dtype_%(y)s * y = (dtype_%(y)s*)%(y)s->data;
dtype_%(x)s * x = (dtype_%(x)s*)%(x)s->data;
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论