PyErr_Format(PyExc_ValueError, "GpuConv3D: W must be a 5 dimensional CudaNdarray");
%(fail)s
}
}
// V (input video) must be a 5-D CudaNdarray: (batch, row, col, time, channel).
// The duplicated nested `if` with the identical condition was a merge artifact;
// a single check is sufficient.
if (%(V)s->nd != 5)
{
PyErr_Format(PyExc_ValueError, "GpuConv3D: V must be a 5 dimensional CudaNdarray");
%(fail)s
}
// b (bias) must be a 1-D CudaNdarray (one bias per output channel).
// Collapsed the duplicated nested identical condition left by a bad merge.
if (%(b)s->nd != 1)
{
PyErr_Format(PyExc_ValueError, "GpuConv3D: b must be a vector CudaNdarray");
%(fail)s
}
// d (strides) must be a vector of exactly 3 entries: (row, col, time).
// The original had the nd check duplicated inside itself plus an empty-bodied
// `if` before the real length check — both merge artifacts. Flattened to two
// sequential checks; since %(fail)s transfers control out on error, the length
// check is still only reached when nd == 1.
if (%(d)s->nd != 1)
{
PyErr_Format(PyExc_ValueError, "GpuConv3D: d must be a vector CudaNdarray");
%(fail)s
}
if (%(d)s->dimensions[0] != 3)
{
PyErr_Format(PyExc_ValueError, "GpuConv3D: 3 stride length arguments expected (row, col, time) but %%li were given", %(d)s->dimensions[0]);
%(fail)s
}
{ //extra scope so fail doesn't jump over declarations
//Read and check sizes of inputs.
// batchSize/outputChannels stay in scope for the code further down.
// The original re-declared all three locals (legal shadowing, but a paste
// artifact) and nested the identical channel check inside itself; both
// duplications removed.
const int batchSize = CudaNdarray_HOST_DIMS(%(V)s)[0];
const int outputChannels = CudaNdarray_HOST_DIMS(%(W)s)[0];
const int inputChannels = CudaNdarray_HOST_DIMS(%(V)s)[4];
// W's last axis is its input-channel count; it must match V's channel axis.
if (CudaNdarray_HOST_DIMS(%(W)s)[4] != inputChannels)
{
PyErr_Format(PyExc_ValueError, "Conv3D: W operates on a %%i channel image but the image has %%i channels",CudaNdarray_HOST_DIMS(%(W)s)[4],inputChannels);
%(fail)s
}
{ //extra scope so error handler jumps don't cause errors
// Spatial/temporal extents of the filter bank and the video.
// The original declared every one of these `const int`s TWICE in the same
// scope — a redefinition error that would make the generated C fail to
// compile. Single declarations only.
const int filterHeight = CudaNdarray_HOST_DIMS(%(W)s)[1];
const int filterWidth = CudaNdarray_HOST_DIMS(%(W)s)[2];
const int filterDur = CudaNdarray_HOST_DIMS(%(W)s)[3];
const int vidHeight = CudaNdarray_HOST_DIMS(%(V)s)[1];
const int vidWidth = CudaNdarray_HOST_DIMS(%(V)s)[2];
const int vidDur = CudaNdarray_HOST_DIMS(%(V)s)[3];
// A "valid" convolution requires the filter to fit inside the video.
if (vidHeight < filterHeight)
{
PyErr_Format(PyExc_ValueError, "W has a height of %%i but V is only %%i pixels tall",filterHeight,vidHeight);
%(fail)s
}
// NOTE(review): this region of the file was corrupted by stray diff residue
// ("..." lines and an "@@ -116,58 +116,58 @@" hunk header) which has been
// removed, and the %(fail)s after the PyErr_Format was restored. The
// corresponding vidWidth/filterWidth and vidDur/filterDur bound checks may
// have been lost in the corruption — verify against the upstream source.
{ // extra scope so fail works
//Read and check stride arguments (row, col, time), truncated to int.
// dr/dc/dt stay in scope for the output-size computation below.
// The duplicated comment line and the identical nested positivity check
// were merge artifacts; collapsed to one comment and one check.
const int dr = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,0);
const int dc = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,1);
const int dt = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,2);
if (dr <= 0 || dc <= 0 || dt <= 0)
{
PyErr_Format(PyExc_ValueError, "GpuConv3D: Strides must all be positive but are %%i, %%i, %%i", dr, dc, dt);
%(fail)s
}
{ // extra scope so fail works
//Make correctly sized output
// "Valid"-mode output extent: floor((input - filter) / stride) + 1 per axis.
// (Block continues past this excerpt; the time axis and allocation follow.)
const int outputHeight = int( (vidHeight - filterHeight) / dr )+1;
const int outputWidth = int( (vidWidth - filterWidth) / dc )+1;