提交 c2287027 作者: Arnaud Bergeron

We no longer need img_shape for the descriptor.

上级 5d6ca038
#section support_code_apply
int conv_desc(PyArrayObject *img_shp, PyArrayObject *filt_shp,
int conv_desc(PyArrayObject *filt_shp,
cudnnConvolutionDescriptor_t *desc) {
cudnnStatus_t err;
int pad[3] = {PAD_0, PAD_1, PAD_2};
int strides[3] = {SUB_0, SUB_1, SUB_2};
int upscale[3] = {1, 1, 1};
if (PyArray_DIM(filt_shp, 0) != PyArray_DIM(img_shp, 0)) {
PyErr_SetString(PyExc_ValueError, "Differing number of dimensions for "
"image and filter shape");
return -1;
}
#if BORDER_MODE == 0
pad[0] = *(npy_int64 *)PyArray_GETPTR1(filt_shp, 2) - 1;
pad[1] = *(npy_int64 *)PyArray_GETPTR1(filt_shp, 3) - 1;
......@@ -21,10 +15,10 @@ int conv_desc(PyArrayObject *img_shp, PyArrayObject *filt_shp,
#endif
#endif
if (PyArray_DIM(img_shp, 0) - 2 != NB_DIMS) {
PyErr_Format(PyExc_ValueError, "Input shapes have too many dimensions: "
if (PyArray_DIM(filt_shp, 0) - 2 != NB_DIMS) {
PyErr_Format(PyExc_ValueError, "Filter shape has too many dimensions: "
"expected %d, got %lld.", NB_DIMS,
(long long)PyArray_DIM(img_shp, 0));
(long long)PyArray_DIM(filt_shp, 0));
return -1;
}
......
......@@ -243,13 +243,11 @@ class GpuDnnConvDesc(COp):
assert conv_mode in ('conv', 'cross')
self.conv_mode = conv_mode
def make_node(self, img_shape, kern_shape):
if img_shape.type.ndim != 1 or img_shape.type.dtype != 'int64':
raise TypeError('img must be 1D shape tensor')
def make_node(self, kern_shape):
if kern_shape.type.ndim != 1 or kern_shape.type.dtype != 'int64':
raise TypeError('kern must be 1D shape tensor')
return Apply(self, [img_shape, kern_shape],
return Apply(self, [kern_shape],
[CDataType("cudnnConvolutionDescriptor_t",
freefunc="cudnnDestroyConvolutionDescriptor")()])
......@@ -780,7 +778,7 @@ def dnn_conv(img, kerns, border_mode='valid', subsample=(1, 1),
out = GpuAllocEmpty(img.dtype)(shape_i(kerns, 1, fgraph),
shape_i(img, 1, fgraph), shape2, shape3)
desc = GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
conv_mode='cross')(img.shape, out.shape)
conv_mode='cross')(out.shape)
conv = GpuDnnConvGradW()(img, kerns, out, desc)
return as_gpuarray_variable(conv.dimshuffle(1, 0, 2, 3))
......@@ -798,7 +796,7 @@ def dnn_conv(img, kerns, border_mode='valid', subsample=(1, 1),
shape_i(kerns, 1, fgraph),
shape2, shape3)
desc = GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
conv_mode=conv_mode)(out.shape, kerns.shape)
conv_mode=conv_mode)(kerns.shape)
return GpuDnnConvGradI()(kerns, img, out, desc)
# Standard case: We use GpuDnnConv with suitable padding.
......@@ -807,7 +805,7 @@ def dnn_conv(img, kerns, border_mode='valid', subsample=(1, 1),
img = gpu_contiguous(img)
kerns = gpu_contiguous(kerns)
desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
conv_mode=conv_mode)(img.shape, kerns.shape)
conv_mode=conv_mode)(kerns.shape)
desc_op = desc.owner.op
out_shp = GpuDnnConv.get_out_shape(img.shape, kerns.shape,
desc_op.border_mode,
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论