提交 486b760d authored 作者: --global's avatar --global

Remove nb_dim param from dnn convolutions

上级 b9e29760
差异被折叠。
......@@ -3,7 +3,7 @@
int
APPLY_SPECIFIC(conv_fwd)(CudaNdarray *input, CudaNdarray *kerns,
CudaNdarray *om, cudnnConvolutionDescriptor_t desc,
float alpha, float beta, int nb_dim, CudaNdarray **output) {
float alpha, float beta, CudaNdarray **output) {
cudnnStatus_t err = CUDNN_STATUS_SUCCESS;
if (CudaNdarray_HOST_DIMS(input)[1] != CudaNdarray_HOST_DIMS(kerns)[1]) {
......@@ -17,6 +17,8 @@ APPLY_SPECIFIC(conv_fwd)(CudaNdarray *input, CudaNdarray *kerns,
if (c_set_filterNd(kerns, APPLY_SPECIFIC(kerns)) == -1)
return 1;
int nb_dim = CudaNdarray_NDIM(input);
#ifdef CONV_INPLACE
Py_XDECREF(*output);
*output = om;
......
......@@ -3,7 +3,7 @@
int
APPLY_SPECIFIC(conv_gi)(CudaNdarray *kerns, CudaNdarray *output,
CudaNdarray *im, cudnnConvolutionDescriptor_t desc,
float alpha, float beta, int nb_dim, CudaNdarray **input) {
float alpha, float beta, CudaNdarray **input) {
cudnnStatus_t err = CUDNN_STATUS_SUCCESS;
if (CudaNdarray_HOST_DIMS(im)[1] != CudaNdarray_HOST_DIMS(kerns)[1]) {
......@@ -17,6 +17,8 @@ APPLY_SPECIFIC(conv_gi)(CudaNdarray *kerns, CudaNdarray *output,
if (c_set_filterNd(kerns, APPLY_SPECIFIC(kerns)) == -1)
return 1;
int nb_dim = CudaNdarray_NDIM(output);
#ifdef CONV_INPLACE
Py_XDECREF(*input);
*input = im;
......
......@@ -3,7 +3,7 @@
int
APPLY_SPECIFIC(conv_gw)(CudaNdarray *input, CudaNdarray *output,
CudaNdarray *km, cudnnConvolutionDescriptor_t desc,
float alpha, float beta, int nb_dim, CudaNdarray **kerns) {
float alpha, float beta, CudaNdarray **kerns) {
cudnnStatus_t err = CUDNN_STATUS_SUCCESS;
if (CudaNdarray_HOST_DIMS(input)[1] != CudaNdarray_HOST_DIMS(km)[1]) {
......@@ -17,6 +17,8 @@ APPLY_SPECIFIC(conv_gw)(CudaNdarray *input, CudaNdarray *output,
if (c_set_tensorNd(output, APPLY_SPECIFIC(output)) == -1)
return 1;
int nb_dim = CudaNdarray_NDIM(output);
#ifdef CONV_INPLACE
Py_XDECREF(*kerns);
*kerns = km;
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论