提交 4e13c84a authored 作者: Hector's avatar Hector 提交者: Thomas Wiecki

Revert `resize` to `ressize`

上级 44942c71
......@@ -17,7 +17,7 @@ int dnn_rnn_fwd(cudnnRNNDescriptor_t desc, uint32_t numDirs,
cudnnTensorDescriptor_t *xl = NULL;
cudnnTensorDescriptor_t *yl = NULL;
gpudata *workspace = NULL;
size_t worksize, resize;
size_t worksize, ressize;
size_t seqLength = PyGpuArray_DIM(x, 0);
size_t miniBatch = PyGpuArray_DIM(x, 1);
......@@ -162,7 +162,7 @@ int dnn_rnn_fwd(cudnnRNNDescriptor_t desc, uint32_t numDirs,
}
err = cudnnGetRNNTrainingReserveSize(_handle, desc, (int)seqLength,
xl, &resize);
xl, &ressize);
if (err != CUDNN_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"Could not get reserve size: %s",
......@@ -170,7 +170,7 @@ int dnn_rnn_fwd(cudnnRNNDescriptor_t desc, uint32_t numDirs,
goto fail;
}
*reserve = gpudata_alloc(c->ctx, resize, NULL, 0, NULL);
*reserve = gpudata_alloc(c->ctx, ressize, NULL, 0, NULL);
if (*reserve == NULL) {
PyErr_Format(PyExc_RuntimeError, "Could not allocate reserve");
goto fail;
......@@ -185,7 +185,7 @@ int dnn_rnn_fwd(cudnnRNNDescriptor_t desc, uint32_t numDirs,
hydesc, PyGpuArray_DEV_DATA(*hy),
cydesc, cy ? PyGpuArray_DEV_DATA(*cy) : NULL,
*(void **)workspace, worksize,
*(void **)(*reserve), resize);
*(void **)(*reserve), ressize);
if (err != CUDNN_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"Could run RNN: %s",
......
......@@ -21,7 +21,7 @@ int dnn_rnn_gi(cudnnRNNDescriptor_t desc, npy_uint64 xshp,
cudnnTensorDescriptor_t *yl = NULL;
cudnnTensorDescriptor_t *dxl = NULL;
gpudata *workspace = NULL;
size_t worksize, resize;
size_t worksize, ressize;
size_t seqLength = PyGpuArray_DIM(y, 0);
size_t miniBatch = PyGpuArray_DIM(y, 1);
......@@ -170,7 +170,7 @@ int dnn_rnn_gi(cudnnRNNDescriptor_t desc, npy_uint64 xshp,
}
err = cudnnGetRNNTrainingReserveSize(_handle, desc, (int)seqLength,
dxl, &resize);
dxl, &ressize);
if (err != CUDNN_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"Could not get reserve size: %s",
......@@ -178,13 +178,13 @@ int dnn_rnn_gi(cudnnRNNDescriptor_t desc, npy_uint64 xshp,
goto fail;
}
*oreserve = gpudata_alloc(c->ctx, resize, NULL, 0, NULL);
*oreserve = gpudata_alloc(c->ctx, ressize, NULL, 0, NULL);
if (*oreserve == NULL) {
PyErr_Format(PyExc_RuntimeError, "Could not allocate reserve");
goto fail;
}
if (gpudata_move(*oreserve, 0, reserve, 0, resize) != GA_NO_ERROR) {
if (gpudata_move(*oreserve, 0, reserve, 0, ressize) != GA_NO_ERROR) {
PyErr_SetString(PyExc_RuntimeError, "could not copy reserve");
goto fail;
}
......@@ -202,7 +202,7 @@ int dnn_rnn_gi(cudnnRNNDescriptor_t desc, npy_uint64 xshp,
dhxdesc, PyGpuArray_DEV_DATA(*dhx),
dcxdesc, dcx ? PyGpuArray_DEV_DATA(*dcx) : NULL,
*(void **)workspace, worksize,
*(void **)(*oreserve), resize);
*(void **)(*oreserve), ressize);
if (err != CUDNN_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"Could run RNN grad inputs: %s",
......
......@@ -12,7 +12,7 @@ int dnn_rnn_gw(cudnnRNNDescriptor_t desc, npy_uint64 _wsize,
cudnnTensorDescriptor_t *xl = NULL;
cudnnTensorDescriptor_t *yl = NULL;
gpudata *workspace = NULL;
size_t worksize, resize;
size_t worksize, ressize;
size_t iters = PyGpuArray_DIM(x, 0);
size_t wsize = _wsize;
......@@ -132,7 +132,7 @@ int dnn_rnn_gw(cudnnRNNDescriptor_t desc, npy_uint64 _wsize,
}
err = cudnnGetRNNTrainingReserveSize(_handle, desc, (int)iters,
xl, &resize);
xl, &ressize);
if (err != CUDNN_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"Could not get reserve size: %s",
......@@ -146,7 +146,7 @@ int dnn_rnn_gw(cudnnRNNDescriptor_t desc, npy_uint64 _wsize,
yl, PyGpuArray_DEV_DATA(y),
*(void **)workspace, worksize,
dwdesc, PyGpuArray_DEV_DATA(*dw),
*(void **)reserve, resize);
*(void **)reserve, ressize);
if (err != CUDNN_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"Could run RNN grad weights: %s",
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论