提交 454c1ab4 authored 作者: James Bergstra's avatar James Bergstra

FIX: cuda: clear error flag when handling error

上级 555af254
...@@ -48,6 +48,11 @@ void * device_malloc(size_t size) ...@@ -48,6 +48,11 @@ void * device_malloc(size_t size)
cudaError_t err = cudaMalloc(&rval, size); cudaError_t err = cudaMalloc(&rval, size);
if (cudaSuccess != err) if (cudaSuccess != err)
{ {
// Clear the error flag, cudaMalloc doesn't do it.
// Currently this returns the same thing as err, but if in future
// it returns something else I still don't see why we should ignore
// it. All we want to do here is reset the flag.
cudaGetLastError();
#if COMPUTE_GPU_MEM_USED #if COMPUTE_GPU_MEM_USED
fprintf(stderr, "Error allocating %li bytes of device memory (%s). new total bytes allocated: %d\n", (long)size, cudaGetErrorString(err),_allocated_size); fprintf(stderr, "Error allocating %li bytes of device memory (%s). new total bytes allocated: %d\n", (long)size, cudaGetErrorString(err),_allocated_size);
#else #else
...@@ -86,6 +91,11 @@ int device_free(void *ptr) ...@@ -86,6 +91,11 @@ int device_free(void *ptr)
cudaError_t err = cudaFree(ptr); cudaError_t err = cudaFree(ptr);
if (cudaSuccess != err) if (cudaSuccess != err)
{ {
// Clear the error flag, cudaFree doesn't do it.
// Currently this returns the same thing as err, but if in future
// it returns something else I still don't see why we should ignore
// it. All we want to do here is reset the flag.
cudaGetLastError();
#if COMPUTE_GPU_MEM_USED #if COMPUTE_GPU_MEM_USED
fprintf(stderr, "Error freeing device pointer %p (%s).%d byte already allocated\n", ptr, cudaGetErrorString(err), _allocated_size); fprintf(stderr, "Error freeing device pointer %p (%s).%d byte already allocated\n", ptr, cudaGetErrorString(err), _allocated_size);
#else #else
...@@ -910,6 +920,11 @@ CudaNdarray_TakeFrom(CudaNdarray * self, PyObject *args){ ...@@ -910,6 +920,11 @@ CudaNdarray_TakeFrom(CudaNdarray * self, PyObject *args){
} }
cudaError_t err = cudaMemset((void*)err_var, 0, sizeof(int)); cudaError_t err = cudaMemset((void*)err_var, 0, sizeof(int));
if (cudaSuccess != err) { if (cudaSuccess != err) {
// Clear the error flag, cudaMemset doesn't do it.
// Currently this returns the same thing as err, but if in future
// it returns something else I still don't see why we should ignore
// it. All we want to do here is reset the flag.
cudaGetLastError();
PyErr_Format(PyExc_RuntimeError, PyErr_Format(PyExc_RuntimeError,
"Error setting device error code to 0. %s", "Error setting device error code to 0. %s",
cudaGetErrorString(err)); cudaGetErrorString(err));
...@@ -2129,6 +2144,11 @@ CudaNdarray_setitem(PyObject *o, PyObject *key, PyObject *value) ...@@ -2129,6 +2144,11 @@ CudaNdarray_setitem(PyObject *o, PyObject *key, PyObject *value)
Py_XDECREF(rval); Py_XDECREF(rval);
if (err) if (err)
{ {
// Clear the error flag, cudaMemset doesn't do it.
// Currently this returns the same thing as err, but if in future
// it returns something else I still don't see why we should ignore
// it. All we want to do here is reset the flag.
cudaGetLastError();
PyErr_SetString(PyExc_RuntimeError, PyErr_SetString(PyExc_RuntimeError,
"CudaNdarray.__setitem__: cudaMemset failed"); "CudaNdarray.__setitem__: cudaMemset failed");
return -1; return -1;
...@@ -2401,6 +2421,11 @@ GetDeviceMemInfo(PyObject* _unused, PyObject* dummy) ...@@ -2401,6 +2421,11 @@ GetDeviceMemInfo(PyObject* _unused, PyObject* dummy)
cudaError_t err = cudaMemGetInfo(&free, &total); cudaError_t err = cudaMemGetInfo(&free, &total);
if (err != cudaSuccess){ if (err != cudaSuccess){
// Clear the error flag, cudaMemGetInfo doesn't do it.
// Currently this returns the same thing as err, but if in future
// it returns something else I still don't see why we should ignore
// it. All we want to do here is reset the flag.
cudaGetLastError();
PyErr_Format(PyExc_RuntimeError, PyErr_Format(PyExc_RuntimeError,
"Error while getting memory info about the gpu: %s", "Error while getting memory info about the gpu: %s",
cudaGetErrorString(err)); cudaGetErrorString(err));
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论