// it returns something else I still don't see why we should ignore
// it. All we want to do here is reset the flag.
// cudaGetLastError() both reads AND clears the sticky error, so a later,
// unrelated CUDA call does not spuriously fail because of this allocation.
cudaGetLastError();
if(verbose)
{
#if COMPUTE_GPU_MEM_USED
    // When memory accounting is compiled in, also report the running total
    // of bytes currently allocated (_allocated_size).
    fprintf(stderr,"Error allocating %li bytes of device memory (%s). new total bytes allocated: %d\n",(long)size,cudaGetErrorString(err),_allocated_size);
#else
    fprintf(stderr,"Error allocating %li bytes of device memory (%s).\n",(long)size,cudaGetErrorString(err));
#endif
}
// Surface the failure to Python as a MemoryError carrying the CUDA
// error string for the failed allocation.
PyErr_Format(PyExc_MemoryError,
             "Error allocating %li bytes of device memory (%s).",(long)size,cudaGetErrorString(err));