提交 f7596c63 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron 提交者: Frederic Bastien

Fix compile problems.

上级 02a7bc1a
......@@ -69,9 +69,6 @@ def init_dev(dev, name=None):
pcibusid = context.pcibusid
except pygpu.gpuarray.UnsupportedException:
pcibusid = '(unsupported for device %s)' % dev
except Exception:
warnings.warn('Unable to get PCI Bus ID. Please consider updating libgpuarray and pygpu.')
pcibusid = 'unknown'
print("Mapped name %s to device %s: %s" %
(name, dev, context.devname),
......@@ -99,10 +96,11 @@ def init_dev(dev, name=None):
if need_preallocate:
MB = (1024 * 1024)
if config.gpuarray.preallocate <= 1:
gmem = min(config.gpuarray.preallocate, 0.95) * ctx.total_gmem
gmem = min(config.gpuarray.preallocate, 0.95) * context.total_gmem
else:
gmem = config.gpuarray.preallocate * MB
gmem = min(ctx.free_gmem - 50 * MB, gmem)
if gmem > context.free_gmem - 50 * MB:
print ("WARNING: Preallocating too much memory can prevent cudnn and cublas from working properly")
# This will allocate and immediatly free an object of size gmem
# which will reserve that amount of memory on the GPU.
......
......@@ -100,7 +100,7 @@ APPLY_SPECIFIC(conv_fwd)(PyGpuArrayObject *input, PyGpuArrayObject *kerns,
if (!reuse_algo) {
size_t free;
int err2 = gpucontext_property(ctx, GA_CTX_PROP_LARGEST_MEMBLOCK, &free);
int err2 = gpucontext_property(c->ctx, GA_CTX_PROP_LARGEST_MEMBLOCK, &free);
if (err2 != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError, "Error when trying to find the "
"memory information on the GPU");
......@@ -116,7 +116,7 @@ APPLY_SPECIFIC(conv_fwd)(PyGpuArrayObject *input, PyGpuArrayObject *kerns,
cudnnConvolutionFwdAlgoPerf_t choice;
gpudata *tmpmem;
tmpmem = gpudata_alloc(ctx, free, NULL, 0, NULL);
tmpmem = gpudata_alloc(c->ctx, free, NULL, 0, NULL);
if (tmpmem == NULL) {
PyErr_SetString(PyExc_MemoryError, "Could not allocate working GPU memory");
return -1;
......
......@@ -155,7 +155,7 @@ APPLY_SPECIFIC(conv_gi)(PyGpuArrayObject *kerns, PyGpuArrayObject *output,
cudnnConvolutionBwdDataAlgoPerf_t choice;
gpudata *tmpmem;
tmpmem = gpudata_alloc(ctx, mem_sz, NULL, 0, NULL);
tmpmem = gpudata_alloc(c->ctx, mem_sz, NULL, 0, NULL);
if (tmpmem == NULL) {
PyErr_SetString(PyExc_MemoryError, "Could not allocate working GPU memory");
return -1;
......
......@@ -142,7 +142,7 @@ APPLY_SPECIFIC(conv_gw)(PyGpuArrayObject *input, PyGpuArrayObject *output,
if (!reuse_algo) {
size_t free;
int err2 = gpucontext_property(ctx, GA_CTX_PROP_LARGEST_MEMBLOCK, &free);
int err2 = gpucontext_property(c->ctx, GA_CTX_PROP_LARGEST_MEMBLOCK, &free);
if (err2 != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError, "Error when trying to find the "
"memory information on the GPU");
......@@ -158,7 +158,7 @@ APPLY_SPECIFIC(conv_gw)(PyGpuArrayObject *input, PyGpuArrayObject *output,
cudnnConvolutionBwdFilterAlgoPerf_t choice;
gpudata *tmpmem;
tmpmem = gpudata_alloc(ctx, free, NULL, 0, NULL);
tmpmem = gpudata_alloc(c->ctx, free, NULL, 0, NULL);
if (tmpmem == NULL) {
PyErr_SetString(PyExc_MemoryError, "Could not allocate working GPU memory");
return -1;
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论