提交 7f860ba0,作者:lamblin

Merge pull request #1273 from nouiz/mixed

fix reload and better error message
......@@ -582,6 +582,8 @@ class GpuConv(GpuOp):
self.__dict__.update(d)
if not hasattr(self, "imshp"):
self.imshp = None
if not hasattr(self, "max_threads_dim0"):
self.max_threads_dim0 = None
def __hash__(self):
# don't use hash(self.version) as hash(-1)==-2 and
......@@ -639,11 +641,6 @@ class GpuConv(GpuOp):
return super(GpuConv, node_.op).make_thunk(node_, storage_map,
compute_map, no_recycling)
def __setstate__(self, d):
    # Restore this op from a pickled state dict.
    self.__dict__.update(d)
    # Pickles created before `max_threads_dim0` was introduced lack the
    # attribute; default it to None so reloading old pickles still works
    # (this matches the same guard added to the other __setstate__ in
    # this diff — the MR removes this duplicate definition).
    if not hasattr(self, "max_threads_dim0"):
        self.max_threads_dim0 = None
def c_compile_args(self):
nb = 0
if self.kshp is not None:
......@@ -760,7 +757,8 @@ class GpuDownsampleFactorMax(GpuOp):
int dims[4], xdim2, xdim3;
if (%(x)s->nd != 4)
{
PyErr_SetString(PyExc_ValueError, "rank error");
PyErr_SetString(PyExc_ValueError,
"GpuDownsampleFactorMax: rank error");
%(fail)s;
}
xdim2 = CudaNdarray_HOST_DIMS(%(x)s)[2];
......@@ -796,6 +794,7 @@ class GpuDownsampleFactorMax(GpuOp):
Py_XDECREF(%(z)s);
%(z)s = NULL;
PyErr_SetString(PyExc_ValueError,
"GpuDownsampleFactorMax:"
"Was not able to allocate output!");
%(fail)s;
}
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论