Commit 9de4f1dd, authored by Frederic

Apply the same speed-up to the new back-end.

Parent commit: 9deb9ddf
...@@ -1409,8 +1409,64 @@ class GpuCAReduceCuda(HideC, CAReduceDtype): ...@@ -1409,8 +1409,64 @@ class GpuCAReduceCuda(HideC, CAReduceDtype):
if config.gpuarray.sync: if config.gpuarray.sync:
sync = """GpuArray_sync(&%(z)s->ga);""" % locals() sync = """GpuArray_sync(&%(z)s->ga);""" % locals()
print >> sio, """ print >> sio, """
{ {
int verbose = 0; int verbose = 0;
if(PyGpuArray_STRIDES(%(x)s)[0]>
PyGpuArray_STRIDES(%(x)s)[1]){
// If there are a lot of summations to do, then we can use simple parallelization -
// use each thread to do one sum.
// we might as well launch blocks of 32 threads because that's the warp size.
// we could schedule more threads if we were maxing out the gridsize below, but
// the gridsize is way more than the physical hardware and I think 32 threads
// on a huge grid is enough to fully use the hardware.
dim3 n_threads(32,1,1);
// We kindof reshape the input implicitly to something 4D:
// the shape A,B,C -> A, B, D, E
// where C <= D*E < C+32
// where E==32
int A = 1;
int B = PyGpuArray_DIMS(%(x)s)[0];
int C = PyGpuArray_DIMS(%(x)s)[1];
int D = C/32;
if (32*D < C) D+= 1;
assert ((C <= 32*D) && (32*D < C+32));
// The gridsize would ideally be (A, D). But we do the following logic to make
// sure we don't ask for a grid that is too big.
dim3 n_blocks(A,D);
if (n_blocks.x > 4096) n_blocks.x = 4096;
if (n_blocks.x*n_blocks.y > 4096) n_blocks.y = 4096/n_blocks.x;
kernel_reduce_010_AD_%(name)s<<<n_blocks, n_threads>>>(
A,B,C,D,
(%(in_dtype)s *)(((char *)cuda_get_ptr(%(x)s->ga.data))+%(x)s->ga.offset),
1,
PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s),
PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s),
(%(out_dtype)s *)(((char *)cuda_get_ptr(%(z)s->ga.data))+%(z)s->ga.offset),
1,
PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s)
);
%(sync)s
cudaError_t sts = cudaGetLastError();
if (cudaSuccess != sts)
{
PyErr_Format(PyExc_RuntimeError,
"Cuda error: %%s: %%s."
" (grid: %%i x %%i; block: %%i x %%i x %%i)\\n",
"kernel_reduce_10_AD%(name)s",
cudaGetErrorString(sts),
n_blocks.x,
n_blocks.y,
n_threads.x,
n_threads.y,
n_threads.z);
%(fail)s;
}
}else{
dim3 n_threads( dim3 n_threads(
std::min(PyGpuArray_DIMS(%(x)s)[0], std::min(PyGpuArray_DIMS(%(x)s)[0],
(size_t) 256)); (size_t) 256));
...@@ -1423,7 +1479,7 @@ class GpuCAReduceCuda(HideC, CAReduceDtype): ...@@ -1423,7 +1479,7 @@ class GpuCAReduceCuda(HideC, CAReduceDtype):
n_blocks.x, n_blocks.x,
n_blocks.y); n_blocks.y);
} }
assert( PyGpuArray_DIMS(%(x)s)[1] == PyGpuArray_DIMS(%(z)s)[0]); assert(PyGpuArray_DIMS(%(x)s)[1] == PyGpuArray_DIMS(%(z)s)[0]);
int n_shared = sizeof(%(acc_dtype)s) * n_threads.x; int n_shared = sizeof(%(acc_dtype)s) * n_threads.x;
kernel_reduce_010_%(name)s<<<n_blocks, n_threads, n_shared>>>( kernel_reduce_010_%(name)s<<<n_blocks, n_threads, n_shared>>>(
1, 1,
...@@ -1454,6 +1510,7 @@ class GpuCAReduceCuda(HideC, CAReduceDtype): ...@@ -1454,6 +1510,7 @@ class GpuCAReduceCuda(HideC, CAReduceDtype):
%(fail)s; %(fail)s;
} }
} }
}
""" % locals() """ % locals()
def c_code_reduce_010(self, sio, node, name, x, z, fail): def c_code_reduce_010(self, sio, node, name, x, z, fail):
...@@ -1795,7 +1852,7 @@ class GpuCAReduceCuda(HideC, CAReduceDtype): ...@@ -1795,7 +1852,7 @@ class GpuCAReduceCuda(HideC, CAReduceDtype):
""" % locals() """ % locals()
def c_code_cache_version_apply(self, node): def c_code_cache_version_apply(self, node):
version = [11] # the version corresponding to the c code in this Op version = [12] # the version corresponding to the c code in this Op
# now we insert versions for the ops on which we depend... # now we insert versions for the ops on which we depend...
scalar_node = Apply(self.scalar_op, scalar_node = Apply(self.scalar_op,
...@@ -2032,7 +2089,7 @@ class GpuCAReduceCuda(HideC, CAReduceDtype): ...@@ -2032,7 +2089,7 @@ class GpuCAReduceCuda(HideC, CAReduceDtype):
} }
""" % locals() """ % locals()
if self.reduce_mask == (0, 1, 0): if self.reduce_mask == (0, 1, 0) or self.reduce_mask == (1, 0):
reduce_fct = self._assign_reduce(node, nodename, "myresult", reduce_fct = self._assign_reduce(node, nodename, "myresult",
"X[a * sX0 + b * sX1 + c * sX2]", "X[a * sX0 + b * sX1 + c * sX2]",
{}, True) {}, True)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论