提交 db4b8f16 authored 作者: Pascal Lamblin's avatar Pascal Lamblin 提交者: GitHub

Merge pull request #5595 from notoraptor/fix-test-pool-on-windows

Fix GPU test_pool on Windows.
...@@ -241,9 +241,9 @@ int APPLY_SPECIFIC(pool)(PyGpuArrayObject *x, ...@@ -241,9 +241,9 @@ int APPLY_SPECIFIC(pool)(PyGpuArrayObject *x,
size_t p[3]; z_dims[0] = x_dims[0]; z_dims[1] = x_dims[1]; size_t p[3]; z_dims[0] = x_dims[0]; z_dims[1] = x_dims[1];
int nonzero_padding = 0; int nonzero_padding = 0;
for (int i = 0; i < ndims; i++) { for (int i = 0; i < ndims; i++) {
w[i] = *((npy_intp*)PyArray_GETPTR1(ws, i)); w[i] = *((npy_int64*)PyArray_GETPTR1(ws, i));
s[i] = *((npy_intp*)PyArray_GETPTR1(stride, i)); s[i] = *((npy_int64*)PyArray_GETPTR1(stride, i));
p[i] = *((npy_intp*)PyArray_GETPTR1(pad, i)); p[i] = *((npy_int64*)PyArray_GETPTR1(pad, i));
z_dims[2 + i] = OUTPUT_DIMS(x_dims[2 + i] + 2*p[i], w[i], s[i]); z_dims[2 + i] = OUTPUT_DIMS(x_dims[2 + i] + 2*p[i], w[i], s[i]);
if (p[i] > 0) { if (p[i] > 0) {
nonzero_padding = 1; nonzero_padding = 1;
......
...@@ -68,6 +68,10 @@ class GpuPool(CGpuKernelBase): ...@@ -68,6 +68,10 @@ class GpuPool(CGpuKernelBase):
if pad.dtype not in theano.tensor.int_dtypes: if pad.dtype not in theano.tensor.int_dtypes:
raise TypeError('Padding parameters must be ints.') raise TypeError('Padding parameters must be ints.')
ws = theano.tensor.cast(ws, 'int64')
stride = theano.tensor.cast(stride, 'int64')
pad = theano.tensor.cast(pad, 'int64')
return Apply(self, [inp, ws, stride, pad], [inp.type()]) return Apply(self, [inp, ws, stride, pad], [inp.type()])
def get_params(self, node): def get_params(self, node):
...@@ -183,6 +187,11 @@ class GpuMaxPoolGrad(CGpuKernelBase): ...@@ -183,6 +187,11 @@ class GpuMaxPoolGrad(CGpuKernelBase):
raise TypeError('Stride parameters must be ints.') raise TypeError('Stride parameters must be ints.')
if pad.dtype not in theano.tensor.int_dtypes: if pad.dtype not in theano.tensor.int_dtypes:
raise TypeError('Padding parameters must be ints.') raise TypeError('Padding parameters must be ints.')
ws = theano.tensor.cast(ws, 'int64')
stride = theano.tensor.cast(stride, 'int64')
pad = theano.tensor.cast(pad, 'int64')
return Apply(self, [inp, out, out_grad, ws, stride, pad], [inp.type()]) return Apply(self, [inp, out, out_grad, ws, stride, pad], [inp.type()])
def get_params(self, node): def get_params(self, node):
...@@ -257,6 +266,11 @@ class GpuAveragePoolGrad(CGpuKernelBase): ...@@ -257,6 +266,11 @@ class GpuAveragePoolGrad(CGpuKernelBase):
raise TypeError('Stride parameters must be ints.') raise TypeError('Stride parameters must be ints.')
if pad.dtype not in theano.tensor.int_dtypes: if pad.dtype not in theano.tensor.int_dtypes:
raise TypeError('Padding parameters must be ints.') raise TypeError('Padding parameters must be ints.')
ws = theano.tensor.cast(ws, 'int64')
stride = theano.tensor.cast(stride, 'int64')
pad = theano.tensor.cast(pad, 'int64')
return Apply(self, [inp, out_grad, ws, stride, pad], [inp.type()]) return Apply(self, [inp, out_grad, ws, stride, pad], [inp.type()])
def get_params(self, node): def get_params(self, node):
...@@ -334,6 +348,11 @@ class GpuDownsampleFactorMaxGradGrad(CGpuKernelBase): ...@@ -334,6 +348,11 @@ class GpuDownsampleFactorMaxGradGrad(CGpuKernelBase):
raise TypeError('Stride parameters must be ints.') raise TypeError('Stride parameters must be ints.')
if pad.dtype not in theano.tensor.int_dtypes: if pad.dtype not in theano.tensor.int_dtypes:
raise TypeError('Padding parameters must be ints.') raise TypeError('Padding parameters must be ints.')
ws = theano.tensor.cast(ws, 'int64')
stride = theano.tensor.cast(stride, 'int64')
pad = theano.tensor.cast(pad, 'int64')
return Apply(self, [inp, out, out_grad, ws, stride, pad], [inp.type()]) return Apply(self, [inp, out, out_grad, ws, stride, pad], [inp.type()])
def get_params(self, node): def get_params(self, node):
...@@ -402,13 +421,17 @@ class GpuMaxPoolRop(CGpuKernelBase): ...@@ -402,13 +421,17 @@ class GpuMaxPoolRop(CGpuKernelBase):
pad = as_tensor_variable(pad) pad = as_tensor_variable(pad)
assert ws.ndim == stride.ndim and ws.ndim == pad.ndim assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
assert ws.ndim == 1 assert ws.ndim == 1
if not ws.dtype.startswith('int'): if ws.dtype not in theano.tensor.int_dtypes:
raise TypeError('Window shape parameters must be ints.') raise TypeError('Window shape parameters must be ints.')
if not stride.dtype.startswith('int'): if stride.dtype not in theano.tensor.int_dtypes:
raise TypeError('Stride parameters must be ints.') raise TypeError('Stride parameters must be ints.')
if not pad.dtype.startswith('int'): if pad.dtype not in theano.tensor.int_dtypes:
raise TypeError('Padding parameters must be ints.') raise TypeError('Padding parameters must be ints.')
ws = theano.tensor.cast(ws, 'int64')
stride = theano.tensor.cast(stride, 'int64')
pad = theano.tensor.cast(pad, 'int64')
return Apply(self, [inp, eval_point, ws, stride, pad], [eval_point.type()]) return Apply(self, [inp, eval_point, ws, stride, pad], [eval_point.type()])
def get_params(self, node): def get_params(self, node):
......
...@@ -138,9 +138,9 @@ int APPLY_SPECIFIC(ave_pool_grad)(PyGpuArrayObject *x, ...@@ -138,9 +138,9 @@ int APPLY_SPECIFIC(ave_pool_grad)(PyGpuArrayObject *x,
size_t s[3]; size_t s[3];
size_t p[3]; size_t p[3];
for(int i = 0; i < ndims; i++) { for(int i = 0; i < ndims; i++) {
w[i] = *((npy_intp*)PyArray_GETPTR1(ws, i)); w[i] = *((npy_int64*)PyArray_GETPTR1(ws, i));
s[i] = *((npy_intp*)PyArray_GETPTR1(stride, i)); s[i] = *((npy_int64*)PyArray_GETPTR1(stride, i));
p[i] = *((npy_intp*)PyArray_GETPTR1(pad, i)); p[i] = *((npy_int64*)PyArray_GETPTR1(pad, i));
} }
int err; int err;
......
...@@ -132,9 +132,9 @@ int APPLY_SPECIFIC(pool_grad_grad)(PyGpuArrayObject *x, ...@@ -132,9 +132,9 @@ int APPLY_SPECIFIC(pool_grad_grad)(PyGpuArrayObject *x,
size_t s[3]; size_t s[3];
size_t p[3]; size_t p[3];
for(int i = 0; i < ndims; i++) { for(int i = 0; i < ndims; i++) {
w[i] = *((npy_intp*)PyArray_GETPTR1(ws, i)); w[i] = *((npy_int64*)PyArray_GETPTR1(ws, i));
s[i] = *((npy_intp*)PyArray_GETPTR1(stride, i)); s[i] = *((npy_int64*)PyArray_GETPTR1(stride, i));
p[i] = *((npy_intp*)PyArray_GETPTR1(pad, i)); p[i] = *((npy_int64*)PyArray_GETPTR1(pad, i));
} }
int err; int err;
......
...@@ -124,9 +124,9 @@ int APPLY_SPECIFIC(max_pool_grad)(PyGpuArrayObject *x, ...@@ -124,9 +124,9 @@ int APPLY_SPECIFIC(max_pool_grad)(PyGpuArrayObject *x,
size_t s[3]; size_t s[3];
size_t p[3]; size_t p[3];
for(int i = 0; i < ndims; i++) { for(int i = 0; i < ndims; i++) {
w[i] = *((npy_intp*)PyArray_GETPTR1(ws, i)); w[i] = *((npy_int64*)PyArray_GETPTR1(ws, i));
s[i] = *((npy_intp*)PyArray_GETPTR1(stride, i)); s[i] = *((npy_int64*)PyArray_GETPTR1(stride, i));
p[i] = *((npy_intp*)PyArray_GETPTR1(pad, i)); p[i] = *((npy_int64*)PyArray_GETPTR1(pad, i));
} }
int err; int err;
......
...@@ -137,9 +137,9 @@ int APPLY_SPECIFIC(max_pool_rop)(PyGpuArrayObject *x, ...@@ -137,9 +137,9 @@ int APPLY_SPECIFIC(max_pool_rop)(PyGpuArrayObject *x,
size_t p[3]; z_dims[0] = x_dims[0]; z_dims[1] = x_dims[1]; size_t p[3]; z_dims[0] = x_dims[0]; z_dims[1] = x_dims[1];
int nonzero_padding = 0; int nonzero_padding = 0;
for (int i = 0; i < ndims; i++) { for (int i = 0; i < ndims; i++) {
w[i] = *((npy_intp*)PyArray_GETPTR1(ws, i)); w[i] = *((npy_int64*)PyArray_GETPTR1(ws, i));
s[i] = *((npy_intp*)PyArray_GETPTR1(stride, i)); s[i] = *((npy_int64*)PyArray_GETPTR1(stride, i));
p[i] = *((npy_intp*)PyArray_GETPTR1(pad, i)); p[i] = *((npy_int64*)PyArray_GETPTR1(pad, i));
z_dims[2 + i] = OUTPUT_DIMS(x_dims[2 + i] + 2*p[i], w[i], s[i]); z_dims[2 + i] = OUTPUT_DIMS(x_dims[2 + i] + 2*p[i], w[i], s[i]);
if (p[i] > 0) { if (p[i] > 0) {
nonzero_padding = 1; nonzero_padding = 1;
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论