提交 2a3aa0fa · 作者: notoraptor

Update.

Ensure code works both on Python 2 and Python 3. test_reduction rewritten.
上级 1ec4bbc9
......@@ -55,6 +55,12 @@ class GpuMaxAndArgmax(Op):
max_typecode = pygpu.gpuarray.dtype_to_typecode(node.inputs[0].dtype)
argmax_typecode = pygpu.gpuarray.dtype_to_typecode(self.argmax_dtype)
ret = """
#if PY_MAJOR_VERSION >= 3
#ifndef PyInt_AS_LONG
#define PyInt_AS_LONG PyLong_AS_LONG
#endif
#endif
unsigned %(name)s_redux_len = PyTuple_GET_SIZE(%(axes)s);
unsigned* %(name)s_axes_to_reduce = (unsigned*)malloc(%(name)s_redux_len * sizeof(unsigned));
for (unsigned i = 0; i < %(name)s_redux_len; ++i) {
......
......@@ -11,7 +11,11 @@ from .config import mode_with_gpu, mode_without_gpu
from .test_basic_ops import rand_gpuarray
from .. import GpuArrayType
test_shape = (1000, 100, 10, 5, 2)
import math
# Number of values to be used in test tensors (except with 0-D tensors!).
test_size = 10000000
# NB: This order of "unsorted axes" is arbitrary and is here
# just to have the same informations on profile output
# from one test to another.
......@@ -29,7 +33,7 @@ def numpy_random_array(shapes):
def numpy_maxandargmax(X, axis=None):
if axis is None:
axis = range(X.ndim)
axis = list(range(X.ndim))
elif not isinstance(axis, (tuple, list)):
axis = [int(axis)]
axis = list(set(axis)) # remove duplicated values.
......@@ -62,14 +66,22 @@ def check_if_gpu_maxandargmax_not_in_graph(theano_function):
class BaseTest:
# This attribute must be set in subclasses.
tensor_size = None
shape = None
dtype = theano.config.floatX
def get_shape(self):
    """Return a shape (list of dimension sizes) for this test's tensor.

    The shape has ``self.tensor_size`` dimensions, each of equal size,
    chosen so that the total number of elements is approximately the
    module-level ``test_size``. A 0-D tensor gets the empty list.
    """
    if self.tensor_size == 0:
        return []
    # Use a float exponent explicitly: with a bare `1 / self.tensor_size`,
    # Python 2 integer division yields 0 for tensor_size > 1, so every
    # dimension would collapse to ceil(test_size**0) == 1 — defeating the
    # point of spreading `test_size` values across the dimensions. The
    # commit explicitly targets Python 2 AND 3 compatibility.
    dim = int(math.ceil(math.pow(test_size, 1.0 / self.tensor_size)))
    return [dim] * self.tensor_size
def setUp(self):
    """Validate the subclass configuration and compute a default shape.

    Raises
    ------
    SkipTest
        If `tensor_size` is not an int, or is outside the supported
        range [0, 5].
    """
    if not isinstance(self.tensor_size, int):
        raise SkipTest("No tensor ndim defined.")
    if self.tensor_size < 0 or self.tensor_size > 5:
        # Message typos fixed: "inclued" -> "included", "dimensons" -> "dimensions".
        raise SkipTest("We allow from 0 (included) to 5 (included) dimensions for these tests.")
    # Subclasses may pin an explicit shape (e.g. TestRow/TestColumn);
    # otherwise derive one from tensor_size and the global test_size.
    if self.shape is None:
        self.shape = self.get_shape()
def get_host_tensor(self):
broadcastable = (False,) * self.tensor_size
......@@ -80,10 +92,10 @@ class BaseTest:
return GpuArrayType(self.dtype, broadcastable)()
def get_host_value(self):
    """Return a random host (NumPy) array matching ``self.shape``."""
    # The diff artifact left the pre-refactor line
    # `return numpy_random_array(test_shape[:self.tensor_size])` above the
    # new one; since the first `return` wins, the stale behavior executed.
    # Only the updated return (shape computed in setUp) is kept.
    return numpy_random_array(self.shape)
def get_gpu_value(self):
    """Return a random GPU array matching ``self.shape``."""
    # Stale pre-refactor line (`rand_gpuarray(*(test_shape[:self.tensor_size]))`)
    # removed; the shape now always comes from `self.shape` set in setUp,
    # consistent with get_host_value().
    return rand_gpuarray(*self.shape)
# NB: In compute_host() and compute_gpu(),
# the first call of the theano function should be ignored in profiling,
......@@ -92,7 +104,7 @@ class BaseTest:
def compute_host(self, test_tensor, axis):
M = self.get_host_tensor()
f = theano.function([M], [T.max(M, axis=axis), T.argmax(M, axis=axis)],
name='HOST-function', mode=mode_without_gpu)
name='HOST/shape:'+str(test_tensor.shape)+'/axis:'+str(axis), mode=mode_without_gpu)
check_if_gpu_maxandargmax_not_in_graph(f)
f(test_tensor)
theano_max, theano_argmax = f(test_tensor)
......@@ -103,7 +115,7 @@ class BaseTest:
def compute_gpu(self, test_gpu_tensor, test_host_tensor, axis):
M = self.get_gpu_tensor()
f = theano.function([M], [T.max(M, axis=axis), T.argmax(M, axis=axis)],
name='GPU-function', mode=mode_with_gpu)
name='GPU/shape:'+str(test_gpu_tensor.shape)+'/axis:'+str(axis), mode=mode_with_gpu)
check_if_gpu_maxandargmax_in_graph(f)
f(test_gpu_tensor)
theano_max, theano_argmax = f(test_gpu_tensor)
......@@ -119,22 +131,17 @@ class BaseTest:
self.compute_gpu(test_gpu_tensor, test_host_tensor, axis)
def compute_axis(self, pos):
    """Run the max+argmax test reducing on the single axis `pos`.

    Does nothing when `pos` is out of range for this tensor, or when
    the tensor is 1-D (reducing a vector on axis 0 is already covered
    by the reduce-all tests).
    """
    # The diff artifact stacked the old guard on top of the new one;
    # only the updated guard (which also excludes 1-D tensors) is kept.
    if self.tensor_size != 1 and 0 <= pos < self.tensor_size:
        self.compute(pos)
def compute_some_axes(self, count):
    """Run the test reducing on the first `count` valid axes, taken in
    the module-level `unsorted_axes` order.

    `count` must be strictly less than the tensor's dimensionality:
    reducing on ALL axes is covered separately by test_all_axes*.
    """
    # Diff artifact removed: the old inclusive guard (`<=`) was left
    # stacked above the new strict one; only the strict `<` guard stays.
    if 0 <= count < self.tensor_size:
        self.compute([i for i in unsorted_axes if i < self.tensor_size][:count])
# Equivalent to test reduction on all axes.
def test_none(self):
    """Max/argmax with axis=None, i.e. a full reduction over all axes."""
    self.compute(None)
def test_all_axes(self):
    """Max/argmax reducing explicitly over every axis, in natural order."""
    every_axis = range(self.tensor_size)
    self.compute(every_axis)
def test_all_axes_unsorted(self):
    """Max/argmax reducing over every axis, supplied in a shuffled order."""
    shuffled = [axis for axis in unsorted_axes if axis < self.tensor_size]
    self.compute(shuffled)
def test_axis_1(self):
    """Max/argmax reducing on the first axis only."""
    self.compute_axis(0)
......@@ -169,6 +176,16 @@ class TestScalar(BaseTest, TestCase):
class TestVector(BaseTest, TestCase):
    """Reduction tests for 1-D tensors (vectors)."""
    tensor_size = 1
# Special case
class TestRow(BaseTest, TestCase):
    """Special case: a 2-D tensor with a single row (shape (1, test_size))."""
    tensor_size = 2
    shape = [1, test_size]
# Special case
class TestColumn(BaseTest, TestCase):
    """Special case: a 2-D tensor with a single column (shape (test_size, 1))."""
    tensor_size = 2
    shape = [test_size, 1]
class TestMatrix(BaseTest, TestCase):
    """Reduction tests for 2-D tensors (matrices) with an auto-derived shape."""
    tensor_size = 2
......@@ -176,3 +193,4 @@ class TestMatrix(BaseTest, TestCase):
class TestTensor5(BaseTest, TestCase):
    """Reduction tests for 5-D tensors (the maximum supported ndim)."""
    tensor_size = 5
......@@ -1237,6 +1237,12 @@ class MaxAndArgmax(Op):
max, argmax = out
fail = sub["fail"]
ret = """
#if PY_MAJOR_VERSION >= 3
#ifndef PyInt_AS_LONG
#define PyInt_AS_LONG PyLong_AS_LONG
#endif
#endif
int axis;
if (PyTuple_GET_SIZE(%(axis)s) == PyArray_NDIM(%(x)s)) {
......@@ -1597,7 +1603,7 @@ def max_and_argmax(a, axis=None, keepdims=False):
# Check axis and convert it to a Python list of integers.
# Axis will be used as an op param of MaxAndArgmax.
if axis is None:
axis = range(a.type.ndim)
axis = list(range(a.type.ndim))
elif (isinstance(axis, (integer_types, numpy.integer)) or
(isinstance(axis, numpy.ndarray) and axis.ndim == 0)):
axis = [int(axis)]
......@@ -1605,7 +1611,7 @@ def max_and_argmax(a, axis=None, keepdims=False):
axis = [int(i) for i in axis]
elif isinstance(axis, Variable):
if NoneConst.equals(axis):
axis = range(a.type.ndim)
axis = list(range(a.type.ndim))
elif not isinstance(axis, TensorConstant):
raise TypeError("max and argmax computation needs a constant axis. Got %s" % axis)
else:
......@@ -1616,7 +1622,7 @@ def max_and_argmax(a, axis=None, keepdims=False):
elif isinstance(axis.data, (list, numpy.ndarray)):
axis = [int(i) for i in axis.data]
if len(axis) == 0:
axis = range(a.type.ndim)
axis = list(range(a.type.ndim))
else:
for i in range(len(axis)):
if axis[i] < 0:
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论