提交 9627228c authored 作者: Pascal Lamblin

Merge pull request #3936 from nouiz/32bit

Mostly test fixes for 32-bit Python
...@@ -504,18 +504,6 @@ def bincount(x, weights=None, minlength=None, assert_nonneg=False): ...@@ -504,18 +504,6 @@ def bincount(x, weights=None, minlength=None, assert_nonneg=False):
.. versionadded:: 0.6 .. versionadded:: 0.6
""" """
compatible_type = ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32')
unsupported_dtypes = ('uint64',)
if x.dtype in unsupported_dtypes:
raise TypeError(
("Input dtype %s is not supported, "
% unsupported_dtypes), x.dtype)
if x.dtype not in compatible_type:
raise TypeError("Inputs dtype must be an integer.")
if x.ndim != 1: if x.ndim != 1:
raise TypeError("Inputs must be of dimension 1.") raise TypeError("Inputs must be of dimension 1.")
...@@ -738,6 +726,9 @@ def repeat(x, repeats, axis=None): ...@@ -738,6 +726,9 @@ def repeat(x, repeats, axis=None):
if repeats.ndim == 1: if repeats.ndim == 1:
repeats = repeats[0] repeats = repeats[0]
if x.dtype == 'uint64':
raise TypeError("theano.tensor.repeat don't support dtype uint64")
if axis is None: if axis is None:
axis = 0 axis = 0
x = x.flatten() x = x.flatten()
......
...@@ -663,9 +663,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -663,9 +663,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
output = pool_2d(images, maxpoolshp, ignore_border, output = pool_2d(images, maxpoolshp, ignore_border,
mode=mode) mode=mode)
output_val = function([images], output)(imval) output_val = function([images], output)(imval)
assert numpy.all(output_val == numpy_output_val), ( utt.assert_allclose(output_val, numpy_output_val)
"output_val is %s, numpy_output_val is %s"
% (output_val, numpy_output_val))
def mp(input): def mp(input):
return pool_2d(input, maxpoolshp, ignore_border, return pool_2d(input, maxpoolshp, ignore_border,
...@@ -686,12 +684,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -686,12 +684,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
patch_size = (2, 2) patch_size = (2, 2)
op = max_pool_2d_same_size(input, patch_size) op = max_pool_2d_same_size(input, patch_size)
op_output = function([input], op)(test_input_array) op_output = function([input], op)(test_input_array)
assert numpy.all(op_output == test_answer_array), ( utt.assert_allclose(op_output, test_answer_array)
"op_output is %s, test_answer_array is %s" % (
op_output, test_answer_array
)
)
def mp(input): def mp(input):
return max_pool_2d_same_size(input, patch_size) return max_pool_2d_same_size(input, patch_size)
...@@ -716,9 +709,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -716,9 +709,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
output = pool_2d(images, maxpoolshp, ignore_border, output = pool_2d(images, maxpoolshp, ignore_border,
mode=mode) mode=mode)
output_val = function([images], output)(imval) output_val = function([images], output)(imval)
assert numpy.all(output_val == numpy_output_val), ( utt.assert_allclose(output_val, numpy_output_val)
"output_val is %s, numpy_output_val is %s"
% (output_val, numpy_output_val))
# removed as already tested in test_max_pool_2d_2D # removed as already tested in test_max_pool_2d_2D
# This make test in debug mode too slow. # This make test in debug mode too slow.
...@@ -745,7 +736,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -745,7 +736,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
output = pool_2d(images, maxpoolshp, ignore_border, output = pool_2d(images, maxpoolshp, ignore_border,
mode=mode) mode=mode)
output_val = function([images], output)(imval) output_val = function([images], output)(imval)
assert numpy.all(output_val == numpy_output_val) utt.assert_allclose(output_val, numpy_output_val)
# removed as already tested in test_max_pool_2d_2D # removed as already tested in test_max_pool_2d_2D
# This make test in debug mode too slow. # This make test in debug mode too slow.
......
...@@ -1779,7 +1779,8 @@ class AdvancedSubtensor1(Op): ...@@ -1779,7 +1779,8 @@ class AdvancedSubtensor1(Op):
if (i_type != NPY_INTP) { if (i_type != NPY_INTP) {
// Cast %(i_name)s to NPY_INTP (expected by PyArray_TakeFrom), // Cast %(i_name)s to NPY_INTP (expected by PyArray_TakeFrom),
// if all values fit. // if all values fit.
if (!PyArray_CanCastSafely(i_type, NPY_INTP)) { if (!PyArray_CanCastSafely(i_type, NPY_INTP) &&
PyArray_SIZE(%(i_name)s) > 0) {
npy_int64 min_val, max_val; npy_int64 min_val, max_val;
PyObject* py_min_val = PyArray_Min(%(i_name)s, NPY_MAXDIMS, PyObject* py_min_val = PyArray_Min(%(i_name)s, NPY_MAXDIMS,
NULL); NULL);
...@@ -1850,7 +1851,7 @@ class AdvancedSubtensor1(Op): ...@@ -1850,7 +1851,7 @@ class AdvancedSubtensor1(Op):
""" % locals() """ % locals()
def c_code_cache_version(self): def c_code_cache_version(self):
return (0, 1, 1) return (0, 1, 2)
advanced_subtensor1 = AdvancedSubtensor1() advanced_subtensor1 = AdvancedSubtensor1()
......
...@@ -6672,10 +6672,11 @@ class test_arithmetic_cast(unittest.TestCase): ...@@ -6672,10 +6672,11 @@ class test_arithmetic_cast(unittest.TestCase):
class T_long_tensor(unittest.TestCase): class T_long_tensor(unittest.TestCase):
def test_fit_int64(self): def test_fit_int64(self):
for exp in xrange(64): for exp in xrange(gof.python_int_bitwidth()):
val = L(2 ** exp - 1) val = L(2 ** exp - 1)
scalar_ct = constant(val) scalar_ct = constant(val)
assert scalar_ct.dtype.startswith('int')
assert scalar_ct.dtype.startswith('int'), (exp, val, scalar_ct.dtype)
assert scalar_ct.value == val assert scalar_ct.value == val
vector_ct = constant([val, val]) vector_ct = constant([val, val])
......
...@@ -134,35 +134,35 @@ class TestBinCountOp(utt.InferShapeTester): ...@@ -134,35 +134,35 @@ class TestBinCountOp(utt.InferShapeTester):
def test_bincountFn(self): def test_bincountFn(self):
w = T.vector('w') w = T.vector('w')
def ref(data, w=None, minlength=None):
size = data.max() + 1
if minlength:
size = max(size, minlength)
if w is not None:
out = np.zeros(size, dtype=w.dtype)
for i in range(data.shape[0]):
out[data[i]] += w[i]
else:
out = np.zeros(size, dtype=a.dtype)
for i in range(data.shape[0]):
out[data[i]] += 1
return out
for dtype in ('int8', 'int16', 'int32', 'int64', for dtype in ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64'): 'uint8', 'uint16', 'uint32', 'uint64'):
x = T.vector('x', dtype=dtype) x = T.vector('x', dtype=dtype)
# uint64 always fails
# int64 and uint32 also fail if python int are 32-bit
int_bitwidth = theano.gof.python_int_bitwidth()
if int_bitwidth == 64:
numpy_unsupported_dtypes = ('uint64',)
if int_bitwidth == 32:
numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
# uint64 always fails
if dtype in numpy_unsupported_dtypes:
self.assertRaises(TypeError, bincount, x)
else:
a = np.random.random_integers(50, size=(25)).astype(dtype) a = np.random.random_integers(50, size=(25)).astype(dtype)
weights = np.random.random((25,)).astype(config.floatX) weights = np.random.random((25,)).astype(config.floatX)
f1 = theano.function([x], bincount(x)) f1 = theano.function([x], bincount(x))
f2 = theano.function([x, w], bincount(x, weights=w)) f2 = theano.function([x, w], bincount(x, weights=w))
assert (np.bincount(a) == f1(a)).all() assert (ref(a) == f1(a)).all()
assert np.allclose(np.bincount(a, weights=weights), assert np.allclose(ref(a, weights), f2(a, weights))
f2(a, weights)) f3 = theano.function([x], bincount(x, minlength=55))
f3 = theano.function([x], bincount(x, minlength=23))
f4 = theano.function([x], bincount(x, minlength=5)) f4 = theano.function([x], bincount(x, minlength=5))
assert (np.bincount(a, minlength=23) == f3(a)).all() assert (ref(a, minlength=55) == f3(a)).all()
assert (np.bincount(a, minlength=5) == f4(a)).all() assert (ref(a, minlength=5) == f4(a)).all()
# skip the following test when using unsigned ints # skip the following test when using unsigned ints
if not dtype.startswith('u'): if not dtype.startswith('u'):
a[0] = -1 a[0] = -1
...@@ -423,7 +423,8 @@ class TestRepeatOp(utt.InferShapeTester): ...@@ -423,7 +423,8 @@ class TestRepeatOp(utt.InferShapeTester):
for dtype in tensor.discrete_dtypes: for dtype in tensor.discrete_dtypes:
r_var = T.scalar(dtype=dtype) r_var = T.scalar(dtype=dtype)
r = numpy.asarray(3, dtype=dtype) r = numpy.asarray(3, dtype=dtype)
if dtype in self.numpy_unsupported_dtypes: if (dtype == 'uint64' or
(dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1)):
self.assertRaises(TypeError, self.assertRaises(TypeError,
repeat, x, r_var, axis=axis) repeat, x, r_var, axis=axis)
else: else:
...@@ -440,6 +441,10 @@ class TestRepeatOp(utt.InferShapeTester): ...@@ -440,6 +441,10 @@ class TestRepeatOp(utt.InferShapeTester):
r = np.random.random_integers( r = np.random.random_integers(
5, size=(10,)).astype(dtype) 5, size=(10,)).astype(dtype)
if dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1:
self.assertRaises(TypeError,
repeat, x, r_var, axis=axis)
else:
f = theano.function([x, r_var], f = theano.function([x, r_var],
repeat(x, r_var, axis=axis)) repeat(x, r_var, axis=axis))
assert np.allclose(np.repeat(a, r, axis=axis), assert np.allclose(np.repeat(a, r, axis=axis),
...@@ -477,6 +482,7 @@ class TestRepeatOp(utt.InferShapeTester): ...@@ -477,6 +482,7 @@ class TestRepeatOp(utt.InferShapeTester):
r_var = T.scalar(dtype=dtype) r_var = T.scalar(dtype=dtype)
r = numpy.asarray(3, dtype=dtype) r = numpy.asarray(3, dtype=dtype)
if dtype in self.numpy_unsupported_dtypes: if dtype in self.numpy_unsupported_dtypes:
r_var = T.vector(dtype=dtype)
self.assertRaises(TypeError, repeat, x, r_var) self.assertRaises(TypeError, repeat, x, r_var)
else: else:
self._compile_and_check( self._compile_and_check(
......
...@@ -514,8 +514,8 @@ def test_scan_debugprint4(): ...@@ -514,8 +514,8 @@ def test_scan_debugprint4():
def fn(a_m2, a_m1, b_m2, b_m1): def fn(a_m2, a_m1, b_m2, b_m1):
return a_m1 + a_m2, b_m1 + b_m2 return a_m1 + a_m2, b_m1 + b_m2
a0 = theano.shared(numpy.arange(2)) a0 = theano.shared(numpy.arange(2, dtype='int64'))
b0 = theano.shared(numpy.arange(2)) b0 = theano.shared(numpy.arange(2, dtype='int64'))
(a, b), _ = theano.scan( (a, b), _ = theano.scan(
fn, outputs_info=[{'initial': a0, 'taps': [-2, -1]}, fn, outputs_info=[{'initial': a0, 'taps': [-2, -1]},
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论