Commit 15db4a1f authored by Christos Tsirigotis

Fix flake8 in extra_ops and tests

- Resolve line 215 of test_extra_ops to int explicitly. Reason: numpy.uint64 + int -> numpy.float64 for some reason (numpy/numpy@3509704)
Parent 458a5ccd
from __future__ import absolute_import, print_function, division
import unittest
import numpy as np
import numpy
......@@ -62,9 +61,8 @@ class TestSearchsortedOp(utt.InferShapeTester):
def test_searchsortedOp_on_sorted_input(self):
    """searchsorted on an already-sorted input must match np.searchsorted.

    NOTE(review): the diff paste duplicated this assert (old and new
    formatting both present); only one copy is kept here.
    """
    f = theano.function([self.x, self.v], searchsorted(self.x, self.v))
    # Compare Theano's result against numpy on the sorted view of self.a.
    assert np.allclose(np.searchsorted(self.a[self.idx_sorted], self.b),
                       f(self.a[self.idx_sorted], self.b))
def test_searchsortedOp_on_float_sorter(self):
sorter = T.vector('sorter', dtype="float32")
......@@ -73,22 +71,20 @@ class TestSearchsortedOp(utt.InferShapeTester):
def test_searchsortedOp_on_int_sorter(self):
    """searchsorted with a signed-integer ``sorter`` matches numpy.

    Unsigned dtypes are deliberately excluded (kept in the commented
    tuple below as a reminder of the untested cases).
    """
    compatible_types = ('int8', 'int16', 'int32', 'int64',)
    # 'uint8', 'uint16', 'uint32', 'uint64')
    for dtype in compatible_types:
        sorter = T.vector('sorter', dtype=dtype)
        # allow_input_downcast lets the int64 test index array feed the
        # narrower sorter dtypes.
        f = theano.function([self.x, self.v, sorter],
                            searchsorted(self.x, self.v, sorter=sorter),
                            allow_input_downcast=True)
        assert np.allclose(np.searchsorted(self.a, self.b,
                                           sorter=self.idx_sorted),
                           f(self.a, self.b, self.idx_sorted))
def test_searchsortedOp_on_right_side(self):
    """searchsorted with side='right' must match np.searchsorted."""
    f = theano.function([self.x, self.v],
                        searchsorted(self.x, self.v, side='right'))
    # Single assert: the diff paste had duplicated this comparison.
    assert np.allclose(np.searchsorted(self.a, self.b, side='right'),
                       f(self.a, self.b))
def test_infer_shape(self):
# Test using default parameters' value
......@@ -218,8 +214,9 @@ class TestBinCountOp(utt.InferShapeTester):
def test_bincountFn(self):
w = T.vector('w')
def ref(data, w=None, minlength=None):
size = data.max() + 1
size = int(data.max() + 1)
if minlength:
size = max(size, minlength)
if w is not None:
......@@ -231,6 +228,7 @@ class TestBinCountOp(utt.InferShapeTester):
for i in range(data.shape[0]):
out[data[i]] += 1
return out
for dtype in ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64'):
x = T.vector('x', dtype=dtype)
......@@ -304,36 +302,32 @@ class TestBinCountOp(utt.InferShapeTester):
self.assertRaises(TypeError, BinCountOp(), x)
else:
self._compile_and_check(
[x],
[BinCountOp()(x,None)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
self._compile_and_check([x],
[BinCountOp()(x, None)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
weights = np.random.random((25,)).astype(config.floatX)
self._compile_and_check(
[x],
[BinCountOp()(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
self._compile_and_check([x],
[BinCountOp()(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
if not numpy_16:
continue
self._compile_and_check(
[x],
[BinCountOp(minlength=60)(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
self._compile_and_check([x],
[BinCountOp(minlength=60)(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
self._compile_and_check(
[x],
[BinCountOp(minlength=5)(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
self._compile_and_check([x],
[BinCountOp(minlength=5)(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
class TestDiffOp(utt.InferShapeTester):
......@@ -508,9 +502,9 @@ class TestRepeatOp(utt.InferShapeTester):
r_var = T.scalar(dtype=dtype)
r = numpy.asarray(3, dtype=dtype)
if (dtype == 'uint64' or
(dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1)):
self.assertRaises(TypeError,
repeat, x, r_var, axis=axis)
(dtype in self.numpy_unsupported_dtypes and
r_var.ndim == 1)):
self.assertRaises(TypeError, repeat, x, r_var, axis=axis)
else:
f = theano.function([x, r_var],
repeat(x, r_var, axis=axis))
......@@ -520,10 +514,10 @@ class TestRepeatOp(utt.InferShapeTester):
r_var = T.vector(dtype=dtype)
if axis is None:
r = np.random.random_integers(
5, size=a.size).astype(dtype)
5, size=a.size).astype(dtype)
else:
r = np.random.random_integers(
5, size=(10,)).astype(dtype)
5, size=(10,)).astype(dtype)
if dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1:
self.assertRaises(TypeError,
......@@ -534,15 +528,16 @@ class TestRepeatOp(utt.InferShapeTester):
assert np.allclose(np.repeat(a, r, axis=axis),
f(a, r))
#check when r is a list of single integer, e.g. [3].
r = np.random.random_integers(10, size=()).astype(dtype) + 2
# check when r is a list of single integer, e.g. [3].
r = np.random.random_integers(
10, size=()).astype(dtype) + 2
f = theano.function([x],
repeat(x, [r], axis=axis))
assert np.allclose(np.repeat(a, r, axis=axis),
f(a))
assert not np.any([isinstance(n.op, RepeatOp)
assert not np.any([isinstance(n.op, RepeatOp)
for n in f.maker.fgraph.toposort()])
# check when r is theano tensortype that broadcastable is (True,)
r_var = theano.tensor.TensorType(broadcastable=(True,),
dtype=dtype)()
......@@ -551,9 +546,9 @@ class TestRepeatOp(utt.InferShapeTester):
repeat(x, r_var, axis=axis))
assert np.allclose(np.repeat(a, r[0], axis=axis),
f(a, r))
assert not np.any([isinstance(n.op, RepeatOp)
assert not np.any([isinstance(n.op, RepeatOp)
for n in f.maker.fgraph.toposort()])
@attr('slow')
def test_infer_shape(self):
for ndim in range(4):
......@@ -569,28 +564,27 @@ class TestRepeatOp(utt.InferShapeTester):
r_var = T.vector(dtype=dtype)
self.assertRaises(TypeError, repeat, x, r_var)
else:
self._compile_and_check(
[x, r_var],
[RepeatOp(axis=axis)(x, r_var)],
[a, r],
self.op_class)
self._compile_and_check([x, r_var],
[RepeatOp(axis=axis)(x, r_var)],
[a, r],
self.op_class)
r_var = T.vector(dtype=dtype)
if axis is None:
r = np.random.random_integers(
5, size=a.size).astype(dtype)
5, size=a.size).astype(dtype)
elif a.size > 0:
r = np.random.random_integers(
5, size=a.shape[axis]).astype(dtype)
5, size=a.shape[axis]).astype(dtype)
else:
r = np.random.random_integers(
5, size=(10,)).astype(dtype)
5, size=(10,)).astype(dtype)
self._compile_and_check(
[x, r_var],
[RepeatOp(axis=axis)(x, r_var)],
[a, r],
self.op_class)
[x, r_var],
[RepeatOp(axis=axis)(x, r_var)],
[a, r],
self.op_class)
def test_grad(self):
for ndim in range(3):
......@@ -717,26 +711,26 @@ class TestFillDiagonalOffset(utt.InferShapeTester):
# We can't use numpy.fill_diagonal as it is bugged.
assert numpy.allclose(numpy.diag(out, test_offset), val)
if test_offset >= 0:
assert (out == val).sum() == min( min(a.shape),
a.shape[1]-test_offset )
assert (out == val).sum() == min(min(a.shape),
a.shape[1] - test_offset)
else:
assert (out == val).sum() == min( min(a.shape),
a.shape[0]+test_offset )
assert (out == val).sum() == min(min(a.shape),
a.shape[0] + test_offset)
def test_gradient(self):
    """Verify the gradient of fill_diagonal_offset over several offsets.

    The ``offset`` input itself is not differentiated: it is frozen
    inside a closure for each tested value, and verify_grad only sees
    the matrix and the fill value. Wide, tall and square shapes are
    each checked once.
    """
    for test_offset in (-5, -4, -1, 0, 1, 4, 5):
        # input 'offset' will not be tested
        def fill_diagonal_with_fix_offset(a, val):
            return fill_diagonal_offset(a, val, test_offset)

        utt.verify_grad(fill_diagonal_with_fix_offset,
                        [numpy.random.rand(5, 8), numpy.random.rand()],
                        n_tests=1, rng=TestFillDiagonalOffset.rng)
        utt.verify_grad(fill_diagonal_with_fix_offset,
                        [numpy.random.rand(8, 5), numpy.random.rand()],
                        n_tests=1, rng=TestFillDiagonalOffset.rng)
        utt.verify_grad(fill_diagonal_with_fix_offset,
                        [numpy.random.rand(5, 5), numpy.random.rand()],
                        n_tests=1, rng=TestFillDiagonalOffset.rng)
def test_infer_shape(self):
......@@ -748,12 +742,12 @@ class TestFillDiagonalOffset(utt.InferShapeTester):
[numpy.random.rand(8, 5),
numpy.random.rand(),
test_offset],
self.op_class )
self.op_class)
self._compile_and_check([x, y, z], [self.op(x, y, z)],
[numpy.random.rand(5, 8),
numpy.random.rand(),
test_offset],
self.op_class )
self.op_class)
def test_to_one_hot():
......@@ -783,47 +777,48 @@ def test_to_one_hot():
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]])
class test_Unique(utt.InferShapeTester):
def setUp(self):
    """Build the list of Unique op variants exercised by the tests.

    The positional flags mirror np.unique(return_index, return_inverse,
    return_counts). The return_counts variants are only appended when
    numpy >= 1.9, the version that introduced that argument.
    """
    super(test_Unique, self).setUp()
    self.op_class = Unique
    self.ops = [Unique(),
                Unique(True),
                Unique(False, True),
                Unique(True, True)]
    if bool(numpy_ver >= [1, 9]):
        self.ops.extend([
            Unique(False, False, True),
            Unique(True, False, True),
            Unique(False, True, True),
            Unique(True, True, True)])
def test_basic_vector(self):
    """
    Basic test for a vector.
    Done by using the op and checking that it returns the right answer.

    Each op variant in self.ops is paired positionally with the matching
    np.unique call, so the two lists must stay in the same order as in
    setUp.
    """
    x = theano.tensor.vector()
    inp = np.asarray([2, 1, 3, 2], dtype=config.floatX)
    list_outs_expected = [[np.unique(inp)],
                          np.unique(inp, True),
                          np.unique(inp, False, True),
                          np.unique(inp, True, True)]
    if bool(numpy_ver >= [1, 9]):
        list_outs_expected.extend([
            np.unique(inp, False, False, True),
            np.unique(inp, True, False, True),
            np.unique(inp, False, True, True),
            np.unique(inp, True, True, True)])
    for op, outs_expected in zip(self.ops, list_outs_expected):
        f = theano.function(inputs=[x], outputs=op(x, return_list=True))
        outs = f(inp)
        # Compare the result computed to the expected value.
        for out, out_exp in zip(outs, outs_expected):
            utt.assert_allclose(out, out_exp)
def test_basic_matrix(self):
def test_basic_matrix(self):
""" Basic test for a matrix.
Done by using the op and checking that it returns the right answer.
"""
......@@ -833,20 +828,20 @@ class test_Unique(utt.InferShapeTester):
np.unique(inp, True),
np.unique(inp, False, True),
np.unique(inp, True, True)]
if bool(numpy_ver >= [1, 9]) :
if bool(numpy_ver >= [1, 9]):
list_outs_expected.extend([
np.unique(inp, False, False, True),
np.unique(inp, True, False, True),
np.unique(inp, False, True, True),
np.unique(inp, True, True, True)])
np.unique(inp, False, False, True),
np.unique(inp, True, False, True),
np.unique(inp, False, True, True),
np.unique(inp, True, True, True)])
for op, outs_expected in zip(self.ops, list_outs_expected):
f = theano.function(inputs=[x], outputs=op(x, return_list=True))
outs = f(inp)
# Compare the result computed to the expected value.
for out, out_exp in zip(outs, outs_expected):
utt.assert_allclose(out, out_exp)
def test_infer_shape_vector(self):
def test_infer_shape_vector(self):
"""
Testing the infer_shape with a vector.
"""
......@@ -855,32 +850,31 @@ class test_Unique(utt.InferShapeTester):
for op in self.ops:
if not op.return_inverse:
continue
if op.return_index :
if op.return_index:
f = op(x)[2]
else:
f = op(x)[1]
self._compile_and_check([x],
[f],
[np.asarray(np.array([2,1,3,2]),
self._compile_and_check([x],
[f],
[np.asarray(np.array([2, 1, 3, 2]),
dtype=config.floatX)],
self.op_class)
def test_infer_shape_matrix(self):
    """
    Testing the infer_shape with a matrix.

    Only variants that return the inverse-index output are checked:
    that output's length equals the flattened input size, which is what
    infer_shape must predict. Index 2 vs 1 picks the inverse output
    depending on whether return_index shifted the output positions.
    """
    x = theano.tensor.matrix()
    for op in self.ops:
        if not op.return_inverse:
            continue
        if op.return_index:
            f = op(x)[2]
        else:
            f = op(x)[1]
        self._compile_and_check([x],
                                [f],
                                [np.asarray(np.array([[2, 1], [3, 2], [2, 3]]),
                                            dtype=config.floatX)],
                                self.op_class)
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment