提交 c25f2cc0 authored 作者: Pascal Lamblin

flake8

上级 ed44bb59
......@@ -113,42 +113,42 @@ class test_reduce_axes(unittest.TestCase):
[numpy.array(0), numpy.array(1)]]
for a in axes:
x = tensor.matrix()
m = x.sum(a)
x.sum(a)
def test_mean_axes(self):
axes = [None, 0, 1, [0, 1], numpy.array(1),
[numpy.array(0), numpy.array(1)]]
for a in axes:
x = tensor.matrix()
m = x.mean(a)
x.mean(a)
def test_max_axes(self):
axes = [None, 0, 1, [0, 1], numpy.array(1),
[numpy.array(0), numpy.array(1)]]
for a in axes:
x = tensor.matrix()
m = x.max(a)
x.max(a)
def test_min_axes(self):
axes = [None, 0, 1, [0, 1], numpy.array(1),
[numpy.array(0), numpy.array(1)]]
for a in axes:
x = tensor.matrix()
m = x.min(a)
x.min(a)
def test_argmax_axes(self):
axes = [None, 0, 1, [0, 1], numpy.array(1),
[numpy.array(0), numpy.array(1)]]
for a in axes:
x = tensor.matrix()
m = x.argmax(a)
x.argmax(a)
def test_var_axes(self):
axes = [None, 0, 1, [0, 1], numpy.array(1),
[numpy.array(0), numpy.array(1)]]
for a in axes:
x = tensor.matrix()
m = x.var(a)
x.var(a)
class test_Broadcast(unittest.TestCase):
......@@ -159,7 +159,7 @@ class test_Broadcast(unittest.TestCase):
ctype = TensorType
cop = Elemwise
openmp_minsize = 2*config.openmp_elemwise_minsize
openmp_minsize = 2 * config.openmp_elemwise_minsize
openmp_minsize_sqrt = int(math.ceil(math.sqrt(openmp_minsize)))
# The order is important if you change them.
......@@ -346,8 +346,7 @@ class test_CAReduce(unittest_tools.InferShapeTester):
((5, 0), (1, )),
((5, 0), ()),
((), None),
((), ())
]
((), ())]
type = TensorType
def with_linker(self, linker, scalar_op=scalar.add, dtype="floatX",
......@@ -371,7 +370,7 @@ class test_CAReduce(unittest_tools.InferShapeTester):
f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
xv = numpy.asarray(numpy.random.rand(*xsh))
if not "int" in dtype:
if "int" not in dtype:
xv = numpy.asarray(xv, dtype=dtype)
else:
xv = numpy.asarray(xv < 0.5, dtype=dtype)
......@@ -610,18 +609,20 @@ class test_Prod(unittest.TestCase):
x = theano.tensor.dmatrix()
# sanity check
x2 = theano.tensor.dmatrix()
p = Prod(axis=1)(x)
p2 = Prod(axis=1)(x2)
fn = theano.function([x, x2], [p - p2], mode=self.mode)
# print "hand computed diff for each row"
x2_val = numpy.asarray([[1., 2., 3.003], [0.003, 5., 6], [
0., 0., 9.01]])
# print fn(x_val, x2_val)
fn2 = theano.function([x], [theano.tensor.grad(p.sum(), x)],
mode=self.mode)
# print "real grad"
# print fn2(x_val)
# Uncomment this for debugging if needed
# x2 = theano.tensor.dmatrix()
# p2 = Prod(axis=1)(x2)
# fn = theano.function([x, x2], [p - p2], mode=self.mode)
# print("hand computed diff for each row")
# x2_val = numpy.asarray([[1., 2., 3.003], [0.003, 5., 6], [
# 0., 0., 9.01]])
# print(fn(x_val, x2_val))
# fn2 = theano.function([x], [theano.tensor.grad(p.sum(), x)],
# mode=self.mode)
# print("real grad")
# print(fn2(x_val))
fn3 = theano.function([x], [p], mode=self.mode)
assert numpy.allclose(fn3(x_val), [6., 0., 0.])
......@@ -633,14 +634,14 @@ class test_Prod(unittest.TestCase):
# def fn5(x5):
# return theano.tensor.sqr(Prod(axis=1)(x5))
#x4 = theano.tensor.dmatrix()
#p4 = theano.tensor.sqr(Prod(axis=1)(x4))
#fn4 = theano.function([x4], p4)
# print "with sqr"
# print fn4(x_val)
# print fn4(x2_val)
# x4 = theano.tensor.dmatrix()
# p4 = theano.tensor.sqr(Prod(axis=1)(x4))
# fn4 = theano.function([x4], p4)
# print("with sqr")
# print(fn4(x_val))
# print(fn4(x2_val))
#unittest_tools.verify_grad(fn5, [x_val])
# unittest_tools.verify_grad(fn5, [x_val])
@attr('slow')
def test_prod_no_zeros_in_input(self):
......@@ -691,7 +692,7 @@ class test_Prod(unittest.TestCase):
x = theano.tensor.dmatrix()
pwz_a1 = ProdWithoutZeros(axis=0)(x)
pwz_grad = theano.grad(theano.tensor.sum(pwz_a1), x)
fn_a1 = theano.function([x], pwz_grad, mode=self.mode)
theano.function([x], pwz_grad, mode=self.mode)
@attr('slow')
def test_other_grad_tests(self):
......@@ -705,16 +706,19 @@ class test_Prod(unittest.TestCase):
p = Prod(axis=1)
grad_p = theano.tensor.grad(p(x).sum(), x)
grad_fn = theano.function([x], grad_p, mode=self.mode)
assert numpy.allclose(grad_fn(x_val1), [[6., 3., 2.], [30., 0.,
0.], [0., 0., 0.]])
assert numpy.allclose(grad_fn(x_val2), [[0., 0., 2.], [30.,
0., 0.], [72., 63., 56.], [0., 0., 90.]])
assert numpy.allclose(
grad_fn(x_val1),
[[6., 3., 2.], [30., 0., 0.], [0., 0., 0.]])
assert numpy.allclose(
grad_fn(x_val2),
[[0., 0., 2.], [30., 0., 0.], [72., 63., 56.], [0., 0., 90.]])
p_axis0 = Prod(axis=0)
grad_p_axis0 = theano.tensor.grad(p_axis0(x).sum(), x)
grad_fn_axis0 = theano.function([x], grad_p_axis0, mode=self.mode)
assert numpy.allclose(grad_fn_axis0(x_val2), [[0., 400.,
0.], [63., 160., 0.], [0., 100., 0.], [0., 80., 0.]])
assert numpy.allclose(
grad_fn_axis0(x_val2),
[[0., 400., 0.], [63., 160., 0.], [0., 100., 0.], [0., 80., 0.]])
tensor.verify_grad(p, [x_val1], rng=rng, mode=self.mode)
......@@ -863,8 +867,8 @@ class T_reduce_dtype(unittest.TestCase):
f = theano.function([x], var, mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [n for n in topo if isinstance(n.op, self.op)], (topo,
dtype)
assert [n for n in topo if isinstance(n.op, self.op)], \
(topo, output_dtype)
data = numpy.random.rand(3, 4) * 10
data = data.astype(input_dtype)
f(data)
......@@ -897,8 +901,7 @@ class T_reduce_dtype(unittest.TestCase):
upcasted_dtype = scalar.upcast(input_dtype, acc_dtype)
if (acc_dtype == upcasted_dtype or
(input_dtype in tensor.discrete_dtypes and
acc_dtype in tensor.continuous_dtypes)
):
acc_dtype in tensor.continuous_dtypes)):
var = getattr(x, method)(acc_dtype=acc_dtype,
axis=axis)
assert var.owner.op.acc_dtype == acc_dtype
......@@ -923,8 +926,7 @@ class T_reduce_dtype(unittest.TestCase):
s = getattr(x, method)()
f = theano.function([], s, mode=self.mode)
topo = f.maker.fgraph.toposort()
assert [n for n in topo if isinstance(n.op, self.op)], (topo,
dtype)
assert [n for n in topo if isinstance(n.op, self.op)], topo
s_val = f()
# Use extra precision in NumPy to compute the good answer.
ret = getattr(numpy.asarray([1e8, 1, -1e8], dtype='float64'),
......@@ -1096,8 +1098,7 @@ class T_prod_without_zeros_dtype(unittest.TestCase):
upcasted_dtype = scalar.upcast(input_dtype, acc_dtype)
if (acc_dtype == upcasted_dtype or
(input_dtype in tensor.discrete_dtypes and
acc_dtype in tensor.continuous_dtypes)
):
acc_dtype in tensor.continuous_dtypes)):
prod_woz_var = ProdWithoutZeros(
axis=axis, acc_dtype=acc_dtype)(x)
assert prod_woz_var.owner.op.acc_dtype == acc_dtype
......@@ -1110,7 +1111,8 @@ class T_prod_without_zeros_dtype(unittest.TestCase):
data = data.astype(input_dtype)
f(data)
else:
self.assertRaises(TypeError,
self.assertRaises(
TypeError,
ProdWithoutZeros(axis=axis, acc_dtype=acc_dtype),
x)
......@@ -1152,7 +1154,8 @@ class TestElemwise(unittest_tools.InferShapeTester):
def test_infer_shape(self):
for s_left, s_right in [((5, 6), (5, 6)),
for s_left, s_right in [
((5, 6), (5, 6)),
((5, 6), (5, 1)),
((5, 6), (1, 6)),
((5, 1), (5, 6)),
......@@ -1167,7 +1170,8 @@ class TestElemwise(unittest_tools.InferShapeTester):
t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
t_left_val = numpy.zeros(s_left, dtype=dtype)
t_right_val = numpy.zeros(s_right, dtype=dtype)
self._compile_and_check([t_left, t_right],
self._compile_and_check(
[t_left, t_right],
[Elemwise(scalar.add)(t_left, t_right)],
[t_left_val, t_right_val], Elemwise)
......
......@@ -56,7 +56,6 @@ whitelist_flake8 = [
"tensor/tests/test_opt.py",
"tensor/tests/test_basic.py",
"tensor/tests/test_blas.py",
"tensor/tests/test_elemwise.py",
"tensor/tests/test_merge.py",
"tensor/tests/test_gc.py",
"tensor/tests/test_complex.py",
......
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论