提交 175d3b15 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Flake8 fixes for tests.

上级 a56442e0
...@@ -116,7 +116,7 @@ class test_GpuCAReduceCPY(test_elemwise.test_CAReduce): ...@@ -116,7 +116,7 @@ class test_GpuCAReduceCPY(test_elemwise.test_CAReduce):
def test_infer_shape(self): def test_infer_shape(self):
for dtype in self.dtypes: for dtype in self.dtypes:
            test_CAReduce.test_infer_shape(self, dtype)             super(test_GpuCAReduceCPY, self).test_infer_shape(dtype)
class test_GpuCAReduceCuda(test_GpuCAReduceCPY): class test_GpuCAReduceCuda(test_GpuCAReduceCPY):
...@@ -129,15 +129,15 @@ class test_GpuCAReduceCuda(test_GpuCAReduceCPY): ...@@ -129,15 +129,15 @@ class test_GpuCAReduceCuda(test_GpuCAReduceCPY):
((5, 6), (1, )), ((5, 6), (1, )),
((5, 6), (-1, )), ((5, 6), (-1, )),
((5, 6), (-2, )), ((5, 6), (-2, )),
#((5, 6), ()), #reduce on no axis(copy) isn't implemented # ((5, 6), ()), #reduce on no axis(copy) isn't implemented
#((2, 3, 4, 5), (0, 1, 3)), mask 1101 isn't implemented # ((2, 3, 4, 5), (0, 1, 3)), mask 1101 isn't implemented
#((2, 3, 4, 5), (-2, -3)), mask 0110 isn't implemented # ((2, 3, 4, 5), (-2, -3)), mask 0110 isn't implemented
((5, 0), None), ((5, 0), None),
((5, 0), (0, )), ((5, 0), (0, )),
((5, 0), (1, )), ((5, 0), (1, )),
#((5, 0), ()), reduce on no axis isn't implemented # ((5, 0), ()), reduce on no axis isn't implemented
#((), None), reduce on no axis isn't implemented # ((), None), reduce on no axis isn't implemented
#((), ()) reduce on no axis isn't implemented # ((), ()) reduce on no axis isn't implemented
# Test all GPU cases implemented # Test all GPU cases implemented
((1, 0), (1,)), ((1, 0), (1,)),
...@@ -172,7 +172,7 @@ class test_GpuCAReduceCuda(test_GpuCAReduceCPY): ...@@ -172,7 +172,7 @@ class test_GpuCAReduceCuda(test_GpuCAReduceCPY):
((4100, 4, 3), [2]), ((5, 4100, 3), [2]), ((5, 4, 4100), [2]), # 001 ((4100, 4, 3), [2]), ((5, 4100, 3), [2]), ((5, 4, 4100), [2]), # 001
((4100, 4, 3), [0, 1]), ((5, 4100, 3), [0, 1]), ((5, 4, 4100), [0, 1]), # 110 ((4100, 4, 3), [0, 1]), ((5, 4100, 3), [0, 1]), ((5, 4, 4100), [0, 1]), # 110
((4100, 4, 3), [1, 2]), ((5, 4100, 3), [1, 2]), ((5, 4, 4100), [1, 2]), # 011 ((4100, 4, 3), [1, 2]), ((5, 4100, 3), [1, 2]), ((5, 4, 4100), [1, 2]), # 011
#((4100,4,3),[0,2]),((5,4100,3),[0,2]),((5,4,4100),[0,2]),#101 ##not implemented # ((4100,4,3),[0,2]),((5,4100,3),[0,2]),((5,4,4100),[0,2]),#101 ##not implemented
((4100, 4, 3), [0, 1, 2]), ((5, 4100, 3), [0, 1, 2]), ((5, 4, 4100), [0, 1, 2]), # 111 ((4100, 4, 3), [0, 1, 2]), ((5, 4100, 3), [0, 1, 2]), ((5, 4, 4100), [0, 1, 2]), # 111
((65, 4, 3), [0, 1, 2]), ((5, 65, 3), [0, 1, 2]), ((5, 4, 65), [0, 1, 2]), # 111 ((65, 4, 3), [0, 1, 2]), ((5, 65, 3), [0, 1, 2]), ((5, 4, 65), [0, 1, 2]), # 111
...@@ -185,12 +185,12 @@ class test_GpuCAReduceCuda(test_GpuCAReduceCPY): ...@@ -185,12 +185,12 @@ class test_GpuCAReduceCuda(test_GpuCAReduceCPY):
# test pattern implemented by reshape # test pattern implemented by reshape
# Skip them as this test the op directly, not the optimization with reshape # Skip them as this test the op directly, not the optimization with reshape
# ((4100,4,3,2),[0]),((4,4100,3,2),[0]),((4,3,4100,2),[0]),((4,3,2,4100),[0]),#1000 # ((4100,4,3,2),[0]),((4,4100,3,2),[0]),((4,3,4100,2),[0]),((4,3,2,4100),[0]),#1000
# ((4100,4,3,2),[1]),((4,4100,3,2),[1]),((4,3,4100,2),[1]),((4,3,2,4100),[1]),#0100 # ((4100,4,3,2),[1]),((4,4100,3,2),[1]),((4,3,4100,2),[1]),((4,3,2,4100),[1]),#0100
# ((4100,4,3,2),[2]),((4,4100,3,2),[2]),((4,3,4100,2),[2]),((4,3,2,4100),[2]),#0010 # ((4100,4,3,2),[2]),((4,4100,3,2),[2]),((4,3,4100,2),[2]),((4,3,2,4100),[2]),#0010
# ((4100,4,3,2),[3]),((4,4100,3,2),[3]),((4,3,4100,2),[3]),((4,3,2,4100),[3]),#0001 # ((4100,4,3,2),[3]),((4,4100,3,2),[3]),((4,3,4100,2),[3]),((4,3,2,4100),[3]),#0001
# ((1100,2,3,4,5),[0,1,2,3,4]),((2,1100,3,4,5),[0,1,2,3,4]),((2,3,1100,4,5),[0,1,2,3,4]),((2,3,4,1100,5),[0,1,2,3,4]),((2,3,4,5,1100),[0,1,2,3,4]),#11111 # ((1100,2,3,4,5),[0,1,2,3,4]),((2,1100,3,4,5),[0,1,2,3,4]),((2,3,1100,4,5),[0,1,2,3,4]),((2,3,4,1100,5),[0,1,2,3,4]),((2,3,4,5,1100),[0,1,2,3,4]),#11111
# ((5,4,3,10,11),[1,2]), # ((5,4,3,10,11),[1,2]),
] ]
op = GpuCAReduceCuda op = GpuCAReduceCuda
reds = [scalar.add, scalar.mul, reds = [scalar.add, scalar.mul,
......
...@@ -4,7 +4,6 @@ import theano ...@@ -4,7 +4,6 @@ import theano
from theano import tensor from theano import tensor
from theano.tests.breakpoint import PdbBreakpoint from theano.tests.breakpoint import PdbBreakpoint
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
from theano.tests.unittest_tools import SkipTest
from theano.tensor.tests import test_basic from theano.tensor.tests import test_basic
import theano.sandbox.gpuarray import theano.sandbox.gpuarray
...@@ -14,8 +13,7 @@ from ..basic_ops import GpuAlloc, GpuReshape, GpuFromHost, host_from_gpu ...@@ -14,8 +13,7 @@ from ..basic_ops import GpuAlloc, GpuReshape, GpuFromHost, host_from_gpu
from ..elemwise import GpuCAReduceCuda, GpuCAReduceCPY, GpuElemwise from ..elemwise import GpuCAReduceCuda, GpuCAReduceCPY, GpuElemwise
from ..subtensor import GpuSubtensor from ..subtensor import GpuSubtensor
from .config import mode_with_gpu, mode_without_gpu, test_ctx_name from .config import mode_with_gpu, test_ctx_name
from .test_basic_ops import rand_gpuarray
def test_local_assert(): def test_local_assert():
...@@ -209,7 +207,7 @@ def test_pdbbreakpoint_op(): ...@@ -209,7 +207,7 @@ def test_pdbbreakpoint_op():
def test_local_gpu_elemwise_careduce(): def test_local_gpu_elemwise_careduce():
x = theano.tensor.matrix() x = theano.tensor.matrix()
o = (x*x).sum() o = (x * x).sum()
f = theano.function([x], o, mode=mode_with_gpu) f = theano.function([x], o, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort() topo = f.maker.fgraph.toposort()
assert len(topo) == 3 assert len(topo) == 3
...@@ -235,7 +233,7 @@ def test_local_gpu_subtensor(): ...@@ -235,7 +233,7 @@ def test_local_gpu_subtensor():
# Test multiple use of the input # Test multiple use of the input
# We want the subtensor to be on the GPU to prevent multiple transfer. # We want the subtensor to be on the GPU to prevent multiple transfer.
t = tensor.fmatrix() t = tensor.fmatrix()
f = theano.function([t], [t[3:4], t+1], mode=mode_with_gpu) f = theano.function([t], [t[3:4], t + 1], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort() topo = f.maker.fgraph.toposort()
assert not any([type(node.op) is tensor.Subtensor for node in topo]) assert not any([type(node.op) is tensor.Subtensor for node in topo])
assert any([isinstance(node.op, GpuSubtensor) for node in topo]) assert any([isinstance(node.op, GpuSubtensor) for node in topo])
...@@ -243,7 +241,7 @@ def test_local_gpu_subtensor(): ...@@ -243,7 +241,7 @@ def test_local_gpu_subtensor():
# Test multiple use of the input + input as output # Test multiple use of the input + input as output
# We want the subtensor to be on the GPU to prevent multiple transfer. # We want the subtensor to be on the GPU to prevent multiple transfer.
t = tensor.fmatrix() t = tensor.fmatrix()
f = theano.function([t], [t[3:4], t+1, t], mode=mode_with_gpu) f = theano.function([t], [t[3:4], t + 1, t], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort() topo = f.maker.fgraph.toposort()
assert not any([type(node.op) is tensor.Subtensor for node in topo]) assert not any([type(node.op) is tensor.Subtensor for node in topo])
assert any([isinstance(node.op, GpuSubtensor) for node in topo]) assert any([isinstance(node.op, GpuSubtensor) for node in topo])
...@@ -251,7 +249,7 @@ def test_local_gpu_subtensor(): ...@@ -251,7 +249,7 @@ def test_local_gpu_subtensor():
# Test shared forced on CPU end we do computation on the output of # Test shared forced on CPU end we do computation on the output of
# the subtensor. # the subtensor.
t = tensor._shared(numpy.zeros(20, "float32")) t = tensor._shared(numpy.zeros(20, "float32"))
f = theano.function([], t[3:4]+1, mode=mode_with_gpu) f = theano.function([], t[3:4] + 1, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort() topo = f.maker.fgraph.toposort()
assert any([type(node.op) is tensor.Subtensor for node in topo]) assert any([type(node.op) is tensor.Subtensor for node in topo])
assert not any([isinstance(node.op, GpuSubtensor) for node in topo]) assert not any([isinstance(node.op, GpuSubtensor) for node in topo])
...@@ -320,7 +318,7 @@ def test_local_gpu_elemwise(): ...@@ -320,7 +318,7 @@ def test_local_gpu_elemwise():
utt.assert_allclose(out[1], a_v * c_v) utt.assert_allclose(out[1], a_v * c_v)
# Test non-contiguous input # Test non-contiguous input
c = cuda.shared_constructor(numpy.asarray(c_v, dtype='float32')) c = gpuarray_shared_constructor(numpy.asarray(c_v, dtype='float32'))
f = theano.function([a, b], outs_op(a[::2], b[::2], c[::2]), f = theano.function([a, b], outs_op(a[::2], b[::2], c[::2]),
mode=mode_with_gpu) mode=mode_with_gpu)
out = f(a_v, b_v) out = f(a_v, b_v)
......
...@@ -238,4 +238,4 @@ class T_Scan(TestCase): ...@@ -238,4 +238,4 @@ class T_Scan(TestCase):
# I leave this to tested by debugmode, this test was anyway # I leave this to tested by debugmode, this test was anyway
# more of does the graph compile kind of test # more of does the graph compile kind of test
t_result = my_f() my_f()
...@@ -158,10 +158,6 @@ whitelist_flake8 = [ ...@@ -158,10 +158,6 @@ whitelist_flake8 = [
"sandbox/linalg/__init__.py", "sandbox/linalg/__init__.py",
"sandbox/linalg/tests/test_linalg.py", "sandbox/linalg/tests/test_linalg.py",
"sandbox/gpuarray/__init__.py", "sandbox/gpuarray/__init__.py",
"sandbox/gpuarray/tests/test_subtensor.py",
"sandbox/gpuarray/tests/test_scan.py",
"sandbox/gpuarray/tests/test_opt.py",
"sandbox/gpuarray/tests/test_elemwise.py",
"scan_module/scan_utils.py", "scan_module/scan_utils.py",
"scan_module/scan_views.py", "scan_module/scan_views.py",
"scan_module/scan.py", "scan_module/scan.py",
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论