Commit b359a356 authored by orhanf

tab fix

Parent a87abbc9
import operator
import sys
import unittest

import numpy

# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest

import theano
from theano.compile.pfunc import pfunc
from theano import config, tensor
import theano.tensor.tests.test_nlinalg
import theano.tensor.tests.test_opt as test_opt
from theano.tests import unittest_tools as utt

import theano.sandbox.cuda as cuda
if not cuda.cuda_available:
    raise SkipTest('Optional package cuda disabled')

import theano.sandbox.cuda.cula as cula
from theano.sandbox.cuda import basic_ops
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.scalar.basic_scipy import erfinv
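
# Build a pair of comparison modes: one forcing the GPU optimizations on,
# one forcing them off.  Under FAST_COMPILE we start from FAST_RUN instead,
# since FAST_COMPILE would skip the very optimizations under test.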
if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
    mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
    mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def test_no_shared_var_graph():
    """Test that the InputToGpuOptimizer also compiles graphs that
    contain no shared variables.
    """
    a = tensor.fmatrix()
@@ -46,7 +46,7 @@
    assert numpy.any([isinstance(x.op, cuda.HostFromGpu) for x in l])
def test_local_assert():
    x = theano.tensor.fmatrix()
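    # Attach an Assert to x; the GPU optimizer should lift the check onto
    # the GPU, so its input becomes a CudaNdarray (verified below).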
    a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())
    f = theano.function([x], a, mode=mode_with_gpu)
@@ -56,7 +56,7 @@
    assert isinstance(a_op[0].inputs[0].type, CudaNdarrayType)
def test_local_remove_all_assert():
    x = theano.tensor.fmatrix()
    a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())
    f = theano.function([x], a, mode=mode_with_gpu)
@@ -65,7 +65,7 @@
    assert len(a_op) == 0
def test_int_pow():
    a = CudaNdarrayType([False])()
    f = theano.function([a], (a*4).sum(), mode=mode_with_gpu)
@@ -78,7 +78,7 @@
    assert op_names == ['GpuElemwise', 'GpuCAReduce', 'HostFromGpu']
def test_gpualloc():
    '''
    This test tries to catch the scenario where, due to infer_shape,
    the input of the alloc changes from a tensor scalar to a constant
@@ -97,7 +97,7 @@
    assert numpy.any([isinstance(x.op, cuda.GpuAlloc) for x in l])
class Test_local_elemwise_alloc(test_opt.Test_local_elemwise_alloc):
    dtype = 'float32'

    def setUp(self):
@@ -141,7 +141,7 @@
    )
def test_alloc_memset_0():
    i = tensor.iscalar()
    z = numpy.zeros((1,), dtype='float32')
    o = numpy.ones((1,), dtype='float32')
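    # As the test name suggests, an alloc filled from the zero constant
    # can be lowered to a plain GPU memset(0); the ones-filled alloc is
    # the contrast case that cannot be rewritten this way.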
@@ -174,7 +174,7 @@
    assert (numpy.asarray(f(2)) == 1).all()
def test_gpuspecifyshape():
    x = cuda.shared_constructor(numpy.ones(3, dtype='float32'), 'x')
    m = theano.tensor.specify_shape(x + numpy.float32(1), (3,))
    f = theano.function([], updates=[(x, m * numpy.float32(2))],
@@ -183,7 +183,7 @@
    assert not numpy.any([isinstance(x.op, cuda.HostFromGpu) for x in l])
def test_softmax():
    x = tensor.fmatrix()
    f = theano.function([x], tensor.nnet.nnet.Softmax()(x),
@@ -195,7 +195,7 @@
    assert numpy.allclose(f(xv), f2(xv))
def test_softmax_with_bias():
    x = tensor.fmatrix()
    b = tensor.fvector()
@@ -210,7 +210,7 @@
    assert numpy.allclose(f(xv, bv), f2(xv, bv))
def test_opt_gpujoin_onlyajoin():
    # from a bug in normal sampling
    _a = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float32')
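    # The operands have different column counts, so axis 1 is the only
    # valid join axis; the optimizer is expected to replace the host Join
    # with GpuJoin (checked via the concatenate comparison below).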
@@ -231,7 +231,7 @@
    assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
    # from a bug in gpu normal sampling
    _a = numpy.asarray([1, 2, 3, 4], dtype='float32')
    _b = numpy.asarray([5, 6, 7, 8], dtype='float32')
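    # As the test name suggests, the graph joins the two vectors, applies
    # an elemwise op, and then takes a subtensor with index -1; the
    # GpuJoin rewrite must survive this combination.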
@@ -259,7 +259,7 @@
    assert numpy.allclose(numpy.asarray(f()), concat)
def test_local_gpu_subtensor():
    # Test shared forced on CPU.
    t = tensor._shared(numpy.zeros(20, "float32"))
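    # Because the shared variable is pinned to the CPU, local_gpu_subtensor
    # should slice on the host and move only the small result to the GPU,
    # instead of transferring the whole tensor first.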
    f = theano.function([], t[3:4], mode=mode_with_gpu)
@@ -300,7 +300,7 @@
    assert any([isinstance(node.op, cuda.GpuElemwise) for node in topo])
def test_local_gpu_split():
    """ Test that the GpuSplit op is being applied and works """
    # Construct symbolic split
    x = tensor.fvector()
@@ -348,13 +348,13 @@
    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
def test_print_op():
    """ Test that print ops don't block gpu optimization"""
    b = tensor.fmatrix()
    f = theano.function([b], theano.printing.Print()(b)*2, mode=mode_with_gpu)
    # theano.printing.debugprint(f)
    # print f.maker.fgraph.toposort()
    # Expected toposort:
    # [GpuFromHost(<TensorType(float32, matrix)>),
    #  <theano.printing.Print object at 0x3581210>(GpuFromHost.0),
    #  GpuElemwise{mul}(CudaNdarray{[[ 2.]]},
    #                   <theano.printing.Print object at 0x3581210>.0),
    #  HostFromGpu(GpuElemwise{mul}.0)]
    topo = f.maker.fgraph.toposort()
    assert topo[0].op == cuda.gpu_from_host
    assert isinstance(topo[1].op, theano.printing.Print)
@@ -363,7 +363,7 @@
    f(numpy.random.random((5, 5)).astype('float32'))
def test_huge_elemwise_fusion():
    """ Test that the GpuElemwise fusion works correctly.

    We check that we fuse one node with part of its inputs
    when there are too many inputs, which would make it exceed the 256
@@ -418,9 +418,9 @@
            (2, 2, 2, 2),
            (2, 2, 2, 2, 2),  # 5d
            (2, 2, 2, 2, 2, 2),
            # (2, 2, 2, 2, 2, 2, 2),
            # (2, 2, 2, 2, 2, 2, 2, 2),
            # (2, 2, 2, 1, 1, 1, 1, 2, 2),  # 9d
            ]:
        vals = [cuda.shared_constructor(gen(shape)) for x in range(max_var)]
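        # One shared input per variable; max_var is defined in the context
        # elided by the hunk above.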
        for use_tan in [True, False]:
@@ -445,7 +445,7 @@
            f()
def test_local_gpu_elemwise_0():
    """
    Test local_gpu_elemwise_0 when there is a dtype upcastable to float32
    """
@@ -479,7 +479,7 @@
    f(a_v, b_v, c_v)
def test_elemwise_fusion():
    """ Test that the GpuElemwise fusion works correctly"""
    shape = (3, 4)
    a = cuda.shared_constructor(theano._asarray(numpy.random.rand(*shape),
@@ -497,10 +497,10 @@
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
import theano.tests.test_ifelse


class TestIfElse(theano.tests.test_ifelse.test_ifelse):
    dtype = "float32"
    mode = mode_with_gpu
    cast_output = staticmethod(basic_ops.as_cuda_ndarray_variable)
@@ -510,7 +510,7 @@
        return theano.ifelse.IfElse(n, gpu=True, as_view=True)
def test_incsubtensor_mixed():
    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
@@ -534,7 +534,7 @@
    assert isinstance(client.op, cuda.GpuFromHost)
def test_erfinvgpu():
    """ Test that local_gpu_elemwise_0 replaces Erfinv with ErfinvGPU """
    x = tensor.fmatrix()
    f = theano.function([x], tensor.Elemwise(erfinv)(x), mode=mode_with_gpu)
@@ -547,7 +547,7 @@
    assert numpy.allclose(f(xv), f2(xv))
def test_local_gpu_solve():
    if not cula.cula_available:
        raise SkipTest('Optional dependency CULA not available')
@@ -577,7 +577,7 @@
    cmp((5, 5), (5, 1))
def test_local_gpu_dot_to_dot22dot():
    def cmp(a_shp, b_shp):
        a0 = numpy.random.rand(*a_shp).astype('float32')
        a = cuda.shared_constructor(a0, 'a')
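        # With `a` stored on the GPU, tensor.dot on these operands should
        # be rewritten to the GPU dot variants (per the test name,
        # dot -> dot22/dot).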
@@ -603,7 +603,7 @@
    cmp((3, 4), (4,))
class test_diag(theano.tensor.tests.test_nlinalg.test_diag):
    mode = mode_with_gpu
    shared = staticmethod(cuda.shared_constructor)
    floatX = 'float32'
@@ -614,7 +614,8 @@
                     self).__init__(name)
if __name__ == '__main__':
    test_gpualloc()
    test_opt_gpujoin_onlyajoin()
    test_opt_gpujoin_joinvectors_elemwise_then_minusone()