提交 07d4461d authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Use relative import in tests too.

上级 fe380388
...@@ -34,16 +34,16 @@ if cuda_ndarray.cuda_available and not theano.sandbox.gpuarray.pygpu_activated: ...@@ -34,16 +34,16 @@ if cuda_ndarray.cuda_available and not theano.sandbox.gpuarray.pygpu_activated:
if not theano.sandbox.gpuarray.pygpu_activated: if not theano.sandbox.gpuarray.pygpu_activated:
raise SkipTest("pygpu disabled") raise SkipTest("pygpu disabled")
from theano.sandbox.gpuarray.type import (GpuArrayType, from ..type import (GpuArrayType,
gpuarray_shared_constructor) gpuarray_shared_constructor)
from theano.sandbox.gpuarray.basic_ops import ( from ..basic_ops import (
host_from_gpu, gpu_from_host, host_from_gpu, gpu_from_host,
gpu_alloc, GpuAlloc, gpu_alloc, GpuAlloc,
gpu_from_cuda, gpu_from_cuda,
cuda_from_gpu, HostFromGpu, cuda_from_gpu, HostFromGpu,
GpuFromHost, GpuReshape, GpuFromHost, GpuReshape,
gpu_join, GpuJoin, GpuSplit, GpuEye, gpu_contiguous) gpu_join, GpuJoin, GpuSplit, GpuEye, gpu_contiguous)
from theano.sandbox.gpuarray.subtensor import GpuSubtensor from ..subtensor import GpuSubtensor
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
utt.seed_rng() utt.seed_rng()
......
...@@ -8,14 +8,14 @@ from theano.tensor.blas import (gemv_inplace, gemm_inplace, ger_destructive, ...@@ -8,14 +8,14 @@ from theano.tensor.blas import (gemv_inplace, gemm_inplace, ger_destructive,
_dot22) _dot22)
from theano.tensor.tests.test_blas import TestGer, BaseGemv from theano.tensor.tests.test_blas import TestGer, BaseGemv
from theano.sandbox.gpuarray import gpuarray_shared_constructor from .. import gpuarray_shared_constructor
from theano.sandbox.gpuarray.tests.test_basic_ops import (makeTester, rand, from .test_basic_ops import (makeTester, rand,
mode_with_gpu) mode_with_gpu)
from theano.sandbox.gpuarray.blas import (gpugemv_inplace, gpugemv_no_inplace, from ..blas import (gpugemv_inplace, gpugemv_no_inplace,
gpugemm_inplace, gpugemm_no_inplace, gpugemm_inplace, gpugemm_no_inplace,
gpuger_inplace, gpuger_no_inplace, gpuger_inplace, gpuger_no_inplace,
GpuGer, gpu_dot22) GpuGer, gpu_dot22)
GpuGemvTester = makeTester('GpuGemvTester', GpuGemvTester = makeTester('GpuGemvTester',
......
...@@ -22,10 +22,10 @@ from theano.compat.python2x import any ...@@ -22,10 +22,10 @@ from theano.compat.python2x import any
from theano.tests.unittest_tools import seed_rng from theano.tests.unittest_tools import seed_rng
# We let that import do the init of the back-end if needed. # We let that import do the init of the back-end if needed.
from theano.sandbox.gpuarray.tests.test_basic_ops import (mode_with_gpu, from .test_basic_ops import (mode_with_gpu,
mode_without_gpu) mode_without_gpu)
from theano.sandbox.gpuarray.type import GpuArrayType from ..type import GpuArrayType
from theano.sandbox.gpuarray.conv import GpuConv from ..conv import GpuConv
import pygpu import pygpu
gftensor4 = GpuArrayType('float32', [False] * 4) gftensor4 = GpuArrayType('float32', [False] * 4)
......
...@@ -6,11 +6,10 @@ from theano.tests.unittest_tools import SkipTest ...@@ -6,11 +6,10 @@ from theano.tests.unittest_tools import SkipTest
from theano.tensor.tests.test_elemwise import (test_Broadcast, test_DimShuffle, from theano.tensor.tests.test_elemwise import (test_Broadcast, test_DimShuffle,
test_CAReduce, T_reduce_dtype) test_CAReduce, T_reduce_dtype)
from theano.sandbox.gpuarray.tests.test_basic_ops import (mode_with_gpu, from .test_basic_ops import mode_with_gpu, rand_gpuarray
rand_gpuarray) from ..elemwise import (GpuElemwise, GpuDimShuffle,
from theano.sandbox.gpuarray.elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda, GpuCAReduceCPY)
GpuCAReduceCuda, GpuCAReduceCPY) from ..type import GpuArrayType
from theano.sandbox.gpuarray.type import GpuArrayType
from pygpu.array import gpuarray from pygpu.array import gpuarray
......
import unittest import unittest
from theano.tensor.nnet.tests import test_neighbours
# We let that import do the init of the back-end if needed. # We let that import do the init of the back-end if needed.
from theano.sandbox.gpuarray.tests.test_basic_ops import (mode_with_gpu, from .test_basic_ops import (mode_with_gpu,
mode_without_gpu) mode_without_gpu)
import theano.tensor.nnet.tests.test_neighbours from ..neighbours import GpuImages2Neibs
from theano.sandbox.gpuarray.neighbours import GpuImages2Neibs
class T_GpuImages2Neibs(theano.tensor.nnet.tests.test_neighbours.T_Images2Neibs): class T_GpuImages2Neibs(test_neighbours.T_Images2Neibs):
mode = mode_with_gpu mode = mode_with_gpu
op = GpuImages2Neibs op = GpuImages2Neibs
dtypes = ['int64', 'float32', 'float64'] dtypes = ['int64', 'float32', 'float64']
......
...@@ -9,12 +9,13 @@ import theano.tests.unittest_tools as utt ...@@ -9,12 +9,13 @@ import theano.tests.unittest_tools as utt
from theano.sandbox import gpuarray from theano.sandbox import gpuarray
# We let that import do the init of the back-end if needed. # We let that import do the init of the back-end if needed.
from theano.sandbox.gpuarray.tests.test_basic_ops import (mode_with_gpu, from .test_basic_ops import (mode_with_gpu,
mode_without_gpu) mode_without_gpu)
from theano.sandbox.gpuarray.nnet import ( from ..nnet import (
GpuCrossentropySoftmaxArgmax1HotWithBias, GpuCrossentropySoftmaxArgmax1HotWithBias,
GpuCrossentropySoftmax1HotWithBiasDx) GpuCrossentropySoftmax1HotWithBiasDx,
GpuSoftmaxWithBias, GpuSoftmax)
def test_GpuCrossentropySoftmaxArgmax1HotWithBias(): def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
...@@ -203,7 +204,7 @@ def softmax_with_bias_unittest_template(dtypeInput, dtypeBias): ...@@ -203,7 +204,7 @@ def softmax_with_bias_unittest_template(dtypeInput, dtypeBias):
f_gpu = theano.function([x], z, mode=mode_with_gpu) f_gpu = theano.function([x], z, mode=mode_with_gpu)
assert f.maker.fgraph.toposort()[-1].op == T.nnet.softmax_with_bias assert f.maker.fgraph.toposort()[-1].op == T.nnet.softmax_with_bias
assert isinstance(f_gpu.maker.fgraph.toposort()[-2].op, assert isinstance(f_gpu.maker.fgraph.toposort()[-2].op,
theano.sandbox.gpuarray.nnet.GpuSoftmaxWithBias) GpuSoftmaxWithBias)
def cmp(n, m): def cmp(n, m):
# print "test_softmax",n,m # print "test_softmax",n,m
...@@ -261,7 +262,7 @@ def softmax_unittest_template(dtypeInput): ...@@ -261,7 +262,7 @@ def softmax_unittest_template(dtypeInput):
f_gpu = theano.function([x], z, mode=mode_with_gpu) f_gpu = theano.function([x], z, mode=mode_with_gpu)
assert f.maker.fgraph.toposort()[-1].op == T.nnet.softmax assert f.maker.fgraph.toposort()[-1].op == T.nnet.softmax
assert isinstance(f_gpu.maker.fgraph.toposort()[-2].op, assert isinstance(f_gpu.maker.fgraph.toposort()[-2].op,
theano.sandbox.gpuarray.nnet.GpuSoftmax) GpuSoftmax)
def cmp(n, m): def cmp(n, m):
if dtypeInput == 'float32': if dtypeInput == 'float32':
......
...@@ -3,19 +3,16 @@ import numpy ...@@ -3,19 +3,16 @@ import numpy
import theano import theano
from theano import tensor from theano import tensor
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
import theano.sandbox.gpuarray
from theano.sandbox.gpuarray.type import (
GpuArrayType, gpuarray_shared_constructor)
from theano.sandbox.gpuarray.basic_ops import (
GpuAlloc, GpuReshape, gpu_alloc, gpu_from_host, host_from_gpu)
from theano.sandbox.gpuarray.elemwise import (
GpuCAReduceCuda, GpuCAReduceCPY, GpuElemwise)
from theano.sandbox.gpuarray.subtensor import GpuSubtensor
from theano.sandbox.gpuarray.tests.test_basic_ops import (
rand_gpuarray, mode_with_gpu, mode_without_gpu
)
from theano.tests.unittest_tools import SkipTest from theano.tests.unittest_tools import SkipTest
from theano.tensor.tests.test_basic import TestSpecifyShape from theano.tensor.tests import test_basic
import theano.sandbox.gpuarray
from ..type import GpuArrayType, gpuarray_shared_constructor
from ..basic_ops import (GpuAlloc, GpuReshape, gpu_alloc,
gpu_from_host, host_from_gpu)
from ..elemwise import GpuCAReduceCuda, GpuCAReduceCPY, GpuElemwise
from ..subtensor import GpuSubtensor
from .test_basic_ops import rand_gpuarray, mode_with_gpu, mode_without_gpu
def test_local_assert(): def test_local_assert():
...@@ -135,10 +132,9 @@ def test_rebroadcast(): ...@@ -135,10 +132,9 @@ def test_rebroadcast():
assert isinstance(rebr.outputs[0].type, GpuArrayType) assert isinstance(rebr.outputs[0].type, GpuArrayType)
class TestSpecifyShape(TestSpecifyShape): class TestSpecifyShape(test_basic.TestSpecifyShape):
mode = mode_with_gpu mode = mode_with_gpu
input_type = GpuArrayType input_type = GpuArrayType
pass
def test_print_op(): def test_print_op():
...@@ -146,9 +142,6 @@ def test_print_op(): ...@@ -146,9 +142,6 @@ def test_print_op():
b = tensor.fmatrix() b = tensor.fmatrix()
f = theano.function([b], theano.printing.Print()(b) * 2, f = theano.function([b], theano.printing.Print()(b) * 2,
mode=mode_with_gpu) mode=mode_with_gpu)
theano.printing.debugprint(f)
# print f.maker.fgraph.toposort()
#[GpuFromHost(<TensorType(float32, matrix)>), <theano.printing.Print object at 0x3581210>(GpuFromHost.0), GpuElemwise{mul}(CudaNdarray{[[ 2.]]}, <theano.printing.Print object at 0x3581210>.0), HostFromGpu(GpuElemwise{mul}.0)]
topo = f.maker.fgraph.toposort() topo = f.maker.fgraph.toposort()
assert topo[0].op == gpu_from_host assert topo[0].op == gpu_from_host
assert isinstance(topo[1].op, theano.printing.Print) assert isinstance(topo[1].op, theano.printing.Print)
......
...@@ -5,12 +5,10 @@ import theano ...@@ -5,12 +5,10 @@ import theano
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
import theano.sandbox.rng_mrg import theano.sandbox.rng_mrg
from theano.sandbox.gpuarray.basic_ops import ( from ..basic_ops import gpu_from_host, GpuFromHost, HostFromGpu
gpu_from_host, GpuFromHost, HostFromGpu from ..elemwise import GpuElemwise
)
from theano.sandbox.gpuarray.elemwise import GpuElemwise
from theano.sandbox.gpuarray.tests.test_basic_ops import mode_with_gpu from .test_basic_ops import mode_with_gpu
class T_Scan(TestCase): class T_Scan(TestCase):
......
import numpy import numpy
import theano import theano
from theano.tensor.tests.test_subtensor import T_subtensor from theano import tensor
from theano.compile import DeepCopyOp
from theano.sandbox.gpuarray.basic_ops import (HostFromGpu, GpuFromHost)
from theano.sandbox.gpuarray.subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedIncSubtensor1)
from theano.sandbox.gpuarray.type import gpuarray_shared_constructor from theano.tensor.tests.test_subtensor import T_subtensor
from theano.sandbox.gpuarray.tests.test_basic_ops import mode_with_gpu from ..basic_ops import HostFromGpu, GpuFromHost
from ..subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedIncSubtensor1)
from ..type import gpuarray_shared_constructor
from theano.compile import DeepCopyOp from .test_basic_ops import mode_with_gpu
from theano import tensor
class G_subtensor(T_subtensor): class G_subtensor(T_subtensor):
......
...@@ -5,9 +5,9 @@ import numpy ...@@ -5,9 +5,9 @@ import numpy
import theano import theano
from theano.compile import DeepCopyOp from theano.compile import DeepCopyOp
from theano.sandbox.gpuarray.tests.test_basic_ops import rand_gpuarray from .test_basic_ops import rand_gpuarray
from theano.sandbox.gpuarray.type import GpuArrayType from ..type import GpuArrayType
def test_deep_copy(): def test_deep_copy():
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论