提交 0efa1e46 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Remove import of test classes.

上级 b2d3d192
...@@ -7,9 +7,10 @@ import theano ...@@ -7,9 +7,10 @@ import theano
import theano.tensor as T import theano.tensor as T
from theano.tensor import TensorType from theano.tensor import TensorType
from theano.tensor.basic import alloc from theano.tensor.basic import alloc
from theano.tensor.tests.test_basic import (
rand, safe_make_node, T_reshape, T_Join_and_Split # Don't import test classes otherwise they get tested as part of the file
) from theano.tensor.tests import test_basic
from theano.tensor.tests.test_basic import rand, safe_make_node
from theano.tests.unittest_tools import SkipTest from theano.tests.unittest_tools import SkipTest
from numpy.testing.noseclasses import KnownFailureTest from numpy.testing.noseclasses import KnownFailureTest
...@@ -340,17 +341,16 @@ def test_gpu_contiguous(): ...@@ -340,17 +341,16 @@ def test_gpu_contiguous():
assert f(a_val, 2).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous
class G_reshape(T_reshape): class G_reshape(test_basic.T_reshape):
def shortDescription(self): def shortDescription(self):
return None return None
def __init__(self, name): def __init__(self, name):
T_reshape.__init__(self, name, test_basic.T_reshape.__init__(
self, name,
shared=gpuarray_shared_constructor, shared=gpuarray_shared_constructor,
op=GpuReshape, op=GpuReshape,
mode=mode_with_gpu, mode=mode_with_gpu,
# avoid errors with limited devices
# dtype='float32',
ignore_topo=(HostFromGpu, GpuFromHost, ignore_topo=(HostFromGpu, GpuFromHost,
theano.compile.DeepCopyOp, theano.compile.DeepCopyOp,
theano.sandbox.gpuarray.elemwise.GpuElemwise, theano.sandbox.gpuarray.elemwise.GpuElemwise,
...@@ -359,7 +359,7 @@ class G_reshape(T_reshape): ...@@ -359,7 +359,7 @@ class G_reshape(T_reshape):
assert self.op == GpuReshape assert self.op == GpuReshape
class G_Join_and_Split(T_Join_and_Split): class G_Join_and_Split(test_basic.T_Join_and_Split):
def setUp(self): def setUp(self):
super(G_Join_and_Split, self).setUp() super(G_Join_and_Split, self).setUp()
self.mode = mode_with_gpu.excluding('constant_folding') self.mode = mode_with_gpu.excluding('constant_folding')
......
...@@ -3,8 +3,7 @@ import numpy ...@@ -3,8 +3,7 @@ import numpy
import theano import theano
from theano import tensor from theano import tensor
from theano.compile import DeepCopyOp from theano.compile import DeepCopyOp
from theano.tensor.tests import test_subtensor
from theano.tensor.tests.test_subtensor import T_subtensor
from ..basic_ops import HostFromGpu, GpuFromHost from ..basic_ops import HostFromGpu, GpuFromHost
from ..subtensor import (GpuIncSubtensor, GpuSubtensor, from ..subtensor import (GpuIncSubtensor, GpuSubtensor,
...@@ -15,12 +14,13 @@ from .test_basic_ops import mode_with_gpu ...@@ -15,12 +14,13 @@ from .test_basic_ops import mode_with_gpu
class G_subtensor(T_subtensor): class G_subtensor(test_subtensor.T_subtensor):
def shortDescription(self): def shortDescription(self):
return None return None
def __init__(self, name): def __init__(self, name):
T_subtensor.__init__(self, name, test_subtensor.T_subtensor.__init__(
self, name,
shared=gpuarray_shared_constructor, shared=gpuarray_shared_constructor,
sub=GpuSubtensor, sub=GpuSubtensor,
inc_sub=GpuIncSubtensor, inc_sub=GpuIncSubtensor,
......
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论