Commit 13a81986, authored by Frederic

Remove duplicate gpu join test and move 1 gpu join test to the cpu/gpu test.

Parent commit: 46f56a7d
...@@ -648,46 +648,6 @@ def test_hostfromgpu_shape_i(): ...@@ -648,46 +648,6 @@ def test_hostfromgpu_shape_i():
import theano.sandbox.cuda as cuda_ndarray import theano.sandbox.cuda as cuda_ndarray
from theano.sandbox.cuda.basic_ops import gpu_join, GpuDimShuffle from theano.sandbox.cuda.basic_ops import gpu_join, GpuDimShuffle
def test_gpujoin_concatenate_one_element():
    # Concatenating a single matrix should optimize the join away
    # entirely: the compiled GPU graph must contain exactly one node,
    # a DeepCopyOp of the input.
    mat = T.fmatrix()
    joined = T.concatenate([mat])
    fn = theano.function(inputs=[mat], outputs=[joined], mode=mode_with_gpu)
    graph_nodes = fn.maker.env.toposort()
    assert len(graph_nodes) == 1
    assert isinstance(graph_nodes[0].op, theano.compile.DeepCopyOp)
def test_gpujoin_twomatrices_joincolumns():
    # gpu_join along axis 1 must match numpy.concatenate on the same data.
    left = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
    right = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float32')
    shared_left = tcn.shared_constructor(left)
    shared_right = tcn.shared_constructor(right)
    joined = gpu_join(1, shared_left, shared_right)
    fn = theano.function([], joined)
    expected = numpy.concatenate([left, right], axis=1)
    assert numpy.all(fn() == expected)
def test_gpujoin_twomatrices_badshapes():
    """Joining on axis 0 with mismatched column counts (2 != 3) must
    raise ValueError when the compiled function executes."""
    _a = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float32')
    a = tcn.shared_constructor(_a)
    b = tcn.shared_constructor(_b)
    # Try to join on dimension 0 where the shapes don't agree (2 != 3).
    c = gpu_join(0, a, b)
    f = theano.function([], c)
    try:
        f()
    except ValueError:
        pass  # expected failure path
    else:
        # Kept out of the try block so this AssertionError can never be
        # mistaken for (or masked by) the ValueError we are testing for.
        # Also replaces the original's no-op `assert True`.
        assert False, "gpu_join on mismatched shapes should raise ValueError"
def test_gpujoin_preserves_broadcasting(): def test_gpujoin_preserves_broadcasting():
_a = numpy.asarray([[1,2],[3,4]],dtype='float32') _a = numpy.asarray([[1,2],[3,4]],dtype='float32')
_b = numpy.asarray([[5,6,7],[8,9,10]],dtype='float32') _b = numpy.asarray([[5,6,7],[8,9,10]],dtype='float32')
...@@ -797,6 +757,7 @@ class T_Join_and_Split(theano.tensor.tests.test_basic.T_Join_and_Split): ...@@ -797,6 +757,7 @@ class T_Join_and_Split(theano.tensor.tests.test_basic.T_Join_and_Split):
utt.seed_rng() utt.seed_rng()
self.mode = mode_with_gpu.excluding('constant_folding') self.mode = mode_with_gpu.excluding('constant_folding')
self.join_op = cuda.GpuJoin self.join_op = cuda.GpuJoin
# No gpu split.
self.split_op = tensor.Split self.split_op = tensor.Split
# No Make vector on the gpu, Join used instead # No Make vector on the gpu, Join used instead
self.make_vector_op = cuda.GpuJoin self.make_vector_op = cuda.GpuJoin
......
...@@ -2578,7 +2578,7 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -2578,7 +2578,7 @@ class T_Join_and_Split(unittest.TestCase):
# tested only on cpu as gpu support only float32 # tested only on cpu as gpu support only float32
a = as_tensor_variable(1) a = as_tensor_variable(1)
b = as_tensor_variable(2.0) b = as_tensor_variable(2.0)
c = shared(numpy.asarray(3.0).astype(self.floatX)) c = shared(numpy.asarray(3.0, dtype=self.floatX))
s = stack(a, b, c) s = stack(a, b, c)
...@@ -2641,6 +2641,17 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -2641,6 +2641,17 @@ class T_Join_and_Split(unittest.TestCase):
assert len([n for n in topo if isinstance(n, self.join_op)]) == 0 assert len([n for n in topo if isinstance(n, self.join_op)]) == 0
assert f.maker.env.outputs[0].dtype == 'int64' assert f.maker.env.outputs[0].dtype == 'int64'
def test_join_concatenate_one_element(self):
    """Quick check of concatenate, which is an alias for join: with a
    single input, the local_join_1 optimization removes the Join op,
    leaving only a DeepCopyOp in the compiled graph."""
    matrix_in = tensor.fmatrix()
    concatenated = tensor.concatenate([matrix_in])
    compiled = theano.function(
        inputs=[matrix_in], outputs=[concatenated],
        mode=self.mode.including('local_join_1'))
    graph_nodes = compiled.maker.env.toposort()
    assert len(graph_nodes) == 1
    assert isinstance(graph_nodes[0].op, theano.compile.DeepCopyOp)
def test_join_vector(self): def test_join_vector(self):
a = self.shared(numpy.array([1, 2, 3], dtype=self.floatX)) a = self.shared(numpy.array([1, 2, 3], dtype=self.floatX))
b = as_tensor_variable(numpy.array([7, 8, 9], dtype=self.floatX)) b = as_tensor_variable(numpy.array([7, 8, 9], dtype=self.floatX))
......
Markdown format supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment