Commit 9fcdcdba authored by Arnaud Bergeron

Use op instances instead of op classes for test classes.

Parent 7fd59ec5
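The change is mechanical but worth spelling out: the test classes used to hold op classes (e.g. `tensor.Alloc`, `Join`) and instantiate them at each use, as in `self.join_op()(0, a, b)`; they now hold op instances, which are applied directly, and `type()` recovers the class wherever an `isinstance` check inspects graph nodes. A minimal sketch of the pattern (illustrative only, not part of the commit; the input variable `val` and the shape arguments are made up, and the old Theano 0.x API is assumed):

import theano
import theano.tensor as tensor

# Disable constant folding, as the tests themselves do, so the Alloc
# node stays visible in the compiled graph.
mode = theano.compile.get_default_mode().excluding('constant_folding')

alloc = tensor.Alloc()       # an op *instance*, as the test classes now store
val = tensor.fscalar('val')  # illustrative input variable
out = alloc(val, 50, 60)     # instances are applied directly: no extra ()

f = theano.function([val], out, mode=mode)
topo = f.maker.fgraph.toposort()
# type(alloc) recovers the op class, so graph-node checks still work:
assert sum(isinstance(node.op, type(alloc)) for node in topo) == 1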
@@ -945,14 +945,14 @@ class TestAlloc(theano.tensor.tests.test_basic.TestAlloc):
     dtype = "float32"
     mode = mode_with_gpu
     shared = staticmethod(cuda.shared_constructor)
-    allocs = [B.GpuAlloc, B.GpuAlloc, tensor.Alloc]
+    allocs = [B.GpuAlloc(), B.GpuAlloc(), tensor.Alloc()]


 class T_Join_and_Split(theano.tensor.tests.test_basic.T_Join_and_Split):
     def setUp(self):
         utt.seed_rng()
         self.mode = mode_with_gpu.excluding('constant_folding')
-        self.join_op = cuda.GpuJoin
+        self.join_op = cuda.GpuJoin()
         # No gpu split.
         self.split_op = tensor.Split
         # No Make vector on the gpu, Join used instead
...
@@ -305,7 +305,7 @@ class TestAlloc(theano.tensor.tests.test_basic.TestAlloc):
     dtype = "float32"
     mode = mode_with_gpu
     shared = staticmethod(gpuarray_shared_constructor)
-    allocs = [GpuAlloc, GpuAlloc, T.Alloc]
+    allocs = [GpuAlloc(), GpuAlloc(), T.Alloc()]


 def test_shape():
@@ -363,7 +363,7 @@ class G_Join_and_Split(test_basic.T_Join_and_Split):
     def setUp(self):
         super(G_Join_and_Split, self).setUp()
         self.mode = mode_with_gpu.excluding('constant_folding')
-        self.join_op = GpuJoin
+        self.join_op = GpuJoin()
         self.split_op = GpuSplit
         # Use join instead of MakeVector since there is no MakeVector on GPU
         self.make_vector_op = GpuJoin
...
@@ -2099,7 +2099,7 @@ class TestAlloc(unittest.TestCase):
     dtype = config.floatX
     mode = mode_opt
     shared = staticmethod(theano.shared)
-    allocs = [tensor.Alloc] * 3
+    allocs = [tensor.Alloc()] * 3

     def setUp(self):
         self.rng = numpy.random.RandomState(seed=utt.fetch_seed())
@@ -2131,13 +2131,13 @@ class TestAlloc(unittest.TestCase):
             #<= is needed as the GPU currently don't implement
             # AdvancedIncSubtensor. When this is the case it can be
             # replaced with ==.
-            assert numpy.sum([isinstance(node.op, alloc)
+            assert numpy.sum([isinstance(node.op, type(alloc))
                               for node in topo_obj]) <= 1
             topo_grad = fgrad.maker.fgraph.toposort()
             # print subtensor
             # theano.printing.debugprint(fgrad)
-            assert numpy.sum([isinstance(node.op, alloc)
+            assert numpy.sum([isinstance(node.op, type(alloc))
                               for node in topo_grad]) == n_alloc, (
                 alloc, subtensor, n_alloc, topo_grad)

             fobj(test_params)
@@ -2148,46 +2148,51 @@ class TestAlloc(unittest.TestCase):
         for alloc in self.allocs:
             # The output is the result of the alloc operation,
             # we do not want it to be constant-folded
-            out = alloc()(val, 50, 60)
+            out = alloc(val, 50, 60)

-            f = theano.function([], out)
+            f = theano.function([], out, mode=self.mode)
             topo = f.maker.fgraph.toposort()
-            assert numpy.sum([isinstance(node.op, alloc)
+            assert numpy.sum([isinstance(node.op, type(alloc))
                               for node in topo]) == 1
             assert not isinstance(topo[0].op, DeepCopyOp)

     def test_ones(self):
         for shp in [[], 1, [1], [1, 2], [1, 2, 3]]:
-            ones = theano.function([], [tensor.ones(shp)])
+            ones = theano.function([], [tensor.ones(shp)], mode=self.mode)
             assert numpy.allclose(ones(), numpy.ones(shp))

         # scalar doesn't have to be provided as input
         x = scalar()
         shp = []
-        ones_scalar = theano.function([], [tensor.ones(x.shape)])
+        ones_scalar = theano.function([], [tensor.ones(x.shape)],
+                                      mode=self.mode)
         assert numpy.allclose(ones_scalar(), numpy.ones(shp))

         for (typ, shp) in [(vector, [3]), (matrix, [3, 4])]:
             x = typ()
-            ones_tensor = theano.function([x], [tensor.ones(x.shape)])
+            ones_tensor = theano.function([x], [tensor.ones(x.shape)],
+                                          mode=self.mode)
             inp = numpy.zeros(shp, dtype=config.floatX)
             assert numpy.allclose(ones_tensor(inp),
                                   numpy.ones(shp))

     def test_zeros(self):
         for shp in [[], 1, [1], [1, 2], [1, 2, 3]]:
-            zeros = theano.function([], [tensor.zeros(shp)])
+            zeros = theano.function([], [tensor.zeros(shp)],
+                                    mode=self.mode)
             assert numpy.allclose(zeros(), numpy.zeros(shp))

         # scalar doesn't have to be provided as input
         x = scalar()
         shp = []
-        zeros_scalar = theano.function([], [tensor.zeros(x.shape)])
+        zeros_scalar = theano.function([], [tensor.zeros(x.shape)],
+                                       mode=self.mode)
         assert numpy.allclose(zeros_scalar(), numpy.zeros(shp))

         for (typ, shp) in [(vector, [3]), (matrix, [3, 4])]:
             x = typ()
-            zeros_tensor = theano.function([x], [tensor.zeros(x.shape)])
+            zeros_tensor = theano.function([x], [tensor.zeros(x.shape)],
+                                           mode=self.mode)
             inp = numpy.zeros(shp, dtype=config.floatX)
             assert numpy.allclose(zeros_tensor(inp),
                                   numpy.zeros(shp))
@@ -3187,7 +3192,7 @@ class T_Join_and_Split(unittest.TestCase):
         self.mode = theano.compile.get_default_mode().excluding(
             'constant_folding'
         )
-        self.join_op = Join
+        self.join_op = Join()
         self.split_op = Split
         self.make_vector_op = opt.MakeVector
         self.floatX = config.floatX
@@ -3199,7 +3204,8 @@ class T_Join_and_Split(unittest.TestCase):
     def eval_outputs_and_check_join(self, outputs):
         f = theano.function([], outputs, self.mode)
         topo = f.maker.fgraph.toposort()
-        assert [True for node in topo if isinstance(node.op, self.join_op)]
+        assert [True for node in topo
+                if isinstance(node.op, type(self.join_op))]
         variables = f()
         if isinstance(variables, (tuple, list)) and len(variables) == 1:
             return variables[0]
@@ -3259,7 +3265,7 @@ class T_Join_and_Split(unittest.TestCase):
         self.assertTrue(numpy.all(val == [1, 2, 1, 2]))
         topo = f.maker.fgraph.toposort()
         assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0
-        assert len([n for n in topo if isinstance(n, self.join_op)]) == 0
+        assert len([n for n in topo if isinstance(n, type(self.join_op))]) == 0
         assert f.maker.fgraph.outputs[0].dtype == self.floatX

     def test_stack_scalar_make_vector_dtype(self):
@@ -3273,7 +3279,7 @@ class T_Join_and_Split(unittest.TestCase):
         self.assertTrue(numpy.all(val == [1, 2, 1, 2]))
         topo = f.maker.fgraph.toposort()
         assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0
-        assert len([n for n in topo if isinstance(n, self.join_op)]) == 0
+        assert len([n for n in topo if isinstance(n, type(self.join_op))]) == 0
         assert f.maker.fgraph.outputs[0].dtype == 'int64'

     def test_stack_scalar_make_vector_constant(self):
@@ -3289,7 +3295,7 @@ class T_Join_and_Split(unittest.TestCase):
         self.assertTrue(numpy.all(val == [10, 1, 2, 3]))
         topo = f.maker.fgraph.toposort()
         assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0
-        assert len([n for n in topo if isinstance(n, self.join_op)]) == 0
+        assert len([n for n in topo if isinstance(n, type(self.join_op))]) == 0
         assert f.maker.fgraph.outputs[0].dtype == 'int64'

     def test_stack_hessian(self):
@@ -3459,8 +3465,8 @@ class T_Join_and_Split(unittest.TestCase):
         out = self.eval_outputs_and_check_join([s])
         self.assertTrue((out == want).all())

-        assert (grad(s.sum(), b).eval() == 0).all()
-        assert (grad(s.sum(), a).eval() == 0).all()
+        assert (numpy.asarray(grad(s.sum(), b).eval()) == 0).all()
+        assert (numpy.asarray(grad(s.sum(), a).eval()) == 0).all()

     def test_join_matrix1_using_vertical_stack(self):
         a = self.shared(numpy.array([[1, 2, 3], [4, 5, 6]], dtype=self.floatX))
@@ -3499,7 +3505,8 @@ class T_Join_and_Split(unittest.TestCase):

         f = inplace_func([ax], [s], mode=self.mode)
         topo = f.maker.fgraph.toposort()
-        assert [True for node in topo if isinstance(node.op, self.join_op)]
+        assert [True for node in topo
+                if isinstance(node.op, type(self.join_op))]

         want = numpy.array([[.1, .2, .3], [.4, .5, .6],
                             [.1, .2, .3], [.4, .5, .6]])
@@ -3540,17 +3547,17 @@ class T_Join_and_Split(unittest.TestCase):

         a = self.shared(a_val, broadcastable=(False, False, True))
         b = self.shared(b_val, broadcastable=(True, False, True))
-        c = self.join_op()(1, a, b)
+        c = self.join_op(1, a, b)
         assert c.type.broadcastable[0] and c.type.broadcastable[2]
         assert not c.type.broadcastable[1]

         # Opt can remplace the int by a Theano constant
-        c = self.join_op()(theano.tensor.constant(1), a, b)
+        c = self.join_op(theano.tensor.constant(1), a, b)
         assert c.type.broadcastable[0] and c.type.broadcastable[2]
         assert not c.type.broadcastable[1]

         # In case futur opt insert other useless stuff
-        c = self.join_op()(theano.tensor.cast(theano.tensor.constant(1),
-                                              dtype="int32"),
-                           a, b)
+        c = self.join_op(theano.tensor.cast(theano.tensor.constant(1),
+                                            dtype="int32"),
+                         a, b)
         assert c.type.broadcastable[0] and c.type.broadcastable[2]
@@ -3558,7 +3565,8 @@ class T_Join_and_Split(unittest.TestCase):

         f = function([], c, mode=self.mode)
         topo = f.maker.fgraph.toposort()
-        assert [True for node in topo if isinstance(node.op, self.join_op)]
+        assert [True for node in topo
+                if isinstance(node.op, type(self.join_op))]
         f()

         utt.verify_grad((lambda a, b: join(1, a, b)), [a_val, b_val], rng=rng,
@@ -3580,12 +3588,13 @@ class T_Join_and_Split(unittest.TestCase):

         a = self.shared(a_val, broadcastable=(False, False, True))
         b = self.shared(b_val, broadcastable=(True, False, True))
-        c = self.join_op()(0, a, b)
+        c = self.join_op(0, a, b)
         assert not c.type.broadcastable[0]

         f = function([], c, mode=self.mode)
         topo = f.maker.fgraph.toposort()
-        assert [True for node in topo if isinstance(node.op, self.join_op)]
+        assert [True for node in topo
+                if isinstance(node.op, type(self.join_op))]
         f()

         utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng,
@@ -3596,7 +3605,7 @@ class T_Join_and_Split(unittest.TestCase):
                  rng.rand(3, 4, 1).astype(self.floatX))
         a = TensorType(dtype=self.floatX, broadcastable=[0, 0, 1])()
         b = TensorType(dtype=self.floatX, broadcastable=[1, 0, 1])()
-        c = join(0, a, b)
+        c = self.join_op(0, a, b)
         f = function([a, b], c, mode=self.mode)
         bad_b_val = rng.rand(3, 4, 1).astype(self.floatX)
         self.assertRaises(TypeError, f, a_val, bad_b_val)
@@ -3613,12 +3622,13 @@ class T_Join_and_Split(unittest.TestCase):

         a = self.shared(a_val, broadcastable=(True, False, True))
         b = self.shared(b_val, broadcastable=(True, False, True))
-        c = self.join_op()(0, a, b)
+        c = self.join_op(0, a, b)
         assert not c.type.broadcastable[0]

         f = function([], c, mode=self.mode)
         topo = f.maker.fgraph.toposort()
-        assert [True for node in topo if isinstance(node.op, self.join_op)]
+        assert [True for node in topo
+                if isinstance(node.op, type(self.join_op))]
         f()

         utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng,
@@ -3630,7 +3640,7 @@ class T_Join_and_Split(unittest.TestCase):
         rng = numpy.random.RandomState(seed=utt.fetch_seed())
         a_val = rng.rand(1, 4, 1).astype(self.floatX)
         a = self.shared(a_val, broadcastable=(True, False, True))
-        b = self.join_op()(0, a)
+        b = self.join_op(0, a)
         assert b.type.broadcastable[0]
         assert b.type.broadcastable[2]
         assert not b.type.broadcastable[1]
@@ -3638,8 +3648,8 @@ class T_Join_and_Split(unittest.TestCase):
         f = function([], b, mode=self.mode)
         topo = f.maker.fgraph.toposort()
         if theano.config.mode != 'FAST_COMPILE':
-            assert not [True for node in topo if isinstance(
-                node.op, self.join_op)]
+            assert not [True for node in topo
+                        if isinstance(node.op, type(self.join_op))]
         f()

         utt.verify_grad((lambda a: join(0, a)), [a_val], rng=rng,
@@ -3657,19 +3667,20 @@ class T_Join_and_Split(unittest.TestCase):
         c = TensorType(dtype=self.floatX, broadcastable=[1, 0, 0, 0, 0, 0])()
         d = TensorType(dtype=self.floatX, broadcastable=[1, 0, 1, 1, 0, 1])()
         e = TensorType(dtype=self.floatX, broadcastable=[1, 0, 1, 0, 0, 1])()
-        f = join(0, a, b, c, d, e)
+        f = self.join_op(0, a, b, c, d, e)
         fb = f.type.broadcastable
         assert not fb[0] and fb[1] and fb[2] and fb[3] and not fb[4] and fb[5]
-        g = join(1, a, b, c, d, e)
+        g = self.join_op(1, a, b, c, d, e)
         gb = g.type.broadcastable
         assert gb[0] and not gb[1] and gb[2] and gb[3] and not gb[4] and gb[5]
-        h = join(4, a, b, c, d, e)
+        h = self.join_op(4, a, b, c, d, e)
         hb = h.type.broadcastable
         assert hb[0] and hb[1] and hb[2] and hb[3] and not hb[4] and hb[5]

         f = function([a, b, c, d, e], f, mode=self.mode)
         topo = f.maker.fgraph.toposort()
-        assert [True for node in topo if isinstance(node.op, self.join_op)]
+        assert [True for node in topo
+                if isinstance(node.op, type(self.join_op))]

         rng = numpy.random.RandomState(seed=utt.fetch_seed())
         a_val = rng.rand(1, 1, 1, 1, 2, 1).astype(self.floatX)
@@ -3710,7 +3721,7 @@ class T_Join_and_Split(unittest.TestCase):
                            dtype=self.floatX)

         # Test dim 0
-        z = join(0, x1, x2, x3)
+        z = self.join_op(0, x1, x2, x3)
         f = theano.function([x1, x2, x3], z.shape, mode=self.mode)
         topo = f.maker.fgraph.toposort()
@@ -3719,10 +3730,10 @@ class T_Join_and_Split(unittest.TestCase):

         if theano.config.mode != 'FAST_COMPILE':
             for node in f.maker.fgraph.toposort():
-                assert not isinstance(node.op, tensor.Join)
+                assert not isinstance(node.op, type(self.join_op))

         # Test dim 1
-        z = join(1, x1, x2, x3)
+        z = self.join_op(1, x1, x2, x3)
         f = theano.function([x1, x2, x3], z.shape, mode=self.mode)
         topo = f.maker.fgraph.toposort()
@@ -3731,7 +3742,7 @@ class T_Join_and_Split(unittest.TestCase):

         if theano.config.mode != 'FAST_COMPILE':
             for node in topo:
-                assert not isinstance(node.op, tensor.Join)
+                assert not isinstance(node.op, type(self.join_op))

         # Test hide error
         if not self.hide_error:
@@ -3757,8 +3768,8 @@ class T_Join_and_Split(unittest.TestCase):
         f = function([], Tout, mode=self.mode)
         out = f()
         if theano.config.mode != 'FAST_COMPILE':
-            assert [True for node in f.maker.fgraph.toposort() if isinstance(
-                node.op, self.join_op)]
+            assert [True for node in f.maker.fgraph.toposort()
+                    if isinstance(node.op, type(self.join_op))]
         assert numpy.allclose(out,
                               numpy.concatenate([T_shared.get_value(),
                                                  T_shared.get_value()]))
@@ -3767,7 +3778,7 @@ class T_Join_and_Split(unittest.TestCase):
         rng = numpy.random.RandomState(seed=utt.fetch_seed())
         v = self.shared(rng.rand(4).astype(self.floatX))
         m = self.shared(rng.rand(4, 4).astype(self.floatX))
-        self.assertRaises(TypeError, self.join_op(), 0, v, m)
+        self.assertRaises(TypeError, self.join_op, 0, v, m)

     def test_split_0elem(self):
         rng = numpy.random.RandomState(seed=utt.fetch_seed())
...