Commit 15cc9076 authored by nouiz

Merge pull request #5 from dwf/nouiz_join_test

pep8 + missing imports
......@@ -653,7 +653,7 @@ def test_gpujoin_assert_cndas():
a = theano.shared(_a)
try:
c = cuda.basic_ops.gpu_join(1,a)
c = cuda.basic_ops.gpu_join(1, a)
# can't "assert False" here, as we want the assertion
# error from gpu_join
except AssertionError:
......@@ -733,7 +733,7 @@ class T_Join_and_Split(theano.tensor.tests.test_basic.T_Join_and_Split):
self.make_vector_op = cuda.GpuJoin
self.floatX = "float32"
# In FAST_COMPILE mode, we force the FAST_RUN mode for optimization.
self.hide_error = not theano.config.mode in ['DebugMode', 'DEBUG_MODE']
self.hide_error = theano.config.mode not in ['DebugMode', 'DEBUG_MODE']
self.shared = cuda.shared_constructor
......
......@@ -13,7 +13,7 @@ from numpy.testing import dec
from numpy.testing.noseclasses import KnownFailureTest
import theano
from theano import compile, config, function, gof, tensor
from theano import compile, config, function, gof, tensor, shared
from theano.compile.mode import get_default_mode
from theano.gof.python25 import any, all, combinations
from theano.tensor import (_shared, wvector, bvector, autocast_float_as,
......@@ -30,7 +30,7 @@ from theano.tensor import (_shared, wvector, bvector, autocast_float_as,
var, value, Join, shape, MaxAndArgmax, lscalar, zvector, exp,
get_constant_value, ivector, reshape, scalar_from_tensor, scal,
iscalars, arange, dscalars, fvector, imatrix, numeric_grad,
opt, ComplexError, TensorDot, lvector, true_div, max, min)
opt, ComplexError, TensorDot, lvector, true_div, max, min, Split)
from theano.tests import unittest_tools as utt
......@@ -56,7 +56,7 @@ def inplace_func(inputs, outputs, mode=None, allow_input_downcast=False):
def eval_outputs(outputs):
    """Compile *outputs* with inplace_func and evaluate them.

    Returns the single result unwrapped when exactly one output was
    requested; otherwise returns the tuple/list of results as-is.
    """
    variables = inplace_func([], outputs)()
    # A one-element result container is unwrapped for caller convenience.
    if isinstance(variables, (tuple, list)) and len(variables) == 1:
        return variables[0]
    return variables
......@@ -2536,12 +2536,16 @@ class T_Join_and_Split(unittest.TestCase):
def setUp(self):
    """Configure the join/split test fixture.

    Seeds the RNG, selects a mode without constant folding (so the
    Join/MakeVector nodes stay visible in the compiled graph), and
    records the ops and dtype the individual tests check against.
    """
    Join.debug = False
    utt.seed_rng()
    # Constant folding would collapse joins of constants before the
    # tests can inspect them in the toposort.
    self.mode = theano.compile.get_default_mode().excluding(
        'constant_folding'
    )
    self.join_op = Join
    self.split_op = Split
    self.make_vector_op = opt.MakeVector
    self.floatX = config.floatX
    # In these modes shape errors surface at graph build/check time,
    # so the runtime ValueError the tests otherwise expect is hidden.
    self.hide_error = theano.config.mode not in ['DebugMode',
                                                 'DEBUG_MODE',
                                                 'FAST_COMPILE']
    self.shared = shared
def eval_outputs_and_check_join(self, outputs):
......@@ -2549,19 +2553,19 @@ class T_Join_and_Split(unittest.TestCase):
topo = f.maker.env.toposort()
assert [True for node in topo if isinstance(node.op, self.join_op)]
variables = f()
if isinstance(variables,(tuple,list)) and len(variables) == 1:
if isinstance(variables, (tuple, list)) and len(variables) == 1:
return variables[0]
return variables
def eval_outputs_and_check_vector(self, outputs,
                                  make_vector_op=None):
    """Compile *outputs*, assert a MakeVector-like op is in the graph,
    and return the evaluated result.

    :param make_vector_op: op class to look for in the compiled graph;
        defaults to ``self.make_vector_op``.
    """
    if make_vector_op is None:
        make_vector_op = self.make_vector_op
    f = theano.function([], outputs, self.mode)
    topo = f.maker.env.toposort()
    # The optimization under test must have produced at least one
    # make_vector_op node; an empty list here means it did not fire.
    assert [True for node in topo if isinstance(node.op, make_vector_op)]
    variables = f()
    # Unwrap a single-output result for caller convenience.
    if isinstance(variables, (tuple, list)) and len(variables) == 1:
        return variables[0]
    return variables
......@@ -2580,8 +2584,6 @@ class T_Join_and_Split(unittest.TestCase):
b = as_tensor_variable(2.0)
c = shared(numpy.asarray(3.0, dtype=self.floatX))
s = stack(a, b, c)
want = numpy.array([1, 2, 3])
out = self.eval_outputs_and_check_vector([s], opt.MakeVector)
self.assertTrue((out == want).all())
......@@ -2597,15 +2599,16 @@ class T_Join_and_Split(unittest.TestCase):
self.assertTrue((out == want).all())
def test_stack_scalar_make_vector(self):
    """Test that calling stack() on scalars instantiates MakeVector,
    not Join. Test that the floatX dtype stay floatX, not downcasted
    to int64."""
    a = tensor.scalar('a', dtype=self.floatX)
    b = tensor.scalar('b', dtype=self.floatX)
    s = stack(a, b, a, b)
    f = function([a, b], s, mode=self.mode)
    val = f(1, 2)
    # NOTE(review): removed a leftover debug ``print val`` here — it was
    # Python-2-only syntax and added nothing to the assertion below.
    self.assertTrue(numpy.all(val == [1, 2, 1, 2]))
    topo = f.maker.env.toposort()
    # stack() on scalars must lower to MakeVector, never to Join.
    assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0
    assert len([n for n in topo if isinstance(n, self.join_op)]) == 0
......@@ -2617,9 +2620,9 @@ class T_Join_and_Split(unittest.TestCase):
a = tensor.iscalar('a')
b = tensor.lscalar('b')
s = stack(a, b, a, b)
f = function([a,b], s, mode=self.mode)
val = f(1,2)
self.assertTrue(numpy.all(val == [1,2,1,2]))
f = function([a, b], s, mode=self.mode)
val = f(1, 2)
self.assertTrue(numpy.all(val == [1, 2, 1, 2]))
topo = f.maker.env.toposort()
assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0
assert len([n for n in topo if isinstance(n, self.join_op)]) == 0
......@@ -2632,12 +2635,12 @@ class T_Join_and_Split(unittest.TestCase):
b = tensor.lscalar('b')
#test when the constant is the first element.
#The first element is used in a special way
s = stack(10,a,b, numpy.int8(3))
f = function([a,b], s, mode=self.mode)
val = f(1,2)
self.assertTrue(numpy.all(val == [10,1,2,3]))
s = stack(10, a, b, numpy.int8(3))
f = function([a, b], s, mode=self.mode)
val = f(1, 2)
self.assertTrue(numpy.all(val == [10, 1, 2, 3]))
topo = f.maker.env.toposort()
assert len([n for n in topo if isinstance(n.op,opt.MakeVector)]) > 0
assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0
assert len([n for n in topo if isinstance(n, self.join_op)]) == 0
assert f.maker.env.outputs[0].dtype == 'int64'
......@@ -2649,8 +2652,8 @@ class T_Join_and_Split(unittest.TestCase):
f = theano.function(inputs=[m], outputs=[c],
mode=self.mode.including('local_join_1'))
topo = f.maker.env.toposort()
assert len(topo)==1
assert isinstance(topo[0].op,theano.compile.DeepCopyOp)
assert len(topo) == 1
assert isinstance(topo[0].op, theano.compile.DeepCopyOp)
def test_join_vector(self):
a = self.shared(numpy.array([1, 2, 3], dtype=self.floatX))
......@@ -2666,7 +2669,7 @@ class T_Join_and_Split(unittest.TestCase):
b = as_tensor_variable(numpy.array([7, 8, 9], dtype=self.floatX))
s = stack(a, b)
want = numpy.array([[1, 2, 3],[ 7, 8, 9]])
want = numpy.array([[1, 2, 3], [7, 8, 9]])
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
......@@ -2676,13 +2679,13 @@ class T_Join_and_Split(unittest.TestCase):
b = as_tensor_variable(numpy.array([[7, 8, 9]], dtype=self.floatX))
s = join(0, a, b)
want = numpy.array([[1, 2, 3],[4,5,6],[7, 8, 9]])
want = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
def test_join_matrix1(self):
av = numpy.array([[1, 2, 3], [4, 5, 6]], dtype='float32')
bv = numpy.array([[7], [8]],dtype='float32')
bv = numpy.array([[7], [8]], dtype='float32')
a = self.shared(av)
b = as_tensor_variable(bv)
s = join(1, a, b)
......@@ -2691,7 +2694,8 @@ class T_Join_and_Split(unittest.TestCase):
self.assertTrue((out == want).all())
# assert tensor.grad(join(1,a,b), a
utt.verify_grad(lambda a, b: join(1,a,b), [av, bv], eps=1.0e-4, rel_tol=1.0e-3)
utt.verify_grad(lambda a, b: join(1, a, b), [av, bv],
eps=1.0e-4, rel_tol=1.0e-3)
def test_join_matrix1_using_vertical_stack(self):
a = self.shared(numpy.array([[1, 2, 3], [4, 5, 6]], dtype=self.floatX))
......@@ -2699,23 +2703,25 @@ class T_Join_and_Split(unittest.TestCase):
c = as_tensor_variable(numpy.array([[9, 8, 7]], dtype=self.floatX))
s = vertical_stack(a, b, c)
want = numpy.array([[1, 2, 3],[4,5,6],[7, 8, 9], [9, 8, 7]])
want = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [9, 8, 7]])
out = self.eval_outputs_and_check_join([s])
self.assertTrue((out == want).all())
def test_join_matrix1_using_horizontal_stack(self):
    """Join three float32 matrices along axis 1 via horizontal_stack
    and verify both the forward value and the gradient of join."""
    av = numpy.array([[1, 2, 3], [4, 5, 6]], dtype='float32')
    bv = numpy.array([[7], [8]], dtype='float32')
    cv = numpy.array([[3, 2, 1], [6, 5, 4]], dtype='float32')
    a = self.shared(av)
    b = as_tensor_variable(bv)
    c = as_tensor_variable(cv)
    s = horizontal_stack(a, b, c)
    want = numpy.array([[1, 2, 3, 7, 3, 2, 1], [4, 5, 6, 8, 6, 5, 4]],
                       dtype='float32')
    out = self.eval_outputs_and_check_join([s])
    self.assertTrue((out == want).all())
    # Loosened tolerances: float32 finite differences are too noisy
    # for the defaults.
    utt.verify_grad(lambda a, b: join(1, a, b), [av, bv],
                    eps=1.0e-4, rel_tol=1.0e-3)
def test_join_matrixV(self):
"""variable join axis"""
......@@ -2729,16 +2735,16 @@ class T_Join_and_Split(unittest.TestCase):
topo = f.maker.env.toposort()
assert [True for node in topo if isinstance(node.op, self.join_op)]
want = numpy.array([[1, 2, 3], [4, 5, 6] ,[1, 2, 3], [4, 5, 6]])
want = numpy.array([[1, 2, 3], [4, 5, 6], [1, 2, 3], [4, 5, 6]])
got = f(0)
self.assertTrue((got == want).all(), (got, want))
want = numpy.array([[ 1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]])
want = numpy.array([[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]])
got = f(1)
self.assertTrue((got == want).all(), (got, want))
utt.verify_grad(lambda a, b: join(0,a,b), [v, 2*v])
utt.verify_grad(lambda a, b: join(1,a,b), [v, 2*v])
utt.verify_grad(lambda a, b: join(0, a, b), [v, 2 * v])
utt.verify_grad(lambda a, b: join(1, a, b), [v, 2 * v])
def test_vector_len(self):
x = lscalar('x')
......@@ -2747,8 +2753,8 @@ class T_Join_and_Split(unittest.TestCase):
triple = as_tensor_variable((x, y, 9.0))
assert 3 == get_vector_length(triple)
a,b,c = triple
f = function([x,y], [b,c,a], mode=self.mode)
a, b, c = triple
f = function([x, y], [b, c, a], mode=self.mode)
topo = f.maker.env.toposort()
assert [True for node in topo if isinstance(node.op, opt.MakeVector)]
......@@ -2776,7 +2782,8 @@ class T_Join_and_Split(unittest.TestCase):
assert not c.type.broadcastable[1]
# In case futur opt insert other useless stuff
c = self.join_op()(theano.tensor.cast(theano.tensor.constant(1), dtype="int32"),
c = self.join_op()(theano.tensor.cast(theano.tensor.constant(1),
dtype="int32"),
a, b)
assert c.type.broadcastable[0] and c.type.broadcastable[2]
assert not c.type.broadcastable[1]
......@@ -2786,7 +2793,7 @@ class T_Join_and_Split(unittest.TestCase):
assert [True for node in topo if isinstance(node.op, self.join_op)]
f()
utt.verify_grad((lambda a,b: join(1,a,b)), [a_val, b_val], rng=rng)
utt.verify_grad((lambda a, b: join(1, a, b)), [a_val, b_val], rng=rng)
# Should raise an error if dimension 0 does not match
a.set_value(rng.rand(2, 4, 1).astype(self.floatX))
......@@ -2812,7 +2819,7 @@ class T_Join_and_Split(unittest.TestCase):
assert [True for node in topo if isinstance(node.op, self.join_op)]
f()
utt.verify_grad((lambda a,b: join(0,a,b)), [a_val, b_val], rng=rng)
utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng)
# Should raise an error if b_val.shape[0] is not 1
# We can't set the value|
self.assertRaises(TypeError, b.set_value,
......@@ -2844,7 +2851,7 @@ class T_Join_and_Split(unittest.TestCase):
assert [True for node in topo if isinstance(node.op, self.join_op)]
f()
utt.verify_grad((lambda a,b: join(0,a,b)), [a_val, b_val], rng=rng)
utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng)
def test_broadcastable_single_input_broadcastable_dimension(self):
"""
......@@ -2865,7 +2872,7 @@ class T_Join_and_Split(unittest.TestCase):
assert not [True for node in topo if isinstance(node.op, self.join_op)]
f()
utt.verify_grad((lambda a: join(0,a)), [a_val], rng=rng)
utt.verify_grad((lambda a: join(0, a)), [a_val], rng=rng)
# Should raise an error if length of dimension 0 is not 1
self.assertRaises(TypeError, a.set_value,
rng.rand(2, 4, 1).astype(self.floatX))
......@@ -2891,7 +2898,7 @@ class T_Join_and_Split(unittest.TestCase):
hb = h.type.broadcastable
assert hb[0] and hb[1] and hb[2] and hb[3] and not hb[4] and hb[5]
f = function([a,b,c,d,e], f, mode=self.mode)
f = function([a, b, c, d, e], f, mode=self.mode)
topo = f.maker.env.toposort()
assert [True for node in topo if isinstance(node.op, self.join_op)]
......@@ -2902,8 +2909,8 @@ class T_Join_and_Split(unittest.TestCase):
d_val = rng.rand(1, 1, 1, 1, 2, 1).astype(self.floatX)
e_val = rng.rand(1, 1, 1, 1, 2, 1).astype(self.floatX)
f(a_val, b_val, c_val, d_val, e_val)
utt.verify_grad((lambda a,b,c,d,e: join(0,a,b,c,d,e)),
[a_val, b_val, c_val, d_val, e_val], rng=rng)
utt.verify_grad((lambda a, b, c, d, e: join(0, a, b, c, d, e)),
[a_val, b_val, c_val, d_val, e_val], rng=rng)
# Should raise an error if length of dimension 0 is not 1
bad_val = rng.rand(2, 1, 1, 1, 2, 1).astype(self.floatX)
self.assertRaises(TypeError, g, bad_val, b_val, c_val, d_val, e_val)
......@@ -2928,29 +2935,29 @@ class T_Join_and_Split(unittest.TestCase):
x2 = matrix()
x3 = matrix()
def get_mat(s1,s2):
return numpy.asarray( numpy.random.uniform(size=(s1,s2)),
dtype= self.floatX)
def get_mat(s1, s2):
return numpy.asarray(numpy.random.uniform(size=(s1, s2)),
dtype=self.floatX)
# Test dim 0
z = join(0,x1,x2,x3)
f = theano.function([x1,x2,x3], z.shape, mode=self.mode)
z = join(0, x1, x2, x3)
f = theano.function([x1, x2, x3], z.shape, mode=self.mode)
topo = f.maker.env.toposort()
out = f( get_mat(3,4), get_mat(2,4), get_mat(1,4))
assert (out == [6,4]).all()
out = f(get_mat(3, 4), get_mat(2, 4), get_mat(1, 4))
assert (out == [6, 4]).all()
if theano.config.mode != 'FAST_COMPILE':
for node in f.maker.env.toposort():
assert not isinstance(node.op, tensor.Join)
# Test dim 1
z = join(1,x1,x2,x3)
f = theano.function([x1,x2,x3], z.shape, mode=self.mode)
z = join(1, x1, x2, x3)
f = theano.function([x1, x2, x3], z.shape, mode=self.mode)
topo = f.maker.env.toposort()
out = f( get_mat(3,4), get_mat(3,4), get_mat(3,5))
assert (out == [3,13]).all()
out = f( get_mat(3, 4), get_mat(3, 4), get_mat(3, 5))
assert (out == [3, 13]).all()
if theano.config.mode != 'FAST_COMPILE':
for node in f.maker.env.toposort():
......@@ -2958,10 +2965,10 @@ class T_Join_and_Split(unittest.TestCase):
# Test hide error
if not self.hide_error:
self.assertRaises(ValueError, f, get_mat(3,4), get_mat(3,4), get_mat(2,5))
self.assertRaises(ValueError, f, get_mat(3, 4), get_mat(3, 4),
get_mat(2, 5))
else:
f(get_mat(3,4), get_mat(3,4), get_mat(2,5))
f(get_mat(3, 4), get_mat(3, 4), get_mat(2, 5))
class test_comparison(unittest.TestCase):
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment