Commit a6c2d45f authored by Frédéric Bastien

Merge pull request #2324 from nouiz/tests

Tests
......@@ -203,9 +203,9 @@ if __name__ == "__main__":
cuda version 6.5 6.0 5.5 5.0 4.2 4.1 4.0 3.2 3.0 # note
gpu
K6000/NOECC 0.06s
K6000/NOECC 0.06s 0.06s
K40 0.07s
K20m/ECC 0.07s
K20m/ECC 0.08s 0.07s
K20/NOECC 0.07s
M2090 0.19s
C2075 0.25s
......
......@@ -174,8 +174,8 @@ class TestPushOutScanOutputDot(object):
# Ensure that the function compiled with the optimization produces
# the same results as the function compiled without
v_value = numpy.random.random((4))
m_value = numpy.random.random((4, 5))
v_value = numpy.random.random((4)).astype(config.floatX)
m_value = numpy.random.random((4, 5)).astype(config.floatX)
output_opt = f_opt(v_value, m_value)
output_no_opt = f_no_opt(v_value, m_value)
......@@ -218,8 +218,8 @@ class TestPushOutScanOutputDot(object):
# Ensure that the function compiled with the optimization produces
# the same results as the function compiled without
a_value = numpy.random.random((3, 4))
b_value = numpy.random.random((4, 5))
a_value = numpy.random.random((3, 4)).astype(config.floatX)
b_value = numpy.random.random((4, 5)).astype(config.floatX)
output_opt = f_opt(a_value, b_value)
output_no_opt = f_no_opt(a_value, b_value)
......@@ -264,8 +264,8 @@ class TestPushOutScanOutputDot(object):
# Ensure that the function compiled with the optimization produces
# the same results as the function compiled without
a_value = numpy.random.random((3, 4))
b_value = numpy.random.random((4, 5))
a_value = numpy.random.random((3, 4)).astype(config.floatX)
b_value = numpy.random.random((4, 5)).astype(config.floatX)
output_opt = f_opt(a_value, b_value)
output_no_opt = f_no_opt(a_value, b_value)
......
......@@ -2490,9 +2490,11 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
f = theano.function([x, y, idx], o, self.mode)
# test wrong index
for i in [dx.shape[0], -dx.shape[0] - 1]:
self.assertRaises(AssertionError, f, dx, dy, [i, i])
self.assertRaises((AssertionError, IndexError),
f, dx, dy, [i, i])
# test wrong shape
self.assertRaises(AssertionError, f, dx, dy, [1])
self.assertRaises((AssertionError, ValueError),
f, dx, dy, [1])
class Test_alloc_zero(unittest.TestCase):
......@@ -2509,7 +2511,7 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.set_subtensor(x0[:4], y0)
f = theano.function([x, y], z, mode=self.mode)
assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
f.maker.fgraph.toposort()])
def test_setsubtensor_allocs1(self):
......@@ -2519,7 +2521,7 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.set_subtensor(x0[:4], y0)
f = theano.function([y], z, mode=self.mode)
assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
f.maker.fgraph.toposort()])
def test_setsubtensor_allocs1t(self):
......@@ -2529,7 +2531,7 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.set_subtensor(x0[:4], y0.T)
f = theano.function([y], z, mode=mode_opt)
assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
f.maker.fgraph.toposort()])
def test_setsubtensor_allocs2(self):
......@@ -2548,7 +2550,7 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.inc_subtensor(x[:4], y0)
f = theano.function([x, y], z, mode=self.mode)
assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
f.maker.fgraph.toposort()])
def test_incsubtensor_allocs0t(self):
......@@ -2557,7 +2559,7 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.inc_subtensor(x[:4], y0.T)
f = theano.function([x, y], z, mode=mode_opt)
assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
f.maker.fgraph.toposort()])
def test_incsubtensor_allocs1(self):
......@@ -2575,8 +2577,8 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.inc_subtensor(x[[0, 1, 2, 3]], y0)
f = theano.function([x, y], z, mode=self.mode)
assert numpy.all([not isinstance(x.op, tensor.AdvancedIncSubtensor1)
for x in f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor1)
for n in f.maker.fgraph.toposort()])
def test_advancedincsubtensor1_allocs0t(self):
x = tensor.matrix()
......@@ -2584,8 +2586,8 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.inc_subtensor(x[[0, 1, 2, 3]], y0.T)
f = theano.function([x, y], z, mode=mode_opt)
assert numpy.all([not isinstance(x.op, tensor.AdvancedIncSubtensor1)
for x in f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor1)
for n in f.maker.fgraph.toposort()])
def test_advancedincsubtensor1_allocs1(self):
x = tensor.matrix()
......@@ -2593,44 +2595,44 @@ class Test_alloc_zero(unittest.TestCase):
dtype=config.floatX))
z = tensor.inc_subtensor(x[[0, 1, 2, 3]], y0)
f = theano.function([x], z, mode=self.mode)
assert numpy.all([not isinstance(x.op, tensor.AdvancedIncSubtensor1)
for x in f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor1)
for n in f.maker.fgraph.toposort()])
def test_advancedincsubtensor_allocs0(self):
if tensor.inplace_increment is None:
raise SkipTest('NumPy version >= 1.8 not available')
x = tensor.matrix()
y = tensor.matrix()
y0 = tensor.zeros_like(y)
z = tensor.inc_subtensor(x[[[0, 0], [1, 1]], [[0, 1], [0, 1]]], y0)
f = theano.function([x, y], z, mode=self.mode)
assert numpy.all([not isinstance(x.op, tensor.AdvancedIncSubtensor)
for x in f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor)
for n in f.maker.fgraph.toposort()])
def test_advancedincsubtensor_allocs0t(self):
if tensor.inplace_increment is None:
raise SkipTest('NumPy version >= 1.8 not available')
x = tensor.matrix()
y = tensor.matrix()
y0 = tensor.zeros_like(y)
z = tensor.inc_subtensor(x[[[0, 0], [1, 1]], [[0, 1], [0, 1]]], y0.T)
f = theano.function([x, y], z, mode=mode_opt)
assert numpy.all([not isinstance(x.op, tensor.AdvancedIncSubtensor)
for x in f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor)
for n in f.maker.fgraph.toposort()])
def test_advancedincsubtensor_allocs1(self):
if tensor.inplace_increment is None:
raise SkipTest('NumPy version >= 1.8 not available')
x = tensor.matrix()
y0 = tensor.constant(numpy.asarray(numpy.zeros_like((2, 2)),
dtype=config.floatX))
z = tensor.inc_subtensor(x[[[0, 0], [1, 1]], [[0, 1], [0, 1]]], y0)
f = theano.function([x], z, mode=self.mode)
assert numpy.all([not isinstance(x.op, tensor.AdvancedIncSubtensor)
for x in f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor)
for n in f.maker.fgraph.toposort()])
def test_dot_allocs_0(self):
v1 = tensor.vector('v1')
......@@ -2656,7 +2658,7 @@ class Test_alloc_zero(unittest.TestCase):
f = theano.function([_e1[0], _e2[0]], o, mode=self.mode)
f(_e1[1], _e2[1])
f(_e1[2], _e2[2])
assert numpy.all([not isinstance(x.op, tensor.Dot) for x in
assert numpy.all([not isinstance(n.op, tensor.Dot) for n in
f.maker.fgraph.toposort()])
#test that we don't remove shape errors
......@@ -3305,7 +3307,11 @@ class test_assert(utt.InferShapeTester):
y = T.scalar()
f = theano.function([x, y], theano.tensor.opt.assert_op(x, y),
mode=mode)
f(1, 0) # Without opt, it should fail.
if isinstance(mode, theano.compile.debugmode.DebugMode):
# DebugMode will run the original version with the Assert
self.assertRaises(AssertionError, f, 1, 0)
else:
f(1, 0) # Without opt, it should fail.
topo = f.maker.fgraph.toposort()
assert len(topo) == 1, topo
assert topo[0].op == deep_copy_op, topo
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment