Commit fcb37509 authored by Frederic

Refactor IfElse tests to reuse them on the gpu and extend them.

I removed the test_lazy_if_inplace test because we now check the exact op in each test, and those ops are always set as inplace. I also added tests for the case where there are multiple outputs.
Parent 3a85a032
......@@ -13,48 +13,39 @@ from nose.plugins.skip import SkipTest
import theano
from theano import tensor
import theano.ifelse
from theano.ifelse import IfElse, ifelse
from theano.tests import unittest_tools as utt
class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
    """Tests for the lazy IfElse op.

    The class attributes below are overridden by GPU subclasses so the
    same tests can be reused on another backend/dtype (see commit
    message); the diff view had duplicated the old and new class
    headers, which is fixed here.
    """

    # Compilation mode to use; None means Theano's default mode.
    mode = None
    # dtype of the test vectors; GPU subclasses override this.
    dtype = theano.config.floatX
    # Hook letting subclasses wrap outputs (e.g. cast to a GPU type).
    cast_output = staticmethod(tensor.as_tensor_variable)

    def get_ifelse(self, n):
        """Return the IfElse op (with `n` outputs) expected in compiled graphs.

        In FAST_COMPILE mode the inplace/as_view optimization is not
        applied, so the expected op differs from the optimized one.
        """
        if theano.config.mode == "FAST_COMPILE":
            return IfElse(n)
        else:
            return IfElse(n, as_view=True)
def test_lazy_if(self):
    """Lazy if works even when the two branches have different shapes.

    The branches only need the same type (both vectors, matrices, ...
    of the same dtype).  The diff view had interleaved the old code,
    the new code, and the removed ``test_lazy_if_inplace`` test (which
    also used ``xlen``/``ylen`` before defining them); this is the
    reconstructed post-commit test.
    """
    x = tensor.vector('x', dtype=self.dtype)
    y = tensor.vector('y', dtype=self.dtype)
    c = tensor.iscalar('c')
    f = theano.function([c, x, y], ifelse(c, x, y), mode=self.mode)
    # The op check subsumes the old inplace test: get_ifelse() returns
    # the as_view variant whenever the optimizations run.
    self.assertFunctionContains1(f, self.get_ifelse(1))
    rng = numpy.random.RandomState(utt.fetch_seed())
    # Lengths must be drawn before the vectors are generated.
    xlen = rng.randint(200)
    ylen = rng.randint(200)
    vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)
    assert numpy.allclose(vx, f(1, vx, vy))
    assert numpy.allclose(vy, f(0, vx, vy))
......@@ -71,31 +62,100 @@ class test_ifelse(unittest.TestCase):
def test_grad_lazy_if(self):
    """Gradients can be computed through a lazy if.

    The diff view duplicated the old and new variants of several lines
    (variable creation, theano.function call, gradient asserts); this
    is the reconstructed post-commit test.
    """
    x = tensor.vector('x', dtype=self.dtype)
    y = tensor.vector('y', dtype=self.dtype)
    c = tensor.iscalar('c')
    z = ifelse(c, x, y)
    gx, gy = tensor.grad(z.sum(), [x, y])

    f = theano.function([c, x, y], [self.cast_output(gx),
                                    self.cast_output(gy)],
                        mode=self.mode)
    # Only 2 of the 3 ifelse are moved to the GPU by the subclasses.
    # The one that stays on the CPU is for the shape.
    self.assertFunctionContains(f, self.get_ifelse(1), min=2, max=3)

    rng = numpy.random.RandomState(utt.fetch_seed())
    xlen = rng.randint(200)
    ylen = rng.randint(200)
    vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)

    # When the condition is true, d/dx = 1 and d/dy = 0...
    gx0, gy0 = f(1, vx, vy)
    assert numpy.allclose(gx0.shape, vx.shape)
    assert numpy.allclose(gy0.shape, vy.shape)
    # numpy.asarray so the checks also work on GPU array types.
    assert numpy.all(numpy.asarray(gx0) == 1.)
    assert numpy.all(numpy.asarray(gy0) == 0.)

    # ...and the other way around when it is false.
    gx0, gy0 = f(0, vx, vy)
    assert numpy.allclose(gx0.shape, vx.shape)
    assert numpy.allclose(gy0.shape, vy.shape)
    assert numpy.all(numpy.asarray(gx0) == 0.)
    assert numpy.all(numpy.asarray(gy0) == 1.)
def test_multiple_out(self):
    """A lazy if with two outputs per branch forwards both of them."""
    c = tensor.iscalar('c')
    x1 = tensor.vector('x1', dtype=self.dtype)
    x2 = tensor.vector('x2', dtype=self.dtype)
    y1 = tensor.vector('y1', dtype=self.dtype)
    y2 = tensor.vector('y2', dtype=self.dtype)
    z = ifelse(c, (x1, x2), (y1, y2))
    f = theano.function([c, x1, x2, y1, y2], z, mode=self.mode)

    # The compiled graph must contain exactly one IfElse, and that
    # single node must carry both outputs.
    self.assertFunctionContains1(f, self.get_ifelse(2))
    ifelse_nodes = [node for node in f.maker.env.toposort()
                    if isinstance(node.op, IfElse)]
    assert len(ifelse_nodes[0].outputs) == 2

    rng = numpy.random.RandomState(utt.fetch_seed())
    # Draw the four lengths first and the four vectors afterwards so
    # the random stream matches the original formulation of this test.
    lengths = [rng.randint(200) for _ in range(4)]
    vx1, vx2, vy1, vy2 = [numpy.asarray(rng.uniform(size=(n,)), self.dtype)
                          for n in lengths]

    true_branch = f(1, vx1, vx2, vy1, vy2)
    false_branch = f(0, vx1, vx2, vy1, vy2)
    for expected, got in zip((vx1, vx2), true_branch):
        assert numpy.allclose(expected, got)
    for expected, got in zip((vy1, vy2), false_branch):
        assert numpy.allclose(expected, got)
def test_multiple_out_grad(self):
    """Gradients can be computed through a lazy if with multiple outputs.

    d/dx of ``z[0].sum() + z[1].sum()`` is 1 for the branch that was
    taken and 0 for the other one.
    """
    # NOTE(review): unlike the other tests this one still uses floatX
    # and the default mode rather than self.dtype/self.mode — confirm
    # whether it should be parameterized for the GPU subclasses too.
    x1 = tensor.vector('x1')
    x2 = tensor.vector('x2')
    y1 = tensor.vector('y1')
    y2 = tensor.vector('y2')
    c = tensor.iscalar('c')
    z = ifelse(c, (x1, x2), (y1, y2))
    grads = tensor.grad(z[0].sum() + z[1].sum(),
                        [x1, x2, y1, y2])

    f = theano.function([c, x1, x2, y1, y2], grads)
    rng = numpy.random.RandomState(utt.fetch_seed())

    lens = [rng.randint(200) for i in range(4)]
    values = [numpy.asarray(rng.uniform(size=(l,)), theano.config.floatX)
              for l in lens]

    outs_1 = f(1, *values)
    assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
    assert numpy.all(outs_1[0] == 1.)
    assert numpy.all(outs_1[1] == 1.)
    assert numpy.all(outs_1[2] == 0.)
    assert numpy.all(outs_1[3] == 0.)

    outs_0 = f(0, *values)
    # Bug fix: this shape check previously re-checked outs_1 instead of
    # the freshly computed outs_0.
    assert all([x.shape[0] == y for x, y in zip(outs_0, lens)])
    assert numpy.all(outs_0[0] == 0.)
    assert numpy.all(outs_0[1] == 0.)
    assert numpy.all(outs_0[2] == 1.)
    assert numpy.all(outs_0[3] == 1.)
def test_merge(self):
    # Skipped unconditionally: the IfElse merge optimization this test
    # exercises is temporarily disabled, per the SkipTest message.
    raise SkipTest("Optimization temporarily disabled")
......
Markdown is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Finish editing this comment first!
Register or sign in to comment