提交 42750428 authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #5245 from lamblin/fix_subtensor_setsubtensor

Add tests for local_subtensor_inc_subtensor
......@@ -1882,15 +1882,19 @@ def test_local_subtensor_remove_broadcastable_index():
f2(xn)
def test_subtensor_inc_subtensor():
class Test_subtensor_inc_subtensor(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.mode = theano.compile.mode.get_default_mode().including('local_subtensor_inc_subtensor')
def test_basic(self):
# basic test
x = tensor.matrix('x')
i = tensor.iscalar('i')
v = tensor.vector('v')
y = tensor.set_subtensor(x[i], v)
z = y[i]
mode = theano.compile.mode.get_default_mode().including('local_subtensor_inc_subtensor')
f = theano.function([x, i, v], z, mode=mode)
f = theano.function([x, i, v], z, mode=self.mode)
prog = f.maker.fgraph.toposort()
assert len(prog) == 1
assert isinstance(prog[0].op, DeepCopyOp)
......@@ -1900,6 +1904,7 @@ def test_subtensor_inc_subtensor():
i_ = 1
assert numpy.array_equal(f(x_, i_, v_), v_)
def test_multiple_idx(self):
# complicated test
x = tensor.tensor4('x')
i1 = tensor.iscalar('i1')
......@@ -1909,8 +1914,7 @@ def test_subtensor_inc_subtensor():
v = tensor.tensor3('v')
y = tensor.set_subtensor(x[i1, :i2, i3:, ::i4], v)
z = y[i1, :i2, i3:, ::i4]
mode = theano.compile.mode.get_default_mode().including('local_subtensor_inc_subtensor')
f = theano.function([x, i1, i2, i3, i4, v], z, mode=mode)
f = theano.function([x, i1, i2, i3, i4, v], z, mode=self.mode)
prog = f.maker.fgraph.toposort()
assert len(prog) == 1
assert isinstance(prog[0].op, DeepCopyOp)
......@@ -1920,10 +1924,17 @@ def test_subtensor_inc_subtensor():
i1_, i2_, i3_, i4_ = 1, 2, 3, 4
assert numpy.array_equal(f(x_, i1_, i2_, i3_, i4_, v_), v_)
def test_not_applied(self):
# case not use this optimization
x = tensor.tensor4('x')
i1 = tensor.iscalar('i1')
i2 = tensor.iscalar('i2')
i3 = tensor.iscalar('i3')
i4 = tensor.iscalar('i4')
v = tensor.tensor3('v')
y = tensor.set_subtensor(x[i1, :i2, i3:, ::i4], v)
z = y[i1, :i3, i2:, ::i4]
mode = theano.compile.mode.get_default_mode().including('local_subtensor_inc_subtensor')
f = theano.function([x, i1, i2, i3, i4, v], z, mode=mode)
f = theano.function([x, i1, i2, i3, i4, v], z, mode=self.mode)
prog = f.maker.fgraph.toposort()
assert len(prog) != 1
assert any(isinstance(x.op, tensor.IncSubtensor) for x in prog)
......@@ -1935,15 +1946,15 @@ def test_subtensor_inc_subtensor():
x_[i1_, :i2_, i3_:, ::i4_] = v_
assert numpy.array_equal(f(x_, i1_, i2_, i3_, i4_, v_), x_[i1_, :i3_, i2_:, ::i4_])
# case when v is broadcastable
def test_fewer_dims(self):
# case when v has fewer dimensions
x = tensor.matrix('x')
i1 = tensor.iscalar('i')
i2 = tensor.iscalar('i')
v = tensor.vector('v')
y = tensor.set_subtensor(x[:i1, :i2], v)
z = y[:i1, :i2]
mode = theano.compile.mode.get_default_mode().including('local_subtensor_inc_subtensor')
f = theano.function([x, i1, i2, v], z, mode=mode)
f = theano.function([x, i1, i2, v], z, mode=self.mode)
prog = f.maker.fgraph.toposort()
assert any(isinstance(x.op, tensor.Alloc) for x in prog)
# case when v is broadcastable, numerical check
......@@ -1953,6 +1964,41 @@ def test_subtensor_inc_subtensor():
x_[:i1_, :i2_] = v_
assert numpy.array_equal(f(x_, i1_, i2_, v_), x_[:i1_, :i2_])
def test_broadcasted(self):
    """set_subtensor/subtensor pair where v is a column (one broadcastable dim).

    The optimization cannot simply return v here, so the compiled graph
    must still contain an Alloc node that broadcasts v to the written
    region's shape.
    """
    mat = tensor.matrix('x')
    n_rows = tensor.iscalar('i')
    n_cols = tensor.iscalar('i')
    col = tensor.col('v')
    written = tensor.set_subtensor(mat[:n_rows, :n_cols], col)
    read_back = written[:n_rows, :n_cols]
    fn = theano.function([mat, n_rows, n_cols, col], read_back,
                         mode=self.mode)
    nodes = fn.maker.fgraph.toposort()
    # an Alloc must remain to broadcast the column to the target shape
    assert any(isinstance(node.op, tensor.Alloc) for node in nodes)
    # numerical check: the function returns the region after assignment
    mat_val = numpy.random.uniform(size=[3, 4]).astype(config.floatX)
    col_val = numpy.random.uniform(size=[2, 1]).astype(config.floatX)
    rows_, cols_ = 2, 2
    mat_val[:rows_, :cols_] = col_val
    assert numpy.array_equal(fn(mat_val, rows_, cols_, col_val),
                             mat_val[:rows_, :cols_])
def test_different_dtypes(self):
    """Case where the dtype of v (floatX) differs from the dtype of x (int8).

    The optimization still fires, but the whole set_subtensor/subtensor
    pair must collapse to a single cast of v to x's dtype.
    """
    x = tensor.bmatrix('x')
    i = tensor.iscalar('i')
    v = tensor.vector('v')
    y = tensor.set_subtensor(x[i], v)
    z = y[i]
    f = theano.function([x, i, v], z, mode=self.mode)
    prog = f.maker.fgraph.toposort()
    # a single node: the cast of v down to int8
    assert len(prog) == 1
    assert prog[0].op == tensor.basic._convert_to_int8
    # basic test, numerical check
    x_ = numpy.random.randint(12, size=[3, 4]).astype('int8')
    # Bug fix: the original called uniform(12, size=...), which passed 12
    # as `low` while `high` kept its default of 1.0 (a reversed interval).
    # Draw from [0, 12) as intended, matching the randint(12, ...) above.
    v_ = numpy.random.uniform(0, 12, size=[4, ]).astype(config.floatX)
    i_ = 1
    assert numpy.array_equal(f(x_, i_, v_), v_.astype('int8'))
class test_local_subtensor_make_vector(unittest.TestCase):
def test_scalar_idx(self):
......@@ -6763,15 +6809,3 @@ def test_local_log_sum_exp3():
optimised_ret = f(x_val)
assert numpy.allclose(optimised_ret, 100.)
if __name__ == '__main__':
t = TestMakeVector('setUp')
t.setUp()
# t.test_perform()
t.test_infer_shape()
test_subtensor_inc_subtensor()
"""
# unittest.main()
test_fusion().tes_memory_leak()
"""
......@@ -15,22 +15,20 @@ import theano
import theano.scalar as scal
import theano.tensor as tensor
from theano import config, gof
from theano.compat import PY3, exc_message, izip
from theano.compat import PY3, izip
from theano.compile import DeepCopyOp
from theano.tensor import (MakeSlice, NotScalarConstantError, _shared,
as_tensor_variable, cscalar, ctensor3, dmatrix,
from theano.tensor import (_shared, cscalar, ctensor3, dmatrix,
dscalar, dtensor4, dvector, fmatrix, fscalar,
fvector, ftensor4, iscalar, lmatrix, lrow, lvector,
matrix, vector)
from theano.tensor.basic import DimShuffle
from theano.tensor.subtensor import (AdvancedIncSubtensor,
AdvancedIncSubtensor1, AdvancedSubtensor,
AdvancedSubtensor1, IncSubtensor,
IncSubtensor,
Subtensor, advanced_inc_subtensor,
advanced_inc_subtensor1,
advanced_set_subtensor,
advanced_set_subtensor1,
advanced_subtensor1,
get_canonical_form_slice, inc_subtensor,
inplace_increment, set_subtensor)
from theano.tensor.tests.test_basic import inplace_func, rand, randint_ranged
......@@ -42,7 +40,7 @@ if PY3:
return i
else:
def L(i):
return long(i)
return long(i) # noqa for Python 3
class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
......@@ -122,7 +120,7 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
# it is impossible to retrieve a view of a 0-d tensor
n = self.shared(numpy.ones((), dtype=self.dtype))
try:
t = n[0]
n[0]
except ValueError as e:
self.assertTrue(hasattr(e, 'subtensor_invalid'))
return
......@@ -144,7 +142,7 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
try:
try:
self.eval_output_and_check(t)
except IndexError as e:
except IndexError:
return
self.fail()
finally:
......@@ -153,8 +151,8 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
def test1_err_subslice(self):
n = self.shared(numpy.ones(3, dtype=self.dtype))
try:
t = n[slice(0, slice(1, 2, None), None)]
except Exception as e:
n[slice(0, slice(1, 2, None), None)]
except Exception:
# Relax constraint on the type of Exception,
# since this might be handled by AdvancedSubtensor
# if e[0] != Subtensor.e_indextype:
......@@ -190,7 +188,7 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
def test1_err_invalid(self):
n = self.shared(numpy.ones(1, dtype=self.dtype))
try:
t = n[0, 0]
n[0, 0]
except ValueError as e:
self.assertTrue(hasattr(e, 'subtensor_invalid'))
return
......@@ -407,10 +405,10 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
vs1, vn3, vn4 = theano.function([s], [s1, n3, n4])(-2.0)
assert numpy.all(vs1 == [-2.0])
assert numpy.all(vn3
== numpy.arange(24)[newaxis, :, newaxis])
assert numpy.all(vn4
== numpy.arange(24).reshape((2, 3, 4))[:, :, :, newaxis])
assert numpy.all(vn3 ==
numpy.arange(24)[newaxis, :, newaxis])
assert numpy.all(vn4 ==
numpy.arange(24).reshape((2, 3, 4))[:, :, :, newaxis])
def test_grad_1d(self):
subi = 0
......@@ -463,8 +461,8 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
gn = theano.tensor.grad(theano.tensor.sum(theano.tensor.exp(t)), n)
f = self.function([], gn)
topo = f.maker.fgraph.toposort()
topo_ = [node for node in topo if not isinstance(node.op,
self.ignore_topo)]
topo_ = [node for node in topo
if not isinstance(node.op, self.ignore_topo)]
if not self.fast_compile:
assert_equal(len(topo_), 6)
assert numpy.sum([isinstance(node.op, self.inc_sub)
......@@ -487,8 +485,8 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
# Test 4 dims, as the GPU code uses another algorithm
# in that case. This new algorithm is not as
# optimized for that case.
(rand(4, 4, 2, 3), [3,
3, 1, 1, 2, 2, 0, 0, -1, -2, -3, -4]),
(rand(4, 4, 2, 3),
[3, 3, 1, 1, 2, 2, 0, 0, -1, -2, -3, -4]),
# Test with TensorConstant index.
(rand(4, 2, 3),
theano.tensor.constant([3, 3, 1, 1, 2, 2, 0, 0])),
......@@ -526,7 +524,7 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
g = self.function([], gn, op=self.adv_incsub1)
utt.verify_grad(lambda m: m[[1, 3]],
[numpy.random.rand(5, 5).astype(self.dtype)])
g_0 = g()
g()
utt.verify_grad(lambda m: m[idx],
[data])
......@@ -558,7 +556,7 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
def test_adv_sub1_broadcast(self):
v = numpy.arange(3, dtype=self.dtype).reshape((1, 3))
n = self.shared(v*5, broadcastable=(True, False))
n = self.shared(v * 5, broadcastable=(True, False))
idx = tensor.lvector()
t = n[idx]
self.assertTrue(isinstance(t.owner.op, tensor.AdvancedSubtensor1))
......@@ -571,10 +569,10 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
self.assertTrue(isinstance(topo_[0].op, self.adv_sub1))
f_0 = f([0])
self.assertTrue(f_0.shape == (1, 3))
self.assertTrue(numpy.allclose(f_0, v*5))
self.assertTrue(numpy.allclose(f_0, v * 5))
f_00 = f([0, 0])
self.assertTrue(f_00.shape == (2, 3))
self.assertTrue(numpy.allclose(f_00, v*5))
self.assertTrue(numpy.allclose(f_00, v * 5))
self.assertRaises(IndexError, f, [0, 1])
# Test the gradient
......@@ -603,7 +601,6 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
# test set_subtensor broadcast
self.dtype = 'float32'
from theano.sandbox.cuda.dnn import dnn_conv
x = tensor.tensor4('x', dtype=self.dtype)
indexes = theano.shared(numpy.int32([1, 2, 3, 4]))
......@@ -667,8 +664,8 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
t_shapes = f()
for t_shape, shape in zip(t_shapes, shapes):
assert numpy.all(t_shape == shape)
assert tensor.Subtensor not in [x.op for x in
f.maker.fgraph.toposort()]
assert tensor.Subtensor not in [x.op
for x in f.maker.fgraph.toposort()]
def test_shape_i_scalar(self):
# Each axis is treated independently by shape_i/shape operators
......@@ -685,8 +682,8 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
mode=mode_opt,
op=self.ops,
N=0)
assert tensor.Subtensor not in [x.op for x in f.maker.
fgraph.toposort()]
assert tensor.Subtensor not in [x.op
for x in f.maker.fgraph.toposort()]
for start in [-8, -5, -4, -1, 0, 1, 4, 5, 8]:
for stop in [-8, -5, -4, -1, 0, 1, 4, 5, 8]:
for step in [-3, -1, 2, 5]:
......@@ -847,11 +844,11 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
f = self.function([], [gn, gn.shape], op=self.adv_incsub1)
topo = f.maker.fgraph.toposort()
if not self.fast_compile:
assert any([isinstance(node.op, self.
adv_incsub1) and node.op.inplace for node in topo])
assert any([isinstance(node.op, self.adv_incsub1) and
node.op.inplace for node in topo])
else:
assert any([isinstance(node.op, self.
adv_incsub1) for node in topo])
assert any([isinstance(node.op, self.adv_incsub1)
for node in topo])
assert any([isinstance(node.op, self.adv_sub1) for node in topo])
gval, gshape = f()
good = numpy.zeros_like(data)
......@@ -1257,7 +1254,7 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
def test_take(self):
a = tensor.matrix()
f = theano.function([a], a.take(0, axis=-1), allow_input_downcast=True)
x = f(numpy.random.normal(0, 1, (30, 4)))
f(numpy.random.normal(0, 1, (30, 4)))
class TestIncSubtensor1(unittest.TestCase):
......@@ -1370,8 +1367,8 @@ class TestAdvancedSubtensor(unittest.TestCase):
def eval_output_and_check(self, t):
f = inplace_func([], t, mode=self.mode)
topo = f.maker.fgraph.toposort()
topo_ = [node for node in topo if not isinstance(node.op,
self.ignore_topo)]
topo_ = [node for node in topo
if not isinstance(node.op, self.ignore_topo)]
assert len(topo_) == 1
assert isinstance(topo_[0].op, self.sub)
tval = f()
......@@ -1406,8 +1403,8 @@ class TestAdvancedSubtensor(unittest.TestCase):
# Test 4 dims, as the GPU code uses another algorithm
# in that case. This new algorithm is not as
# optimized for that case.
(rand(4, 4, 2, 3), [3,
3, 1, 1, 2, 2, 0, 0, -1, -2, -3, -4]),
(rand(4, 4, 2, 3),
[3, 3, 1, 1, 2, 2, 0, 0, -1, -2, -3, -4]),
# Test with TensorConstant index.
(rand(2, 4, 3),
theano.tensor.constant([3, 3, 1, 1, 2, 2, 0, 0])),
......
......@@ -46,7 +46,6 @@ whitelist_flake8 = [
"typed_list/tests/__init__.py",
"tensor/__init__.py",
"tensor/tests/__init__.py",
"tensor/tests/test_subtensor.py",
"tensor/tests/test_utils.py",
"tensor/tests/test_nlinalg.py",
"tensor/tests/test_shared_randomstreams.py",
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论