提交 553c304b authored 作者: Razvan Pascanu's avatar Razvan Pascanu

Removed a few commented lines and ``dont_optimize`` tests since those cases are

optimized right now, and so they make no sense. The code asks now for more tests though.
上级 50d34560
...@@ -1283,11 +1283,8 @@ class test_local_subtensor_merge(unittest.TestCase): ...@@ -1283,11 +1283,8 @@ class test_local_subtensor_merge(unittest.TestCase):
f = function([x], x[idx::][-1], mode=mode_opt) f = function([x], x[idx::][-1], mode=mode_opt)
g = function([x], x[idx::][-1], mode=mode_opt.excluding('local_subtensor_merge')) g = function([x], x[idx::][-1], mode=mode_opt.excluding('local_subtensor_merge'))
#theano.printing.debugprint(f, print_type=True)
topo=f.maker.env.toposort() topo=f.maker.env.toposort()
#print [t for t in topo if isinstance(t.op, TT.Subtensor)]
assert len([t for t in topo if isinstance(t.op, TT.Subtensor)]) == 1 assert len([t for t in topo if isinstance(t.op, TT.Subtensor)]) == 1
#print topo[-1].op
assert isinstance(topo[-1].op, theano.compile.function_module.DeepCopyOp) assert isinstance(topo[-1].op, theano.compile.function_module.DeepCopyOp)
for x_s in self.x_shapes: for x_s in self.x_shapes:
...@@ -1326,25 +1323,6 @@ class test_local_subtensor_merge(unittest.TestCase): ...@@ -1326,25 +1323,6 @@ class test_local_subtensor_merge(unittest.TestCase):
self.assertRaises(IndexError, f, x_val, idx) self.assertRaises(IndexError, f, x_val, idx)
self.assertRaises(IndexError, g, x_val, idx) self.assertRaises(IndexError, g, x_val, idx)
def test_dont_opt(self):
    """Check that ``var[int::][other int]`` is NOT merged into one Subtensor.

    Only ``var[int::][-1]`` should be optimized by
    ``local_subtensor_merge``; ``x[1::][0]`` must keep its two separate
    Subtensor ops in the compiled graph.
    """
    x = TT.matrix('x')
    f = function([x], x[1::][0], mode=mode_opt)
    topo = f.maker.env.toposort()
    # Expect exactly: two un-merged Subtensor ops plus the trailing DeepCopyOp.
    assert len(topo) == 3
    assert isinstance(topo[0].op, TT.Subtensor)
    assert isinstance(topo[1].op, TT.Subtensor)
    assert isinstance(topo[2].op, theano.compile.function_module.DeepCopyOp)
    # Run on real data so DebugMode can validate the compiled graph.
    for x_s in self.x_shapes:
        # x[1::][0] needs at least 2 rows to be a valid index.
        if x_s[0] > 1:
            x_val = self.rng.uniform(size=x_s).astype(config.floatX)
            f(x_val)
def test_const2(self): def test_const2(self):
# var[::-1][const] -> var[-1] # var[::-1][const] -> var[-1]
x = TT.matrix('x') x = TT.matrix('x')
...@@ -1392,24 +1370,6 @@ class test_local_subtensor_merge(unittest.TestCase): ...@@ -1392,24 +1370,6 @@ class test_local_subtensor_merge(unittest.TestCase):
self.assertRaises(IndexError, f, x_val, idx) self.assertRaises(IndexError, f, x_val, idx)
self.assertRaises(IndexError, g, x_val, idx) self.assertRaises(IndexError, g, x_val, idx)
def test_dont_opt2(self):
    """Check that ``x[::other int][const]`` is NOT merged into one Subtensor.

    Only ``var[::-1][const]`` should be optimized by
    ``local_subtensor_merge``; ``x[::-2][0]`` must keep its two separate
    Subtensor ops in the compiled graph.
    """
    x = TT.matrix('x')
    f = function([x], x[::-2][0], mode=mode_opt)
    topo = f.maker.env.toposort()
    # Expect exactly: two un-merged Subtensor ops plus the trailing DeepCopyOp.
    assert len(topo) == 3
    assert isinstance(topo[0].op, TT.Subtensor)
    assert isinstance(topo[1].op, TT.Subtensor)
    assert isinstance(topo[2].op, theano.compile.function_module.DeepCopyOp)
    # Run on real data so DebugMode can validate the compiled graph.
    for x_s in self.x_shapes:
        # x[::-2][0] needs at least 1 row to be a valid index.
        if x_s[0] > 0:
            x_val = self.rng.uniform(size=x_s).astype(config.floatX)
            f(x_val)
def test_const3(self): def test_const3(self):
# var[::-1][:const] -> var[-1] # var[::-1][:const] -> var[-1]
...@@ -1446,24 +1406,6 @@ class test_local_subtensor_merge(unittest.TestCase): ...@@ -1446,24 +1406,6 @@ class test_local_subtensor_merge(unittest.TestCase):
for idx in range(-7,7): for idx in range(-7,7):
f(x_val, idx) # let debugmode test something f(x_val, idx) # let debugmode test something
def test_dont_opt3(self):
    """Check that ``x[::other int][:const]`` is NOT merged into one Subtensor.

    Only ``var[::-1][:const]`` should be optimized by
    ``local_subtensor_merge``; ``x[::-2][:0]`` must keep its two separate
    Subtensor ops in the compiled graph.
    """
    x = TT.matrix('x')
    f = function([x], x[::-2][:0], mode=mode_opt)
    topo = f.maker.env.toposort()
    # Expect exactly: two un-merged Subtensor ops plus the trailing DeepCopyOp.
    assert len(topo) == 3
    assert isinstance(topo[0].op, TT.Subtensor)
    assert isinstance(topo[1].op, TT.Subtensor)
    assert isinstance(topo[2].op, theano.compile.function_module.DeepCopyOp)
    # Run on real data so DebugMode can validate the compiled graph.
    # No shape guard needed: a ``[:0]`` slice is valid (empty) on any shape.
    for x_s in self.x_shapes:
        x_val = self.rng.uniform(size=x_s).astype(config.floatX)
        f(x_val)
def test_const4(self): def test_const4(self):
# var[const1::][:const2] # var[const1::][:const2]
x = TT.matrix('x') x = TT.matrix('x')
...@@ -1502,23 +1444,6 @@ class test_local_subtensor_merge(unittest.TestCase): ...@@ -1502,23 +1444,6 @@ class test_local_subtensor_merge(unittest.TestCase):
for idx2 in range(-11,11): for idx2 in range(-11,11):
f(x_val, idx1, idx2) # let debugmode test something f(x_val, idx1, idx2) # let debugmode test something
def test_dont_opt4(self):
    """Check that ``x[int1:int2][:int3]`` is NOT merged into one Subtensor.

    Only ``var[int1:][:int2]`` should be optimized by
    ``local_subtensor_merge``; ``x[-2:0][:0]`` must keep its two separate
    Subtensor ops in the compiled graph.

    NOTE(review): ``x[-2:0]`` is always an empty slice — presumably
    intentional here (the point is only the graph structure), but worth
    confirming against ``test_const4``.
    """
    x = TT.matrix('x')
    f = function([x], x[-2:0][:0], mode=mode_opt)
    # Removed a stray ``theano.printing.debugprint(f)`` left over from
    # debugging: it printed the whole graph to stdout on every test run.
    topo = f.maker.env.toposort()
    # Expect exactly: two un-merged Subtensor ops plus the trailing DeepCopyOp.
    assert len(topo) == 3
    assert isinstance(topo[0].op, TT.Subtensor)
    assert isinstance(topo[1].op, TT.Subtensor)
    assert isinstance(topo[2].op, theano.compile.function_module.DeepCopyOp)
    # Run on real data so DebugMode can validate the compiled graph.
    # No shape guard needed: both slices are valid (empty) on any shape.
    for x_s in self.x_shapes:
        x_val = self.rng.uniform(size=x_s).astype(config.floatX)
        f(x_val)
def test_local_fill_useless(): def test_local_fill_useless():
m = theano.config.mode m = theano.config.mode
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论