提交 4bfde33c authored 作者: Frederic's avatar Frederic

Fewer print statements in tests.

上级 0156327d
...@@ -335,7 +335,7 @@ class TestConv3D(unittest.TestCase): ...@@ -335,7 +335,7 @@ class TestConv3D(unittest.TestCase):
col_steps = self.rng.randint(1,4) col_steps = self.rng.randint(1,4)
time_steps = self.rng.randint(1,4) time_steps = self.rng.randint(1,4)
print (row_steps,col_steps,time_steps) #print (row_steps,col_steps,time_steps)
videoDur = (time_steps-1)*dt+filterDur + self.rng.randint(0,3) videoDur = (time_steps-1)*dt+filterDur + self.rng.randint(0,3)
videoWidth = (col_steps-1)*dc+filterWidth + self.rng.randint(0,3) videoWidth = (col_steps-1)*dc+filterWidth + self.rng.randint(0,3)
......
...@@ -112,8 +112,8 @@ class T_SoftmaxWithBias(unittest.TestCase): ...@@ -112,8 +112,8 @@ class T_SoftmaxWithBias(unittest.TestCase):
assert softmax_with_bias not in ops assert softmax_with_bias not in ops
assert softmax in ops assert softmax in ops
print f([0,1,0]) f([0,1,0])
print f.maker.env.toposort() #print f.maker.env.toposort()
def test_infer_shape(self): def test_infer_shape(self):
fff=theano.function([],outputs=softmax_with_bias(numpy.random.rand(3,4),numpy.random.rand(4)).shape) fff=theano.function([],outputs=softmax_with_bias(numpy.random.rand(3,4),numpy.random.rand(4)).shape)
...@@ -299,20 +299,20 @@ class T_CrossentropyCategorical1Hot(unittest.TestCase): ...@@ -299,20 +299,20 @@ class T_CrossentropyCategorical1Hot(unittest.TestCase):
[op(softmax(x+b), one_of_n)]) [op(softmax(x+b), one_of_n)])
assert env.outputs[0].owner.op == op assert env.outputs[0].owner.op == op
print 'BEFORE' #print 'BEFORE'
for node in env.toposort(): #for node in env.toposort():
print node.op # print node.op
print printing.pprint(node.outputs[0]) #print printing.pprint(node.outputs[0])
print '----' #print '----'
theano.compile.mode.optdb.query( theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(env) theano.compile.mode.OPT_FAST_RUN).optimize(env)
print 'AFTER' #print 'AFTER'
for node in env.toposort(): #for node in env.toposort():
print node.op # print node.op
print printing.pprint(node.outputs[0]) #print printing.pprint(node.outputs[0])
print '====' #print '===='
assert len(env.toposort()) == 2 assert len(env.toposort()) == 2
assert str(env.outputs[0].owner.op) == 'OutputGuard' assert str(env.outputs[0].owner.op) == 'OutputGuard'
...@@ -330,18 +330,18 @@ class T_CrossentropyCategorical1Hot(unittest.TestCase): ...@@ -330,18 +330,18 @@ class T_CrossentropyCategorical1Hot(unittest.TestCase):
[op(softmax(T.add(x,b,c)), one_of_n)]) [op(softmax(T.add(x,b,c)), one_of_n)])
assert env.outputs[0].owner.op == op assert env.outputs[0].owner.op == op
print 'BEFORE' #print 'BEFORE'
for node in env.toposort(): #for node in env.toposort():
print node.op # print node.op
print '----' #print '----'
theano.compile.mode.optdb.query( theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(env) theano.compile.mode.OPT_FAST_RUN).optimize(env)
print 'AFTER' #print 'AFTER'
for node in env.toposort(): #for node in env.toposort():
print node.op # print node.op
print '====' #print '===='
assert len(env.toposort()) == 3 assert len(env.toposort()) == 3
assert str(env.outputs[0].owner.op) == 'OutputGuard' assert str(env.outputs[0].owner.op) == 'OutputGuard'
...@@ -356,18 +356,18 @@ class T_CrossentropyCategorical1Hot(unittest.TestCase): ...@@ -356,18 +356,18 @@ class T_CrossentropyCategorical1Hot(unittest.TestCase):
[x, b, one_of_n], [x, b, one_of_n],
[op(softmax(x+b), one_of_n)]) [op(softmax(x+b), one_of_n)])
assert env.outputs[0].owner.op == op assert env.outputs[0].owner.op == op
print 'BEFORE' #print 'BEFORE'
for node in env.toposort(): #for node in env.toposort():
print node.op # print node.op
print printing.pprint(node.outputs[0]) #print printing.pprint(node.outputs[0])
print '----' #print '----'
theano.compile.mode.optdb.query( theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(env) theano.compile.mode.OPT_FAST_RUN).optimize(env)
print 'AFTER' #print 'AFTER'
for node in env.toposort(): #for node in env.toposort():
print node.op # print node.op
print '====' #print '===='
assert len(env.toposort()) == 3 assert len(env.toposort()) == 3
assert str(env.outputs[0].owner.op) == 'OutputGuard' assert str(env.outputs[0].owner.op) == 'OutputGuard'
assert env.outputs[0].owner.inputs[0].owner.op == crossentropy_softmax_argmax_1hot_with_bias assert env.outputs[0].owner.inputs[0].owner.op == crossentropy_softmax_argmax_1hot_with_bias
...@@ -385,16 +385,16 @@ class T_CrossentropyCategorical1Hot(unittest.TestCase): ...@@ -385,16 +385,16 @@ class T_CrossentropyCategorical1Hot(unittest.TestCase):
[x, one_of_n], [x, one_of_n],
[g_x]) [g_x])
print 'BEFORE' #print 'BEFORE'
for node in env.toposort(): #for node in env.toposort():
print node.op, node.inputs # print node.op, node.inputs
print '----' #print '----'
theano.compile.mode.optdb.query( theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(env) theano.compile.mode.OPT_FAST_RUN).optimize(env)
print 'AFTER' #print 'AFTER'
for node in env.toposort(): #for node in env.toposort():
print node.op, node.inputs # print node.op, node.inputs
# the function has 9 ops because the dimshuffle and elemwise{second} aren't getting # the function has 9 ops because the dimshuffle and elemwise{second} aren't getting
# cleaned up as well as we'd like. # cleaned up as well as we'd like.
...@@ -428,16 +428,16 @@ class T_CrossentropyCategorical1Hot(unittest.TestCase): ...@@ -428,16 +428,16 @@ class T_CrossentropyCategorical1Hot(unittest.TestCase):
[x, one_of_n], [x, one_of_n],
[g_x]) [g_x])
print 'BEFORE' #print 'BEFORE'
for node in env.toposort(): #for node in env.toposort():
print node.op, node.inputs # print node.op, node.inputs
print '----' #print '----'
theano.compile.mode.optdb.query( theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(env) theano.compile.mode.OPT_FAST_RUN).optimize(env)
print 'AFTER' #print 'AFTER'
for node in env.toposort(): #for node in env.toposort():
print node.op, node.inputs # print node.op, node.inputs
# the function has 9 ops because the dimshuffle and elemwise{second} aren't getting # the function has 9 ops because the dimshuffle and elemwise{second} aren't getting
# cleaned up as well as we'd like. # cleaned up as well as we'd like.
...@@ -1021,9 +1021,9 @@ class Test_softmax_opt: ...@@ -1021,9 +1021,9 @@ class Test_softmax_opt:
# test that function contains softmax and no div. # test that function contains softmax and no div.
f = theano.function([c],p_y, mode=self.mode) f = theano.function([c],p_y, mode=self.mode)
f_ops = [n.op for n in f.maker.env.toposort()] f_ops = [n.op for n in f.maker.env.toposort()]
print '--- f =' #print '--- f ='
printing.debugprint(f) #printing.debugprint(f)
print '===' #print '==='
assert len(f_ops) == 1 assert len(f_ops) == 1
assert softmax in f_ops assert softmax in f_ops
f(self.rng.rand(3,4).astype(config.floatX)) f(self.rng.rand(3,4).astype(config.floatX))
...@@ -1041,9 +1041,9 @@ class Test_softmax_opt: ...@@ -1041,9 +1041,9 @@ class Test_softmax_opt:
finally: finally:
config.warn.sum_div_dimshuffle_bug = backup config.warn.sum_div_dimshuffle_bug = backup
g_ops = [n.op for n in g.maker.env.toposort()] g_ops = [n.op for n in g.maker.env.toposort()]
print '--- g =' #print '--- g ='
printing.debugprint(g) #printing.debugprint(g)
print '===' #print '==='
raise SkipTest('Optimization not enabled for the moment') raise SkipTest('Optimization not enabled for the moment')
assert len(g_ops) == 2 assert len(g_ops) == 2
...@@ -1058,7 +1058,7 @@ class Test_softmax_opt: ...@@ -1058,7 +1058,7 @@ class Test_softmax_opt:
# test that function contains softmax and no div. # test that function contains softmax and no div.
f = theano.function([c],p_y) f = theano.function([c],p_y)
printing.debugprint(f) #printing.debugprint(f)
# test that function contains softmax and no div. # test that function contains softmax and no div.
backup = config.warn.sum_div_dimshuffle_bug backup = config.warn.sum_div_dimshuffle_bug
...@@ -1067,7 +1067,7 @@ class Test_softmax_opt: ...@@ -1067,7 +1067,7 @@ class Test_softmax_opt:
g = theano.function([c],T.grad(p_y.sum(), c)) g = theano.function([c],T.grad(p_y.sum(), c))
finally: finally:
config.warn.sum_div_dimshuffle_bug = backup config.warn.sum_div_dimshuffle_bug = backup
printing.debugprint(g) #printing.debugprint(g)
raise SkipTest('Optimization not enabled for the moment') raise SkipTest('Optimization not enabled for the moment')
def test_1d_basic(self): def test_1d_basic(self):
...@@ -1077,7 +1077,7 @@ class Test_softmax_opt: ...@@ -1077,7 +1077,7 @@ class Test_softmax_opt:
# test that function contains softmax and no div. # test that function contains softmax and no div.
f = theano.function([c], p_y) f = theano.function([c], p_y)
printing.debugprint(f) #printing.debugprint(f)
# test that function contains softmax and no div. # test that function contains softmax and no div.
backup = config.warn.sum_div_dimshuffle_bug backup = config.warn.sum_div_dimshuffle_bug
...@@ -1086,7 +1086,7 @@ class Test_softmax_opt: ...@@ -1086,7 +1086,7 @@ class Test_softmax_opt:
g = theano.function([c], T.grad(p_y.sum(), c)) g = theano.function([c], T.grad(p_y.sum(), c))
finally: finally:
config.warn.sum_div_dimshuffle_bug = backup config.warn.sum_div_dimshuffle_bug = backup
printing.debugprint(g) #printing.debugprint(g)
raise SkipTest('Optimization not enabled for the moment') raise SkipTest('Optimization not enabled for the moment')
# REPEAT 3 CASES in presence of log(softmax) with the advanced indexing etc. # REPEAT 3 CASES in presence of log(softmax) with the advanced indexing etc.
......
...@@ -50,8 +50,8 @@ class TestDownsampleFactorMax(unittest.TestCase): ...@@ -50,8 +50,8 @@ class TestDownsampleFactorMax(unittest.TestCase):
for maxpoolshp in maxpoolshps: for maxpoolshp in maxpoolshps:
for ignore_border in [True,False]: for ignore_border in [True,False]:
print 'maxpoolshp =', maxpoolshp #print 'maxpoolshp =', maxpoolshp
print 'ignore_border =', ignore_border #print 'ignore_border =', ignore_border
## Pure Numpy computation ## Pure Numpy computation
numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp, ignore_border) numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp, ignore_border)
...@@ -74,8 +74,8 @@ class TestDownsampleFactorMax(unittest.TestCase): ...@@ -74,8 +74,8 @@ class TestDownsampleFactorMax(unittest.TestCase):
for maxpoolshp in maxpoolshps: for maxpoolshp in maxpoolshps:
for ignore_border in [True,False]: for ignore_border in [True,False]:
print 'maxpoolshp =', maxpoolshp #print 'maxpoolshp =', maxpoolshp
print 'ignore_border =', ignore_border #print 'ignore_border =', ignore_border
def mp(input): def mp(input):
return DownsampleFactorMax(maxpoolshp, ignore_border=ignore_border)(input) return DownsampleFactorMax(maxpoolshp, ignore_border=ignore_border)(input)
utt.verify_grad(mp, [imval], rng=rng) utt.verify_grad(mp, [imval], rng=rng)
...@@ -89,8 +89,8 @@ class TestDownsampleFactorMax(unittest.TestCase): ...@@ -89,8 +89,8 @@ class TestDownsampleFactorMax(unittest.TestCase):
for maxpoolshp in maxpoolshps: for maxpoolshp in maxpoolshps:
for ignore_border in [True,False]: for ignore_border in [True,False]:
print 'maxpoolshp =', maxpoolshp #print 'maxpoolshp =', maxpoolshp
print 'ignore_border =', ignore_border #print 'ignore_border =', ignore_border
numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp, ignore_border) numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp, ignore_border)
output = max_pool_2d(images, maxpoolshp, ignore_border) output = max_pool_2d(images, maxpoolshp, ignore_border)
...@@ -110,8 +110,8 @@ class TestDownsampleFactorMax(unittest.TestCase): ...@@ -110,8 +110,8 @@ class TestDownsampleFactorMax(unittest.TestCase):
for maxpoolshp in maxpoolshps: for maxpoolshp in maxpoolshps:
for ignore_border in [True,False]: for ignore_border in [True,False]:
print 'maxpoolshp =', maxpoolshp #print 'maxpoolshp =', maxpoolshp
print 'ignore_border =', ignore_border #print 'ignore_border =', ignore_border
numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp, ignore_border) numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp, ignore_border)
output = max_pool_2d(images, maxpoolshp, ignore_border) output = max_pool_2d(images, maxpoolshp, ignore_border)
...@@ -144,8 +144,8 @@ class TestDownsampleFactorMax(unittest.TestCase): ...@@ -144,8 +144,8 @@ class TestDownsampleFactorMax(unittest.TestCase):
for maxpoolshp in maxpoolshps: for maxpoolshp in maxpoolshps:
for ignore_border in [True,False]: for ignore_border in [True,False]:
print 'maxpoolshp =', maxpoolshp #print 'maxpoolshp =', maxpoolshp
print 'ignore_border =', ignore_border #print 'ignore_border =', ignore_border
numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp, ignore_border) numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp, ignore_border)
output = max_pool_2d(images, maxpoolshp, ignore_border) output = max_pool_2d(images, maxpoolshp, ignore_border)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论