提交 793a0222 authored 作者: Pascal Lamblin's avatar Pascal Lamblin

Remove the old way of silencing warnings.

上级 c5753a6c
@@ -386,8 +386,6 @@ class TestEquilibrium(object):
         g = Env([x, y, z], [e])
         print 'before', g
         # display pesky warnings along with stdout
-        oldstderr = sys.stderr
-        sys.stderr = sys.stdout
         # also silence logger for 'theano.gof.opt'
         _logger = logging.getLogger('theano.gof.opt')
         oldlevel = _logger.getEffectiveLevel()
@@ -401,7 +399,6 @@ class TestEquilibrium(object):
                 max_use_ratio = 1. / len(g.nodes)) # each opt can only be applied once
             opt.optimize(g)
         finally:
-            sys.stderr = oldstderr
             _logger.setLevel(oldlevel)
         print 'after', g
         assert str(g) == '[Op4(x, y)]'
...
@@ -353,6 +353,9 @@ def local_softmax_with_bias(node):
         vectors = []
         non_vectors = []
         for x_in in x.owner.inputs:
+            print 'x_in =', x_in
+            print 'x_in.type =', x_in.type
+            print 'x_in.broadcastable =', x_in.broadcastable
             if list(x_in.type.broadcastable) == [True, False]:
                 # print isinstance(x_in.owner.op, tensor.DimShuffle)
                 #since specialization comes relatively late in optimization,
@@ -1239,7 +1242,7 @@ def categorical_crossentropy(coding_dist, true_dist):
     """
     if true_dist.ndim == coding_dist.ndim:
-        return -theano.sum(true_dist * log(coding_dist), axis=coding_dist.ndim-1)
+        return -tensor.sum(true_dist * tensor.log(coding_dist), axis=coding_dist.ndim-1)
     elif true_dist.ndim == coding_dist.ndim - 1:
         return crossentropy_categorical_1hot(coding_dist, true_dist)
     else:
...
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论