Commit b01f8a62 authored by Frederic Bastien

remove print in test and pep8 fix.

Parent 0a7cb330
@@ -464,7 +464,7 @@ def grad(cost, wrt, consider_constant=None,
         known_grads = OrderedDict()
     else:
         m = "known_grads must be an OrderedDict. "
-        assert isinstance(known_grads, OrderedDict) or len(known_grads) <=1, m
+        assert isinstance(known_grads, OrderedDict) or len(known_grads) <= 1, m
     # The gradient of the cost is 1 unless specified otherwise by known_grads.
     if cost is not None:
...
@@ -472,7 +472,6 @@ def test_known_grads():
     true_grads = true_grads(*values)

     for layer in layers:
-        print('Testing by separately computing ', layer)
         first = theano.tensor.grad(cost, layer, disconnected_inputs='ignore')
         known = OrderedDict(izip(layer, first))
         full = theano.tensor.grad(cost=None, known_grads=known, wrt=inputs, disconnected_inputs='ignore')
...
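For context (commentary, not part of the commit): the known_grads argument exercised by test_known_grads lets theano.tensor.grad start back-propagation from caller-supplied gradients instead of the implicit gradient of 1 on a scalar cost, which is why the code above asserts it is an OrderedDict. A minimal sketch of that usage, assuming a standard Theano install; the variable names and the upstream gradient value 3 are illustrative:

# Minimal known_grads sketch (illustrative, not from this commit).
from collections import OrderedDict

import theano
import theano.tensor as T

x = T.scalar('x')
y = x ** 2
# Suppose d(cost)/dy is already known to be 3; grad() back-propagates it
# through y = x**2, giving d(cost)/dx = 3 * 2 * x.
known = OrderedDict([(y, 3 * T.ones_like(y))])
gx = theano.tensor.grad(cost=None, wrt=x, known_grads=known)
f = theano.function([x], gx)
print(f(2.0))  # expected: 12.0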