提交 113ba725 authored 作者: David Warde-Farley's avatar David Warde-Farley

Fix mutable default arguments. This is a bug waiting to happen.

上级 ffb29872
...@@ -117,7 +117,7 @@ def Rop(f, wrt, eval_points): ...@@ -117,7 +117,7 @@ def Rop(f, wrt, eval_points):
return rval return rval
def Lop(f, wrt, eval_points, consider_constant=[], warn_type=False, def Lop(f, wrt, eval_points, consider_constant=None, warn_type=False,
disconnected_inputs='raise'): disconnected_inputs='raise'):
""" """
Computes the L operation on `f` wrt to `wrt` evaluated at points given Computes the L operation on `f` wrt to `wrt` evaluated at points given
...@@ -140,6 +140,8 @@ def Lop(f, wrt, eval_points, consider_constant=[], warn_type=False, ...@@ -140,6 +140,8 @@ def Lop(f, wrt, eval_points, consider_constant=[], warn_type=False,
indices that specify both the position within a list and all indices that specify both the position within a list and all
coordinates of the tensor element in the last coordinates of the tensor element in the last
""" """
if consider_constant is None:
consider_constant = []
if not isinstance(f, TensorVariable): if not isinstance(f, TensorVariable):
raise TypeError('In tensor.Lop(), cost argument should be a TensorVariable.', f) raise TypeError('In tensor.Lop(), cost argument should be a TensorVariable.', f)
...@@ -155,7 +157,6 @@ def Lop(f, wrt, eval_points, consider_constant=[], warn_type=False, ...@@ -155,7 +157,6 @@ def Lop(f, wrt, eval_points, consider_constant=[], warn_type=False,
list(inputs) + list(consider_constant), list(inputs) + list(consider_constant),
warn_type=warn_type) warn_type=warn_type)
# Note : If p is not in gmap there can be several reasons, among which # Note : If p is not in gmap there can be several reasons, among which
# is the fact that p might not be part of the computational graph. A # is the fact that p might not be part of the computational graph. A
# simple example is that for a+b for e.g. a[0] is not part of the graph, # simple example is that for a+b for e.g. a[0] is not part of the graph,
...@@ -196,7 +197,7 @@ def Lop(f, wrt, eval_points, consider_constant=[], warn_type=False, ...@@ -196,7 +197,7 @@ def Lop(f, wrt, eval_points, consider_constant=[], warn_type=False,
# Gradient # Gradient
######################### #########################
def grad(cost, wrt, g_cost=None, consider_constant=[], warn_type=False, def grad(cost, wrt, g_cost=None, consider_constant=None, warn_type=False,
disconnected_inputs='raise'): disconnected_inputs='raise'):
""" """
:type cost: Scalar (0-dimensional) `Variable` :type cost: Scalar (0-dimensional) `Variable`
...@@ -228,6 +229,9 @@ def grad(cost, wrt, g_cost=None, consider_constant=[], warn_type=False, ...@@ -228,6 +229,9 @@ def grad(cost, wrt, g_cost=None, consider_constant=[], warn_type=False,
`theano.gradient.grad_sources_inputs``. `theano.gradient.grad_sources_inputs``.
""" """
if consider_constant is None:
consider_constant = []
if not isinstance(cost, TensorVariable): if not isinstance(cost, TensorVariable):
raise TypeError('In tensor.grad(), cost argument should be a TensorVariable.', cost) raise TypeError('In tensor.grad(), cost argument should be a TensorVariable.', cost)
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论