Commit 475e8ad9 authored by Frederic

pep8

Parent c3d4ad81
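The commit message is terse, so for context: the diff below is a mechanical PEP 8 cleanup, chiefly re-aligning continuation lines with the opening delimiter (the E127/E128 checks), dropping backslash continuations that are redundant inside parentheses, and adding the two blank lines expected before top-level definitions (E302). A minimal sketch of the target style, using invented names rather than code from this commit:

# Hypothetical illustration of the style this commit enforces.
def describe_grad(op_name, input_position,
                  comment=""):  # continuation aligned with the open paren
    # Implicit line joining inside parentheses makes a trailing "\" redundant.
    return ("grad of op %s at input %d %s" %
            (op_name, input_position, comment))


# E302: two blank lines before each top-level definition.
print(describe_grad("Scan", 0, "(example)"))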
@@ -78,10 +78,9 @@ def grad_not_implemented(op, x_pos, x, comment=""):
     gradient is not implemented.
     """
-    return (NullType(
-        (
-            "This variable is Null because the grad method for "
-            "input %s (%s) of the %s op is not implemented. %s"
+    return (NullType((
+        "This variable is Null because the grad method for "
+        "input %s (%s) of the %s op is not implemented. %s"
     ) % (x_pos, x, op, comment)))()
@@ -341,8 +340,8 @@ def Lop(f, wrt, eval_points, consider_constant=None,
     known = dict(izip(f, grads))
 
     ret = grad(cost=None, known_grads=known,
-        consider_constant=consider_constant, wrt=wrt,
-        disconnected_inputs=disconnected_inputs)
+               consider_constant=consider_constant, wrt=wrt,
+               disconnected_inputs=disconnected_inputs)
 
     return format_as(using_list, using_tuple, ret)
@@ -352,8 +351,8 @@ def Lop(f, wrt, eval_points, consider_constant=None,
 #########################
 def grad(cost, wrt, consider_constant=None,
-        disconnected_inputs='raise', add_names=True,
-        known_grads=None, return_disconnected='zero'):
+         disconnected_inputs='raise', add_names=True,
+         known_grads=None, return_disconnected='zero'):
     """
     :type cost: Scalar (0-dimensional) Variable.
         May optionally be None if known_grads is provided.
@@ -406,17 +405,16 @@ def grad(cost, wrt, consider_constant=None,
     if cost is not None and isinstance(cost.type, NullType):
         raise ValueError("Can't differentiate a NaN cost."
-                         "cost is NaN because " + \
-                         cost.type.why_null)
+                         "cost is NaN because " +
+                         cost.type.why_null)
 
     if cost is not None and cost.ndim != 0:
         raise TypeError("cost must be a scalar.")
 
     if isinstance(wrt, set):
         raise TypeError("wrt must not be a set. sets have no defined "
-                        "iteration order, so we can't return gradients in a matching"
-                        " order.")
+                        "iteration order, so we can't return gradients in a"
+                        " matching order.")
 
     using_list = isinstance(wrt, list)
     using_tuple = isinstance(wrt, tuple)
@@ -426,7 +424,7 @@ def grad(cost, wrt, consider_constant=None,
     for elem in wrt:
         if not isinstance(elem, Variable):
             raise TypeError("Expected Variable, got " + str(elem) +
-                            " of type "+str(type(elem)))
+                            " of type " + str(type(elem)))
 
     outputs = []
     if cost is not None:
@@ -435,7 +433,7 @@ def grad(cost, wrt, consider_constant=None,
         outputs.extend(known_grads.keys())
 
     var_to_app_to_idx = _populate_var_to_app_to_idx(
-            outputs, wrt, consider_constant)
+        outputs, wrt, consider_constant)
 
     # build a dict mapping var to the gradient of cost with respect to var
     grad_dict = OrderedDict()
@@ -452,7 +450,8 @@ def grad(cost, wrt, consider_constant=None,
         # g_cost may be Disconnected or NullType. A creative use of the function,
         # sure, but nonetheless one we can and should support. So before we try
         # to cast it make sure it even has a dtype
-        if hasattr(g_cost.type, 'dtype') and cost.type.dtype not in tensor.discrete_dtypes:
+        if (hasattr(g_cost.type, 'dtype') and
+                cost.type.dtype not in tensor.discrete_dtypes):
             # Here we enforce the constraint that floating point variables have
             # the same dtype as their gradient.
             g_cost = g_cost.astype(cost.type.dtype)
@@ -471,8 +470,8 @@ def grad(cost, wrt, consider_constant=None,
                 'Ambiguous whether %s should be made into tensor'
                 ' or sparse theano variable' % str(type(g_var)))
 
-        if not isinstance(g_var.type, (NullType, DisconnectedType)) and 'float' \
-                not in str(g_var.type.dtype):
+        if (not isinstance(g_var.type, (NullType, DisconnectedType)) and
+                'float' not in str(g_var.type.dtype)):
             raise TypeError("Gradients must always be NullType, "
                             "DisconnectedType, or continuous, but grad was "
                             "given a known_grad of type "+str(g_var.type))
@@ -728,11 +727,13 @@ def _populate_var_to_app_to_idx(outputs, wrt, consider_constant):
     return var_to_app_to_idx
 
+
 class NullTypeGradError(TypeError):
     """
     Raised when grad encounters a NullType.
     """
 
+
 class DisconnectedInputError(ValueError):
     """
     Raised when grad is asked to compute the gradient
@@ -740,8 +741,9 @@ class DisconnectedInputError(ValueError):
     disconnected_inputs='raise'.
     """
 
+
 def _populate_grad_dict(var_to_app_to_idx,
-        grad_dict, wrt, cost_name=None):
+                        grad_dict, wrt, cost_name=None):
     """
     Helper function for grad function.
@@ -783,7 +785,7 @@ def _populate_grad_dict(var_to_app_to_idx,
             # list of bools indicating if each output is connected to the cost
             outputs_connected = [not isinstance(g.type, DisconnectedType)
-                    for g in output_grads]
+                                 for g in output_grads]
 
             connection_pattern = _node_to_pattern(node)
@@ -840,7 +842,7 @@ def _populate_grad_dict(var_to_app_to_idx,
             # each destroyed input.
             try:
                 dinputs = [node.inputs[x[0]] for x in
-                        node.op.destroy_map.values()]
+                           node.op.destroy_map.values()]
             except AttributeError:
                 dinputs = []
@@ -899,11 +901,11 @@ def _populate_grad_dict(var_to_app_to_idx,
             if input_grads is None:
                 raise TypeError("%s.grad returned NoneType, "
-                        "expected iterable." % str(node.op))
+                                "expected iterable." % str(node.op))
 
             if len(input_grads) != len(inputs):
-                raise ValueError(("%s returned the wrong number of" +\
-                        " gradient terms.") % str(node.op))
+                raise ValueError(("%s returned the wrong number of" +
+                                  " gradient terms.") % str(node.op))
 
             # must convert to list in case the op returns a tuple
             # we won't be able to post-process out the Nones if it does that
@@ -926,7 +928,7 @@ def _populate_grad_dict(var_to_app_to_idx,
                     # used to mean undefined, zero, or disconnected.
                     # We therefore don't allow it because its usage has become
                     # so muddied.
-                    raise TypeError(('%s.grad returned None for' +\
+                    raise TypeError(('%s.grad returned None for' +
                                      ' a gradient term, '
                                      'this is prohibited. Instead of None,'
                                      'return zeros_like(input), DisconnectedType()(),'
@@ -964,7 +966,7 @@ def _populate_grad_dict(var_to_app_to_idx,
                         msg += "verifiably zeros."
 
                     msg = msg % (str(node.op), str(term),
-                            str(type(term)), i)
+                                 str(type(term)), i)
 
                     if is_zero == 'no':
                         msg = "%s.grad returned %s of type %s for input"
@@ -980,8 +982,8 @@ def _populate_grad_dict(var_to_app_to_idx,
             #Check that op.connection_pattern matches the connectivity
             #logic driving the op.grad method
-            for i, packed in \
-                    enumerate(zip(inputs, input_grads, inputs_connected)):
+            for i, packed in enumerate(zip(inputs, input_grads,
+                                           inputs_connected)):
                 ipt, ig, connected = packed
                 actually_connected = \
                     not isinstance(ig.type, DisconnectedType)
@@ -1027,11 +1029,11 @@ def _populate_grad_dict(var_to_app_to_idx,
                 if not isinstance(term, gof.Variable):
                     raise TypeError("%s.grad returned %s, expected"
                                     " Variable instance." % (str(node.op),
-                            type(term)))
+                                                             type(term)))
 
                 if isinstance(term.type, NullType):
                     raise NullTypeGradError("tensor.grad "
-                            "encountered a NaN. " +\
+                                            "encountered a NaN. " +
                                             term.type.why_null)
 
                 #Don't try to sum up DisconnectedType placeholders
@@ -1121,9 +1123,9 @@ class numeric_grad(object):
     # For now, we use a heuristic that catches very bad gradients, but is not
     # perfectly accurate.
     type_eps = {'float64': 1e-7,
-            'float32': 3e-4,
-            numpy.dtype('float64'): 1e-7,
-            numpy.dtype('float32'): 3e-4}
+                'float32': 3e-4,
+                numpy.dtype('float64'): 1e-7,
+                numpy.dtype('float32'): 3e-4}
 
     def __init__(self, f, pt, eps=None, out_type=None):
         """Return the gradient of f at pt.
@@ -1243,15 +1245,13 @@ class numeric_grad(object):
         """
         if len(g_pt) != len(self.gf):
-            raise ValueError(
-                'argument has wrong number of elements',
-                len(g_pt))
+            raise ValueError('argument has wrong number of elements',
+                             len(g_pt))
         errs = []
         for i, (a, b) in enumerate(zip(g_pt, self.gf)):
             if a.shape != b.shape:
-                raise ValueError(
-                    'argument element %i has wrong shape %s' % (
-                        i, str((a.shape, b.shape))))
+                raise ValueError('argument element %i has wrong shape %s' % (
+                    i, str((a.shape, b.shape))))
             errs.append(numeric_grad.abs_rel_err(a, b))
         return errs
@@ -1368,11 +1368,12 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None,
     def function(inputs, output):
         if mode is None:
             f = compile.function(inputs, output, accept_inplace=True,
-                    allow_input_downcast=True, on_unused_input='ignore')
+                                 allow_input_downcast=True,
+                                 on_unused_input='ignore')
         else:
             f = compile.function(inputs, output, accept_inplace=True,
-                    allow_input_downcast=True, mode=mode,
-                    on_unused_input='ignore')
+                                 allow_input_downcast=True, mode=mode,
+                                 on_unused_input='ignore')
         return f
 
     tensor_pt = [TensorType(
@@ -1525,9 +1526,9 @@ def jacobian(expression, wrt, consider_constant=None,
         rvals = []
         for inp in args[2:]:
             rval = grad(expr[idx],
-                    inp,
-                    consider_constant=consider_constant,
-                    disconnected_inputs=disconnected_inputs)
+                        inp,
+                        consider_constant=consider_constant,
+                        disconnected_inputs=disconnected_inputs)
             rvals.append(rval)
         return rvals
 
     # Computing the gradients does not affect the random seeds on any random
@@ -1535,8 +1536,8 @@ def jacobian(expression, wrt, consider_constant=None,
     # just backtracking over old values. (rp Jan 2012 - if anyone has a
    # counter example please show me)
     jacobs, updates = theano.scan(inner_function,
-            sequences=arange(expression.shape[0]),
-            non_sequences=[expression] + wrt)
+                                  sequences=arange(expression.shape[0]),
+                                  non_sequences=[expression] + wrt)
     assert not updates, \
         ("Scan has returned a list of updates. This should not "
         "happen! Report this to theano-users (also include the "
@@ -1545,7 +1546,7 @@ def jacobian(expression, wrt, consider_constant=None,
 
 def hessian(cost, wrt, consider_constant=None,
-        disconnected_inputs='raise'):
+            disconnected_inputs='raise'):
     """
     :type cost: Scalar (0-dimensional) Variable.
     :type wrt: Vector (1-dimensional tensor) 'Variable' or list of
@@ -204,8 +204,8 @@ def test_rop_lop():
     rop_f = function([mx, mv], yv)
 
     sy, _ = theano.scan(lambda i, y, x, v: (tensor.grad(y[i], x) * v).sum(),
-                            sequences=tensor.arange(y.shape[0]),
-                            non_sequences=[y, mx, mv])
+                        sequences=tensor.arange(y.shape[0]),
+                        non_sequences=[y, mx, mv])
     scan_f = function([mx, mv], sy)
 
     rng = numpy.random.RandomState(utt.fetch_seed())
@@ -561,6 +561,7 @@ class test_Eigh(test_Eig):
 class test_Eigh_float32(test_Eigh):
     dtype = 'float32'
 
+
 def test_matrix_inverse_solve():
     if not imported_scipy:
         raise SkipTest("Scipy needed for the Solve op.")