Commit c616a7ac authored by David Warde-Farley, committed by Arnaud Bergeron

Uncontroversial/necessary list() additions around zip().

Parent 9733b595
......@@ -101,7 +101,7 @@ def fgraph_updated_vars(fgraph, expanded_inputs):
potential_values = list(fgraph.outputs) # copy the list
if len(expanded_inputs) != len(fgraph.inputs):
raise ValueError('expanded_inputs must match len(fgraph.inputs)')
for e_input, ivar in reversed(zip(expanded_inputs, fgraph.inputs)):
for e_input, ivar in reversed(list(zip(expanded_inputs, fgraph.inputs))):
if e_input.update is not None:
updated_vars[ivar] = potential_values.pop()
return updated_vars
......@@ -657,8 +657,8 @@ returned directly?"""
if getattr(self.fn, 'need_update_inputs', True):
# Update the inputs that have an update function
for input, storage in reversed(zip(self.maker.expanded_inputs,
self.input_storage)):
for input, storage in reversed(list(zip(self.maker.expanded_inputs,
self.input_storage))):
if input.update is not None:
storage.data = outputs.pop()
else:
......
......@@ -150,7 +150,7 @@ class SymbolicInputKit(object):
ret.sort()
if not ret:
return [[], []]
return zip(*ret)
return list(zip(*ret))
class In(SymbolicInput):
......
......@@ -7,7 +7,7 @@ import traceback
import numpy
import theano
from theano.compat import PY3
from theano.compat import PY3, izip
from theano.compat.six import reraise
from theano.compat.six.moves import StringIO
from theano.gof import utils
......@@ -354,7 +354,7 @@ class Linker(object):
% (takes, ['argument', 'arguments'][takes > 1], got)
if (len(args) != len(inputs)):
raise TypeError(e_arity(len(inputs), len(args)))
for arg, variable in zip(args, inputs):
for arg, variable in izip(args, inputs):
variable.data = arg
thunk()
if unpack_single:
......@@ -499,7 +499,7 @@ def map_storage(fgraph, order, input_storage, output_storage):
assert len(fgraph.inputs) == len(input_storage)
storage_map = {}
for r, storage in zip(fgraph.inputs, input_storage):
for r, storage in izip(fgraph.inputs, input_storage):
storage_map[r] = storage
# for orphan in fgraph.orphans:
# if not isinstance(orphan, Constant):
......@@ -508,7 +508,7 @@ def map_storage(fgraph, order, input_storage, output_storage):
if output_storage is not None:
assert len(fgraph.outputs) == len(output_storage)
for r, storage in zip(fgraph.outputs, output_storage):
for r, storage in izip(fgraph.outputs, output_storage):
storage_map[r] = storage
for node in order:
......@@ -565,8 +565,8 @@ def streamline(fgraph, thunks, order, post_thunk_old_storage=None,
for x in no_recycling:
x[0] = None
try:
for thunk, node, old_storage in zip(thunks, order,
post_thunk_old_storage):
for thunk, node, old_storage in izip(thunks, order,
post_thunk_old_storage):
thunk()
for old_s in old_storage:
old_s[0] = None
......@@ -574,13 +574,11 @@ def streamline(fgraph, thunks, order, post_thunk_old_storage=None,
raise_with_op(node, thunk)
f = streamline_default_f
elif nice_errors:
thunk_node_list = zip(thunks, order)
def streamline_nice_errors_f():
for x in no_recycling:
x[0] = None
try:
for thunk, node in thunk_node_list:
for thunk, node in izip(thunks, order):
thunk()
except Exception:
raise_with_op(node, thunk)
......@@ -743,9 +741,13 @@ class PerformLinker(LocalLinker):
add_clear_storage(f, computed, storage_map)
f.storage_map = storage_map
return f, [Container(input, storage) for input, storage in zip(fgraph.inputs, input_storage)], \
[Container(output, storage, True) for output, storage in zip(fgraph.outputs, output_storage)], \
thunks, order
return (f,
[Container(input, storage)
for input, storage in izip(fgraph.inputs, input_storage)],
[Container(output, storage, True)
for output, storage in izip(fgraph.outputs, output_storage)],
thunks,
order)
def add_clear_storage(f, computed, storage_map):
......@@ -864,11 +866,11 @@ class WrapLinker(Linker):
inputs0 = input_lists[0]
outputs0 = output_lists[0]
thunk_groups = zip(*thunk_lists)
thunk_groups = list(zip(*thunk_lists))
order = [x[0] for x in zip(*order_lists)]
to_reset = []
for thunks, node in zip(thunk_groups, order):
for thunks, node in izip(thunk_groups, order):
for j, output in enumerate(node.outputs):
if output in no_recycling:
for thunk in thunks:
......@@ -879,13 +881,13 @@ class WrapLinker(Linker):
def f():
for inputs in input_lists[1:]:
for input1, input2 in zip(inputs0, inputs):
for input1, input2 in izip(inputs0, inputs):
input2.storage[0] = copy(input1.storage[0])
for x in to_reset:
x[0] = None
pre(self, [input.data for input in input_lists[0]],
order, thunk_groups)
for i, (thunks, node) in enumerate(zip(thunk_groups, order)):
for i, (thunks, node) in enumerate(izip(thunk_groups, order)):
try:
wrapper(i, node, *thunks)
except Exception:
......
......@@ -253,7 +253,7 @@ class SeqOptimizer(Optimizer, list):
else:
ll.append((opt.name, opt.__class__.__name__,
opts.index(opt)))
lll = zip(prof, ll)
lll = list(zip(prof, ll))
def cmp(a, b):
if a[0] == b[0]:
......@@ -539,7 +539,7 @@ class MergeFeature(object):
continue
# Schedule transfer of clients from node to candidate
pairs = zip(node.outputs, candidate.outputs)
pairs = izip(node.outputs, candidate.outputs)
# transfer names
for node_output, cand_output in pairs:
......
......@@ -595,7 +595,7 @@ class CondMerge(gof.Optimizer):
old_outs += [proposal.outputs]
else:
old_outs += proposal.outputs
pairs = zip(old_outs, new_outs)
pairs = list(zip(old_outs, new_outs))
fgraph.replace_all_validate(pairs, reason='cond_merge')
......@@ -699,7 +699,7 @@ def cond_merge_random_op(main_node):
old_outs += [proposal.outputs]
else:
old_outs += proposal.outputs
pairs = zip(old_outs, new_outs)
pairs = list(zip(old_outs, new_outs))
main_outs = clone(main_node.outputs, replace=pairs)
return main_outs
......
......@@ -235,9 +235,9 @@ class PycudaElemwiseSourceModuleOp(GpuOp):
tuple([n + "[i]" for n in in_name]),
tuple(n + "[i]" for n in out_name), {})
c_code_param = ", ".join([_replace_npy_types(var.type.dtype_specs()[1]) + " *" + name
for var, name in (zip(inputs, in_name) +
zip(out_node.outputs,
out_name))] +
for var, name in (list(zip(inputs, in_name)) +
list(zip(out_node.outputs,
out_name)))] +
["int size"])
mod = SourceModule("""
__global__ void %s(%s)
......@@ -326,8 +326,8 @@ class PycudaElemwiseSourceModuleMakeThunkOp(Op):
tuple(n + "[i]" for n in out_name), {})
c_code_param = ", ".join([_replace_npy_types(var.type.dtype_specs()[1]) + " *" + name
for var, name in
zip(node.inputs, in_name) +
zip(node.outputs, out_name)] + ["int size"])
list(zip(node.inputs, in_name)) +
list(zip(node.outputs, out_name))] + ["int size"])
mod = SourceModule("""
__global__ void %s(%s)
{
......
......@@ -2284,8 +2284,8 @@ def gpuScanOptimization(node):
scan_outs = [safe_to_gpu(x) for x in thescan.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=zip(thescan.inputs,
[safe_to_cpu(x) for x in scan_ins]))
replace=list(zip(thescan.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about cuda ndarray and can not
# handle graphs with inputs being Cuda Ndarrays
......@@ -2330,8 +2330,8 @@ def gpuScanOptimization(node):
scan_outs = [safe_to_gpu(x) for x in thescan.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=zip(thescan.inputs,
[safe_to_cpu(x) for x in scan_ins]))
replace=list(zip(thescan.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about cuda ndarray and can not
......
......@@ -787,8 +787,8 @@ def local_scan_to_gpua(node):
scan_outs = [safe_to_gpu(x) for x in node.op.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=zip(node.op.inputs,
[safe_to_cpu(x) for x in scan_ins]))
replace=list(zip(node.op.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about the gpu and can not
......
......@@ -3293,11 +3293,9 @@ class Composite(ScalarOp):
def init_c_code(self):
"""Return the C code for this Composite Op. """
subd = dict(
zip(self.fgraph.inputs,
["%%(i%i)s" % i for i in xrange(len(self.fgraph.inputs))])
+ zip(self.fgraph.outputs,
["%%(o%i)s" % i for i in xrange(len(self.fgraph.outputs))]))
subd = dict(chain(
((e, "%%(i%i)s" % i) for i, e in enumerate(self.fgraph.inputs)),
((e, "%%(o%i)s" % i) for i, e in enumerate(self.fgraph.outputs))))
for var in self.fgraph.variables:
if var.owner is None:
......
......@@ -346,7 +346,7 @@ class PushOutNonSeqScan(gof.Optimizer):
**dict(return_list=True))[0].owner
fgraph.replace_all_validate_remove(
zip(node.outputs, nw_node.outputs),
list(zip(node.outputs, nw_node.outputs)),
remove=[node],
reason='scanOp_pushout_nonseqs_ops')
return True
......@@ -558,7 +558,7 @@ class PushOutSeqScan(gof.Optimizer):
**dict(return_list=True))[0].owner
fgraph.replace_all_validate_remove(
zip(node.outputs, nw_node.outputs),
list(zip(node.outputs, nw_node.outputs)),
remove=[node],
reason='scanOp_pushout_seqs_ops')
return True
......@@ -921,7 +921,7 @@ class PushOutScanOutput(gof.Optimizer):
new_scan_node.outputs[new_node_new_outputs_idx+nb_new_outs:])
fgraph.replace_all_validate_remove(
zip(old_scan_node.outputs, new_node_old_outputs),
list(zip(old_scan_node.outputs, new_node_old_outputs)),
remove=[old_scan_node],
reason='scanOp_pushout_output')
......@@ -981,7 +981,7 @@ class ScanInplaceOptimizer(Optimizer):
new_outs = new_op(*inputs, **dict(return_list=True))
try:
fgraph.replace_all_validate_remove(
zip(node.outputs, new_outs),
list(zip(node.outputs, new_outs)),
remove=[node],
reason='scanOp_make_inplace')
op = new_op
......@@ -1702,7 +1702,7 @@ class ScanMerge(gof.Optimizer):
if not isinstance(new_outs, (list, tuple)):
new_outs = [new_outs]
return zip(outer_outs, new_outs)
return list(zip(outer_outs, new_outs))
def belongs_to_set(self, node, set_nodes):
"""
......@@ -2126,10 +2126,10 @@ class PushOutDot1(gof.Optimizer):
new_out = tensor.dot(val, out_seq)
pos = node.outputs.index(outer_out)
old_new = zip(node.outputs[:pos], new_outs[:pos])
old_new = list(zip(node.outputs[:pos], new_outs[:pos]))
old = node.outputs[pos].clients[0][0].outputs[0]
old_new.append((old, new_out))
old_new += zip(node.outputs[pos+1:], new_outs[pos:])
old_new += list(zip(node.outputs[pos+1:], new_outs[pos:]))
fgraph.replace_all_validate_remove(
old_new, remove=[node], reason='scan_pushout_dot1')
......
......@@ -4479,7 +4479,7 @@ class ScanGpuTests:
# Compute the cost and take the gradient wrt params
cost = tensor.sum((l2_out - yout) ** 2)
grads = tensor.grad(cost, nparams)
updates = zip(nparams, [n - g for n, g in zip(nparams, grads)])
updates = list(zip(nparams, (n - g for n, g in zip(nparams, grads))))
# Compile the theano function
feval_backprop = theano.function([xin, yout], cost, updates=updates,
......
......@@ -1578,7 +1578,7 @@ class GemmOptimizer(Optimizer):
assert len(new_outputs) == len(node.outputs)
try:
fgraph.replace_all_validate_remove(
zip(node.outputs, new_outputs),
list(zip(node.outputs, new_outputs)),
[old_dot22],
reason='GemmOptimizer',
# For now we disable the warning as we know case
......
......@@ -787,7 +787,7 @@ class Elemwise(OpenMPOp):
super(Elemwise, self).perform(node, inputs, output_storage)
maxsize = max(len(input.shape) for input in inputs)
for dims in izip(*[zip(input.shape, sinput.type.broadcastable)
for dims in izip(*[list(zip(input.shape, sinput.type.broadcastable))
for input, sinput in zip(inputs, node.inputs)]):
if max(d for d, b in dims) != 1 and (1, False) in dims:
# yes there may be more compact ways to write this code,
......@@ -930,7 +930,7 @@ class Elemwise(OpenMPOp):
# assert that inames and inputs order stay consistent.
# This is to protect against future changes of uniq.
assert len(inames) == len(inputs)
ii, iii = zip(*gof.utils.uniq(zip(_inames, node.inputs)))
ii, iii = list(zip(*gof.utils.uniq(list(zip(_inames, node.inputs)))))
assert all([x == y for x, y in zip(ii, inames)])
assert all([x == y for x, y in zip(iii, inputs)])
......@@ -948,8 +948,9 @@ class Elemwise(OpenMPOp):
# These are the outputs that we will need to allocate
# (output, name, name of the c type), transposed
real = zip(*[(r, s, r.type.dtype_specs()[1])
for r, s in izip(node.outputs, onames) if r not in dmap])
real = list(zip(*[(r, s, r.type.dtype_specs()[1])
for r, s in izip(node.outputs, onames)
if r not in dmap]))
if real:
real_outputs, real_onames, real_odtypes = real
else:
......@@ -958,8 +959,9 @@ class Elemwise(OpenMPOp):
# Outputs that are aliased with an input (inplace)
# (output, name), transposed (c type name not needed since we don't
# need to allocate.
aliased = zip(*[(r, s)
for (r, s) in izip(node.outputs, onames) if r in dmap])
aliased = list(zip(*[(r, s)
for (r, s) in izip(node.outputs, onames)
if r in dmap]))
if aliased:
aliased_outputs, aliased_onames = aliased
else:
......@@ -985,7 +987,7 @@ class Elemwise(OpenMPOp):
# Check if all inputs (except broadcasted scalar) are fortran.
# In that case, create an fortran output ndarray.
z = zip(inames, inputs)
z = list(zip(inames, inputs))
alloc_fortran = ' && '.join(["PyArray_ISFORTRAN(%s)" % arr
for arr, var in z
if not all(var.broadcastable)])
......@@ -1156,7 +1158,7 @@ class Elemwise(OpenMPOp):
}
""" % locals()
if contig is not None:
z = zip(inames + onames, inputs + node.outputs)
z = list(zip(inames + onames, inputs + node.outputs))
cond1 = ' && '.join(["PyArray_ISCONTIGUOUS(%s)" % arr
for arr, var in z
if not all(var.broadcastable)])
......
......@@ -236,7 +236,7 @@ def make_loop(loop_orders, dtypes, loop_tasks, sub, openmp=None):
s = ""
for i, (pre_task, task), indices in reversed(zip(xrange(len(loop_tasks) - 1), loop_tasks, zip(*loop_orders))):
for i, (pre_task, task), indices in reversed(list(zip(xrange(len(loop_tasks) - 1), loop_tasks, list(zip(*loop_orders))))):
s = loop_over(preloops.get(i, "") + pre_task, s + task, indices, i)
s += loop_tasks[-1]
......@@ -521,7 +521,7 @@ def make_loop_careduce(loop_orders, dtypes, loop_tasks, sub):
s = preloops.get(0, "")
else:
s = ""
for i, (pre_task, task), indices in reversed(zip(xrange(len(loop_tasks) - 1), loop_tasks, zip(*loop_orders))):
for i, (pre_task, task), indices in reversed(list(zip(xrange(len(loop_tasks) - 1), loop_tasks, list(zip(*loop_orders))))):
s = loop_over(preloops.get(i, "") + pre_task, s + task, indices, i)
s += loop_tasks[-1]
......
......@@ -5776,7 +5776,7 @@ class FusionOptimizer(Optimizer):
assert len(new_outputs) == len(node.outputs)
try:
fgraph.replace_all_validate(
zip(node.outputs, new_outputs),
list(zip(node.outputs, new_outputs)),
reason=self.__class__.__name__)
did_something = True
nb_replacement += 1
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment