提交 af0df9ce authored 作者: David Warde-Farley's avatar David Warde-Farley 提交者: Arnaud Bergeron

dict and dict method related stuff.

上级 37714c93
......@@ -98,7 +98,7 @@ class DefaultOrderedDict(OrderedDict):
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
return type(self), args, None, None, list(self.items())
def copy(self):
    # Return a copy of this mapping; delegates to the __copy__ protocol
    # so copy() and copy.copy() share one implementation.
    return self.__copy__()
......
......@@ -111,7 +111,7 @@ if sys.version_info[:2] < (2, 7):
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def copy(self):
    # Shallow copy: build a new instance of the same class from self's items.
    return self.__class__(self)
......
......@@ -10,7 +10,7 @@ import sys
import gc
import logging
import six.moves.copyreg as copyreg
from itertools import product as itertools_product
from itertools import chain, product as itertools_product
from theano.compat import izip
import numpy
......@@ -18,7 +18,7 @@ import numpy
import theano
from theano import gof
from theano.compat import get_unbound_function
from six import string_types, iteritems
from six import string_types, iteritems, itervalues
from six.moves import StringIO, xrange
from theano.gof import (FunctionGraph, graph, utils, link,
ops_with_inner_function)
......@@ -917,7 +917,7 @@ def _check_viewmap(node, storage_map):
for key, val in iteritems(good_alias):
bad_alias.pop(key, None)
if bad_alias:
raise BadViewMap(node, oi, outstorage, bad_alias.values())
raise BadViewMap(node, oi, outstorage, list(bad_alias.values()))
# if its not aliased to input, check output->output aliasing
if not good_alias and _is_used_in_graph(onode):
......@@ -1391,7 +1391,7 @@ def _check_preallocated_output(node, thunk, prealloc_modes, def_val,
dmap = getattr(node.op, 'destroy_map', {})
vmap = getattr(node.op, 'view_map', {})
for i, r in enumerate(node.inputs):
if any(i in v for v in (dmap.values() + vmap.values())):
if any(i in v for v in chain(itervalues(dmap), itervalues(vmap))):
aliased_inputs.add(r)
_logger.debug('starting preallocated output checking')
......@@ -1803,7 +1803,7 @@ class _Linker(gof.link.LocalLinker):
# use new memory storage when it is needed, in particular for the
# function's outputs. no_recycling_map will be used in f() below.
if self.no_recycling is True:
no_recycling_map = storage_map.values()
no_recycling_map = list(storage_map.values())
no_recycling_map = utils.difference(no_recycling_map,
input_storage)
else:
......@@ -2011,8 +2011,8 @@ class _Linker(gof.link.LocalLinker):
# as viewd are unsafe too, because the
# corresponding output can be
# destroyed.
if any(i in v for v in (dmap.values() +
vmap.values())):
if any(i in v for v in chain(dmap.values(),
vmap.values())):
storage_map[r][0] = _lessbroken_deepcopy(
r_vals[r])
......
......@@ -212,7 +212,7 @@ def function(inputs, outputs=None, mode=None, updates=None, givens=None,
CVM stands for C Virtual Machine.
"""
if isinstance(outputs, dict):
output_items = outputs.items()
output_items = list(outputs.items())
for item_pair in output_items:
assert isinstance(item_pair[0], string_types)
......
......@@ -3,10 +3,11 @@
from __future__ import print_function
import copy
from six import iteritems
from six import string_types, iteritems
from six.moves import xrange
import six.moves.copyreg as copyreg
import six.moves.cPickle as pickle
import itertools
from itertools import chain
import time
import warnings
import numpy
......@@ -15,8 +16,6 @@ import theano
from theano import gof
from functools import partial
from theano.compat import izip
from six import string_types
from six.moves import xrange
import theano.compile.mode
from theano.compile.io import (
In, SymbolicInput, SymbolicInputKit, SymbolicOutput)
......@@ -65,7 +64,7 @@ def view_tree_set(v, treeset):
continue
vmap = getattr(cl.op, 'view_map', {})
dmap = getattr(cl.op, 'destroy_map', {})
for opos, iposlist in vmap.items() + dmap.items():
for opos, iposlist in chain(iteritems(vmap), iteritems(dmap)):
if v_input_pos_to_cl in iposlist:
if cl.outputs[opos] not in treeset:
view_tree_set(cl.outputs[opos], treeset)
......@@ -476,7 +475,7 @@ returned directly?"""
self.n_returned_outputs -= 1
for node in self.maker.fgraph.apply_nodes:
if node.op in ops_with_inner_function.keys():
if node.op in ops_with_inner_function:
self.nodes_with_inner_function.append(node.op)
def __contains__(self, item):
......@@ -715,7 +714,7 @@ returned directly?"""
# 1.no allow_gc return False
# 2.has allow_gc, if allow_gc is False, return True
if not getattr(self.fn, 'allow_gc', True):
for key in self.fn.storage_map.keys():
for key in self.fn.storage_map:
if not isinstance(key, theano.gof.Constant):
self.fn.storage_map[key][0] = None
......@@ -1231,7 +1230,8 @@ class FunctionMaker(object):
if not hasattr(linker, 'accept'):
raise ValueError("'linker' parameter of FunctionMaker should be "
"a Linker with an accept method or one of %s" %
theano.compile.mode.predefined_linkers.keys())
list(theano.compile.mode
.predefined_linkers.keys()))
# the 'no_borrow' outputs are the ones for which that we can't
# return the internal storage pointer.
......
......@@ -74,7 +74,7 @@ class ViewOp(gof.Op):
version = []
# If any of the c code is unversionned, we have to return ()
# Else, we will return a list of (type name, version) pairs.
for t, (c, v) in sorted(self.c_code_and_version.items(),
for t, (c, v) in sorted(iteritems(self.c_code_and_version),
key=lambda pair: str(pair[0])):
if not v:
warnings.warn("Type %s has C code for ViewOp, but it has no "
......@@ -169,7 +169,7 @@ class DeepCopyOp(gof.Op):
version = []
# If any of the c code is unversionned, we have to return ()
# Else, we will return a list of (type name, version) pairs.
for t, (c, v) in sorted(self.c_code_and_version.items(),
for t, (c, v) in sorted(iteritems(self.c_code_and_version),
key=lambda pair: str(pair[0])):
if not v:
warnings.warn("Type %s has C code for DeepCopyOp, but it has "
......@@ -290,7 +290,7 @@ class Shape(gof.Op):
version = []
# If any of the c code is unversionned, we have to return ()
# Else, we will return a list of (type name, version) pairs.
for t, (c, v) in sorted(self.c_code_and_version.items(),
for t, (c, v) in sorted(iteritems(self.c_code_and_version),
key=lambda pair: str(pair[0])):
if not v:
warnings.warn("Type %s has C code for Shape, but it has no "
......@@ -362,7 +362,7 @@ class Shape_i(gof.Op):
version = []
# If any of the c code is unversionned, we have to return ()
# Else, we will return a list of (type name, version) pairs.
for t, (c, ci, v) in sorted(self.c_code_and_version.items(),
for t, (c, ci, v) in sorted(iteritems(self.c_code_and_version),
key=lambda pair: str(pair[0])):
if not v:
warnings.warn("Type %s has C code for Shape_i, but it has "
......@@ -710,7 +710,7 @@ class Rebroadcast(gof.Op):
version = []
# If any of the c code is unversionned, we have to return ()
# Else, we will return a list of (type name, version) pairs.
for t, (c, v) in sorted(self.c_code_and_version.items(),
for t, (c, v) in sorted(iteritems(self.c_code_and_version),
key=lambda pair: str(pair[0])):
if not v:
warnings.warn("Type %s has C code for Rebroadcast, but it "
......@@ -853,7 +853,7 @@ class SpecifyShape(gof.Op):
version = []
# If any of the c code is unversionned, we have to return ()
# Else, we will return a list of (type name, version) pairs.
for t, (c, v, _) in sorted(self.c_code_and_version.items(),
for t, (c, v, _) in sorted(iteritems(self.c_code_and_version),
key=lambda pair: str(pair[0])):
if not v:
warnings.warn("Type %s has C code for SpecifyShape, but it "
......
......@@ -139,7 +139,7 @@ def rebuild_collect_shared(outputs,
if replace is None:
replace = []
try:
replace_pairs = replace.items()
replace_pairs = list(replace.items())
except Exception:
replace_pairs = replace
......@@ -473,7 +473,7 @@ def pfunc(params, outputs=None, mode=None, updates=None, givens=None,
# this typically does nothing, contrary to what one may expect.
in_var_set = set(in_variables)
try:
givens_pairs = givens.items()
givens_pairs = list(givens.items())
except AttributeError:
givens_pairs = givens
for x, y in givens_pairs:
......
......@@ -7,7 +7,7 @@ import warnings
import theano
from theano.gof.link import WrapLinker
from six import string_types, iteritems
from six import string_types, iteritems, itervalues
from theano.compile.mode import (Mode, register_mode,
predefined_modes, predefined_linkers,
predefined_optimizers)
......@@ -129,7 +129,7 @@ class ProfileMode(Mode):
def __get_local_time(self):
    # Total time spent in Apply nodes, summed over every profiled
    # function's ProfileStats (seconds).
    rval = 0
    for ps in itervalues(self.profile_stats):
        rval += sum(ps.apply_time.values())
    return rval
local_time = property(__get_local_time)
......@@ -244,27 +244,27 @@ class ProfileMode(Mode):
in self.profile_stats.values()])
fct_call = dict([(fn, ps.fct_callcount)
for (fn, ps) in self.profile_stats.items()])
for (fn, ps) in iteritems(self.profile_stats)])
fct_call_time = dict([(fn, ps.fct_call_time)
for (fn, ps) in self.profile_stats.items()])
for (fn, ps) in iteritems(self.profile_stats)])
apply_time = {}
for fn, ps in self.profile_stats.items():
for fn, ps in iteritems(self.profile_stats):
for (i, node) in enumerate(fn.maker.fgraph.toposort()):
apply_time[(i, node)] = ps.apply_time[node]
for (i, n), t in apply_time.items():
for (i, n), t in iteritems(apply_time):
if t == 0:
print(i, n)
apply_cimpl = {}
for fn, ps in self.profile_stats.items():
for ps in itervalues(self.profile_stats):
apply_cimpl.update(ps.apply_cimpl)
message = self.message
variable_shape = {}
for fn, ps in self.profile_stats.items():
for ps in itervalues(self.profile_stats):
variable_shape.update(ps.variable_shape)
other_time = dict(
......@@ -297,13 +297,13 @@ class ProfileMode(Mode):
def diff_dict(a_time, b_time_):
    # Per-key difference of two timing dicts: result[k] == a_time.get(k, 0)
    # - b_time_.get(k, 0), over the union of both key sets.
    r = {}
    # Copy so popping matched keys does not mutate the caller's dict.
    b_time = copy.copy(b_time_)
    for a, ta in iteritems(a_time):
        r.setdefault(a, 0)
        tb = b_time.pop(a, 0)
        r[a] += ta - tb
    # Keys only present in b_time_ (missing in a_time count as 0 there).
    for a, t in iteritems(b_time):
        r.setdefault(a, 0)
        r[a] += t
    return r
......@@ -399,7 +399,7 @@ class ProfileMode(Mode):
print('Theano fct summary:')
print('<% total fct time> <total time> <time per call> <nb call> '
'<fct name>')
for key in fct_call.keys():
for key in fct_call:
if fct_call[key] > 0:
print(' %4.1f%% %.3fs %.2es %d %s' %
(fct_call_time[key]/total_fct_time*100,
......@@ -416,14 +416,14 @@ class ProfileMode(Mode):
op_apply = {}
op_cimpl = {}
sop_apply = {}
for (i, a), t in apply_time.items():
for (i, a), t in iteritems(apply_time):
op = a.op
op_time.setdefault(op, 0)
op_call.setdefault(op, 0)
op_apply.setdefault(op, 0)
sop_apply.setdefault(type(a.op), 0)
op_time[op] += t
nb_call = [v for k, v in fct_call.items()
nb_call = [v for k, v in iteritems(fct_call)
if k.maker.fgraph is a.fgraph][0]
op_cimpl.setdefault(a.op, True)
op_cimpl[a.op] = op_cimpl[a.op] and apply_cimpl.get(a, False)
......@@ -440,7 +440,7 @@ class ProfileMode(Mode):
sop_op = {}
# map each op class to Bool. True iff all applies were done in c.
sop_cimpl = {}
for a, t in op_time.items():
for a, t in iteritems(op_time):
typ = type(a)
sop_time.setdefault(typ, 0)
sop_time[typ] += t
......@@ -457,7 +457,7 @@ class ProfileMode(Mode):
'<self seconds> <cumulative seconds> <time per call> [*] '
'<nb_call> <nb_op> <nb_apply> <Op name>')
sotimes = [(t*100/local_time, t, a, sop_cimpl[a], sop_call[a],
sop_op[a], sop_apply[a]) for a, t in sop_time.items()]
sop_op[a], sop_apply[a]) for a, t in iteritems(sop_time)]
sotimes.sort()
sotimes.reverse()
tot = 0
......@@ -484,7 +484,7 @@ class ProfileMode(Mode):
# The summary per op
op_flops = {}
for a, t in op_time.items():
for a, t in iteritems(op_time):
if hasattr(a, 'flops'):
op_flops[a] = a.flops*op_call[a]/t/1e6
flops_msg = ''
......@@ -502,7 +502,7 @@ class ProfileMode(Mode):
otimes = [(t*100/local_time, t, a, op_cimpl.get(a, 0),
op_call.get(a, 0), op_apply.get(a, 0))
for a, t in op_time.items()]
for a, t in iteritems(op_time)]
otimes.sort()
otimes.reverse()
tot = 0
......@@ -541,9 +541,9 @@ class ProfileMode(Mode):
'<apply time> <cumulative seconds> <time per call> [*] '
'<nb_call> <Apply position> <Apply Op name>')
atimes = [(t*100/local_time, t, a,
[v for k, v in fct_call.items()
[v for k, v in iteritems(fct_call)
if k.maker.fgraph is a[1].fgraph][0])
for a, t in apply_time.items()]
for a, t in iteritems(apply_time)]
atimes.sort()
atimes.reverse()
tot = 0
......
......@@ -253,7 +253,7 @@ class ProfileStats(object):
"""dict op -> total time on thunks"""
# timing is stored by node, we compute timing by class on demand
rval = {}
for node, t in self.apply_time.items():
for node, t in iteritems(self.apply_time):
typ = type(node.op)
rval.setdefault(typ, 0)
rval[typ] += t
......@@ -263,7 +263,7 @@ class ProfileStats(object):
"""dict op -> total number of thunk calls"""
# timing is stored by node, we compute timing by class on demand
rval = {}
for node, count in self.apply_callcount.items():
for node, count in iteritems(self.apply_callcount):
typ = type(node.op)
rval.setdefault(typ, 0)
rval[typ] += count
......@@ -273,7 +273,7 @@ class ProfileStats(object):
"""dict op -> total number of nodes"""
# timing is stored by node, we compute timing by class on demand
rval = {}
for node, count in self.apply_callcount.items():
for node, count in iteritems(self.apply_callcount):
typ = type(node.op)
rval.setdefault(typ, 0)
rval[typ] += 1
......@@ -298,7 +298,7 @@ class ProfileStats(object):
"""dict op -> total time on thunks"""
# timing is stored by node, we compute timing by Op on demand
rval = {}
for node, t in self.apply_time.items():
for node, t in iteritems(self.apply_time):
rval.setdefault(node.op, 0)
rval[node.op] += t
return rval
......@@ -308,8 +308,8 @@ class ProfileStats(object):
# timing is stored by node, we compute total time on demand
total = self.apply_time[node]
for parent in node.get_parents():
if parent.owner in self.apply_time.keys():
if parent.owner not in total_times.keys():
if parent.owner in self.apply_time:
if parent.owner not in total_times:
self.fill_node_total_time(parent.owner, total_times)
total += total_times[parent.owner]
total_times[node] = total
......@@ -317,7 +317,7 @@ class ProfileStats(object):
def compute_total_times(self):
"""dict op -> total time icluding the time for parents"""
rval = {}
for node in self.apply_time.keys():
for node in self.apply_time:
if node not in rval:
self.fill_node_total_time(node, rval)
return rval
......@@ -326,7 +326,7 @@ class ProfileStats(object):
"""dict op -> total number of thunk calls"""
# timing is stored by node, we compute timing by Op on demand
rval = {}
for node, count in self.apply_callcount.items():
for node, count in iteritems(self.apply_callcount):
rval.setdefault(node.op, 0)
rval[node.op] += count
return rval
......@@ -335,7 +335,7 @@ class ProfileStats(object):
"""dict op -> total number of nodes"""
# timing is stored by node, we compute timing by Op on demand
rval = {}
for node, count in self.apply_callcount.items():
for node, count in iteritems(self.apply_callcount):
rval.setdefault(node.op, 0)
rval[node.op] += 1
return rval
......@@ -372,7 +372,7 @@ class ProfileStats(object):
class_impl.get(clas, ' '),
class_call.get(clas, 0),
class_apply.get(clas, 0))
for clas, t in class_time.items()]
for clas, t in iteritems(class_time)]
otimes.sort(key=lambda t: (t[1], t[4], t[5]), reverse=True)
tot = 0
print('Class', file=file)
......@@ -454,7 +454,7 @@ class ProfileStats(object):
op_impl.get(op, ' '),
op_call.get(op, 0),
op_apply.get(op, 0))
for op, t in op_time.items()]
for op, t in iteritems(op_time)]
otimes.sort(key=lambda t: (t[1], t[4], t[5]), reverse=True)
tot = 0
print('Ops', file=file)
......@@ -564,7 +564,7 @@ class ProfileStats(object):
topos = {} # Only do the topo once per fct.
atimes = []
for a, t in self.apply_time.items():
for a, t in iteritems(self.apply_time):
if a.fgraph not in topos:
topo = a.fgraph.toposort()
topos[a.fgraph] = topo
......@@ -664,14 +664,14 @@ class ProfileStats(object):
var_mem = {} # varible->size in bytes; don't include input variables
node_mem = {} # node->total outputs size (only dense outputs)
for node in self.apply_callcount.keys():
for node in self.apply_callcount:
fct_memory.setdefault(node.fgraph, {})
fct_memory[node.fgraph].setdefault(node, [])
fct_shapes.setdefault(node.fgraph, {})
fct_shapes[node.fgraph].setdefault(node, [])
sum_dense = 0
for out in node.outputs:
if out in self.variable_shape.keys():
if out in self.variable_shape:
sh = self.variable_shape[out]
if hasattr(out.type, 'get_size'):
v = out.type.get_size(sh)
......@@ -1033,8 +1033,8 @@ class ProfileStats(object):
for fgraph, nodes_mem in iteritems(fct_memory):
# Sum of the size of all variables in bytes
sum_size = sum([sum([v for v in val if not isinstance(v, str)])
for key, val in iteritems(nodes_mem)])
sum_size = sum(sum(v for v in val if not isinstance(v, str))
for key, val in iteritems(nodes_mem))
order = fgraph.toposort()
# A list of intermediate variable that are not need
......@@ -1181,7 +1181,7 @@ class ProfileStats(object):
" <created/inplace/view>"
" <Apply node>", file=file)
print("", file=file)
items = node_mem.items()
items = list(node_mem.items())
items.sort(key=lambda a: a[1], reverse=True)
for idx, (node, node_outputs_size) in enumerate(items[:N]):
code = ['c'] * len(node.outputs)
......@@ -1273,7 +1273,7 @@ if False: # old code still to be ported from ProfileMode
sop_op = {}
# map each op class to Bool. True iff all applies were done in c.
sop_c = {}
for a, t in op_time.items():
for a, t in iteritems(op_time):
typ = type(a)
sop_time.setdefault(typ, 0)
sop_time[typ] += t
......@@ -1284,7 +1284,7 @@ if False: # old code still to be ported from ProfileMode
sop_call[typ] = sop_call.get(typ, 0) + op_call[a]
print('\nSingle Op-wise summary: <% of local_time spent on this kind of Op> <cumulative %%> <self seconds> <cumulative seconds> <time per call> <nb_call> <nb_op> <nb_op> <Op name>')
sotimes = [(t * 100 / local_time, t, a, sop_c[a],
sop_call[a], sop_op[a]) for a, t in sop_time.items()]
sop_call[a], sop_op[a]) for a, t in iteritems(sop_time)]
sotimes.sort(key=lambda t: (t[1], t[4], t[5]), reverse=True)
tot = 0
for f, t, a, ci, nb_call, nb_op in sotimes[:n_ops_to_print]:
......@@ -1311,7 +1311,7 @@ if False: # old code still to be ported from ProfileMode
other_time = total_time - total_fct_time - compile_time
print()
print('Theano fct summary: <% total fct time> <total time> <time per call> <nb call> <fct name>')
for key in fct_call.keys():
for key in fct_call:
if fct_call[key] > 0:
print(' %4.1f%% %.3fs %.2es %d %s' % (
fct_call_time[key] / total_fct_time * 100,
......@@ -1344,7 +1344,7 @@ if False: # old code still to be ported from ProfileMode
print()
print("List of apply that don't have float64 as input but have float64 in outputs. Usefull to know if we forgot some cast when using floatX=float32 or gpu code.")
print('<Apply> <Apply position> <fct name> <inputs type> <outputs type>')
for fct in fct_call.keys():
for fct in fct_call:
for idx, node in enumerate(fct.maker.fgraph.toposort()):
if any(hasattr(i, 'dtype') and i.dtype == 'float64' for i in node.outputs) and not any(hasattr(i, 'dtype') and i.dtype == 'float64' for i in node.inputs):
print(str(node), idx, fct.name, str([getattr(i, 'dtype', None) for i in node.inputs]), str([getattr(i, 'dtype', None) for i in node.outputs]))
......@@ -1370,7 +1370,7 @@ if False: # old code still to be ported from ProfileMode
print("Theano function input that are float64")
print("<fct name> <input name> <input type> <str input>")
for fct in fct_call.keys():
for fct in fct_call:
for i in fct.input_storage:
if hasattr(i.type, 'dtype') and i.type.dtype == 'float64':
print(fct.name, i.name, i.type, i)
......@@ -1526,13 +1526,13 @@ if False: # old code still to be ported from ProfileMode
def diff_dict(a_time, b_time_):
    # Per-key difference of two timing dicts: result[k] == a_time.get(k, 0)
    # - b_time_.get(k, 0), over the union of both key sets.
    r = {}
    # Copy so popping matched keys does not mutate the caller's dict.
    b_time = copy.copy(b_time_)
    for a, ta in iteritems(a_time):
        r.setdefault(a, 0)
        tb = b_time.pop(a, 0)
        r[a] += ta - tb
    # Keys only present in b_time_ (missing in a_time count as 0 there).
    for a, t in iteritems(b_time):
        r.setdefault(a, 0)
        r[a] += t
    return r
......
......@@ -1648,7 +1648,7 @@ class OpWiseCLinker(link.LocalLinker):
node == last_user[input])])
if no_recycling is True:
no_recycling = storage_map.values()
no_recycling = list(storage_map.values())
no_recycling = utils.difference(no_recycling, input_storage)
else:
no_recycling = [storage_map[r]
......
......@@ -1291,7 +1291,7 @@ class ModuleCache(object):
min_age = self.age_thresh_del_unversioned
with compilelock.lock_ctx():
all_key_datas = self.module_hash_to_key_data.values()
all_key_datas = list(self.module_hash_to_key_data.values())
for key_data in all_key_datas:
if not key_data.keys:
# May happen for broken versioned keys.
......
......@@ -3,6 +3,9 @@ Classes and functions for validating graphs that contain view
and inplace operations.
"""
from collections import deque
from six import iteritems
import theano
from . import toolbox
from . import graph
......@@ -348,7 +351,7 @@ if 0:
root_destroyer = {} # root -> destroyer apply
for app in self.destroyers:
for output_idx, input_idx_list in app.op.destroy_map.items():
for output_idx, input_idx_list in iteritems(app.op.destroy_map):
if len(input_idx_list) != 1:
raise NotImplementedError()
input_idx = input_idx_list[0]
......@@ -396,7 +399,7 @@ if 0:
self.destroyers.add(app)
# add this symbol to the forward and backward maps
for o_idx, i_idx_list in getattr(app.op, 'view_map', {}).items():
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map', {})):
if len(i_idx_list) > 1:
raise NotImplementedError(
'destroying this output invalidates multiple inputs',
......@@ -433,7 +436,7 @@ if 0:
# deleted on_detach().
# UPDATE self.view_i, self.view_o
for o_idx, i_idx_list in getattr(app.op, 'view_map', {}).items():
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map', {})):
if len(i_idx_list) > 1:
# destroying this output invalidates multiple inputs
raise NotImplementedError()
......@@ -466,8 +469,8 @@ if 0:
self.clients[new_r][app] += 1
# UPDATE self.view_i, self.view_o
for o_idx, i_idx_list in getattr(app.op, 'view_map',
{}).items():
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map',
{})):
if len(i_idx_list) > 1:
# destroying this output invalidates multiple inputs
raise NotImplementedError()
......@@ -538,7 +541,7 @@ if 0:
# add destroyed variable clients as computational dependencies
for app in self.destroyers:
# for each destroyed input...
for output_idx, input_idx_list in app.op.destroy_map.items():
for output_idx, input_idx_list in iteritems(app.op.destroy_map):
destroyed_idx = input_idx_list[0]
destroyed_variable = app.inputs[destroyed_idx]
root = droot[destroyed_variable]
......@@ -606,8 +609,11 @@ if 0:
# depend on destroyed_input
root_clients = OrderedSet()
for r in root_impact:
assert not [a for a, c in self.clients[r].items() if not c]
root_clients.update([a for a, c in self.clients[r].items() if c])
assert not [a for a, c in
iteritems(self.clients[r]) if not c]
root_clients.update([a for a, c in
iteritems(self.clients[r])
if c])
root_clients.remove(app)
if root_clients:
rval[app] = root_clients
......@@ -751,7 +757,7 @@ class DestroyHandler(toolbox.Bookkeeper):
root_destroyer = OrderedDict() # root -> destroyer apply
for app in self.destroyers:
for output_idx, input_idx_list in app.op.destroy_map.items():
for output_idx, input_idx_list in iteritems(app.op.destroy_map):
if len(input_idx_list) != 1:
raise NotImplementedError()
input_idx = input_idx_list[0]
......@@ -799,7 +805,7 @@ class DestroyHandler(toolbox.Bookkeeper):
self.destroyers.add(app)
# add this symbol to the forward and backward maps
for o_idx, i_idx_list in getattr(app.op, 'view_map', {}).items():
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map', {})):
if len(i_idx_list) > 1:
raise NotImplementedError(
'destroying this output invalidates multiple inputs',
......@@ -837,8 +843,8 @@ class DestroyHandler(toolbox.Bookkeeper):
# deleted on_detach().
# UPDATE self.view_i, self.view_o
for o_idx, i_idx_list in getattr(app.op, 'view_map',
OrderedDict()).items():
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map',
OrderedDict())):
if len(i_idx_list) > 1:
# destroying this output invalidates multiple inputs
raise NotImplementedError()
......@@ -872,8 +878,8 @@ class DestroyHandler(toolbox.Bookkeeper):
self.clients[new_r][app] += 1
# UPDATE self.view_i, self.view_o
for o_idx, i_idx_list in getattr(app.op, 'view_map',
OrderedDict()).items():
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map',
OrderedDict())):
if len(i_idx_list) > 1:
# destroying this output invalidates multiple inputs
raise NotImplementedError()
......@@ -949,7 +955,7 @@ class DestroyHandler(toolbox.Bookkeeper):
# add destroyed variable clients as computational dependencies
for app in self.destroyers:
# for each destroyed input...
for output_idx, input_idx_list in app.op.destroy_map.items():
for output_idx, input_idx_list in iteritems(app.op.destroy_map):
destroyed_idx = input_idx_list[0]
destroyed_variable = app.inputs[destroyed_idx]
root = droot[destroyed_variable]
......
......@@ -16,6 +16,7 @@ from theano import config
import warnings
from theano.compat import OrderedDict
from six import iteritems, itervalues
from six.moves import StringIO
from theano.misc.ordered_set import OrderedSet
......@@ -163,14 +164,14 @@ class FunctionGraph(utils.object2):
if hasattr(node, 'fgraph') and node.fgraph is not self:
raise Exception("%s is already owned by another fgraph" % node)
if (hasattr(node.op, 'view_map') and
not all([isinstance(view, (list, tuple))
for view in node.op.view_map.values()])):
not all(isinstance(view, (list, tuple))
for view in itervalues(node.op.view_map))):
raise Exception("Op '%s' have a bad view map '%s',"
" the values must be tuples or lists." % (
str(node.op), str(node.op.view_map)))
if (hasattr(node.op, 'destroy_map') and
not all([isinstance(destroy, (list, tuple))
for destroy in node.op.destroy_map.values()])):
not all(isinstance(destroy, (list, tuple))
for destroy in itervalues(node.op.destroy_map))):
raise Exception("Op '%s' have a bad destroy map '%s',"
" the values must be tuples or lists." % (
str(node.op), str(node.op.destroy_map)))
......@@ -667,7 +668,7 @@ class FunctionGraph(utils.object2):
str(feature.orderings) +
". Nondeterministic object is " +
str(orderings))
for node, prereqs in orderings.items():
for node, prereqs in iteritems(orderings):
if not isinstance(prereqs, (list, OrderedSet)):
raise TypeError(
"prereqs must be a type with a "
......@@ -675,7 +676,7 @@ class FunctionGraph(utils.object2):
" will be non-deterministic.")
ords.setdefault(node, []).extend(prereqs)
# eliminate duplicate prereqs
for (node, prereqs) in ords.items():
for (node, prereqs) in iteritems(ords):
ords[node] = list(OrderedSet(prereqs))
return ords
......
......@@ -1065,7 +1065,8 @@ def view_roots(r):
if owner is not None:
try:
view_map = owner.op.view_map
view_map = dict([(owner.outputs[o], i) for o, i in view_map.items()])
view_map = dict((owner.outputs[o], i)
for o, i in iteritems(view_map))
except AttributeError:
return [r]
if r in view_map:
......
......@@ -192,7 +192,7 @@ def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
storage_map_list = []
total_size = 0
total_size_inputs = 0
for k in storage_map.keys():
for k in storage_map:
storage_map_item = []
# storage_map_item[0]: the variable
......@@ -728,7 +728,7 @@ class PerformLinker(LocalLinker):
if no_recycling is True:
# True seems like some special code for *everything*?? -JB
# FunctionMaker always passes a list I think -JB
no_recycling = storage_map.values()
no_recycling = list(storage_map.values())
no_recycling = utils.difference(no_recycling, input_storage)
else:
no_recycling = [storage_map[r] for r in no_recycling if r not in fgraph.inputs]
......
......@@ -18,7 +18,7 @@ import numpy
import theano
from theano import config
from theano.compat import izip
from six import string_types, iteritems
from six import string_types, iteritems, itervalues
from six.moves import reduce
from theano.gof import graph, op, utils, unify, toolbox
from theano.gof.fg import InconsistencyError
......@@ -1501,8 +1501,8 @@ class NavigatorOptimizer(Optimizer):
return False
old_vars = node.outputs
if isinstance(replacements, dict):
old_vars = replacements.keys()
replacements = replacements.values()
old_vars = list(replacements.keys())
replacements = list(replacements.values())
elif not isinstance(replacements, (tuple, list)):
raise TypeError('Optimizer %s gave wrong type of replacement. '
'Expected list or tuple. Got %s' % (
......@@ -1739,7 +1739,7 @@ class EquilibriumOptimizer(NavigatorOptimizer):
yield opt
# if repeat is not a problem we can drop the set
s = set()
for lopt in self.local_optimizers_map.values():
for lopt in itervalues(self.local_optimizers_map):
for opt in lopt:
if opt not in s:
yield opt
......
......@@ -309,7 +309,7 @@ class SequenceDB(DB):
def print_summary(self, stream=sys.stdout):
print(self.__class__.__name__ + " (id %i)" % id(self), file=stream)
positions = self.__position__.items()
positions = list(self.__position__.items())
def c(a, b):
return cmp(a[1], b[1])
......
......@@ -102,7 +102,7 @@ if 0:
tasks[node].extend(lopt for track, i, lopt in self.fetch_tracks0(node.op))
u = self.attach_updater(fgraph, importer, pruner, chin)
print('KEYS', list(map(hash, tasks.keys())))
print('KEYS', list(map(hash, list(tasks.keys()))))
while tasks:
for node in tasks:
todo = tasks.pop(node)
......
from collections import defaultdict
from six import iteritems
from theano.gof.graph import list_of_nodes
from theano.compat import cmp
......@@ -105,7 +106,8 @@ def _toposort(edges):
[2] http://en.wikipedia.org/wiki/Toposort#Algorithms
"""
incoming_edges = reverse_dict(edges)
incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())
incoming_edges = dict((k, set(val))
for k, val in iteritems(incoming_edges))
S = set((v for v in edges if v not in incoming_edges))
L = []
......
......@@ -6,6 +6,7 @@ import unittest
from nose.plugins.skip import SkipTest
import numpy
from six import itervalues
from theano import function
from theano.gof import vm
......@@ -361,9 +362,9 @@ def test_reallocation():
def check_storage(storage_map):
from theano.tensor.var import TensorConstant
for i in storage_map.keys():
for i in storage_map:
if not isinstance(i, TensorConstant):
keys_copy = storage_map.keys()[:]
keys_copy = list(storage_map.keys())[:]
keys_copy.remove(i)
for o in keys_copy:
if (storage_map[i][0] and
......@@ -372,5 +373,5 @@ def test_reallocation():
return [False, None]
assert check_storage(storage_map)[0]
assert len(set([id(v) for v in
storage_map.values()])) < len(storage_map)
assert len(set(id(v) for v in
itervalues(storage_map))) < len(storage_map)
......@@ -413,7 +413,7 @@ class NoOutputFromInplace(Feature):
node = out.owner
op = node.op
out_idx = node.outputs.index(out)
if hasattr(op, 'destroy_map') and out_idx in op.destroy_map.keys():
if hasattr(op, 'destroy_map') and out_idx in op.destroy_map:
raise theano.gof.InconsistencyError(
"A function graph Feature has requested (probably for ",
"efficiency reasons for scan) that outputs of the graph",
......
......@@ -36,7 +36,9 @@ class Variable:
def __init__(self, name="?"):
    # Display name used by __str__; "?" marks an anonymous variable.
    self.name = name
def __str__(self):
    # Render as "ClassName(attr1=v1, attr2=v2, ...)" from the instance
    # __dict__. Attribute order follows the dict's iteration order.
    return (self.__class__.__name__ + "(" +
            ", ".join("%s=%s" % (key, value)
                      for key, value in iteritems(self.__dict__)) + ")")
def __repr__(self):
    # repr mirrors str for readable debugging output.
    return str(self)
......@@ -136,7 +138,7 @@ class Unification:
else:
# Copy all the unification data.
U = Unification(self.inplace)
for var, (best, pool) in self.unif.items():
for var, (best, pool) in iteritems(self.unif):
# The pool of a variable is the set of all the variables that
# are unified to it (all the variables that must have the same
# value). The best is the Variable that represents a set of
......@@ -332,7 +334,7 @@ def unify_walk(d1, d2, U):
"""
Tries to unify values of corresponding keys.
"""
for (k1, v1) in d1.items():
for (k1, v1) in iteritems(d1):
if k1 in d2:
U = unify_walk(v1, d2[k1], U)
if U is False:
......@@ -410,12 +412,12 @@ def unify_merge(l1, l2, U):
@comm_guard(dict, dict)
def unify_merge(d1, d2, U):
    # Key-wise merge of two dicts under the unification context U.
    # Keys present in both sides are unified pairwise; keys unique to either
    # side are unified against themselves (a no-op merge) so they survive.
    merged = d1.__class__()
    for key, left in iteritems(d1):
        right = d2[key] if key in d2 else left
        merged[key] = unify_merge(left, right, U)
    for key, right in iteritems(d2):
        if key not in d1:
            merged[key] = unify_merge(right, right, U)
    return merged
......
......@@ -3,6 +3,8 @@ import linecache
import traceback
import sys
from six import iteritems
from theano import config
......@@ -142,7 +144,7 @@ class scratchpad:
def info(self):
    """Print this scratchpad's identity and every attribute it carries."""
    print("<theano.gof.utils.scratchpad instance at %i>" % id(self))
    # The diff scrape kept both the old ``.items()`` loop header and the new
    # ``iteritems`` one, which parse as accidentally NESTED loops (printing
    # each pair len(__dict__) times); keep only the iteritems loop.
    for k, v in iteritems(self.__dict__):
        print(" %s: %s" % (k, v))
......@@ -249,14 +251,14 @@ def toposort(prereqs_d):
# all1 = set(prereqs_d.keys())
# all2 = set()
# for x, y in prereqs_d.items():
# for x, y in iteritems(prereqs_d):
# all2.update(y)
# print all1.difference(all2)
seq = []
done = set()
postreqs_d = {}
for x, prereqs in prereqs_d.items():
for x, prereqs in iteritems(prereqs_d):
for prereq in prereqs:
postreqs_d.setdefault(prereq, set()).add(x)
next = set([k for k in prereqs_d if not prereqs_d[k]])
......
......@@ -16,7 +16,7 @@ from theano.configparser import (config, AddConfigVar,
import theano.gof.cmodule
from six import iteritems
from six import iteritems, itervalues
from six.moves import xrange
logger = logging.getLogger(__name__)
......@@ -854,9 +854,9 @@ class VM_Linker(link.LocalLinker):
nodes_idx_inv = {}
vars_idx_inv = {}
for (node, i) in nodes_idx.items():
for (node, i) in iteritems(nodes_idx):
nodes_idx_inv[i] = node
for (var, i) in vars_idx.items():
for (var, i) in iteritems(vars_idx):
vars_idx_inv[i] = var
# put storage_map and compute_map into a int-based scheme
......@@ -892,7 +892,7 @@ class VM_Linker(link.LocalLinker):
# build the var owner array
var_owner = [None] * len(vars_idx)
for (var, i) in vars_idx.items():
for (var, i) in iteritems(vars_idx):
if var.owner:
var_owner[i] = nodes_idx[var.owner]
......@@ -920,7 +920,7 @@ class VM_Linker(link.LocalLinker):
# values of the update expressions).
update_storage = []
update_in_from_out = {}
for (ivar, ovar) in updated_vars.items():
for (ivar, ovar) in iteritems(updated_vars):
update_in_from_out[vars_idx[ovar]] = vars_idx[ivar]
for oidx in output_vars:
if oidx in update_in_from_out:
......@@ -1036,7 +1036,7 @@ class VM_Linker(link.LocalLinker):
lazy = not all([(not th.lazy) for th in thunks])
if not (lazy or (config.profile and config.profile_memory) or
self.use_cloop or self.callback):
for pair in reallocated_info.values():
for pair in itervalues(reallocated_info):
storage_map[pair[1]] = storage_map[pair[0]]
computed, last_user = link.gc_helper(order)
......@@ -1048,7 +1048,7 @@ class VM_Linker(link.LocalLinker):
if (input in computed and
input not in fgraph.outputs and
node == last_user[input] and
input not in reallocated_info.keys()):
input not in reallocated_info):
clear_after_this_thunk.append(storage_map[input])
post_thunk_clear.append(clear_after_this_thunk)
else:
......
......@@ -6,6 +6,7 @@ import time
import warnings
import numpy # for numeric_grad
from six import itervalues
import theano
......@@ -450,7 +451,7 @@ def grad(cost, wrt, consider_constant=None,
if cost is not None:
outputs.append(cost)
if known_grads is not None:
outputs.extend(known_grads.keys())
outputs.extend(list(known_grads.keys()))
var_to_app_to_idx = _populate_var_to_app_to_idx(
outputs, wrt, consider_constant)
......@@ -1011,7 +1012,7 @@ def _populate_grad_dict(var_to_app_to_idx,
# copies of each destroyed input.
try:
dinputs = [node.inputs[x[0]] for x in
node.op.destroy_map.values()]
itervalues(node.op.destroy_map)]
except AttributeError:
dinputs = []
......
......@@ -22,6 +22,7 @@ from theano.tensor import TensorType
from theano import gof
from theano.gof import PureOp, Apply
from six import iteritems
from six.moves import xrange
from theano.compile import optdb
from theano.tensor import opt
......@@ -515,11 +516,11 @@ def cond_merge_ifs_true(node):
ins_t = tval.owner.inputs[1:][:ins_op.n_outs]
replace[idx + 1] = ins_t[tval.owner.outputs.index(tval)]
if len(replace.items()) == 0:
if len(replace) == 0:
return False
old_ins = list(node.inputs)
for pos, var in replace.items():
for pos, var in iteritems(replace):
old_ins[pos] = var
return op(*old_ins, **dict(return_list=True))
......@@ -540,11 +541,11 @@ def cond_merge_ifs_false(node):
replace[idx + 1 + op.n_outs] = \
ins_t[fval.owner.outputs.index(fval)]
if len(replace.items()) == 0:
if len(replace) == 0:
return False
old_ins = list(node.inputs)
for pos, var in replace.items():
for pos, var in iteritems(replace):
old_ins[pos] = var
return op(*old_ins, **dict(return_list=True))
......@@ -618,7 +619,7 @@ def cond_remove_identical(node):
jdx not in out_map):
out_map[jdx] = idx
if len(out_map.keys()) == 0:
if len(out_map) == 0:
return False
nw_ts = []
......@@ -642,7 +643,7 @@ def cond_remove_identical(node):
rval = []
for idx in xrange(len(node.outputs)):
if idx in out_map.keys():
if idx in out_map:
rval += [new_outs[inv_map[out_map[idx]]]]
else:
rval += [new_outs[inv_map[idx]]]
......
......@@ -3,7 +3,7 @@ import six.moves.cPickle as pickle
import os, sys
import theano
from six import iteritems
from six import iteritems, itervalues
DISPLAY_DUPLICATE_KEYS = False
DISPLAY_MOST_FREQUENT_DUPLICATE_CCODE = False
......@@ -52,7 +52,7 @@ if DISPLAY_DUPLICATE_KEYS:
print("Duplicate key (%i copies): %s" % (v, pickle.loads(k)))
nbs_keys = {} # nb seen -> now many key
for val in keys.values():
for val in itervalues(keys):
nbs_keys.setdefault(val, 0)
nbs_keys[val] += 1
......@@ -75,16 +75,16 @@ if DISPLAY_MOST_FREQUENT_DUPLICATE_CCODE:
print(kk)
print("key.pkl histograph")
l = nbs_keys.items()
l = list(nbs_keys.items())
l.sort()
print(l)
print("mod.{cpp,cu} histogram")
l = nbs_mod.items()
l = list(nbs_mod.items())
l.sort()
print(l)
total = sum([len(k) for k in mods.values()])
total = sum(len(k) for k in list(mods.values()))
uniq = len(mods)
useless = total - uniq
print("mod.{cpp,cu} total:", total)
......
......@@ -194,7 +194,7 @@ class PycudaElemwiseSourceModuleOp(GpuOp):
def __str__(self):
if self.name is None:
if self.inplace_pattern:
items = self.inplace_pattern.items()
items = list(self.inplace_pattern.items())
items.sort()
return self.__class__.__name__ + "{%s}%s" % (self.scalar_op,
str(items))
......@@ -288,7 +288,7 @@ class PycudaElemwiseSourceModuleMakeThunkOp(Op):
def __str__(self):
if self.name is None:
if self.inplace_pattern:
items = self.inplace_pattern.items()
items = list(self.inplace_pattern.items())
items.sort()
return self.__class__.__name__ + "{%s}%s" % (self.scalar_op,
str(items))
......
......@@ -46,7 +46,7 @@ def test_dump_zip_names():
foo_3 = theano.shared(2, name='foo')
with open('model.zip', 'wb') as f:
dump((foo_1, foo_2, foo_3, numpy.array(3)), f)
keys = numpy.load('model.zip').keys()
keys = list(numpy.load('model.zip').keys())
assert keys == ['foo', 'foo_2', 'foo_3', 'array_0', 'pkl']
foo_3 = numpy.load('model.zip')['foo_3']
assert foo_3 == numpy.array(2)
......
......@@ -844,7 +844,7 @@ def pydotprint(fct, outfile=None,
astr = apply_name(node)
use_color = None
for opName, color in colorCodes.items():
for opName, color in iteritems(colorCodes):
if opName in node.op.__class__.__name__:
use_color = color
......@@ -1037,7 +1037,7 @@ def pydotprint_variables(vars,
my_list[app] = astr
use_color = None
for opName, color in colorCodes.items():
for opName, color in iteritems(colorCodes):
if opName in app.op.__class__.__name__:
use_color = color
......
......@@ -4,12 +4,12 @@ import logging
import sys
import numpy
from six import iteritems
from six.moves import StringIO, xrange
import theano
from theano import gof, Type, Apply
from theano import tensor, scalar, config
from six.moves import StringIO, xrange
from theano.gradient import grad_undefined
from theano.scalar import Scalar
......@@ -228,7 +228,7 @@ class GpuElemwise(GpuOp):
self.sync == other.sync)
def _rehash(self):
items = self.inplace_pattern.items()
items = list(self.inplace_pattern.items())
items.sort()
tuple_items = [k for k, v in items]
for k, v in items:
......@@ -248,7 +248,7 @@ class GpuElemwise(GpuOp):
def __str__(self):
if self.inplace_pattern:
items = self.inplace_pattern.items()
items = list(self.inplace_pattern.items())
items.sort()
# We need to print the scalar_op, not only the its class name
# to have the full definition of composite op.
......@@ -3827,7 +3827,7 @@ def profile_printer(fct_name, compile_time, fct_call_time, fct_call,
cpu = 0
gpu = 0
trans = 0
for (_, node), t in apply_time.items():
for (_, node), t in iteritems(apply_time):
if isinstance(node.op.__class__.__name__,
(HostFromGpu, GpuFromHost)):
trans += t
......@@ -3843,7 +3843,7 @@ def profile_printer(fct_name, compile_time, fct_call_time, fct_call,
print()
print(" Theano function input that are float64")
print(" <fct name> <input name> <input type> <str input>")
for fct in fct_call.keys():
for fct in fct_call:
for i in fct.input_storage:
if hasattr(i.type, 'dtype') and i.type.dtype == 'float64':
print(' ', fct.name, i.name, i.type, i)
......@@ -3852,7 +3852,7 @@ def profile_printer(fct_name, compile_time, fct_call_time, fct_call,
print(" List of apply that don't have float64 as input but have float64 in outputs")
print(" (Useful to know if we forgot some cast when using floatX=float32 or gpu code)")
print(' <Apply> <Apply position> <fct name> <inputs type> <outputs type>')
for fct in fct_call.keys():
for fct in fct_call:
for idx, node in enumerate(fct.maker.fgraph.toposort()):
if (any(hasattr(i, 'dtype') and i.dtype == 'float64'
for i in node.outputs) and
......
......@@ -2,6 +2,8 @@ import unittest
from theano.compat import izip
from copy import copy, deepcopy
from six import iteritems
import numpy
import theano
import theano.tensor as T
......@@ -104,7 +106,7 @@ def rand_gpuarray(*shape, **kwargs):
dtype = kwargs.pop('dtype', theano.config.floatX)
cls = kwargs.pop('cls', None)
if len(kwargs) != 0:
raise TypeError('Unexpected argument %s', kwargs.keys()[0])
raise TypeError('Unexpected argument %s', list(kwargs.keys())[0])
return gpuarray.array(r, dtype=dtype, cls=cls)
......@@ -133,7 +135,7 @@ def makeTester(name, op, gpu_op, cases, checks=None, mode_gpu=mode_with_gpu,
if skip:
raise SkipTest(skip)
for testname, inputs in cases.items():
for testname, inputs in iteritems(cases):
self.run_case(testname, inputs)
def run_case(self, testname, inputs):
......@@ -200,7 +202,7 @@ def makeTester(name, op, gpu_op, cases, checks=None, mode_gpu=mode_with_gpu,
self.op, testname, i, inputs, expected,
expected.dtype, variable, variable.dtype))
for description, check in self.checks.items():
for description, check in iteritems(self.checks):
if not check(inputs, variables):
self.fail(("Test %s::%s: Failed check: %s "
"(inputs were %s, ouputs were %s)") %
......
......@@ -3,6 +3,8 @@ import logging
logger = logging.getLogger(__name__)
import numpy
from six import iteritems
from theano.gof import Op, Apply
from theano.tensor import as_tensor_variable, dot, DimShuffle, Dot
......@@ -186,7 +188,7 @@ class HintsFeature(object):
def update_second_from_first(self, r0, r1):
old_hints = self.hints[r0]
new_hints = self.hints[r1]
for k, v in old_hints.items():
for k, v in iteritems(old_hints):
if k in new_hints and new_hints[k] is not v:
raise NotImplementedError()
if k not in new_hints:
......
......@@ -457,7 +457,7 @@ def scan(fn,
# extract still missing inputs (there still might be so) and add them
# as non sequences at the end of our args
fake_nonseqs = [x.type() for x in non_seqs]
fake_outputs = scan_utils.clone(outputs + updates.values(),
fake_outputs = scan_utils.clone(outputs + list(updates.values()),
replace=dict(izip(non_seqs,
fake_nonseqs)))
all_inputs = ifilter(
......
......@@ -17,6 +17,8 @@ import logging
import numpy
from six import iteritems
import theano
from theano import compile
from theano.compat import izip
......@@ -357,7 +359,7 @@ def profile_printer(fct_name, compile_time, fct_call_time, fct_call,
total_super_scan_time = 0
total_scan_fct_time = 0
total_scan_op_time = 0
for (_, node), v in apply_time.items():
for (_, node), v in iteritems(apply_time):
if isinstance(node.op, Scan):
if v > 0:
scan_fct_time = node.op.mode_instance.fn_time
......
......@@ -4,7 +4,7 @@ import theano
import theano.tensor as T
import collections
from six import string_types, add_metaclass
from six import string_types, add_metaclass, iteritems
#import klass
......@@ -26,7 +26,7 @@ class InitGraph(type):
return True
return isinstance(v, theano.Variable) and not k.startswith('_')
r = {}
for key, val in dct.items():
for key, val in iteritems(dct):
if list(filter(key, val)):
r[key] = val
return r
......@@ -34,7 +34,7 @@ class InitGraph(type):
if not isinstance(build_graph_rval, dict):
raise TypeError('%s.build_graph did not return dictionary' % cls)
dct = just_symbolic(build_graph_rval)
for key, val in dct.items():
for key, val in iteritems(dct):
# print ' adding class attribute', key
if isinstance(val, theano.Variable) and val.name is None:
val.name = key
......@@ -311,7 +311,7 @@ if 0:
return deco(dummy)
locals_dict = f()
for key, val in locals_dict.items():
for key, val in iteritems(locals_dict):
if isinstance(val, theano.Variable):
try:
kres = klass.KlassMember(val)
......
......@@ -60,6 +60,7 @@ import logging
import time
import numpy
from six import iteritems
import theano
from theano.compat import exc_message
......@@ -2648,7 +2649,7 @@ def profile_printer(fct_name, compile_time, fct_call_time, fct_call,
total_super_scan_time = 0
total_scan_fct_time = 0
total_scan_op_time = 0
for (_, node), v in apply_time.items():
for (_, node), v in iteritems(apply_time):
if isinstance(node.op, Scan):
if v > 0:
scan_fct_time = node.op.mode_instance.fn_time
......
......@@ -67,7 +67,7 @@ from theano import tensor
from theano.tensor import opt, get_scalar_constant_value
from theano import gof
from theano.compat import OrderedDict
from six import integer_types
from six import integer_types, iteritems
from theano.gof.opt import Optimizer
from theano.gof import toolbox, DestroyHandler, InconsistencyError
from theano.compile import optdb
......@@ -590,7 +590,7 @@ class PushOutSeqScan(gof.Optimizer):
# We need to add one extra dimension to the outputs
if replace_with and len(replace_with) == len(node.outputs):
fgraph.replace_all_validate_remove(
replace_with.items(),
list(replace_with.items()),
remove=[node],
reason='scanOp_pushout_seqs_ops')
return True
......@@ -1410,7 +1410,7 @@ class ScanSaveMem(gof.Optimizer):
(inps, outs, info, node_ins, compress_map) = \
scan_utils.compress_outs(op, not_required, nw_inputs)
inv_compress_map = OrderedDict()
for k, v in compress_map.items():
for k, v in iteritems(compress_map):
inv_compress_map[v] = k
node_ins = [pre_greedy_local_optimizer(list_opt_slice, x) for x in
......
......@@ -216,7 +216,7 @@ def clone(output,
share_inputs = copy_inputs
if isinstance(replace, dict):
items = replace.items()
items = list(replace.items())
elif isinstance(replace, (list, tuple)):
items = replace
elif replace is None:
......@@ -610,8 +610,8 @@ class Validator(object):
# Mapping from invalid variables to equivalent valid ones.
self.valid_equivalent = valid_equivalent.copy()
self.valid.update(valid_equivalent.values())
self.invalid.update(valid_equivalent.keys())
self.valid.update(list(valid_equivalent.values()))
self.invalid.update(list(valid_equivalent.keys()))
def check(self, out):
'''
......
......@@ -67,7 +67,7 @@ class SparseType(gof.Type):
self.format = format
else:
raise NotImplementedError('unsupported format "%s" not in list' %
format, self.format_cls.keys())
format, list(self.format_cls.keys()))
def filter(self, value, strict=False, allow_downcast=None):
if isinstance(value, self.format_cls[self.format])\
......
......@@ -562,7 +562,7 @@ class Elemwise(OpenMPOp):
# inplace_pattern maps output idx -> input idx
inplace_pattern = self.inplace_pattern
if inplace_pattern:
for overwriter, overwritten in inplace_pattern.items():
for overwriter, overwritten in iteritems(inplace_pattern):
for ob, ib in izip(out_broadcastables[overwriter],
inputs[overwritten].type.broadcastable):
if ib and not ob:
......@@ -584,8 +584,8 @@ class Elemwise(OpenMPOp):
def __eq__(self, other):
if type(self) == type(other):
items = self.inplace_pattern.items()
other_items = other.inplace_pattern.items()
items = list(self.inplace_pattern.items())
other_items = list(other.inplace_pattern.items())
items.sort()
other_items.sort()
rval = ((self.scalar_op == other.scalar_op)
......@@ -605,7 +605,7 @@ class Elemwise(OpenMPOp):
def __str__(self):
if self.name is None:
if self.inplace_pattern:
items = self.inplace_pattern.items()
items = list(self.inplace_pattern.items())
items.sort()
return "Elemwise{%s}%s" % (self.scalar_op, str(items))
else:
......
......@@ -2882,14 +2882,14 @@ def local_useless_rebroadcast(node):
else:
# Keep the flags that modify something
new_axis = {}
for dim, bc in node.op.axis.items():
for dim, bc in list(node.op.axis.items()):
if x.broadcastable[dim] != bc:
new_axis[dim] = bc
if new_axis == node.op.axis:
# All flags are useful
return
else:
return [T.Rebroadcast(*new_axis.items())(x)]
return [T.Rebroadcast(*list(new_axis.items()))(x)]
@register_canonicalize
......@@ -2915,7 +2915,7 @@ def local_rebroadcast_lift(node):
# by the `unbroadcast` function before we are in the actual function
# compilation phase.
if hasattr(input, 'clients') and len(input.clients) == 1:
rval = inode.op.make_node(T.Rebroadcast(*op.axis.items())(
rval = inode.op.make_node(T.Rebroadcast(*list(op.axis.items()))(
inode.inputs[0])).outputs
return rval
if inode and isinstance(inode.op, T.Rebroadcast):
......@@ -2924,7 +2924,7 @@ def local_rebroadcast_lift(node):
axis = inode.op.axis.copy()
axis.update(op.axis)
iinput = inode.inputs[0]
rval = [T.Rebroadcast(*axis.items())(iinput)]
rval = [T.Rebroadcast(*list(axis.items()))(iinput)]
return rval
......
......@@ -10,6 +10,7 @@ import warnings
from copy import copy, deepcopy
# Import builtin min to be able to use it after importing the tensor version.
from theano.compat import izip
from six import iteritems
from six.moves.builtins import min as builtin_min
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
......@@ -335,7 +336,7 @@ def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
good = self.add_memmap_values(self.good)
for testname, inputs in good.items():
for testname, inputs in iteritems(good):
inputs = [copy(input) for input in inputs]
inputrs = [TensorType(
dtype=input.dtype,
......@@ -409,7 +410,7 @@ def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
atol=eps, rtol=eps),
numpy.allclose(variable, expected)))
for description, check in self.checks.items():
for description, check in iteritems(self.checks):
if not check(inputs, variables):
self.fail(("Test %s::%s: Failed check: %s (inputs"
" were %s, outputs were %s)") % (
......@@ -419,7 +420,7 @@ def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
def test_bad_build(self):
if skip:
raise SkipTest(skip)
for testname, inputs in self.bad_build.items():
for testname, inputs in iteritems(self.bad_build):
inputs = [copy(input) for input in inputs]
inputrs = [shared(input) for input in inputs]
self.assertRaises(Exception,
......@@ -431,7 +432,7 @@ def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
def test_bad_runtime(self):
if skip:
raise SkipTest(skip)
for testname, inputs in self.bad_runtime.items():
for testname, inputs in iteritems(self.bad_runtime):
inputrs = [shared(input) for input in inputs]
try:
node = safe_make_node(self.op, *inputrs)
......@@ -463,7 +464,7 @@ def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
backup = config.warn.sum_div_dimshuffle_bug
config.warn.sum_div_dimshuffle_bug = False
try:
for testname, inputs in self.grad.items():
for testname, inputs in iteritems(self.grad):
inputs = [copy(input) for input in inputs]
try:
utt.verify_grad(self.op, inputs,
......@@ -491,7 +492,7 @@ def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
# This is not actually an Op
return
for testname, inputs in self.good.items():
for testname, inputs in iteritems(self.good):
inputs = [copy(input) for input in inputs]
inputrs = [TensorType(
dtype=input.dtype,
......@@ -829,7 +830,7 @@ def copymod(dct, without=None, **kwargs):
for a in without:
if a in rval:
del rval[a]
for kw, val in kwargs.items():
for kw, val in iteritems(kwargs):
rval[kw] = val
return rval
......
......@@ -42,7 +42,7 @@ def hash_from_dict(d):
hashable.
"""
items = d.items()
items = list(d.items())
items.sort()
first_part = [k for k, v in items]
second_part = []
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论