Commit 6d7e0403, authored by David Warde-Farley, committed by Arnaud Bergeron

Convert map() to list(map()) where necessary.

Parent 12d97411
......@@ -762,7 +762,7 @@ def _optcheck_fgraph(input_specs, output_specs, accept_inplace=False):
for feature in std_fgraph.features:
fgraph.attach_feature(feature())
return fgraph, map(SymbolicOutput, updates), equivalence_tracker
return fgraph, list(map(SymbolicOutput, updates)), equivalence_tracker
class DataDestroyed():
......
......@@ -165,7 +165,7 @@ def std_fgraph(input_specs, output_specs, accept_inplace=False):
# If named nodes are replaced, keep the name
for feature in std_fgraph.features:
fgraph.attach_feature(feature())
return fgraph, map(SymbolicOutput, updates)
return fgraph, list(map(SymbolicOutput, updates))
std_fgraph.features = [gof.toolbox.PreserveNames]
......@@ -1502,10 +1502,10 @@ def orig_function(inputs, outputs, mode=None, accept_inplace=False,
t1 = time.time()
mode = theano.compile.mode.get_mode(mode)
inputs = map(convert_function_input, inputs)
inputs = list(map(convert_function_input, inputs))
if outputs is not None:
if isinstance(outputs, (list, tuple)):
outputs = map(FunctionMaker.wrap_out, outputs)
outputs = list(map(FunctionMaker.wrap_out, outputs))
else:
outputs = FunctionMaker.wrap_out(outputs)
......
......@@ -376,7 +376,7 @@ def get_module_hash(src_code, key):
to_hash = [l.strip() for l in src_code.split('\n')]
# Get the version part of the key (ignore if unversioned).
if key[0]:
to_hash += map(str, key[0])
to_hash += list(map(str, key[0]))
c_link_key = key[1]
# Currently, in order to catch potential bugs early, we are very
# convervative about the structure of the key and raise an exception
......
......@@ -927,7 +927,7 @@ def is_same_graph(var1, var2, givens=None, debug=False):
in_xs = []
in_ys = []
# Compute the sets of all variables found in each computational graph.
inputs_var = map(inputs, ([var1], [var2]))
inputs_var = list(map(inputs, ([var1], [var2])))
all_vars = [set(variables(v_i, v_o))
for v_i, v_o in ((inputs_var[0], [var1]),
(inputs_var[1], [var2]))]
......
......@@ -756,7 +756,7 @@ def pre_constant_merge(vars):
var.owner.inputs[idx] = recursive_merge(inp)
return var
return map(recursive_merge, vars)
return list(map(recursive_merge, vars))
########################
......
......@@ -102,7 +102,7 @@ if 0:
tasks[node].extend(lopt for track, i, lopt in self.fetch_tracks0(node.op))
u = self.attach_updater(fgraph, importer, pruner, chin)
print('KEYS', map(hash, tasks.keys()))
print('KEYS', list(map(hash, tasks.keys())))
while tasks:
for node in tasks.iterkeys():
todo = tasks.pop(node)
......
......@@ -408,7 +408,7 @@ def give_variables_names(variables):
""" Gives unique names to an iterable of variables. Modifies input.
This function is idempotent."""
names = map(lambda var: var.name, variables)
names = [var.name for var in variables]
h = hist(names)
def bad_var(var):
......@@ -417,7 +417,7 @@ def give_variables_names(variables):
for i, var in enumerate(filter(bad_var, variables)):
var.name = (var.name or "") + "_%d" % i
if not unique(map(str, variables)):
if not unique(list(map(str, variables))):
raise ValueError("Not all variables have unique names. Maybe you've "
"named some of the variables identically")
return variables
......
......@@ -23,7 +23,7 @@ def runScript(N):
if err:
print(err)
sys.exit()
return map(float, out.split(" "))
return list(map(float, out.split(" ")))
if __name__ == '__main__':
options, arguments = parser.parse_args(sys.argv)
......
......@@ -889,8 +889,8 @@ class GpuJoin(HideC, Join):
def make_node(self, axis, *tensors):
node = Join.make_node(self, axis, *tensors)
return Apply(self, [node.inputs[0]] + map(as_gpuarray_variable,
tensors),
return Apply(self, [node.inputs[0]] + list(map(as_gpuarray_variable,
tensors)),
[GpuArrayType(broadcastable=node.outputs[0].broadcastable,
dtype=node.outputs[0].dtype)()])
......
......@@ -1904,7 +1904,7 @@ class Clip(ScalarOp):
return v.zeros_like().astype(config.floatX)
return v
return map(handle_int, [gx, gmn, gmx])
return list(map(handle_int, [gx, gmn, gmx]))
# Don't allow complex even if numpy do
# As there is no mathematical reason for this function on complex
......
......@@ -47,13 +47,13 @@ python_any = any
python_all = all
# Define common subsets of dtypes (as strings).
complex_dtypes = map(str, scal.complex_types)
continuous_dtypes = map(str, scal.continuous_types)
float_dtypes = map(str, scal.float_types)
discrete_dtypes = map(str, scal.discrete_types)
all_dtypes = map(str, scal.all_types)
int_dtypes = map(str, scal.int_types)
uint_dtypes = map(str, scal.uint_types)
complex_dtypes = list(map(str, scal.complex_types))
continuous_dtypes = list(map(str, scal.continuous_types))
float_dtypes = list(map(str, scal.float_types))
discrete_dtypes = list(map(str, scal.discrete_types))
all_dtypes = list(map(str, scal.all_types))
int_dtypes = list(map(str, scal.int_types))
uint_dtypes = list(map(str, scal.uint_types))
class ShapeError(Exception):
......@@ -3877,7 +3877,7 @@ def stack(*tensors):
t.ndim == 0)
for t in tensors]):
# in case there is direct int
tensors = map(as_tensor_variable, tensors)
tensors = list(map(as_tensor_variable, tensors))
dtype = scal.upcast(*[i.dtype for i in tensors])
return theano.tensor.opt.MakeVector(dtype)(*tensors)
return join(0, *[shape_padleft(t, 1) for t in tensors])
......@@ -4838,7 +4838,7 @@ class Dot(Op):
# graph. See Dot22 in tensor.blas for details.
def make_node(self, *inputs):
inputs = map(as_tensor_variable, inputs)
inputs = list(map(as_tensor_variable, inputs))
if len(inputs) != 2:
raise TypeError(
......@@ -5424,7 +5424,7 @@ def stacklists(arg):
(2, 2, 4, 4)
"""
if isinstance(arg, (tuple, list)):
return stack(*map(stacklists, arg))
return stack(*list(map(stacklists, arg)))
else:
return arg
......
......@@ -965,7 +965,7 @@ class Gemm(GemmRelated):
return dict(inplace=self.inplace)
def make_node(self, *inputs):
inputs = map(T.as_tensor_variable, inputs)
inputs = list(map(T.as_tensor_variable, inputs))
if len(inputs) != 5:
raise TypeError(
"Wrong number of inputs for %s (expected 5, got %s)" %
......
......@@ -20,8 +20,8 @@ config = theano.config
# We cannot import discrete_dtypes or float_dtypes from tensor.basic yet,
# so we redefine them here
discrete_dtypes = map(str, scalar.discrete_types)
float_dtypes = map(str, scalar.float_types)
discrete_dtypes = list(map(str, scalar.discrete_types))
float_dtypes = list(map(str, scalar.float_types))
# tensor depends on elemwise to provide definitions for several ops
......@@ -525,7 +525,7 @@ class Elemwise(OpenMPOp):
is left-completed to the greatest number of dimensions with 1s
using DimShuffle.
"""
inputs = map(as_tensor_variable, inputs)
inputs = list(map(as_tensor_variable, inputs))
shadow = self.scalar_op.make_node(
*[get_scalar_type(dtype=i.type.dtype).make_variable()
for i in inputs])
......@@ -733,8 +733,8 @@ class Elemwise(OpenMPOp):
return t
return get_scalar_type(t.type.dtype)()
scalar_inputs = map(as_scalar, inputs)
scalar_ograds = map(as_scalar, ograds)
scalar_inputs = list(map(as_scalar, inputs))
scalar_ograds = list(map(as_scalar, ograds))
scalar_igrads = self.scalar_op.grad(scalar_inputs, scalar_ograds)
for igrad in scalar_igrads:
assert igrad is not None, self.scalar_op
......
......@@ -614,7 +614,7 @@ def parse_mul_tree(root):
return [not neg, sub_tree]
else:
# Recurse into inputs.
return [False, map(parse_mul_tree, mul_info)]
return [False, list(map(parse_mul_tree, mul_info))]
def replace_leaf(arg, leaves, new_leaves, op, neg):
......@@ -708,7 +708,7 @@ def compute_mul(tree):
'call `simplify_mul` on the tree first?')
elif isinstance(inputs, list):
# Recurse through inputs.
rval = tensor.mul(*map(compute_mul, inputs))
rval = tensor.mul(*list(map(compute_mul, inputs)))
else:
rval = inputs
if neg:
......
......@@ -599,7 +599,7 @@ class MakeVector(T.Op):
return hash(type(self)) ^ hash(self.dtype)
def make_node(self, *inputs):
inputs = map(T.as_tensor_variable, inputs)
inputs = list(map(T.as_tensor_variable, inputs))
if not all(a.type == inputs[0].type for a in inputs) or (
len(inputs) > 0 and inputs[0].dtype != self.dtype):
dtype = theano.scalar.upcast(self.dtype,
......@@ -1430,7 +1430,7 @@ def local_subtensor_make_vector(node):
except NotScalarConstantError:
pass
elif idx.ndim == 1 and isinstance(idx, T.Constant):
values = map(int, list(idx.value))
values = list(map(int, list(idx.value)))
return [make_vector(*[x.owner.inputs[v] for v in values])]
else:
raise TypeError('case not expected')
......@@ -4824,8 +4824,8 @@ def attempt_distribution(factor, num, denum, out_type):
pos, neg = local_add_canonizer.get_num_denum(factor)
if len(pos) == 1 and not neg:
return False, factor, num, denum
pos_pairs = map(local_mul_canonizer.get_num_denum, pos)
neg_pairs = map(local_mul_canonizer.get_num_denum, neg)
pos_pairs = list(map(local_mul_canonizer.get_num_denum, pos))
neg_pairs = list(map(local_mul_canonizer.get_num_denum, neg))
change = False
for n in list(num):
success, pos_pairs, neg_pairs = distribute_greedy(pos_pairs,
......
......@@ -203,7 +203,7 @@ class RandomFunction(gof.Op):
# convert args to TensorType instances
# and append enough None's to match the length of self.args
args = map(tensor.as_tensor_variable, args)
args = list(map(tensor.as_tensor_variable, args))
return gof.Apply(self,
[r, shape] + args,
......
......@@ -432,7 +432,7 @@ class Subtensor(Op):
else:
raise
return map(conv, real_idx)
return list(map(conv, real_idx))
def __init__(self, idx_list):
self.idx_list = tuple(map(self.convert, idx_list))
......@@ -1180,7 +1180,7 @@ class IncSubtensor(Op):
destroyhandler_tolerate_aliased=None):
if destroyhandler_tolerate_aliased is None:
destroyhandler_tolerate_aliased = []
self.idx_list = map(Subtensor.convert, idx_list)
self.idx_list = list(map(Subtensor.convert, idx_list))
self.inplace = inplace
if inplace:
self.destroy_map = {0: [0]}
......
......@@ -6118,10 +6118,10 @@ class test_arithmetic_cast(unittest.TestCase):
('i_scalar', 'i_scalar'),
):
theano_args = map(eval,
['theano_%s' % c for c in combo])
numpy_args = map(eval,
['numpy_%s' % c for c in combo])
theano_args = list(map(eval,
['theano_%s' % c for c in combo]))
numpy_args = list(map(eval,
['numpy_%s' % c for c in combo]))
try:
theano_dtype = op(
theano_args[0](a_type),
......@@ -6136,8 +6136,17 @@ class test_arithmetic_cast(unittest.TestCase):
config.int_division == 'raise')
# This is the expected behavior.
continue
numpy_dtype = op(numpy_args[0](a_type),
numpy_args[1](b_type)).dtype
# For numpy we have a problem:
# http://projects.scipy.org/numpy/ticket/1827
# As a result we only consider the highest data
# type that numpy may return.
numpy_dtypes = [
op(numpy_args[0](a_type),
numpy_args[1](b_type)).dtype,
op(numpy_args[1](b_type),
numpy_args[0](a_type)).dtype]
numpy_dtype = theano.scalar.upcast(
*list(map(str, numpy_dtypes)))
if numpy_dtype == theano_dtype:
# Same data type found, all is good!
continue
......
......@@ -32,7 +32,7 @@ class MakeSlice(Op):
else:
inp = [slc, stop, step]
return Apply(self,
map(as_int_none_variable, inp),
list(map(as_int_none_variable, inp)),
[slicetype()])
def perform(self, node, inp, out_):
......
......@@ -179,7 +179,7 @@ def run(stdout, stderr, argv, theano_nose, batch_size, time_profile,
stderr.flush()
test_range = range(test_id, min(test_id + batch_size, n_tests + 1))
cmd = ([python, theano_nose, '--with-id'] +
map(str, test_range) +
list(map(str, test_range)) +
argv)
subprocess_extra_args = dict(stdin=dummy_in.fileno())
if not display_batch_output:
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment