Commit 6d2d49c5, authored by Arnaud Bergeron

Semantic and logical fixes for the python2/3 compat PR.

Parent ab606357
......@@ -34,7 +34,7 @@ def Op_to_RoutineDoc(op, routine_doc, module_name=None):
routine_doc.posarg_defaults = [None] * len(args)
# Set the routine's line number.
if hasattr(func, 'func_code'):
if hasattr(func, '__code__'):
routine_doc.lineno = func.__code__.co_firstlineno
else:
# [XX] I should probably use UNKNOWN here??
......
......@@ -5,7 +5,6 @@
# * Add download_url
import os
import sys
import subprocess
import codecs
from fnmatch import fnmatchcase
......@@ -14,18 +13,6 @@ try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
from distutils.command.build_py import build_py
from distutils.command.build_scripts import build_scripts
else:
exclude_fixers = ['fix_next', 'fix_filter']
from distutils.util import Mixin2to3
from lib2to3.refactor import get_fixers_from_package
Mixin2to3.fixer_names = [f for f in get_fixers_from_package('lib2to3.fixes')
if f.rsplit('.', 1)[-1] not in exclude_fixers]
from distutils.command.build_scripts import build_scripts_2to3 as build_scripts
CLASSIFIERS = """\
......@@ -54,8 +41,8 @@ MAINTAINER = "LISA laboratory, University of Montreal"
MAINTAINER_EMAIL = "theano-dev@googlegroups.com"
DESCRIPTION = ('Optimizing compiler for evaluating mathematical ' +
'expressions on CPUs and GPUs.')
LONG_DESCRIPTION = (codecs.open("DESCRIPTION.txt",encoding='utf-8').read() + "\n\n" +
codecs.open("NEWS.txt",encoding='utf-8').read())
LONG_DESCRIPTION = (codecs.open("DESCRIPTION.txt", encoding='utf-8').read() +
"\n\n" + codecs.open("NEWS.txt", encoding='utf-8').read())
URL = "http://deeplearning.net/software/theano/"
DOWNLOAD_URL = ""
LICENSE = 'BSD'
......
......@@ -2290,8 +2290,9 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions
inputs = [inputs]
# Wrap them in In or Out instances if needed.
inputs, outputs = (list(map(self.wrap_in, inputs)),
list(map(self.wrap_out, outputs)))
inputs = [self.wrap_in(i) for i in inputs]
outputs = [self.wrap_out(o) for o in outputs]
_inputs = gof.graph.inputs([o.variable for o in outputs] +
[i.update for i in inputs
if getattr(i, 'update', False)])
......
......@@ -14,7 +14,6 @@ from six.moves import xrange
import numpy
import collections
def register_view_op_c_code(type, code, version=()):
......@@ -571,8 +570,7 @@ def as_op(itypes, otypes, infer_shape=None):
itypes = list(itypes)
otypes = list(otypes)
if infer_shape is not None and not isinstance(infer_shape,
collections.Callable):
if infer_shape is not None and not callable(infer_shape):
raise TypeError("infer_shape needs to be a callable")
def make_op(fn):
......
......@@ -267,7 +267,7 @@ def AddConfigVar(name, doc, configparam, root=config, in_c_key=True):
configparam.in_c_key = in_c_key
# Trigger a read of the value from config files and env vars
# This allow to filter wrong value from the user.
if not isinstance(configparam.default, collections.Callable):
if not callable(configparam.default):
configparam.__get__()
else:
# We do not want to evaluate now the default value
......@@ -311,7 +311,7 @@ class ConfigParam(object):
for v in self.default():
val_str = v
self.__set__(None, val_str)
elif isinstance(self.default, collections.Callable):
elif callable(self.default):
val_str = self.default()
else:
val_str = self.default
......@@ -367,7 +367,7 @@ class TypedParam(ConfigParam):
def filter(val):
cast_val = mytype(val)
if isinstance(is_valid, collections.Callable):
if callable(is_valid):
if is_valid(cast_val):
return cast_val
else:
......
import errno
import logging
import os
from six.moves import reload_module as reload
import sys
import warnings
......@@ -9,7 +10,6 @@ import theano
from theano import config
from theano.gof.compilelock import get_lock, release_lock
from theano.gof import cmodule
import imp
_logger = logging.getLogger('theano.gof.lazylinker_c')
......@@ -27,7 +27,7 @@ def try_import():
def try_reload():
sys.path[0:0] = [config.compiledir]
imp.reload(lazylinker_ext)
reload(lazylinker_ext)
del sys.path[0]
try:
......
......@@ -102,7 +102,7 @@ if 0:
tasks[node].extend(lopt for track, i, lopt in self.fetch_tracks0(node.op))
u = self.attach_updater(fgraph, importer, pruner, chin)
print('KEYS', list(map(hash, list(tasks.keys()))))
print('KEYS', [hash(t) for t in tasks.keys()])
while tasks:
for node in tasks:
todo = tasks.pop(node)
......
......@@ -39,7 +39,7 @@ class MyOp(Op):
def make_node(self, *inputs):
assert len(inputs) == self.nin
inputs = list(map(as_variable, inputs))
inputs = [as_variable(i) for i in inputs]
for input in inputs:
if input.type is not tdouble:
raise Exception("Error 1")
......
......@@ -248,7 +248,7 @@ class TestMakeThunk(unittest.TestCase):
def test_test_value_python_objects():
for x in (list(range(3)), 0, 0.5, 1):
for x in ([0, 1, 2], 0, 0.5, 1):
assert (op.get_test_value(x) == x).all()
......
......@@ -419,7 +419,7 @@ def give_variables_names(variables):
for i, var in enumerate(filter(bad_var, variables)):
var.name = (var.name or "") + "_%d" % i
if not unique(list(map(str, variables))):
if not unique([str(v) for v in variables]):
raise ValueError("Not all variables have unique names. Maybe you've "
"named some of the variables identically")
return variables
......
compat/python2x.py
compat/__init__.py
compile/debugmode.py
compile/function_module.py
gof/cc.py
gradient.py
ifelse.py
sandbox/cuda/tests/test_mlp.py
sandbox/gpuarray/elemwise.py
sandbox/gpuarray/tests/test_basic_ops.py
sandbox/scan.py
sandbox/scan_module/scan.py
sandbox/scan_module/scan_op.py
sandbox/scan_module/scan_utils.py
scalar/basic.py
scan_module/scan.py
scan_module/scan_op.py
scan_module/scan_utils.py
sparse/opt.py
tensor/basic.py
tensor/elemwise.py
tensor/nnet/sigm.py
tensor/nnet/tests/test_sigm.py
tensor/opt.py
tensor/subtensor.py
tensor/tests/test_basic.py
tensor/tests/test_elemwise.py
tensor/tests/test_subtensor.py
......@@ -12,7 +12,6 @@ import warnings
import hashlib
import numpy as np
import collections
from six import string_types, integer_types, iteritems
try:
......@@ -212,7 +211,7 @@ N.B.:
def _print_fn(op, xin):
for attr in op.attrs:
temp = getattr(xin, attr)
if isinstance(temp, collections.Callable):
if callable(temp):
pmsg = temp()
else:
pmsg = temp
......
......@@ -10,7 +10,7 @@ from __future__ import print_function
# so state is ignored
# since this job is not restartable, channel is also ignored
import logging, io, time, sys
import logging, time, sys
import numpy
from six.moves import xrange
......
......@@ -2,7 +2,6 @@ from __future__ import print_function
import copy, inspect
import theano
import theano.tensor as T
import collections
from six import string_types, add_metaclass, iteritems
from six.moves import xrange
......@@ -39,7 +38,7 @@ class InitGraph(type):
# print ' adding class attribute', key
if isinstance(val, theano.Variable) and val.name is None:
val.name = key
if isinstance(val, collections.Callable):
if callable(val):
setattr(cls, key, staticmethod(val))
else:
setattr(cls, key, val)
......@@ -319,7 +318,7 @@ if 0:
except Exception:
kres = klass.KlassVariable(val)
setattr(SymMod, key, kres)
elif isinstance(val, collections.Callable) and getattr(val, '__is_symbolic'):
elif callable(val) and getattr(val, '__is_symbolic'):
setattr(SymMod, key, val)
return SymMod()
......
import errno
import logging
import os
from six.moves import reload_module as reload
import sys
import warnings
......@@ -11,7 +12,6 @@ from theano import config
from theano.compat import reload
from theano.gof.compilelock import get_lock, release_lock
from theano.gof import cmodule
import imp
_logger = logging.getLogger('theano.scan_module.scan_perform')
......@@ -31,7 +31,7 @@ def try_import():
def try_reload():
sys.path[0:0] = [config.compiledir]
imp.reload(scan_perform)
reload(scan_perform)
del sys.path[0]
try:
......
......@@ -246,7 +246,7 @@ class NumpyAutocaster(object):
def __call__(self, x):
# Make sure we only deal with scalars.
assert (isinstance(x, int) or
assert (isinstance(x, integer_types) or
isinstance(x, float) or
(isinstance(x, numpy.ndarray) and x.ndim == 0))
......@@ -354,23 +354,15 @@ def constant_or_value(x, rtype, name=None, ndim=None, dtype=None):
# In this case, this function should infer the dtype according to the
# autocasting rules. See autocasting above.
x_ = None
if rtype is TensorConstant and isinstance(x, int):
x_ = autocast_int(x)
elif rtype is TensorConstant and isinstance(x, float):
x_ = autocast_float(x)
elif rtype is TensorConstant and isinstance(x, integer_types):
# We need to address the case where a long number is used in a
# Theano graph, because on Windows 64, all shapes are expressed
# with longs.
# If a long fits in int64, we convert it into an int64, like
# numpy.asarray() does up to 1.7. NumPy 1.7.1 upcasts to int64
# if possible, but falls back to uint64 if int64 isn't possible but
# uint64 is. We always do as NumPy 1.7.1 here.
# If x is too big, an OverflowError will be raised by numpy.
if rtype is TensorConstant and isinstance(x, integer_types):
try:
x_ = theano._asarray(x, dtype='int64')
x_ = autocast_int(x)
except OverflowError:
# This is to imitate numpy behavior which tries to fit
# bigger numbers into a uint64.
x_ = theano._asarray(x, dtype='uint64')
elif rtype is TensorConstant and isinstance(x, float):
x_ = autocast_float(x)
elif isinstance(x, numpy.ndarray):
x_ = x
# Currently we do not have a bool dtype in Theano.
......
......@@ -75,6 +75,12 @@ else:
### seed random number generator so that unittests are deterministic ###
utt.seed_rng()
if PY3:
def L(i):
return i
else:
def L(i):
return long(i)
def inplace_func(inputs, outputs, mode=None, allow_input_downcast=False,
on_unused_input='raise', name=None):
......@@ -4982,7 +4988,7 @@ class T_reshape(utt.InferShapeTester, utt.TestOptimizationMixin):
def test_reshape_long_in_shape(self):
v = dvector('v')
r = v.reshape((v.shape[0], 1))
r = v.reshape((v.shape[0], L(1)))
print(r.eval({v: numpy.arange(5.)}))
assert numpy.allclose(r.eval({v: numpy.arange(5.)}).T,
numpy.arange(5.))
......@@ -6024,7 +6030,7 @@ def _test_autocast_numpy():
def ok(z):
assert tensor.constant(z).dtype == numpy.asarray(z).dtype
for x in ([2 ** i for i in xrange(63)] +
[0, 0, 1, 2 ** 63 - 1] +
[0, L(0), L(1), L(2 ** 63 - 1)] +
[0., 1., 1.1, 1.5]):
n_x = numpy.asarray(x)
# Make sure the data type is the same as the one found by numpy.
......@@ -6057,7 +6063,7 @@ def _test_autocast_numpy_floatX():
# into int64, as that is the maximal integer type that Theano
# supports, and that is the maximal type in Python indexing.
for x in ([2 ** i - 1 for i in xrange(64)] +
[0, 0, 1, 2 ** 63 - 1] +
[0, L(0), L(1), L(2 ** 63 - 1)] +
[0., 1., 1.1, 1.5]):
ok(x, floatX)
ok(-x, floatX)
......@@ -6226,7 +6232,7 @@ class test_arithmetic_cast(unittest.TestCase):
class T_long_tensor(unittest.TestCase):
def test_fit_int64(self):
for exp in xrange(64):
val = 2 ** exp - 1
val = L(2 ** exp - 1)
scalar_ct = constant(val)
assert scalar_ct.dtype.startswith('int')
assert scalar_ct.value == val
......@@ -6240,7 +6246,7 @@ class T_long_tensor(unittest.TestCase):
assert numpy.all(matrix_ct.value == val)
def test_too_big(self):
val = 2 ** 63
val = L(2 ** 63)
# NumPy 1.7 this will raise an exception
# NumPy 1.7.1 this will work
try:
......@@ -6267,7 +6273,7 @@ class T_long_tensor(unittest.TestCase):
except TypeError:
pass
val = 2 ** 64
val = L(2 ** 64)
# This fail for all NumPy version.
self.assertRaises(Exception, constant, val)
self.assertRaises(Exception, constant, [val, val])
......
......@@ -9,7 +9,7 @@ from six import StringIO
from six.moves import xrange
import theano
from theano.compat import exc_message, izip
from theano.compat import exc_message, izip, PY3
from theano.compile import DeepCopyOp
from theano import config
from theano import gof
......@@ -35,6 +35,13 @@ from theano.tensor import (as_tensor_variable, _shared,
ctensor3, dtensor4)
from theano.tensor.tests.test_basic import rand, randint_ranged, inplace_func
if PY3:
def L(i):
return i
else:
def L(i):
return long(i)
class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
"""
......@@ -303,7 +310,7 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
def test_long(self):
n = self.shared(numpy.arange(12, dtype=self.dtype).reshape((4, 3)))
t = n[1:4:2, 1]
t = n[L(1):L(4):L(2), L(1)]
self.assertTrue(isinstance(t.owner.op, Subtensor))
tval = self.eval_output_and_check(t)
self.assertTrue(tval.shape == (2,))
......@@ -313,7 +320,7 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
# Currently, we cast Python longs to int64 when used for indexing.
# This test checks that using a long that does not fit raises an error.
n = self.shared(numpy.arange(12, dtype=self.dtype).reshape((4, 3)))
self.assertRaises(Exception, lambda: n[:(2 ** 63)])
self.assertRaises(Exception, lambda: n[:L(2 ** 63)])
def test_list_slice(self):
x = theano.tensor.arange(100).reshape((5, 5, 4))
......
......@@ -2,7 +2,7 @@ from __future__ import print_function
from copy import copy, deepcopy
from functools import wraps
import logging
from io import StringIO
from six.moves import StringIO
import sys
import unittest
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to post a comment