Commit ec9142b0 authored by: Arnaud Bergeron

Use six.integer_types instead of int to check types.

Parent: 5f4eab92
...@@ -11,7 +11,7 @@ import warnings ...@@ -11,7 +11,7 @@ import warnings
import theano import theano
from theano import gof from theano import gof
from theano.compat import OrderedDict from theano.compat import OrderedDict
from six import iteritems from six import iteritems, integer_types
from six.moves import xrange from six.moves import xrange
...@@ -647,7 +647,7 @@ class Rebroadcast(gof.Op): ...@@ -647,7 +647,7 @@ class Rebroadcast(gof.Op):
items = sorted(axis) items = sorted(axis)
self.axis = OrderedDict(items) self.axis = OrderedDict(items)
for axis, broad in iteritems(self.axis): for axis, broad in iteritems(self.axis):
if not isinstance(axis, (numpy.integer, int)): if not isinstance(axis, (numpy.integer, integer_types)):
raise TypeError("Rebroadcast needs integer axes. " raise TypeError("Rebroadcast needs integer axes. "
"Got {}".format(axis)) "Got {}".format(axis))
......
import numpy as np import numpy as np
from six import integer_types
import theano as th import theano as th
import theano.tensor as T import theano.tensor as T
...@@ -9,7 +10,7 @@ class Mlp(object): ...@@ -9,7 +10,7 @@ class Mlp(object):
def __init__(self, nfeatures=100, noutputs=10, nhiddens=50, rng=None): def __init__(self, nfeatures=100, noutputs=10, nhiddens=50, rng=None):
if rng is None: if rng is None:
rng = 0 rng = 0
if isinstance(rng, int): if isinstance(rng, integer_types):
rng = np.random.RandomState(rng) rng = np.random.RandomState(rng)
self.rng = rng self.rng = rng
self.nfeatures = nfeatures self.nfeatures = nfeatures
......
...@@ -383,7 +383,7 @@ class Variable(Node): ...@@ -383,7 +383,7 @@ class Variable(Node):
if owner is not None and not isinstance(owner, Apply): if owner is not None and not isinstance(owner, Apply):
raise TypeError("owner must be an Apply instance", owner) raise TypeError("owner must be an Apply instance", owner)
self.owner = owner self.owner = owner
if index is not None and not isinstance(index, int): if index is not None and not isinstance(index, integer_types):
raise TypeError("index must be an int", index) raise TypeError("index must be an int", index)
self.index = index self.index = index
if name is not None and not isinstance(name, string_types): if name is not None and not isinstance(name, string_types):
......
...@@ -19,7 +19,7 @@ import numpy ...@@ -19,7 +19,7 @@ import numpy
import theano import theano
from theano import config from theano import config
from theano.compat import izip, OrderedDict from theano.compat import izip, OrderedDict
from six import string_types, iteritems, itervalues from six import string_types, iteritems, itervalues, integer_types
from six.moves import reduce from six.moves import reduce
from theano.gof import graph, op, utils, unify, toolbox from theano.gof import graph, op, utils, unify, toolbox
from theano.gof.fg import InconsistencyError from theano.gof.fg import InconsistencyError
...@@ -1507,7 +1507,7 @@ class PatternSub(LocalOptimizer): ...@@ -1507,7 +1507,7 @@ class PatternSub(LocalOptimizer):
return retry_with_equiv() return retry_with_equiv()
else: else:
u = u.merge(expr, v) u = u.merge(expr, v)
elif (isinstance(pattern, (int, float)) and elif (isinstance(pattern, (integer_types, float)) and
isinstance(expr, graph.Constant)): isinstance(expr, graph.Constant)):
if numpy.all( if numpy.all(
theano.tensor.constant(pattern).value == expr.value): theano.tensor.constant(pattern).value == expr.value):
...@@ -1534,7 +1534,7 @@ class PatternSub(LocalOptimizer): ...@@ -1534,7 +1534,7 @@ class PatternSub(LocalOptimizer):
return pattern[0](*args) return pattern[0](*args)
elif isinstance(pattern, string_types): elif isinstance(pattern, string_types):
return u[unify.Var(pattern)] return u[unify.Var(pattern)]
elif isinstance(pattern, (int, float)): elif isinstance(pattern, (integer_types, float)):
return pattern return pattern
else: else:
return pattern.clone() return pattern.clone()
......
...@@ -4,7 +4,7 @@ import sys ...@@ -4,7 +4,7 @@ import sys
from theano.compat import DefaultOrderedDict from theano.compat import DefaultOrderedDict
from theano.misc.ordered_set import OrderedSet from theano.misc.ordered_set import OrderedSet
from six import StringIO from six import StringIO, integer_types
from theano.gof import opt from theano.gof import opt
from theano import config from theano import config
...@@ -310,7 +310,7 @@ class SequenceDB(DB): ...@@ -310,7 +310,7 @@ class SequenceDB(DB):
def register(self, name, obj, position, *tags): def register(self, name, obj, position, *tags):
super(SequenceDB, self).register(name, obj, *tags) super(SequenceDB, self).register(name, obj, *tags)
assert isinstance(position, (int, float)) assert isinstance(position, (integer_types, float))
self.__position__[name] = position self.__position__[name] = position
def query(self, *tags, **kwtags): def query(self, *tags, **kwtags):
......
...@@ -3,7 +3,7 @@ import linecache ...@@ -3,7 +3,7 @@ import linecache
import sys import sys
import numpy import numpy
from six import iteritems from six import iteritems, integer_types, string_types
from theano import config from theano import config
from theano.compat import OrderedDict, PY3 from theano.compat import OrderedDict, PY3
...@@ -326,7 +326,7 @@ RETRY = Keyword("RETRY", False) ...@@ -326,7 +326,7 @@ RETRY = Keyword("RETRY", False)
FAILURE = Keyword("FAILURE", False) FAILURE = Keyword("FAILURE", False)
simple_types = (int, float, str, bool, None.__class__, Keyword) simple_types = integer_types + string_types + (float, bool, None.__class__, Keyword)
ANY_TYPE = Keyword("ANY_TYPE") ANY_TYPE = Keyword("ANY_TYPE")
...@@ -521,7 +521,7 @@ def hash_from_dict(d): ...@@ -521,7 +521,7 @@ def hash_from_dict(d):
first_part = [k for k, v in items] first_part = [k for k, v in items]
second_part = [] second_part = []
for k, v in items: for k, v in items:
assert isinstance(k, (str, int, float)) assert isinstance(k, (string_types, integer_types, float))
if isinstance(v, (tuple, list)): if isinstance(v, (tuple, list)):
second_part += [tuple(v)] second_part += [tuple(v)]
else: else:
......
...@@ -13,11 +13,11 @@ import hashlib ...@@ -13,11 +13,11 @@ import hashlib
import numpy as np import numpy as np
from six import string_types, integer_types, iteritems from six import string_types, integer_types, iteritems
from six.moves import StringIO, reduce
import theano import theano
from theano import gof from theano import gof
from theano import config from theano import config
from six.moves import StringIO, reduce
from theano.gof import Op, Apply from theano.gof import Op, Apply
from theano.compile import Function, debugmode, SharedVariable from theano.compile import Function, debugmode, SharedVariable
from theano.compile.profilemode import ProfileMode from theano.compile.profilemode import ProfileMode
...@@ -88,7 +88,7 @@ def debugprint(obj, depth=-1, print_type=False, ...@@ -88,7 +88,7 @@ def debugprint(obj, depth=-1, print_type=False,
to the Apply's identifier, to indicate which output a line corresponds to. to the Apply's identifier, to indicate which output a line corresponds to.
""" """
if not isinstance(depth, int): if not isinstance(depth, integer_types):
raise Exception("depth parameter must be an int") raise Exception("depth parameter must be an int")
if file == 'str': if file == 'str':
_file = StringIO() _file = StringIO()
......
...@@ -4,10 +4,12 @@ import os ...@@ -4,10 +4,12 @@ import os
import logging import logging
_logger = logging.getLogger(__name__) _logger = logging.getLogger(__name__)
from six import integer_types
from six.moves import StringIO, reduce
import theano import theano
from theano import Apply from theano import Apply
from theano import tensor from theano import tensor
from six.moves import StringIO, reduce
from theano.sandbox.cuda.type import CudaNdarrayType from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda import GpuOp from theano.sandbox.cuda import GpuOp
from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable, from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
...@@ -874,7 +876,7 @@ class BaseGpuCorrMM(GpuOp): ...@@ -874,7 +876,7 @@ class BaseGpuCorrMM(GpuOp):
if border_mode != "valid": if border_mode != "valid":
raise ValueError("border_mode must be 'valid' if pad is given") raise ValueError("border_mode must be 'valid' if pad is given")
border_mode = pad border_mode = pad
if isinstance(border_mode, int): if isinstance(border_mode, integer_types):
border_mode = (border_mode, border_mode) border_mode = (border_mode, border_mode)
if isinstance(border_mode, tuple): if isinstance(border_mode, tuple):
pad_h, pad_w = map(int, border_mode) pad_h, pad_w = map(int, border_mode)
......
...@@ -2,6 +2,8 @@ import os ...@@ -2,6 +2,8 @@ import os
import numpy import numpy
import warnings import warnings
from six import integer_types
import theano import theano
from theano import Apply, tensor, config, Variable from theano import Apply, tensor, config, Variable
from theano.scalar import as_scalar, constant, Log from theano.scalar import as_scalar, constant, Log
...@@ -129,7 +131,7 @@ class GpuDnnConvDesc(GpuOp): ...@@ -129,7 +131,7 @@ class GpuDnnConvDesc(GpuOp):
def __init__(self, border_mode, subsample=(1, 1), conv_mode='conv', def __init__(self, border_mode, subsample=(1, 1), conv_mode='conv',
precision="float32"): precision="float32"):
if isinstance(border_mode, int): if isinstance(border_mode, integer_types):
border_mode = (border_mode,) * len(subsample) border_mode = (border_mode,) * len(subsample)
if isinstance(border_mode, tuple): if isinstance(border_mode, tuple):
assert len(border_mode) == len(subsample) assert len(border_mode) == len(subsample)
......
import os import os
import numpy
import warnings import warnings
import numpy
from six import integer_types
import theano import theano
from theano import Op, Apply, tensor, config, Variable from theano import Op, Apply, tensor, config, Variable
from theano.scalar import as_scalar, constant, Log from theano.scalar import as_scalar, constant, Log
...@@ -281,7 +283,7 @@ class GpuDnnConvDesc(COp): ...@@ -281,7 +283,7 @@ class GpuDnnConvDesc(COp):
precision="float32"): precision="float32"):
COp.__init__(self, ["conv_desc.c"], "APPLY_SPECIFIC(conv_desc)") COp.__init__(self, ["conv_desc.c"], "APPLY_SPECIFIC(conv_desc)")
if isinstance(border_mode, int): if isinstance(border_mode, integer_types):
border_mode = (border_mode,) * len(subsample) border_mode = (border_mode,) * len(subsample)
if isinstance(border_mode, tuple): if isinstance(border_mode, tuple):
assert len(border_mode) == len(subsample) assert len(border_mode) == len(subsample)
......
...@@ -4,10 +4,11 @@ import os ...@@ -4,10 +4,11 @@ import os
import copy import copy
import numpy import numpy
from six import integer_types
from six.moves import StringIO
import theano import theano
from theano import tensor, gof from theano import tensor, gof
from six.moves import StringIO
from theano.tensor.subtensor import IncSubtensor, Subtensor, get_idx_list from theano.tensor.subtensor import IncSubtensor, Subtensor, get_idx_list
import theano.tensor.inplace import theano.tensor.inplace
...@@ -116,7 +117,7 @@ class GpuSubtensor(HideC, Subtensor): ...@@ -116,7 +117,7 @@ class GpuSubtensor(HideC, Subtensor):
def fix_idx(idx): def fix_idx(idx):
if idx is None: if idx is None:
return "0", 1 return "0", 1
elif isinstance(idx, (numpy.integer, int)): elif isinstance(idx, (numpy.integer, integer_types)):
return str(idx), 0 return str(idx), 0
elif isinstance(idx, gof.Type): elif isinstance(idx, gof.Type):
return indices.pop(0), 0 return indices.pop(0), 0
...@@ -143,7 +144,7 @@ class GpuSubtensor(HideC, Subtensor): ...@@ -143,7 +144,7 @@ class GpuSubtensor(HideC, Subtensor):
else: else:
if isinstance(idx, gof.Type): if isinstance(idx, gof.Type):
start = indices.pop(0) start = indices.pop(0)
elif isinstance(idx, (numpy.integer, int)): elif isinstance(idx, (numpy.integer, integer_types)):
start = idx start = idx
else: else:
assert 0, idx assert 0, idx
......
...@@ -3,7 +3,7 @@ import logging ...@@ -3,7 +3,7 @@ import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
import numpy import numpy
from six import iteritems from six import iteritems, integer_types
from six.moves import xrange from six.moves import xrange
from theano.gof import Op, Apply from theano.gof import Op, Apply
...@@ -412,7 +412,7 @@ def spectral_radius_bound(X, log2_exponent): ...@@ -412,7 +412,7 @@ def spectral_radius_bound(X, log2_exponent):
""" """
if X.type.ndim != 2: if X.type.ndim != 2:
raise TypeError('spectral_radius_bound requires a matrix argument', X) raise TypeError('spectral_radius_bound requires a matrix argument', X)
if not isinstance(log2_exponent, int): if not isinstance(log2_exponent, integer_types):
raise TypeError('spectral_radius_bound requires an integer exponent', raise TypeError('spectral_radius_bound requires an integer exponent',
log2_exponent) log2_exponent)
if log2_exponent <= 0: if log2_exponent <= 0:
......
...@@ -9,6 +9,7 @@ from __future__ import print_function ...@@ -9,6 +9,7 @@ from __future__ import print_function
import warnings import warnings
import numpy import numpy
from six import integer_types
from six.moves import xrange from six.moves import xrange
from theano import Op, Apply, shared, config, Variable from theano import Op, Apply, shared, config, Variable
...@@ -1060,7 +1061,7 @@ def guess_n_streams(size, warn=False): ...@@ -1060,7 +1061,7 @@ def guess_n_streams(size, warn=False):
# Note that this code was moved out of `MRG_RandomStreams` so that it can # Note that this code was moved out of `MRG_RandomStreams` so that it can
# be easily accessed from tests, where we want to disable the warning. # be easily accessed from tests, where we want to disable the warning.
if (isinstance(size, (tuple, list)) and if (isinstance(size, (tuple, list)) and
all([isinstance(i, int) for i in size])): all([isinstance(i, integer_types) for i in size])):
# We can make a guess. # We can make a guess.
r = 1 r = 1
for s in size: for s in size:
...@@ -1127,7 +1128,7 @@ class MRG_RandomStreams(object): ...@@ -1127,7 +1128,7 @@ class MRG_RandomStreams(object):
def set_rstate(self, seed): def set_rstate(self, seed):
# TODO : need description for method, parameter # TODO : need description for method, parameter
if isinstance(seed, int): if isinstance(seed, integer_types):
if seed == 0: if seed == 0:
raise ValueError('seed should not be 0', seed) raise ValueError('seed should not be 0', seed)
elif seed >= M2: elif seed >= M2:
...@@ -1289,9 +1290,9 @@ class MRG_RandomStreams(object): ...@@ -1289,9 +1290,9 @@ class MRG_RandomStreams(object):
if isinstance(size, tuple): if isinstance(size, tuple):
msg = "size must be a tuple of int or a Theano variable" msg = "size must be a tuple of int or a Theano variable"
assert all([isinstance(i, (numpy.integer, int, Variable)) assert all([isinstance(i, (numpy.integer, integer_types, Variable))
for i in size]), msg for i in size]), msg
if any([isinstance(i, (numpy.integer, int)) and i <= 0 if any([isinstance(i, (numpy.integer, integer_types)) and i <= 0
for i in size]): for i in size]):
raise ValueError( raise ValueError(
"The specified size contains a dimension with value <= 0", "The specified size contains a dimension with value <= 0",
...@@ -1377,7 +1378,7 @@ class MRG_RandomStreams(object): ...@@ -1377,7 +1378,7 @@ class MRG_RandomStreams(object):
raise TypeError("You have to specify pvals") raise TypeError("You have to specify pvals")
pvals = as_tensor_variable(pvals) pvals = as_tensor_variable(pvals)
if size is not None: if size is not None:
if any([isinstance(i, int) and i <= 0 for i in size]): if any([isinstance(i, integer_types) and i <= 0 for i in size]):
raise ValueError( raise ValueError(
"The specified size contains a dimension with value <= 0", "The specified size contains a dimension with value <= 0",
size) size)
...@@ -1483,7 +1484,7 @@ class MRG_RandomStreams(object): ...@@ -1483,7 +1484,7 @@ class MRG_RandomStreams(object):
evened = False evened = False
constant = False constant = False
if (isinstance(size, tuple) and if (isinstance(size, tuple) and
all([isinstance(i, (numpy.integer, int)) for i in size])): all([isinstance(i, (numpy.integer, integer_types)) for i in size])):
constant = True constant = True
# Force dtype because it defaults to float when size is empty # Force dtype because it defaults to float when size is empty
n_samples = numpy.prod(size, dtype='int64') n_samples = numpy.prod(size, dtype='int64')
......
...@@ -17,6 +17,8 @@ way (as scan does) to create a shared variable of this kind. ...@@ -17,6 +17,8 @@ way (as scan does) to create a shared variable of this kind.
""" """
import numpy import numpy
from six import integer_types
from theano.compile import SharedVariable from theano.compile import SharedVariable
from .basic import Scalar, _scalar_py_operators from .basic import Scalar, _scalar_py_operators
...@@ -46,7 +48,7 @@ def shared(value, name=None, strict=False, allow_downcast=None): ...@@ -46,7 +48,7 @@ def shared(value, name=None, strict=False, allow_downcast=None):
We implement this using 0-d tensors for now. We implement this using 0-d tensors for now.
""" """
if not isinstance(value, (numpy.number, float, int, complex)): if not isinstance(value, (numpy.number, float, integer_types, complex)):
raise TypeError() raise TypeError()
try: try:
dtype = value.dtype dtype = value.dtype
......
...@@ -372,7 +372,7 @@ def scan(fn, ...@@ -372,7 +372,7 @@ def scan(fn,
# To do that we check here to see the nature of n_steps # To do that we check here to see the nature of n_steps
n_fixed_steps = None n_fixed_steps = None
if isinstance(n_steps, (float, int)): if isinstance(n_steps, (float, integer_types)):
n_fixed_steps = int(n_steps) n_fixed_steps = int(n_steps)
else: else:
try: try:
......
...@@ -62,7 +62,7 @@ import logging ...@@ -62,7 +62,7 @@ import logging
import time import time
import numpy import numpy
from six import iteritems from six import iteritems, integer_types
from six.moves import xrange from six.moves import xrange
import theano import theano
...@@ -837,7 +837,7 @@ class Scan(PureOp): ...@@ -837,7 +837,7 @@ class Scan(PureOp):
profile = None profile = None
if (theano.config.profile or if (theano.config.profile or
(isinstance(self.profile, (string_types, bool, int)) (isinstance(self.profile, (string_types, bool, integer_types))
and self.profile)): and self.profile)):
if isinstance(self.profile, string_types): if isinstance(self.profile, string_types):
profile = ScanProfileStats(name=self.profile) profile = ScanProfileStats(name=self.profile)
......
...@@ -14,6 +14,7 @@ import sys ...@@ -14,6 +14,7 @@ import sys
import numpy import numpy
from numpy.lib.stride_tricks import as_strided from numpy.lib.stride_tricks import as_strided
from six import integer_types
from six.moves import xrange from six.moves import xrange
import scipy.sparse import scipy.sparse
...@@ -1402,7 +1403,7 @@ class GetItemScalar(gof.op.Op): ...@@ -1402,7 +1403,7 @@ class GetItemScalar(gof.op.Op):
raise Exception("GetItemScalar called with a slice as index!") raise Exception("GetItemScalar called with a slice as index!")
# in case of indexing using int instead of theano variable # in case of indexing using int instead of theano variable
elif isinstance(ind, int): elif isinstance(ind, integer_types):
ind = theano.tensor.constant(ind) ind = theano.tensor.constant(ind)
input_op += [ind] input_op += [ind]
......
...@@ -4,6 +4,7 @@ import sys ...@@ -4,6 +4,7 @@ import sys
import warnings import warnings
import numpy import numpy
from six import integer_types
from six.moves import xrange from six.moves import xrange
import numbers import numbers
...@@ -21,7 +22,6 @@ from theano.tensor.type import TensorType, values_eq_approx_always_true ...@@ -21,7 +22,6 @@ from theano.tensor.type import TensorType, values_eq_approx_always_true
from theano.tensor.type_other import NoneConst from theano.tensor.type_other import NoneConst
from theano import scalar as scal from theano import scalar as scal
from functools import partial from functools import partial
from six import integer_types
from theano import compile, printing from theano import compile, printing
from theano.printing import pprint, min_informative_str from theano.printing import pprint, min_informative_str
# For history # For history
...@@ -606,7 +606,7 @@ def get_scalar_constant_value(orig_v, elemwise=True, ...@@ -606,7 +606,7 @@ def get_scalar_constant_value(orig_v, elemwise=True,
# to depend on passing it None) # to depend on passing it None)
raise NotScalarConstantError() raise NotScalarConstantError()
if isinstance(v, (numpy.integer, int, float)): if isinstance(v, (numpy.integer, integer_types, float)):
return numpy.asarray(v) return numpy.asarray(v)
if isinstance(v, numpy.ndarray): if isinstance(v, numpy.ndarray):
...@@ -786,7 +786,7 @@ def tensor(*args, **kwargs): ...@@ -786,7 +786,7 @@ def tensor(*args, **kwargs):
def _multi(*fns): def _multi(*fns):
def f2(f, *names): def f2(f, *names):
if names and isinstance(names[0], int): if names and isinstance(names[0], integer_types):
if names == 1: if names == 1:
return f() return f()
else: else:
...@@ -1290,7 +1290,7 @@ class MaxAndArgmax(Op): ...@@ -1290,7 +1290,7 @@ class MaxAndArgmax(Op):
def make_node(self, x, axis=None): def make_node(self, x, axis=None):
x = _as_tensor_variable(x) x = _as_tensor_variable(x)
if isinstance(axis, (int, numpy.integer)): if isinstance(axis, (integer_types, numpy.integer)):
axis = [int(axis)] axis = [int(axis)]
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0: elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
axis = [int(axis)] axis = [int(axis)]
...@@ -1307,7 +1307,7 @@ class MaxAndArgmax(Op): ...@@ -1307,7 +1307,7 @@ class MaxAndArgmax(Op):
else: else:
assert (axis.dtype.startswith("int") or assert (axis.dtype.startswith("int") or
axis.dtype.startswith("uint")) axis.dtype.startswith("uint"))
if isinstance(axis.data, (int, numpy.integer)) or \ if isinstance(axis.data, (integer_types, numpy.integer)) or \
(isinstance(axis.data, numpy.ndarray) and (isinstance(axis.data, numpy.ndarray) and
axis.data.ndim == 0): axis.data.ndim == 0):
axis = [int(axis.data)] axis = [int(axis.data)]
...@@ -1536,7 +1536,7 @@ def makeKeepDims(x, y, axis): ...@@ -1536,7 +1536,7 @@ def makeKeepDims(x, y, axis):
if axis is None: if axis is None:
axis = list(range(x.type.ndim)) axis = list(range(x.type.ndim))
elif isinstance(axis, (int, numpy.integer)): elif isinstance(axis, (integer_types, numpy.integer)):
axis = [axis] axis = [axis]
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0: elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
axis = [int(axis)] axis = [int(axis)]
...@@ -1544,7 +1544,7 @@ def makeKeepDims(x, y, axis): ...@@ -1544,7 +1544,7 @@ def makeKeepDims(x, y, axis):
axis = [int(a) for a in axis] axis = [int(a) for a in axis]
newaxis = [] newaxis = []
for a in axis: for a in axis:
if not isinstance(a, int): if not isinstance(a, integer_types):
raise ValueError( raise ValueError(
"keepdims option can be used only with constant axis") "keepdims option can be used only with constant axis")
if a < 0: if a < 0:
...@@ -3082,7 +3082,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False, ...@@ -3082,7 +3082,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False,
if axis is None: if axis is None:
axis = list(range(input.ndim)) axis = list(range(input.ndim))
elif isinstance(axis, (int, numpy.integer)): elif isinstance(axis, (integer_types, numpy.integer)):
axis = [axis] axis = [axis]
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0: elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
axis = [int(axis)] axis = [int(axis)]
...@@ -3126,7 +3126,7 @@ def var(input, axis=None, keepdims=False): ...@@ -3126,7 +3126,7 @@ def var(input, axis=None, keepdims=False):
input_ndim = input.type.ndim input_ndim = input.type.ndim
if axis is None: if axis is None:
axis = list(range(input_ndim)) axis = list(range(input_ndim))
elif isinstance(axis, (int, numpy.integer)): elif isinstance(axis, (integer_types, numpy.integer)):
axis = [axis] axis = [axis]
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0: elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
axis = [int(axis)] axis = [int(axis)]
...@@ -3769,7 +3769,7 @@ class Join(Op): ...@@ -3769,7 +3769,7 @@ class Join(Op):
as_tensor_variable_args[0].type.broadcastable) as_tensor_variable_args[0].type.broadcastable)
ndim = len(bcastable) ndim = len(bcastable)
# Axis can also be a constant # Axis can also be a constant
if not isinstance(axis, int): if not isinstance(axis, integer_types):
try: try:
# Note : `get_scalar_constant_value` returns a ndarray not # Note : `get_scalar_constant_value` returns a ndarray not
# an int # an int
...@@ -3777,7 +3777,7 @@ class Join(Op): ...@@ -3777,7 +3777,7 @@ class Join(Op):
except NotScalarConstantError: except NotScalarConstantError:
pass pass
if isinstance(axis, int): if isinstance(axis, integer_types):
# Basically, broadcastable -> length 1, but the # Basically, broadcastable -> length 1, but the
# converse does not hold. So we permit e.g. T/F/T # converse does not hold. So we permit e.g. T/F/T
# joins, and if they fail at runtime they fail, but if # joins, and if they fail at runtime they fail, but if
......
...@@ -3,12 +3,12 @@ import sys ...@@ -3,12 +3,12 @@ import sys
from copy import copy from copy import copy
import numpy import numpy
from six import iteritems, integer_types
from six.moves import xrange
import theano import theano
from theano import gof from theano import gof
from theano.compat import izip from theano.compat import izip
from six import iteritems
from six.moves import xrange
from theano.gof import Apply, Op, OpenMPOp from theano.gof import Apply, Op, OpenMPOp
from theano import scalar from theano import scalar
from theano.scalar import get_scalar_type from theano.scalar import get_scalar_type
...@@ -135,10 +135,11 @@ class DimShuffle(Op): ...@@ -135,10 +135,11 @@ class DimShuffle(Op):
for i, j in enumerate(new_order): for i, j in enumerate(new_order):
if j != 'x': if j != 'x':
# There is a bug in numpy that results in isinstance(x, int) # There is a bug in numpy that results in
# returning False for numpy integers. # isinstance(x, integer_types) returning False for
# See <http://projects.scipy.org/numpy/ticket/2235>. # numpy integers. See
if not isinstance(j, (int, numpy.integer)): # <http://projects.scipy.org/numpy/ticket/2235>.
if not isinstance(j, (integer_types, numpy.integer)):
raise TypeError("DimShuffle indices must be python ints.") raise TypeError("DimShuffle indices must be python ints.")
if j >= len(input_broadcastable): if j >= len(input_broadcastable):
raise ValueError(("new_order[%d] is %d, but the input " raise ValueError(("new_order[%d] is %d, but the input "
...@@ -1325,10 +1326,10 @@ class CAReduce(Op): ...@@ -1325,10 +1326,10 @@ class CAReduce(Op):
if axis is None: if axis is None:
self.axis = axis self.axis = axis
# There is a bug in numpy that results in isinstance(x, int) returning # There is a bug in numpy that results in isinstance(x,
# False for numpy integers. # integer_types) returning False for numpy integers. See
# See <http://projects.scipy.org/numpy/ticket/2235>. # <http://projects.scipy.org/numpy/ticket/2235>.
elif isinstance(axis, (int, numpy.integer)): elif isinstance(axis, (integer_types, numpy.integer)):
self.axis = (axis,) self.axis = (axis,)
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0: elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
self.axis = (int(axis),) self.axis = (int(axis),)
......
...@@ -4,7 +4,7 @@ Abstract conv interface ...@@ -4,7 +4,7 @@ Abstract conv interface
import numpy as np import numpy as np
import logging import logging
from six import reraise from six import reraise, integer_types
import sys import sys
import theano import theano
...@@ -625,7 +625,7 @@ class BaseAbstractConv2d(Op): ...@@ -625,7 +625,7 @@ class BaseAbstractConv2d(Op):
border_mode="valid", subsample=(1, 1), border_mode="valid", subsample=(1, 1),
filter_flip=True): filter_flip=True):
if isinstance(border_mode, int): if isinstance(border_mode, integer_types):
border_mode = (border_mode, border_mode) border_mode = (border_mode, border_mode)
if isinstance(border_mode, tuple): if isinstance(border_mode, tuple):
pad_h, pad_w = map(int, border_mode) pad_h, pad_w = map(int, border_mode)
......
import os import os
import logging import logging
from six import integer_types
import theano import theano
from theano import Apply from theano import Apply
from theano import gof from theano import gof
...@@ -30,7 +32,7 @@ class BaseCorrMM(gof.Op): ...@@ -30,7 +32,7 @@ class BaseCorrMM(gof.Op):
__props__ = ('border_mode', 'subsample') __props__ = ('border_mode', 'subsample')
def __init__(self, border_mode="valid", subsample=(1, 1)): def __init__(self, border_mode="valid", subsample=(1, 1)):
if isinstance(border_mode, int): if isinstance(border_mode, integer_types):
if border_mode < 0: if border_mode < 0:
raise ValueError( raise ValueError(
'invalid border_mode {}, which must be a ' 'invalid border_mode {}, which must be a '
......
from nose.plugins.skip import SkipTest from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr from nose.plugins.attrib import attr
import numpy import numpy
from six import integer_types
import theano import theano
import theano.tensor as T import theano.tensor as T
...@@ -92,7 +93,7 @@ class TestCorr2D(utt.InferShapeTester): ...@@ -92,7 +93,7 @@ class TestCorr2D(utt.InferShapeTester):
padHW = numpy.floor(fil_shape2d / 2).astype('int32') padHW = numpy.floor(fil_shape2d / 2).astype('int32')
elif isinstance(border_mode, tuple): elif isinstance(border_mode, tuple):
padHW = numpy.array(border_mode) padHW = numpy.array(border_mode)
elif isinstance(border_mode, int): elif isinstance(border_mode, integer_types):
padHW = numpy.array([border_mode, border_mode]) padHW = numpy.array([border_mode, border_mode])
else: else:
raise NotImplementedError('Unsupported border_mode {}'.format(border_mode)) raise NotImplementedError('Unsupported border_mode {}'.format(border_mode))
......
...@@ -14,14 +14,12 @@ import traceback ...@@ -14,14 +14,12 @@ import traceback
import warnings import warnings
import numpy import numpy
import numpy as N # guys... please don't do this in the library :( from six import integer_types, iteritems
from six.moves import xrange from six.moves import reduce, xrange
import theano import theano
from theano import gof from theano import gof
from theano.compat import izip from theano.compat import izip
from six import integer_types, iteritems
from six.moves import reduce
from theano.gof import opt, InconsistencyError, TopoOptimizer, graph from theano.gof import opt, InconsistencyError, TopoOptimizer, graph
from theano.gof import Variable, Constant from theano.gof import Variable, Constant
from theano.gof.utils import MethodNotDefined from theano.gof.utils import MethodNotDefined
...@@ -1169,11 +1167,11 @@ class ShapeFeature(object): ...@@ -1169,11 +1167,11 @@ class ShapeFeature(object):
# - Shape_i(i)(other_r); # - Shape_i(i)(other_r);
# - Shape_i(i)(r). # - Shape_i(i)(r).
merged_shape.append(r_shape[i]) merged_shape.append(r_shape[i])
elif isinstance(r_shape[i], (Constant, int)): elif isinstance(r_shape[i], (Constant, integer_types)):
# We do this to call less often ancestors and make # We do this to call less often ancestors and make
# sure we have the simplest shape possible. # sure we have the simplest shape possible.
merged_shape.append(r_shape[i]) merged_shape.append(r_shape[i])
elif isinstance(other_shape[i], (Constant, int)): elif isinstance(other_shape[i], (Constant, integer_types)):
# We do this to call less often ancestors and make # We do this to call less often ancestors and make
# sure we have the simplest shape possible. # sure we have the simplest shape possible.
merged_shape.append(other_shape[i]) merged_shape.append(other_shape[i])
...@@ -1826,7 +1824,7 @@ def local_subtensor_make_vector(node): ...@@ -1826,7 +1824,7 @@ def local_subtensor_make_vector(node):
else: else:
return return
if isinstance(idx, (int, numpy.integer)): if isinstance(idx, (integer_types, numpy.integer)):
# We don't need to copy over any stack traces here # We don't need to copy over any stack traces here
return [x.owner.inputs[idx]] return [x.owner.inputs[idx]]
elif isinstance(idx, Variable): elif isinstance(idx, Variable):
...@@ -2452,7 +2450,7 @@ def local_useless_subtensor(node): ...@@ -2452,7 +2450,7 @@ def local_useless_subtensor(node):
length_pos = shape_of[node.inputs[0]][pos] length_pos = shape_of[node.inputs[0]][pos]
if isinstance(idx.stop, (int, numpy.integer)): if isinstance(idx.stop, (integer_types, numpy.integer)):
length_pos_data = sys.maxsize length_pos_data = sys.maxsize
try: try:
length_pos_data = get_scalar_constant_value(length_pos) length_pos_data = get_scalar_constant_value(length_pos)
...@@ -4497,7 +4495,7 @@ class Canonizer(gof.LocalOptimizer): ...@@ -4497,7 +4495,7 @@ class Canonizer(gof.LocalOptimizer):
num, denum = self.simplify(list(orig_num), list(orig_denum), out.type) num, denum = self.simplify(list(orig_num), list(orig_denum), out.type)
def same(x, y): def same(x, y):
return len(x) == len(y) and all(N.all(xe == ye) for xe, ye in return len(x) == len(y) and all(numpy.all(xe == ye) for xe, ye in
zip(x, y)) zip(x, y))
if same(orig_num, num) and same(orig_denum, denum): if same(orig_num, num) and same(orig_denum, denum):
...@@ -4538,7 +4536,7 @@ def mul_calculate(num, denum, aslist=False, out_type=None): ...@@ -4538,7 +4536,7 @@ def mul_calculate(num, denum, aslist=False, out_type=None):
if aslist: if aslist:
return [] return []
else: else:
return N.int8(1) return numpy.int8(1)
# Make sure we do not accidently upcast data types. # Make sure we do not accidently upcast data types.
if out_type is None: if out_type is None:
...@@ -4547,9 +4545,9 @@ def mul_calculate(num, denum, aslist=False, out_type=None): ...@@ -4547,9 +4545,9 @@ def mul_calculate(num, denum, aslist=False, out_type=None):
out_dtype = out_type.dtype out_dtype = out_type.dtype
one = theano._asarray(1, dtype=out_dtype) one = theano._asarray(1, dtype=out_dtype)
v = reduce(N.multiply, num, one) / reduce(N.multiply, denum, one) v = reduce(numpy.multiply, num, one) / reduce(numpy.multiply, denum, one)
if aslist: if aslist:
if N.all(v == 1): if numpy.all(v == 1):
return [] return []
else: else:
return [v] return [v]
...@@ -5205,7 +5203,7 @@ register_canonicalize(local_mul_zero) ...@@ -5205,7 +5203,7 @@ register_canonicalize(local_mul_zero)
@gof.local_optimizer([T.true_div]) @gof.local_optimizer([T.true_div])
def local_div_to_inv(node): def local_div_to_inv(node):
if node.op == T.true_div and N.all( if node.op == T.true_div and numpy.all(
local_mul_canonizer.get_constant(node.inputs[0]) == 1.0): local_mul_canonizer.get_constant(node.inputs[0]) == 1.0):
out = node.outputs[0] out = node.outputs[0]
new_out = T.inv(local_mul_canonizer.merge_num_denum(node.inputs[1:], new_out = T.inv(local_mul_canonizer.merge_num_denum(node.inputs[1:],
...@@ -5286,19 +5284,19 @@ def local_pow_specialize(node): ...@@ -5286,19 +5284,19 @@ def local_pow_specialize(node):
ysym.type.broadcastable): ysym.type.broadcastable):
rval = None rval = None
if N.all(y == 2): if numpy.all(y == 2):
rval = [T.sqr(xsym)] rval = [T.sqr(xsym)]
if N.all(y == 1): if numpy.all(y == 1):
rval = [xsym] rval = [xsym]
if N.all(y == 0): if numpy.all(y == 0):
rval = [T.fill(xsym, numpy.asarray(1, dtype=odtype))] rval = [T.fill(xsym, numpy.asarray(1, dtype=odtype))]
if N.all(y == 0.5): if numpy.all(y == 0.5):
rval = [T.sqrt(xsym)] rval = [T.sqrt(xsym)]
if N.all(y == -0.5): if numpy.all(y == -0.5):
rval = [T.inv(T.sqrt(xsym))] rval = [T.inv(T.sqrt(xsym))]
if N.all(y == -1): if numpy.all(y == -1):
rval = [T.inv(xsym)] rval = [T.inv(xsym)]
if N.all(y == -2): if numpy.all(y == -2):
rval = [T.inv(T.sqr(xsym))] rval = [T.inv(T.sqr(xsym))]
if rval: if rval:
rval[0] = T.cast(rval[0], odtype) rval[0] = T.cast(rval[0], odtype)
...@@ -5637,9 +5635,9 @@ def add_calculate(num, denum, aslist=False, out_type=None): ...@@ -5637,9 +5635,9 @@ def add_calculate(num, denum, aslist=False, out_type=None):
zero = theano._asarray(0, dtype=out_type.dtype) zero = theano._asarray(0, dtype=out_type.dtype)
# zero = 0.0 if out_type is None else theano._asarray(0, # zero = 0.0 if out_type is None else theano._asarray(0,
# dtype=out_type.dtype) # dtype=out_type.dtype)
v = reduce(N.add, num, zero) - reduce(N.add, denum, zero) v = reduce(numpy.add, num, zero) - reduce(numpy.add, denum, zero)
if aslist: if aslist:
if N.all(v == 0): if numpy.all(v == 0):
return [] return []
else: else:
return [v] return [v]
......
...@@ -5,14 +5,14 @@ import sys ...@@ -5,14 +5,14 @@ import sys
from copy import copy from copy import copy
import numpy import numpy
from six import string_types
from six.moves import reduce, xrange
# local imports # local imports
import theano import theano
from six.moves import reduce, xrange
from theano import tensor from theano import tensor
from theano.tensor import opt from theano.tensor import opt
from theano import gof from theano import gof
from six import string_types
from theano.compile import optdb from theano.compile import optdb
__docformat__ = "restructuredtext en" __docformat__ = "restructuredtext en"
......
import traceback import traceback
import numpy import numpy
from six import integer_types
import theano.tensor.basic import theano.tensor.basic
from theano.tensor.basic import TensorType, _tensor_py_operators from theano.tensor.basic import TensorType, _tensor_py_operators
...@@ -84,7 +85,7 @@ def scalar_constructor(value, name=None, strict=False, allow_downcast=None, ...@@ -84,7 +85,7 @@ def scalar_constructor(value, name=None, strict=False, allow_downcast=None,
if target != 'cpu': if target != 'cpu':
raise TypeError('not for cpu') raise TypeError('not for cpu')
if not isinstance(value, (numpy.number, float, int, complex)): if not isinstance(value, (numpy.number, float, integer_types, complex)):
raise TypeError() raise TypeError()
try: try:
dtype = value.dtype dtype = value.dtype
......
...@@ -6,11 +6,12 @@ Pool, DownsampleAvg, DownsampleSoftmax. ...@@ -6,11 +6,12 @@ Pool, DownsampleAvg, DownsampleSoftmax.
""" """
from __future__ import print_function from __future__ import print_function
# This file should move along with conv.py # This file should move along with conv.py
from six.moves import xrange
import six.moves.builtins as builtins
import warnings import warnings
import numpy import numpy
from six import integer_types
from six.moves import xrange
import six.moves.builtins as builtins
import theano import theano
from theano import gof, Op, tensor, Variable, Apply from theano import gof, Op, tensor, Variable, Apply
...@@ -233,7 +234,7 @@ class Pool(Op): ...@@ -233,7 +234,7 @@ class Pool(Op):
def __init__(self, ds, ignore_border=False, st=None, padding=(0, 0), def __init__(self, ds, ignore_border=False, st=None, padding=(0, 0),
mode='max'): mode='max'):
self.ds = tuple(ds) self.ds = tuple(ds)
if not all([isinstance(d, int) for d in ds]): if not all([isinstance(d, integer_types) for d in ds]):
raise ValueError( raise ValueError(
"Pool downsample parameters must be ints." "Pool downsample parameters must be ints."
" Got %s" % str(ds)) " Got %s" % str(ds))
...@@ -890,7 +891,7 @@ class DownsampleFactorMaxGradGrad(Op): ...@@ -890,7 +891,7 @@ class DownsampleFactorMaxGradGrad(Op):
def __init__(self, ds, ignore_border, st=None, padding=(0, 0), mode='max'): def __init__(self, ds, ignore_border, st=None, padding=(0, 0), mode='max'):
self.ds = tuple(ds) self.ds = tuple(ds)
if not all([isinstance(d, int) for d in ds]): if not all([isinstance(d, integer_types) for d in ds]):
raise ValueError( raise ValueError(
"Pool downsample parameters must be ints." "Pool downsample parameters must be ints."
" Got %s" % str(ds)) " Got %s" % str(ds))
......
...@@ -5,11 +5,11 @@ import warnings ...@@ -5,11 +5,11 @@ import warnings
import logging import logging
import numpy import numpy
from six import integer_types
from six.moves import xrange from six.moves import xrange
import theano import theano
from theano.compat import izip from theano.compat import izip
from six import integer_types
from theano.gradient import DisconnectedType from theano.gradient import DisconnectedType
from theano import gof from theano import gof
from theano.gof import Apply, Constant, hashtype, Op, Type, MethodNotDefined from theano.gof import Apply, Constant, hashtype, Op, Type, MethodNotDefined
...@@ -513,7 +513,7 @@ class Subtensor(Op): ...@@ -513,7 +513,7 @@ class Subtensor(Op):
if start is None: if start is None:
start = 0 start = 0
if (p.stop is None or if (p.stop is None or
(isinstance(p.stop, (int, numpy.integer, (isinstance(p.stop, (integer_types, numpy.integer,
numpy.ndarray)) and numpy.ndarray)) and
p.stop > start)): p.stop > start)):
broadcastable.append(True) broadcastable.append(True)
...@@ -680,7 +680,7 @@ class Subtensor(Op): ...@@ -680,7 +680,7 @@ class Subtensor(Op):
return pos[1] return pos[1]
def init_entry(entry, depth=0): def init_entry(entry, depth=0):
if isinstance(entry, (numpy.integer, int)): if isinstance(entry, (numpy.integer, integer_types)):
init_cmds.append( init_cmds.append(
"subtensor_spec[%i] = %i;" % (spec_pos(), "subtensor_spec[%i] = %i;" % (spec_pos(),
entry)) entry))
...@@ -972,7 +972,7 @@ class SubtensorPrinter: ...@@ -972,7 +972,7 @@ class SubtensorPrinter:
sidxs = [] sidxs = []
inbrack_pstate = pstate.clone(precedence=-1000) inbrack_pstate = pstate.clone(precedence=-1000)
for entry in idxs: for entry in idxs:
if isinstance(entry, int): if isinstance(entry, integer_types):
sidxs.append(str(entry)) sidxs.append(str(entry))
elif isinstance(entry, scal.Scalar): elif isinstance(entry, scal.Scalar):
sidxs.append(inbrack_pstate.pprinter.process(inputs.pop())) sidxs.append(inbrack_pstate.pprinter.process(inputs.pop()))
......
import unittest import unittest
import numpy import numpy
from six import integer_types
import theano import theano
from theano import tensor, function from theano import tensor, function
...@@ -14,7 +15,7 @@ class TestKeepDims(unittest.TestCase): ...@@ -14,7 +15,7 @@ class TestKeepDims(unittest.TestCase):
def makeKeepDims_local(self, x, y, axis): def makeKeepDims_local(self, x, y, axis):
if axis is None: if axis is None:
newaxis = list(range(x.ndim)) newaxis = list(range(x.ndim))
elif isinstance(axis, int): elif isinstance(axis, integer_types):
if axis < 0: if axis < 0:
newaxis = [axis + x.type.ndim] newaxis = [axis + x.type.ndim]
else: else:
......
...@@ -3,6 +3,7 @@ import traceback as tb ...@@ -3,6 +3,7 @@ import traceback as tb
import warnings import warnings
import numpy import numpy
from six import integer_types
from six.moves import xrange from six.moves import xrange
import theano import theano
...@@ -319,7 +320,7 @@ class _tensor_py_operators(object): ...@@ -319,7 +320,7 @@ class _tensor_py_operators(object):
""" """
if ndim is not None: if ndim is not None:
if not isinstance(ndim, int): if not isinstance(ndim, integer_types):
raise ValueError("Expected ndim to be an integer, is " + raise ValueError("Expected ndim to be an integer, is " +
str(type(ndim))) str(type(ndim)))
......
...@@ -2,10 +2,12 @@ from __future__ import print_function ...@@ -2,10 +2,12 @@ from __future__ import print_function
from copy import copy, deepcopy from copy import copy, deepcopy
from functools import wraps from functools import wraps
import logging import logging
from six.moves import StringIO
import sys import sys
import unittest import unittest
from six import integer_types
from six.moves import StringIO
try: try:
from nose.plugins.attrib import attr from nose.plugins.attrib import attr
except ImportError: except ImportError:
...@@ -206,7 +208,7 @@ class InferShapeTester(unittest.TestCase): ...@@ -206,7 +208,7 @@ class InferShapeTester(unittest.TestCase):
mode = mode.excluding(*excluding) mode = mode.excluding(*excluding)
if warn: if warn:
for var, inp in zip(inputs, numeric_inputs): for var, inp in zip(inputs, numeric_inputs):
if isinstance(inp, (int, float, list, tuple)): if isinstance(inp, (integer_types, float, list, tuple)):
inp = var.type.filter(inp) inp = var.type.filter(inp)
if not hasattr(inp, "shape"): if not hasattr(inp, "shape"):
continue continue
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论