提交 fea9e021 · 作者: Frédéric Bastien

Merge pull request #4214 from abergeron/fix_cudnn_mac

Fix windows test problems.
......@@ -11,7 +11,7 @@ import warnings
import theano
from theano import gof
from theano.compat import OrderedDict
from six import iteritems
from six import iteritems, integer_types
from six.moves import xrange
......@@ -647,7 +647,7 @@ class Rebroadcast(gof.Op):
items = sorted(axis)
self.axis = OrderedDict(items)
for axis, broad in iteritems(self.axis):
if not isinstance(axis, (numpy.integer, int)):
if not isinstance(axis, (numpy.integer, integer_types)):
raise TypeError("Rebroadcast needs integer axes. "
"Got {}".format(axis))
......
import numpy as np
from six import integer_types
import theano as th
import theano.tensor as T
......@@ -9,7 +10,7 @@ class Mlp(object):
def __init__(self, nfeatures=100, noutputs=10, nhiddens=50, rng=None):
if rng is None:
rng = 0
if isinstance(rng, int):
if isinstance(rng, integer_types):
rng = np.random.RandomState(rng)
self.rng = rng
self.nfeatures = nfeatures
......
......@@ -383,7 +383,7 @@ class Variable(Node):
if owner is not None and not isinstance(owner, Apply):
raise TypeError("owner must be an Apply instance", owner)
self.owner = owner
if index is not None and not isinstance(index, int):
if index is not None and not isinstance(index, integer_types):
raise TypeError("index must be an int", index)
self.index = index
if name is not None and not isinstance(name, string_types):
......
......@@ -19,7 +19,7 @@ import numpy
import theano
from theano import config
from theano.compat import izip, OrderedDict
from six import string_types, iteritems, itervalues
from six import string_types, iteritems, itervalues, integer_types
from six.moves import reduce
from theano.gof import graph, op, utils, unify, toolbox
from theano.gof.fg import InconsistencyError
......@@ -1507,7 +1507,7 @@ class PatternSub(LocalOptimizer):
return retry_with_equiv()
else:
u = u.merge(expr, v)
elif (isinstance(pattern, (int, float)) and
elif (isinstance(pattern, (integer_types, float)) and
isinstance(expr, graph.Constant)):
if numpy.all(
theano.tensor.constant(pattern).value == expr.value):
......@@ -1534,7 +1534,7 @@ class PatternSub(LocalOptimizer):
return pattern[0](*args)
elif isinstance(pattern, string_types):
return u[unify.Var(pattern)]
elif isinstance(pattern, (int, float)):
elif isinstance(pattern, (integer_types, float)):
return pattern
else:
return pattern.clone()
......
......@@ -4,7 +4,7 @@ import sys
from theano.compat import DefaultOrderedDict
from theano.misc.ordered_set import OrderedSet
from six import StringIO
from six import StringIO, integer_types
from theano.gof import opt
from theano import config
......@@ -310,7 +310,7 @@ class SequenceDB(DB):
def register(self, name, obj, position, *tags):
super(SequenceDB, self).register(name, obj, *tags)
assert isinstance(position, (int, float))
assert isinstance(position, (integer_types, float))
self.__position__[name] = position
def query(self, *tags, **kwtags):
......
......@@ -3,7 +3,7 @@ import linecache
import sys
import numpy
from six import iteritems
from six import iteritems, integer_types, string_types
from theano import config
from theano.compat import OrderedDict, PY3
......@@ -326,7 +326,7 @@ RETRY = Keyword("RETRY", False)
FAILURE = Keyword("FAILURE", False)
simple_types = (int, float, str, bool, None.__class__, Keyword)
simple_types = integer_types + string_types + (float, bool, None.__class__, Keyword)
ANY_TYPE = Keyword("ANY_TYPE")
......@@ -521,7 +521,7 @@ def hash_from_dict(d):
first_part = [k for k, v in items]
second_part = []
for k, v in items:
assert isinstance(k, (str, int, float))
assert isinstance(k, (string_types, integer_types, float))
if isinstance(v, (tuple, list)):
second_part += [tuple(v)]
else:
......
......@@ -13,11 +13,11 @@ import hashlib
import numpy as np
from six import string_types, integer_types, iteritems
from six.moves import StringIO, reduce
import theano
from theano import gof
from theano import config
from six.moves import StringIO, reduce
from theano.gof import Op, Apply
from theano.compile import Function, debugmode, SharedVariable
from theano.compile.profilemode import ProfileMode
......@@ -88,7 +88,7 @@ def debugprint(obj, depth=-1, print_type=False,
to the Apply's identifier, to indicate which output a line corresponds to.
"""
if not isinstance(depth, int):
if not isinstance(depth, integer_types):
raise Exception("depth parameter must be an int")
if file == 'str':
_file = StringIO()
......
......@@ -4,10 +4,12 @@ import os
import logging
_logger = logging.getLogger(__name__)
from six import integer_types
from six.moves import StringIO, reduce
import theano
from theano import Apply
from theano import tensor
from six.moves import StringIO, reduce
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda import GpuOp
from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
......@@ -874,7 +876,7 @@ class BaseGpuCorrMM(GpuOp):
if border_mode != "valid":
raise ValueError("border_mode must be 'valid' if pad is given")
border_mode = pad
if isinstance(border_mode, int):
if isinstance(border_mode, integer_types):
border_mode = (border_mode, border_mode)
if isinstance(border_mode, tuple):
pad_h, pad_w = map(int, border_mode)
......
......@@ -2,6 +2,8 @@ import os
import numpy
import warnings
from six import integer_types
import theano
from theano import Apply, tensor, config, Variable
from theano.scalar import as_scalar, constant, Log
......@@ -127,7 +129,7 @@ class GpuDnnConvDesc(GpuOp):
def __init__(self, border_mode, subsample=(1, 1), conv_mode='conv',
precision="float32"):
if isinstance(border_mode, int):
if isinstance(border_mode, integer_types):
border_mode = (border_mode,) * len(subsample)
if isinstance(border_mode, tuple):
assert len(border_mode) == len(subsample)
......
......@@ -217,19 +217,29 @@ def _params_allgood(ishape, kshape, mode, subsample=(1, 1), img_stride=(1, 1),
assert [(sh == 1) is br for
sh, br in zip(cpuval.shape[:2], op.type.broadcastable[:2])]
if (t2 is not None):
if (t2 is not None and verbose > 0):
if mode == 'valid':
approx_fp = cpuval.size * ishape[1] * kshape[2] * kshape[3] * 2
else:
approx_fp = (ishape[0] * kshape[0] * kshape[1] * kshape[2] *
kshape[3] * ishape[2] * ishape[3] * 2)
approx_fp /= 1e6
cpu_mflops = approx_fp / (t1 - t0)
gpu_mflops = approx_fp / (t3 - t2)
if verbose > 0:
print('%15s' % str(ishape), '%15s' % str(kshape), end=' ', file=sys.stdout)
print('%12.5f %7.2f %7.2f %7.1f' % (approx_fp,
cpu_mflops, gpu_mflops, (t1 - t0) / (t2 - t1)), file=sys.stdout)
if t1 - t0 != 0:
cpu_mflops = approx_fp / (t1 - t0)
else:
cpu_mflops = float('inf')
if t3 - t2 != 0:
gpu_mflops = approx_fp / (t3 - t2)
else:
gpu_mflops = float('inf')
if t2 - t1 != 0:
div = (t1 - t0) / (t2 - t1)
else:
div = float('inf')
print('%15s' % str(ishape), '%15s' % str(kshape), end=' ')
print('%12.5f %7.2f %7.2f %7.1f' % (
approx_fp, cpu_mflops, gpu_mflops, div))
def exec_conv(version, shapes, verbose, random, mode,
......
import os
import numpy
import warnings
import numpy
from six import integer_types
import theano
from theano import Op, Apply, tensor, config, Variable
from theano.scalar import as_scalar, constant, Log
......@@ -281,7 +283,7 @@ class GpuDnnConvDesc(COp):
precision="float32"):
COp.__init__(self, ["conv_desc.c"], "APPLY_SPECIFIC(conv_desc)")
if isinstance(border_mode, int):
if isinstance(border_mode, integer_types):
border_mode = (border_mode,) * len(subsample)
if isinstance(border_mode, tuple):
assert len(border_mode) == len(subsample)
......
......@@ -4,10 +4,11 @@ import os
import copy
import numpy
from six import integer_types
from six.moves import StringIO
import theano
from theano import tensor, gof
from six.moves import StringIO
from theano.tensor.subtensor import IncSubtensor, Subtensor, get_idx_list
import theano.tensor.inplace
......@@ -116,7 +117,7 @@ class GpuSubtensor(HideC, Subtensor):
def fix_idx(idx):
if idx is None:
return "0", 1
elif isinstance(idx, (numpy.integer, int)):
elif isinstance(idx, (numpy.integer, integer_types)):
return str(idx), 0
elif isinstance(idx, gof.Type):
return indices.pop(0), 0
......@@ -143,7 +144,7 @@ class GpuSubtensor(HideC, Subtensor):
else:
if isinstance(idx, gof.Type):
start = indices.pop(0)
elif isinstance(idx, (numpy.integer, int)):
elif isinstance(idx, (numpy.integer, integer_types)):
start = idx
else:
assert 0, idx
......
......@@ -3,7 +3,7 @@ import logging
logger = logging.getLogger(__name__)
import numpy
from six import iteritems
from six import iteritems, integer_types
from six.moves import xrange
from theano.gof import Op, Apply
......@@ -412,7 +412,7 @@ def spectral_radius_bound(X, log2_exponent):
"""
if X.type.ndim != 2:
raise TypeError('spectral_radius_bound requires a matrix argument', X)
if not isinstance(log2_exponent, int):
if not isinstance(log2_exponent, integer_types):
raise TypeError('spectral_radius_bound requires an integer exponent',
log2_exponent)
if log2_exponent <= 0:
......
......@@ -9,6 +9,7 @@ from __future__ import print_function
import warnings
import numpy
from six import integer_types
from six.moves import xrange
from theano import Op, Apply, shared, config, Variable
......@@ -396,7 +397,8 @@ class mrg_uniform(mrg_uniform_base):
NORM = '4.656612873077392578125e-10'
return """
//////// <code generated by mrg_uniform>
npy_int64 odims[%(ndim)s];
// The +1 is to avoid odims[0] which fails on windows
npy_int64 odims[%(ndim)s+1];
npy_int64 n_elements = 1;
int n_streams = 0;
int must_alloc_sample = ((NULL == %(o_sample)s)
......@@ -667,7 +669,8 @@ class GPU_mrg_uniform(mrg_uniform_base, GpuOp):
return """
//////// <code generated by mrg_uniform>
npy_int64 M1 = 2147483647; //2^31 - 1
npy_int64 odims[%(ndim)s];
// The +1 is to avoid odims[0] which fails on windows
npy_int64 odims[%(ndim)s+1];
npy_int64 n_elements = 1;
int n_streams, n_streams_used_in_this_call;
int must_alloc_sample = ((NULL == %(o_sample)s)
......@@ -931,7 +934,8 @@ class GPUA_mrg_uniform(GpuKernelBase, mrg_uniform_base):
return """
npy_int64 M1 = 2147483647; //2^31 - 1
size_t odims[%(ndim)s];
// The +1 is to avoid odims[0] which fails on windows
size_t odims[%(ndim)s+1];
size_t n_elements = 1;
unsigned int n_streams;
int must_alloc_sample = ((NULL == %(o_sample)s)
......@@ -1060,7 +1064,7 @@ def guess_n_streams(size, warn=False):
# Note that this code was moved out of `MRG_RandomStreams` so that it can
# be easily accessed from tests, where we want to disable the warning.
if (isinstance(size, (tuple, list)) and
all([isinstance(i, int) for i in size])):
all([isinstance(i, integer_types) for i in size])):
# We can make a guess.
r = 1
for s in size:
......@@ -1127,7 +1131,7 @@ class MRG_RandomStreams(object):
def set_rstate(self, seed):
# TODO : need description for method, parameter
if isinstance(seed, int):
if isinstance(seed, integer_types):
if seed == 0:
raise ValueError('seed should not be 0', seed)
elif seed >= M2:
......@@ -1289,9 +1293,9 @@ class MRG_RandomStreams(object):
if isinstance(size, tuple):
msg = "size must be a tuple of int or a Theano variable"
assert all([isinstance(i, (numpy.integer, int, Variable))
assert all([isinstance(i, (numpy.integer, integer_types, Variable))
for i in size]), msg
if any([isinstance(i, (numpy.integer, int)) and i <= 0
if any([isinstance(i, (numpy.integer, integer_types)) and i <= 0
for i in size]):
raise ValueError(
"The specified size contains a dimension with value <= 0",
......@@ -1377,7 +1381,7 @@ class MRG_RandomStreams(object):
raise TypeError("You have to specify pvals")
pvals = as_tensor_variable(pvals)
if size is not None:
if any([isinstance(i, int) and i <= 0 for i in size]):
if any([isinstance(i, integer_types) and i <= 0 for i in size]):
raise ValueError(
"The specified size contains a dimension with value <= 0",
size)
......@@ -1483,7 +1487,7 @@ class MRG_RandomStreams(object):
evened = False
constant = False
if (isinstance(size, tuple) and
all([isinstance(i, (numpy.integer, int)) for i in size])):
all([isinstance(i, (numpy.integer, integer_types)) for i in size])):
constant = True
# Force dtype because it defaults to float when size is empty
n_samples = numpy.prod(size, dtype='int64')
......
......@@ -17,6 +17,8 @@ way (as scan does) to create a shared variable of this kind.
"""
import numpy
from six import integer_types
from theano.compile import SharedVariable
from .basic import Scalar, _scalar_py_operators
......@@ -46,7 +48,7 @@ def shared(value, name=None, strict=False, allow_downcast=None):
We implement this using 0-d tensors for now.
"""
if not isinstance(value, (numpy.number, float, int, complex)):
if not isinstance(value, (numpy.number, float, integer_types, complex)):
raise TypeError()
try:
dtype = value.dtype
......
......@@ -48,7 +48,7 @@ import numpy
import warnings
from theano.compat import ifilter, izip
from six import iteritems
from six import iteritems, integer_types
from six.moves import xrange
from theano.compile import SharedVariable, function
from theano import compile
......@@ -372,7 +372,7 @@ def scan(fn,
# To do that we check here to see the nature of n_steps
n_fixed_steps = None
if isinstance(n_steps, (float, int)):
if isinstance(n_steps, (float, integer_types)):
n_fixed_steps = int(n_steps)
else:
try:
......
......@@ -62,7 +62,7 @@ import logging
import time
import numpy
from six import iteritems
from six import iteritems, integer_types
from six.moves import xrange
import theano
......@@ -837,7 +837,7 @@ class Scan(PureOp):
profile = None
if (theano.config.profile or
(isinstance(self.profile, (string_types, bool, int))
(isinstance(self.profile, (string_types, bool, integer_types))
and self.profile)):
if isinstance(self.profile, string_types):
profile = ScanProfileStats(name=self.profile)
......
......@@ -14,6 +14,7 @@ import sys
import numpy
from numpy.lib.stride_tricks import as_strided
from six import integer_types
from six.moves import xrange
import scipy.sparse
......@@ -1402,7 +1403,7 @@ class GetItemScalar(gof.op.Op):
raise Exception("GetItemScalar called with a slice as index!")
# in case of indexing using int instead of theano variable
elif isinstance(ind, int):
elif isinstance(ind, integer_types):
ind = theano.tensor.constant(ind)
input_op += [ind]
......
......@@ -4,6 +4,7 @@ import sys
import warnings
import numpy
from six import integer_types
from six.moves import xrange
import numbers
......@@ -21,7 +22,6 @@ from theano.tensor.type import TensorType, values_eq_approx_always_true
from theano.tensor.type_other import NoneConst
from theano import scalar as scal
from functools import partial
from six import integer_types
from theano import compile, printing
from theano.printing import pprint, min_informative_str
# For history
......@@ -606,7 +606,7 @@ def get_scalar_constant_value(orig_v, elemwise=True,
# to depend on passing it None)
raise NotScalarConstantError()
if isinstance(v, (numpy.integer, int, float)):
if isinstance(v, (numpy.integer, integer_types, float)):
return numpy.asarray(v)
if isinstance(v, numpy.ndarray):
......@@ -786,7 +786,7 @@ def tensor(*args, **kwargs):
def _multi(*fns):
def f2(f, *names):
if names and isinstance(names[0], int):
if names and isinstance(names[0], integer_types):
if names == 1:
return f()
else:
......@@ -1290,7 +1290,7 @@ class MaxAndArgmax(Op):
def make_node(self, x, axis=None):
x = _as_tensor_variable(x)
if isinstance(axis, (int, numpy.integer)):
if isinstance(axis, (integer_types, numpy.integer)):
axis = [int(axis)]
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
axis = [int(axis)]
......@@ -1307,7 +1307,7 @@ class MaxAndArgmax(Op):
else:
assert (axis.dtype.startswith("int") or
axis.dtype.startswith("uint"))
if isinstance(axis.data, (int, numpy.integer)) or \
if isinstance(axis.data, (integer_types, numpy.integer)) or \
(isinstance(axis.data, numpy.ndarray) and
axis.data.ndim == 0):
axis = [int(axis.data)]
......@@ -1536,7 +1536,7 @@ def makeKeepDims(x, y, axis):
if axis is None:
axis = list(range(x.type.ndim))
elif isinstance(axis, (int, numpy.integer)):
elif isinstance(axis, (integer_types, numpy.integer)):
axis = [axis]
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
axis = [int(axis)]
......@@ -1544,7 +1544,7 @@ def makeKeepDims(x, y, axis):
axis = [int(a) for a in axis]
newaxis = []
for a in axis:
if not isinstance(a, int):
if not isinstance(a, integer_types):
raise ValueError(
"keepdims option can be used only with constant axis")
if a < 0:
......@@ -3082,7 +3082,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False,
if axis is None:
axis = list(range(input.ndim))
elif isinstance(axis, (int, numpy.integer)):
elif isinstance(axis, (integer_types, numpy.integer)):
axis = [axis]
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
axis = [int(axis)]
......@@ -3126,7 +3126,7 @@ def var(input, axis=None, keepdims=False):
input_ndim = input.type.ndim
if axis is None:
axis = list(range(input_ndim))
elif isinstance(axis, (int, numpy.integer)):
elif isinstance(axis, (integer_types, numpy.integer)):
axis = [axis]
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
axis = [int(axis)]
......@@ -3769,7 +3769,7 @@ class Join(Op):
as_tensor_variable_args[0].type.broadcastable)
ndim = len(bcastable)
# Axis can also be a constant
if not isinstance(axis, int):
if not isinstance(axis, integer_types):
try:
# Note : `get_scalar_constant_value` returns a ndarray not
# an int
......@@ -3777,7 +3777,7 @@ class Join(Op):
except NotScalarConstantError:
pass
if isinstance(axis, int):
if isinstance(axis, integer_types):
# Basically, broadcastable -> length 1, but the
# converse does not hold. So we permit e.g. T/F/T
# joins, and if they fail at runtime they fail, but if
......
......@@ -3,12 +3,12 @@ import sys
from copy import copy
import numpy
from six import iteritems, integer_types
from six.moves import xrange
import theano
from theano import gof
from theano.compat import izip
from six import iteritems
from six.moves import xrange
from theano.gof import Apply, Op, OpenMPOp
from theano import scalar
from theano.scalar import get_scalar_type
......@@ -135,10 +135,11 @@ class DimShuffle(Op):
for i, j in enumerate(new_order):
if j != 'x':
# There is a bug in numpy that results in isinstance(x, int)
# returning False for numpy integers.
# See <http://projects.scipy.org/numpy/ticket/2235>.
if not isinstance(j, (int, numpy.integer)):
# There is a bug in numpy that results in
# isinstance(x, integer_types) returning False for
# numpy integers. See
# <http://projects.scipy.org/numpy/ticket/2235>.
if not isinstance(j, (integer_types, numpy.integer)):
raise TypeError("DimShuffle indices must be python ints.")
if j >= len(input_broadcastable):
raise ValueError(("new_order[%d] is %d, but the input "
......@@ -1325,10 +1326,10 @@ class CAReduce(Op):
if axis is None:
self.axis = axis
# There is a bug in numpy that results in isinstance(x, int) returning
# False for numpy integers.
# See <http://projects.scipy.org/numpy/ticket/2235>.
elif isinstance(axis, (int, numpy.integer)):
# There is a bug in numpy that results in isinstance(x,
# integer_types) returning False for numpy integers. See
# <http://projects.scipy.org/numpy/ticket/2235>.
elif isinstance(axis, (integer_types, numpy.integer)):
self.axis = (axis,)
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
self.axis = (int(axis),)
......
......@@ -4,7 +4,7 @@ Abstract conv interface
import numpy as np
import logging
from six import reraise
from six import reraise, integer_types
import sys
import theano
......@@ -238,17 +238,17 @@ def conv2d_grad_wrt_inputs(output_grad,
# checking the type of input_shape
for dim in [0, 1]:
assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,
int, type(None)))
integer_types, type(None)))
for dim in [2, 3]:
assert isinstance(input_shape[dim], (theano.tensor.TensorVariable,
theano.tensor.TensorConstant,
int))
integer_types))
# checking the type of filter_shape
if filter_shape is not None:
for dim in [0, 1, 2, 3]:
assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,
int, type(None)))
integer_types, type(None)))
# setting the last two dimensions of input_shape to None, if
# the type of these dimensions is TensorVariable.
......@@ -361,17 +361,17 @@ def conv2d_grad_wrt_weights(input,
# checking the type of filter_shape
for dim in [0, 1]:
assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,
int, type(None)))
integer_types, type(None)))
for dim in [2, 3]:
assert isinstance(filter_shape[dim], (theano.tensor.TensorVariable,
theano.tensor.TensorConstant,
int))
integer_types))
# checking the type of input_shape
if input_shape is not None:
for dim in [0, 1, 2, 3]:
assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,
int, type(None)))
integer_types, type(None)))
# setting the last two dimensions of filter_shape to None, if
# the type of these dimensions is TensorVariable.
......@@ -625,7 +625,7 @@ class BaseAbstractConv2d(Op):
border_mode="valid", subsample=(1, 1),
filter_flip=True):
if isinstance(border_mode, int):
if isinstance(border_mode, integer_types):
border_mode = (border_mode, border_mode)
if isinstance(border_mode, tuple):
pad_h, pad_w = map(int, border_mode)
......
import os
import logging
from six import integer_types
import theano
from theano import Apply
from theano import gof
......@@ -30,7 +32,7 @@ class BaseCorrMM(gof.Op):
__props__ = ('border_mode', 'subsample')
def __init__(self, border_mode="valid", subsample=(1, 1)):
if isinstance(border_mode, int):
if isinstance(border_mode, integer_types):
if border_mode < 0:
raise ValueError(
'invalid border_mode {}, which must be a '
......
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
import numpy
from six import integer_types
import theano
import theano.tensor as T
......@@ -92,7 +93,7 @@ class TestCorr2D(utt.InferShapeTester):
padHW = numpy.floor(fil_shape2d / 2).astype('int32')
elif isinstance(border_mode, tuple):
padHW = numpy.array(border_mode)
elif isinstance(border_mode, int):
elif isinstance(border_mode, integer_types):
padHW = numpy.array([border_mode, border_mode])
else:
raise NotImplementedError('Unsupported border_mode {}'.format(border_mode))
......
......@@ -14,14 +14,12 @@ import traceback
import warnings
import numpy
import numpy as N # guys... please don't do this in the library :(
from six.moves import xrange
from six import integer_types, iteritems
from six.moves import reduce, xrange
import theano
from theano import gof
from theano.compat import izip
from six import integer_types, iteritems
from six.moves import reduce
from theano.gof import opt, InconsistencyError, TopoOptimizer, graph
from theano.gof import Variable, Constant
from theano.gof.utils import MethodNotDefined
......@@ -1169,11 +1167,11 @@ class ShapeFeature(object):
# - Shape_i(i)(other_r);
# - Shape_i(i)(r).
merged_shape.append(r_shape[i])
elif isinstance(r_shape[i], (Constant, int)):
elif isinstance(r_shape[i], (Constant, integer_types)):
# We do this to call less often ancestors and make
# sure we have the simplest shape possible.
merged_shape.append(r_shape[i])
elif isinstance(other_shape[i], (Constant, int)):
elif isinstance(other_shape[i], (Constant, integer_types)):
# We do this to call less often ancestors and make
# sure we have the simplest shape possible.
merged_shape.append(other_shape[i])
......@@ -1826,7 +1824,7 @@ def local_subtensor_make_vector(node):
else:
return
if isinstance(idx, (int, numpy.integer)):
if isinstance(idx, (integer_types, numpy.integer)):
# We don't need to copy over any stack traces here
return [x.owner.inputs[idx]]
elif isinstance(idx, Variable):
......@@ -2452,7 +2450,7 @@ def local_useless_subtensor(node):
length_pos = shape_of[node.inputs[0]][pos]
if isinstance(idx.stop, (int, numpy.integer)):
if isinstance(idx.stop, (integer_types, numpy.integer)):
length_pos_data = sys.maxsize
try:
length_pos_data = get_scalar_constant_value(length_pos)
......@@ -4497,7 +4495,7 @@ class Canonizer(gof.LocalOptimizer):
num, denum = self.simplify(list(orig_num), list(orig_denum), out.type)
def same(x, y):
return len(x) == len(y) and all(N.all(xe == ye) for xe, ye in
return len(x) == len(y) and all(numpy.all(xe == ye) for xe, ye in
zip(x, y))
if same(orig_num, num) and same(orig_denum, denum):
......@@ -4538,7 +4536,7 @@ def mul_calculate(num, denum, aslist=False, out_type=None):
if aslist:
return []
else:
return N.int8(1)
return numpy.int8(1)
# Make sure we do not accidently upcast data types.
if out_type is None:
......@@ -4547,9 +4545,9 @@ def mul_calculate(num, denum, aslist=False, out_type=None):
out_dtype = out_type.dtype
one = theano._asarray(1, dtype=out_dtype)
v = reduce(N.multiply, num, one) / reduce(N.multiply, denum, one)
v = reduce(numpy.multiply, num, one) / reduce(numpy.multiply, denum, one)
if aslist:
if N.all(v == 1):
if numpy.all(v == 1):
return []
else:
return [v]
......@@ -5205,7 +5203,7 @@ register_canonicalize(local_mul_zero)
@gof.local_optimizer([T.true_div])
def local_div_to_inv(node):
if node.op == T.true_div and N.all(
if node.op == T.true_div and numpy.all(
local_mul_canonizer.get_constant(node.inputs[0]) == 1.0):
out = node.outputs[0]
new_out = T.inv(local_mul_canonizer.merge_num_denum(node.inputs[1:],
......@@ -5286,19 +5284,19 @@ def local_pow_specialize(node):
ysym.type.broadcastable):
rval = None
if N.all(y == 2):
if numpy.all(y == 2):
rval = [T.sqr(xsym)]
if N.all(y == 1):
if numpy.all(y == 1):
rval = [xsym]
if N.all(y == 0):
if numpy.all(y == 0):
rval = [T.fill(xsym, numpy.asarray(1, dtype=odtype))]
if N.all(y == 0.5):
if numpy.all(y == 0.5):
rval = [T.sqrt(xsym)]
if N.all(y == -0.5):
if numpy.all(y == -0.5):
rval = [T.inv(T.sqrt(xsym))]
if N.all(y == -1):
if numpy.all(y == -1):
rval = [T.inv(xsym)]
if N.all(y == -2):
if numpy.all(y == -2):
rval = [T.inv(T.sqr(xsym))]
if rval:
rval[0] = T.cast(rval[0], odtype)
......@@ -5637,9 +5635,9 @@ def add_calculate(num, denum, aslist=False, out_type=None):
zero = theano._asarray(0, dtype=out_type.dtype)
# zero = 0.0 if out_type is None else theano._asarray(0,
# dtype=out_type.dtype)
v = reduce(N.add, num, zero) - reduce(N.add, denum, zero)
v = reduce(numpy.add, num, zero) - reduce(numpy.add, denum, zero)
if aslist:
if N.all(v == 0):
if numpy.all(v == 0):
return []
else:
return [v]
......
......@@ -5,14 +5,14 @@ import sys
from copy import copy
import numpy
from six import string_types
from six.moves import reduce, xrange
# local imports
import theano
from six.moves import reduce, xrange
from theano import tensor
from theano.tensor import opt
from theano import gof
from six import string_types
from theano.compile import optdb
__docformat__ = "restructuredtext en"
......
import traceback
import numpy
from six import integer_types
import theano.tensor.basic
from theano.tensor.basic import TensorType, _tensor_py_operators
......@@ -84,7 +85,7 @@ def scalar_constructor(value, name=None, strict=False, allow_downcast=None,
if target != 'cpu':
raise TypeError('not for cpu')
if not isinstance(value, (numpy.number, float, int, complex)):
if not isinstance(value, (numpy.number, float, integer_types, complex)):
raise TypeError()
try:
dtype = value.dtype
......
......@@ -6,11 +6,12 @@ Pool, DownsampleAvg, DownsampleSoftmax.
"""
from __future__ import print_function
# This file should move along with conv.py
from six.moves import xrange
import six.moves.builtins as builtins
import warnings
import numpy
from six import integer_types
from six.moves import xrange
import six.moves.builtins as builtins
import theano
from theano import gof, Op, tensor, Variable, Apply
......@@ -233,7 +234,7 @@ class Pool(Op):
def __init__(self, ds, ignore_border=False, st=None, padding=(0, 0),
mode='max'):
self.ds = tuple(ds)
if not all([isinstance(d, int) for d in ds]):
if not all([isinstance(d, integer_types) for d in ds]):
raise ValueError(
"Pool downsample parameters must be ints."
" Got %s" % str(ds))
......@@ -890,7 +891,7 @@ class DownsampleFactorMaxGradGrad(Op):
def __init__(self, ds, ignore_border, st=None, padding=(0, 0), mode='max'):
self.ds = tuple(ds)
if not all([isinstance(d, int) for d in ds]):
if not all([isinstance(d, integer_types) for d in ds]):
raise ValueError(
"Pool downsample parameters must be ints."
" Got %s" % str(ds))
......
......@@ -5,11 +5,11 @@ import warnings
import logging
import numpy
from six import integer_types
from six.moves import xrange
import theano
from theano.compat import izip
from six import integer_types
from theano.gradient import DisconnectedType
from theano import gof
from theano.gof import Apply, Constant, hashtype, Op, Type, MethodNotDefined
......@@ -513,7 +513,7 @@ class Subtensor(Op):
if start is None:
start = 0
if (p.stop is None or
(isinstance(p.stop, (int, numpy.integer,
(isinstance(p.stop, (integer_types, numpy.integer,
numpy.ndarray)) and
p.stop > start)):
broadcastable.append(True)
......@@ -680,7 +680,7 @@ class Subtensor(Op):
return pos[1]
def init_entry(entry, depth=0):
if isinstance(entry, (numpy.integer, int)):
if isinstance(entry, (numpy.integer, integer_types)):
init_cmds.append(
"subtensor_spec[%i] = %i;" % (spec_pos(),
entry))
......@@ -972,7 +972,7 @@ class SubtensorPrinter:
sidxs = []
inbrack_pstate = pstate.clone(precedence=-1000)
for entry in idxs:
if isinstance(entry, int):
if isinstance(entry, integer_types):
sidxs.append(str(entry))
elif isinstance(entry, scal.Scalar):
sidxs.append(inbrack_pstate.pprinter.process(inputs.pop()))
......
import unittest
import numpy
from six import integer_types
import theano
from theano import tensor, function
......@@ -14,7 +15,7 @@ class TestKeepDims(unittest.TestCase):
def makeKeepDims_local(self, x, y, axis):
if axis is None:
newaxis = list(range(x.ndim))
elif isinstance(axis, int):
elif isinstance(axis, integer_types):
if axis < 0:
newaxis = [axis + x.type.ndim]
else:
......
......@@ -3,6 +3,7 @@ import traceback as tb
import warnings
import numpy
from six import integer_types
from six.moves import xrange
import theano
......@@ -319,7 +320,7 @@ class _tensor_py_operators(object):
"""
if ndim is not None:
if not isinstance(ndim, int):
if not isinstance(ndim, integer_types):
raise ValueError("Expected ndim to be an integer, is " +
str(type(ndim)))
......
......@@ -2,10 +2,12 @@ from __future__ import print_function
from copy import copy, deepcopy
from functools import wraps
import logging
from six.moves import StringIO
import sys
import unittest
from six import integer_types
from six.moves import StringIO
try:
from nose.plugins.attrib import attr
except ImportError:
......@@ -204,7 +206,7 @@ class InferShapeTester(unittest.TestCase):
mode = mode.excluding(*excluding)
if warn:
for var, inp in zip(inputs, numeric_inputs):
if isinstance(inp, (int, float, list, tuple)):
if isinstance(inp, (integer_types, float, list, tuple)):
inp = var.type.filter(inp)
if not hasattr(inp, "shape"):
continue
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册或登录后发表评论