提交 ef9f6efc authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #5763 from Amrithasuresh/master

Updated numpy as np #4218
差异被折叠。
......@@ -2,7 +2,7 @@
Implementations of BLAS Ops based on scipy's BLAS bindings.
"""
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
from theano.tensor.blas import Ger, ger, ger_destructive, have_fblas
from theano.tensor.blas import blas_optdb, optdb, local_optimizer
......@@ -13,10 +13,10 @@ from theano.tensor.opt import in2out
if have_fblas:
from theano.tensor.blas import fblas
_blas_ger_fns = {
numpy.dtype('float32'): fblas.sger,
numpy.dtype('float64'): fblas.dger,
numpy.dtype('complex64'): fblas.cgeru,
numpy.dtype('complex128'): fblas.zgeru,
np.dtype('float32'): fblas.sger,
np.dtype('float64'): fblas.dger,
np.dtype('complex64'): fblas.cgeru,
np.dtype('complex128'): fblas.zgeru,
}
......@@ -24,7 +24,7 @@ class ScipyGer(Ger):
def prepare_node(self, node, storage_map, compute_map, impl):
if impl == 'py':
node.tag.local_ger = _blas_ger_fns[numpy.dtype(
node.tag.local_ger = _blas_ger_fns[np.dtype(
node.inputs[0].type.dtype)]
def perform(self, node, inputs, output_storage):
......
......@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
import sys
from copy import copy
import numpy
import numpy as np
from six import iteritems, integer_types
from six.moves import xrange
......@@ -21,7 +21,7 @@ from theano.misc.frozendict import frozendict
config = theano.config
_numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
_numpy_ver = [int(n) for n in np.__version__.split('.')[:2]]
# tensor depends on elemwise to provide definitions for several ops
......@@ -148,7 +148,7 @@ class DimShuffle(Op):
# isinstance(x, integer_types) returning False for
# numpy integers. See
# <http://projects.scipy.org/numpy/ticket/2235>.
if not isinstance(j, (integer_types, numpy.integer)):
if not isinstance(j, (integer_types, np.integer)):
raise TypeError(
"DimShuffle indices must be python ints. "
"Got: '%s' of type '%s'.",
......@@ -228,7 +228,7 @@ class DimShuffle(Op):
storage, = out
# drop
res = input
if type(res) != numpy.ndarray and type(res) != numpy.memmap:
if type(res) != np.ndarray and type(res) != np.memmap:
raise TypeError(res)
# transpose
......@@ -242,9 +242,9 @@ class DimShuffle(Op):
# copy (if not inplace)
if not self.inplace:
res = numpy.copy(res)
res = np.copy(res)
storage[0] = numpy.asarray(res) # asarray puts scalars back into array
storage[0] = np.asarray(res) # asarray puts scalars back into array
def infer_shape(self, node, shapes):
ishp, = shapes
......@@ -487,7 +487,7 @@ second dimension
nfunc_spec = getattr(scalar_op, 'nfunc_spec', None)
self.nfunc_spec = nfunc_spec
if nfunc_spec:
self.nfunc = getattr(numpy, nfunc_spec[0])
self.nfunc = getattr(np, nfunc_spec[0])
super(Elemwise, self).__init__(openmp=openmp)
......@@ -504,11 +504,11 @@ second dimension
self.nfunc = None
self.inplace_pattern = frozendict(self.inplace_pattern)
if getattr(self, 'nfunc_spec', None):
self.nfunc = getattr(numpy, self.nfunc_spec[0])
self.nfunc = getattr(np, self.nfunc_spec[0])
elif 0 < self.scalar_op.nin < 32:
self.ufunc = numpy.frompyfunc(self.scalar_op.impl,
self.scalar_op.nin,
self.scalar_op.nout)
self.ufunc = np.frompyfunc(self.scalar_op.impl,
self.scalar_op.nin,
self.scalar_op.nout)
def get_output_info(self, dim_shuffle, *inputs):
"""Return the outputs dtype and broadcastable pattern and the
......@@ -723,7 +723,7 @@ second dimension
# the gradient contains a constant, translate it as
# an equivalent TensorType of size 1 and proper number of
# dimensions
res = theano.tensor.constant(numpy.asarray(r.data),
res = theano.tensor.constant(np.asarray(r.data),
dtype=r.type.dtype)
return DimShuffle((), ['x'] * nd)(res)
......@@ -750,9 +750,9 @@ second dimension
self.ufunc is None and
impl == 'py'):
ufunc = numpy.frompyfunc(self.scalar_op.impl,
len(node.inputs),
self.scalar_op.nout)
ufunc = np.frompyfunc(self.scalar_op.impl,
len(node.inputs),
self.scalar_op.nout)
if self.scalar_op.nin > 0:
# We can reuse it for many nodes
self.ufunc = ufunc
......@@ -772,9 +772,9 @@ second dimension
# when the input is complex. So add it only when inputs is int.
out_dtype = node.outputs[0].dtype
if (out_dtype in theano.tensor.float_dtypes and
isinstance(self.nfunc, numpy.ufunc) and
isinstance(self.nfunc, np.ufunc) and
node.inputs[0].dtype in theano.tensor.discrete_dtypes):
char = numpy.sctype2char(out_dtype)
char = np.sctype2char(out_dtype)
sig = char * node.nin + '->' + char * node.nout
node.tag.sig = sig
node.tag.fake_node = Apply(
......@@ -870,7 +870,7 @@ second dimension
if getattr(variable, "dtype", "") == 'object':
# Since numpy 1.6, function created with numpy.frompyfunc
# always return an ndarray with dtype object
variable = numpy.asarray(variable, dtype=nout.dtype)
variable = np.asarray(variable, dtype=nout.dtype)
if i in self.inplace_pattern:
odat = inputs[self.inplace_pattern[i]]
......@@ -879,15 +879,15 @@ second dimension
# Sometimes NumPy return a Python type.
# Some Theano op return a different dtype like floor, ceil,
# trunc, eq, ...
elif (not isinstance(variable, numpy.ndarray) or
elif (not isinstance(variable, np.ndarray) or
variable.dtype != nout.dtype):
variable = numpy.asarray(variable, nout.dtype)
variable = np.asarray(variable, nout.dtype)
# The next line is needed for numpy 1.9. Otherwise
# there are tests that fail in DebugMode.
# Normally we would call theano.misc._asarray, but it
# is faster to inline the code. We know that the dtype
# are the same string, just different typenum.
if numpy.dtype(nout.dtype).num != variable.dtype.num:
if np.dtype(nout.dtype).num != variable.dtype.num:
variable = variable.view(dtype=nout.dtype)
storage[0] = variable
# numpy.real return a view!
......@@ -1302,9 +1302,9 @@ class CAReduce(Op):
# There is a bug in numpy that results in isinstance(x,
# integer_types) returning False for numpy integers. See
# <http://projects.scipy.org/numpy/ticket/2235>.
elif isinstance(axis, (integer_types, numpy.integer)):
elif isinstance(axis, (integer_types, np.integer)):
self.axis = (axis,)
elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
elif isinstance(axis, np.ndarray) and axis.ndim == 0:
self.axis = (int(axis),)
else:
self.axis = list(set(int(a) for a in axis))
......@@ -1316,26 +1316,26 @@ class CAReduce(Op):
def set_ufunc(self, scalar_op):
# This is probably a speed up of the implementation
if isinstance(scalar_op, theano.scalar.basic.Add):
self.ufunc = numpy.add
self.ufunc = np.add
elif isinstance(scalar_op, theano.scalar.basic.Mul):
self.ufunc = numpy.multiply
self.ufunc = np.multiply
elif isinstance(scalar_op, theano.scalar.basic.Maximum):
self.ufunc = numpy.maximum
self.ufunc = np.maximum
elif isinstance(scalar_op, theano.scalar.basic.Minimum):
self.ufunc = numpy.minimum
self.ufunc = np.minimum
elif (isinstance(scalar_op, theano.scalar.basic.AND) and
_numpy_ver >= [1, 12]):
# numpy.bitwise_and.identity was incorrect for versions before
# 1.12 (it was 1 instead of -1), so we skip it in that case.
# We will fall back to the "else:" case, which defines a
# ufunc without identity.
self.ufunc = numpy.bitwise_and
self.ufunc = np.bitwise_and
elif isinstance(scalar_op, theano.scalar.basic.OR):
self.ufunc = numpy.bitwise_or
self.ufunc = np.bitwise_or
elif isinstance(scalar_op, theano.scalar.basic.XOR):
self.ufunc = numpy.bitwise_xor
self.ufunc = np.bitwise_xor
else:
self.ufunc = numpy.frompyfunc(scalar_op.impl, 2, 1)
self.ufunc = np.frompyfunc(scalar_op.impl, 2, 1)
def _output_dtype(self, input_dtype):
return input_dtype
......@@ -1415,8 +1415,8 @@ class CAReduce(Op):
# Compute the shape of the output
v_shape = list(variable.shape)
del v_shape[dimension]
variable = numpy.empty(tuple(v_shape),
dtype=acc_dtype)
variable = np.empty(tuple(v_shape),
dtype=acc_dtype)
variable.fill(self.scalar_op.identity)
else:
raise ValueError((
......@@ -1427,8 +1427,8 @@ class CAReduce(Op):
variable = self.ufunc.reduce(variable, dimension,
dtype=acc_dtype)
variable = numpy.asarray(variable)
if numpy.may_share_memory(variable, input):
variable = np.asarray(variable)
if np.may_share_memory(variable, input):
# perhaps numpy is clever for reductions of size 1?
# We don't want this.
variable = variable.copy()
......@@ -1436,8 +1436,8 @@ class CAReduce(Op):
dtype=node.outputs[0].type.dtype)
else:
# Force a copy
output[0] = numpy.array(variable, copy=True,
dtype=node.outputs[0].type.dtype)
output[0] = np.array(variable, copy=True,
dtype=node.outputs[0].type.dtype)
def infer_shape(self, node, shapes):
ishape, = shapes
......
from __future__ import absolute_import, print_function, division
import numpy as np
import numpy
from six.moves import xrange
import theano
......@@ -778,7 +777,7 @@ def repeat(x, repeats, axis=None):
shape[axis] = shape[axis] * repeats
# dims_ is the dimension of that intermediate tensor.
dims_ = list(numpy.arange(x.ndim))
dims_ = list(np.arange(x.ndim))
dims_.insert(axis + 1, 'x')
# After the original tensor is duplicated along the additional
......@@ -806,7 +805,7 @@ class Bartlett(gof.Op):
def perform(self, node, inputs, out_):
M = inputs[0]
out, = out_
out[0] = numpy.bartlett(M)
out[0] = np.bartlett(M)
def infer_shape(self, node, in_shapes):
temp = node.inputs[0]
......@@ -882,7 +881,7 @@ class FillDiagonal(gof.Op):
# Write the value out into the diagonal.
a.flat[:end:step] = val
else:
numpy.fill_diagonal(a, val)
np.fill_diagonal(a, val)
output_storage[0][0] = a
......@@ -1132,7 +1131,7 @@ class Unique(theano.Op):
self.return_index = return_index
self.return_inverse = return_inverse
self.return_counts = return_counts
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
numpy_ver = [int(n) for n in np.__version__.split('.')[:2]]
if self.return_counts and bool(numpy_ver < [1, 9]):
raise RuntimeError(
"Numpy version = " + np.__version__ +
......
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import math
from theano import gof, tensor
......@@ -98,7 +98,7 @@ class Fourier(gof.Op):
a = inputs[0]
n = inputs[1]
axis = inputs[2]
output_storage[0][0] = numpy.fft.fft(a, n=int(n), axis=axis.item())
output_storage[0][0] = np.fft.fft(a, n=int(n), axis=axis.item())
def grad(self, inputs, cost_grad):
"""
......@@ -128,7 +128,7 @@ class Fourier(gof.Op):
# tensor.set_subtensor(res[...,n::], 0, False, False), res)
# Instead we resort to that to account for truncation:
flip_shape = list(numpy.arange(0, a.ndim)[::-1])
flip_shape = list(np.arange(0, a.ndim)[::-1])
res = res.dimshuffle(flip_shape)
res = tensor.switch(tensor.lt(n, tensor.shape(a)[axis]),
tensor.set_subtensor(res[n::, ], 0, False, False),
......@@ -136,8 +136,8 @@ class Fourier(gof.Op):
res = res.dimshuffle(flip_shape)
# insures that gradient shape conforms to input shape:
out_shape = list(numpy.arange(0, axis)) + [a.ndim - 1] +\
list(numpy.arange(axis, a.ndim - 1))
out_shape = list(np.arange(0, axis)) + [a.ndim - 1] +\
list(np.arange(axis, a.ndim - 1))
res = res.dimshuffle(*out_shape)
return [res, None, None]
......
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
from theano import gof
from theano.gof import Constant, Generic, Op
from theano.gof.sched import key_to_cmp
......@@ -27,7 +27,7 @@ class LoadFromDisk(Op):
__props__ = ("dtype", "broadcastable", "mmap_mode")
def __init__(self, dtype, broadcastable, mmap_mode=None):
self.dtype = numpy.dtype(dtype) # turn "float64" into numpy.float64
self.dtype = np.dtype(dtype) # turn "float64" into np.float64
self.broadcastable = broadcastable
if mmap_mode not in (None, 'c'):
raise ValueError("The only supported values for mmap_mode "
......@@ -44,7 +44,7 @@ class LoadFromDisk(Op):
path = inp[0]
if (path.split('.')[-1] == 'npz'):
raise ValueError("Expected a .npy file, got %s instead" % path)
result = numpy.load(path, mmap_mode=self.mmap_mode)
result = np.load(path, mmap_mode=self.mmap_mode)
if result.dtype != self.dtype:
raise TypeError("Expected an array of type %s, got %s instead" %
(self.dtype, result.dtype))
......@@ -125,7 +125,7 @@ class MPIRecv(Op):
self.source = source
self.tag = tag
self.shape = shape
self.dtype = numpy.dtype(dtype) # turn "float64" into numpy.float64
self.dtype = np.dtype(dtype) # turn "float64" into numpy.float64
self.broadcastable = (False,) * len(shape)
def make_node(self):
......@@ -135,7 +135,7 @@ class MPIRecv(Op):
def perform(self, node, inp, out):
data = numpy.zeros(self.shape, dtype=self.dtype)
data = np.zeros(self.shape, dtype=self.dtype)
request = comm.Irecv(data, self.source, self.tag)
out[0][0] = request
......
......@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
import logging
import warnings
import numpy
import numpy as np
from six.moves import xrange
import theano
......@@ -44,7 +44,7 @@ class MatrixPinv(Op):
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
z[0] = numpy.linalg.pinv(x).astype(x.dtype)
z[0] = np.linalg.pinv(x).astype(x.dtype)
pinv = MatrixPinv()
......@@ -76,7 +76,7 @@ class MatrixInverse(Op):
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
z[0] = numpy.linalg.inv(x).astype(x.dtype)
z[0] = np.linalg.inv(x).astype(x.dtype)
def grad(self, inputs, g_outputs):
r"""The gradient function should return
......@@ -162,7 +162,7 @@ class AllocDiag(Op):
(z,) = outputs
if x.ndim != 1:
raise TypeError(x)
z[0] = numpy.diag(x)
z[0] = np.diag(x)
def infer_shape(self, node, shapes):
x_s, = shapes
......@@ -289,7 +289,7 @@ class Det(Op):
(x,) = inputs
(z,) = outputs
try:
z[0] = numpy.asarray(numpy.linalg.det(x), dtype=x.dtype)
z[0] = np.asarray(np.linalg.det(x), dtype=x.dtype)
except Exception:
print('Failed to compute determinant', x)
raise
......@@ -313,7 +313,7 @@ class Eig(Op):
"""
_numop = staticmethod(numpy.linalg.eig)
_numop = staticmethod(np.linalg.eig)
__props__ = ()
def make_node(self, x):
......@@ -341,7 +341,7 @@ class Eigh(Eig):
"""
_numop = staticmethod(numpy.linalg.eigh)
_numop = staticmethod(np.linalg.eigh)
__props__ = ('UPLO',)
def __init__(self, UPLO='L'):
......@@ -356,7 +356,7 @@ class Eigh(Eig):
# LAPACK. Rather than trying to reproduce the (rather
# involved) logic, we just probe linalg.eigh with a trivial
# input.
w_dtype = self._numop([[numpy.dtype(x.dtype).type()]])[0].dtype.name
w_dtype = self._numop([[np.dtype(x.dtype).type()]])[0].dtype.name
w = theano.tensor.vector(dtype=w_dtype)
v = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [w, v])
......@@ -419,11 +419,11 @@ class EighGrad(Op):
assert UPLO in ['L', 'U']
self.UPLO = UPLO
if UPLO == 'L':
self.tri0 = numpy.tril
self.tri1 = lambda a: numpy.triu(a, 1)
self.tri0 = np.tril
self.tri1 = lambda a: np.triu(a, 1)
else:
self.tri0 = numpy.triu
self.tri1 = lambda a: numpy.tril(a, -1)
self.tri0 = np.triu
self.tri1 = lambda a: np.tril(a, -1)
def make_node(self, x, w, v, gw, gv):
x, w, v, gw, gv = map(as_tensor_variable, (x, w, v, gw, gv))
......@@ -445,7 +445,7 @@ class EighGrad(Op):
"""
x, w, v, W, V = inputs
N = x.shape[0]
outer = numpy.outer
outer = np.outer
def G(n):
return sum(v[:, m] * V.T[n].dot(v[:, m]) / (w[n] - w[m])
......@@ -466,7 +466,7 @@ class EighGrad(Op):
# Make sure we return the right dtype even if NumPy performed
# upcasting in self.tri0.
outputs[0][0] = numpy.asarray(out, dtype=node.outputs[0].dtype)
outputs[0][0] = np.asarray(out, dtype=node.outputs[0].dtype)
def infer_shape(self, node, shapes):
return [shapes[0]]
......@@ -486,7 +486,7 @@ class QRFull(Op):
"""
_numop = staticmethod(numpy.linalg.qr)
_numop = staticmethod(np.linalg.qr)
__props__ = ('mode',)
def __init__(self, mode):
......@@ -519,7 +519,7 @@ class QRIncomplete(Op):
"""
_numop = staticmethod(numpy.linalg.qr)
_numop = staticmethod(np.linalg.qr)
__props__ = ('mode',)
def __init__(self, mode):
......@@ -583,7 +583,7 @@ def qr(a, mode="reduced"):
"""
x = [[2, 1], [3, 4]]
if isinstance(numpy.linalg.qr(x, mode), tuple):
if isinstance(np.linalg.qr(x, mode), tuple):
return QRFull(mode)(a)
else:
return QRIncomplete(mode)(a)
......@@ -606,7 +606,7 @@ class SVD(Op):
"""
# See doc in the docstring of the function just after this class.
_numop = staticmethod(numpy.linalg.svd)
_numop = staticmethod(np.linalg.svd)
__props__ = ('full_matrices', 'compute_uv')
def __init__(self, full_matrices=True, compute_uv=True):
......@@ -666,10 +666,10 @@ class lstsq(Op):
theano.tensor.lscalar(), theano.tensor.dvector()])
def perform(self, node, inputs, outputs):
zz = numpy.linalg.lstsq(inputs[0], inputs[1], inputs[2])
zz = np.linalg.lstsq(inputs[0], inputs[1], inputs[2])
outputs[0][0] = zz[0]
outputs[1][0] = zz[1]
outputs[2][0] = numpy.array(zz[2])
outputs[2][0] = np.array(zz[2])
outputs[3][0] = zz[3]
......@@ -730,7 +730,7 @@ class TensorInv(Op):
Class wrapper for tensorinv() function;
Theano utilization of numpy.linalg.tensorinv;
"""
_numop = staticmethod(numpy.linalg.tensorinv)
_numop = staticmethod(np.linalg.tensorinv)
__props__ = ('ind',)
def __init__(self, ind=2):
......@@ -790,7 +790,7 @@ class TensorSolve(Op):
Class wrapper for tensorsolve function.
"""
_numop = staticmethod(numpy.linalg.tensorsolve)
_numop = staticmethod(np.linalg.tensorsolve)
__props__ = ('axes', )
def __init__(self, axes=None):
......
差异被折叠。
......@@ -4,7 +4,7 @@ from __future__ import absolute_import, print_function, division
import sys
from copy import copy
import numpy
import numpy as np
from six import string_types
from six.moves import reduce, xrange
......@@ -38,7 +38,7 @@ class RandomStateType(gof.Type):
raise TypeError()
def is_valid_value(self, a):
return type(a) == numpy.random.RandomState
return type(a) == np.random.RandomState
def values_eq(self, a, b):
sa = a.get_state()
......@@ -47,7 +47,7 @@ class RandomStateType(gof.Type):
if sa[0] != sb[0]:
return False
# 1-D array of 624 unsigned integer keys
if not numpy.all(sa[1] == sb[1]):
if not np.all(sa[1] == sb[1]):
return False
# integer "pos" representing the position in the array
if sa[2] != sb[2]:
......@@ -67,17 +67,17 @@ class RandomStateType(gof.Type):
def get_size(self, shape_info):
# The size is the data, that have constant size.
state = numpy.random.RandomState().get_state()
state = np.random.RandomState().get_state()
size = 0
for elem in state:
if isinstance(elem, str):
size += len(elem)
elif isinstance(elem, numpy.ndarray):
elif isinstance(elem, np.ndarray):
size += elem.size * elem.itemsize
elif isinstance(elem, int):
size += numpy.dtype("int").itemsize
size += np.dtype("int").itemsize
elif isinstance(elem, float):
size += numpy.dtype("float").itemsize
size += np.dtype("float").itemsize
else:
raise NotImplementedError()
return size
......@@ -151,7 +151,7 @@ class RandomFunction(gof.Op):
fn, outtype, inplace, ndim_added = state
self.fn = fn
if isinstance(fn, string_types):
self.exec_fn = getattr(numpy.random.RandomState, fn)
self.exec_fn = getattr(np.random.RandomState, fn)
else:
self.exec_fn = fn
self.outtype = outtype
......@@ -240,7 +240,7 @@ class RandomFunction(gof.Op):
# Numbers are drawn from r if self.inplace is True, and from a
# copy of r if self.inplace is False
r, shape, args = inputs[0], inputs[1], inputs[2:]
assert type(r) == numpy.random.RandomState, (type(r), r)
assert type(r) == np.random.RandomState, (type(r), r)
# If shape == [], that means no shape is enforced, and numpy is
# trusted to draw the appropriate number of samples, numpy uses
......@@ -260,7 +260,7 @@ class RandomFunction(gof.Op):
r = copy(r)
rout[0] = r
rval = self.exec_fn(r, *(args + [shape]))
if (not isinstance(rval, numpy.ndarray) or
if (not isinstance(rval, np.ndarray) or
str(rval.dtype) != node.outputs[1].type.dtype):
rval = theano._asarray(rval, dtype=node.outputs[1].type.dtype)
......@@ -527,13 +527,13 @@ def binomial(random_state, size=None, n=1, p=0.5, ndim=None,
"""
if prob is not None:
p = prob
print("DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as numpy.", file=sys.stderr)
print("DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as np.", file=sys.stderr)
n = tensor.as_tensor_variable(n)
p = tensor.as_tensor_variable(p)
ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, p)
if n.dtype == 'int64':
try:
numpy.random.binomial(n=numpy.asarray([2, 3, 4], dtype='int64'), p=numpy.asarray([.1, .2, .3], dtype='float64'))
np.random.binomial(n=np.asarray([2, 3, 4], dtype='int64'), p=np.asarray([.1, .2, .3], dtype='float64'))
except TypeError:
# THIS WORKS AROUND A NUMPY BUG on 32bit machine
n = tensor.cast(n, 'int32')
......@@ -583,7 +583,7 @@ def random_integers_helper(random_state, low, high, size):
out_size = out_size + (dim_len,)
# Build the indices over which to loop
out = numpy.ndarray(out_size)
out = np.ndarray(out_size)
broadcast_ind = _generate_broadcasting_indices(out_size, low.shape,
high.shape)
# Iterate over these indices, drawing one sample at a time from numpy
......@@ -716,8 +716,8 @@ def permutation_helper(random_state, n, shape):
shape = ()
out_shape = list(shape)
out_shape.append(n)
out = numpy.empty(out_shape, int)
for i in numpy.ndindex(*shape):
out = np.empty(out_shape, int)
for i in np.ndindex(*shape):
out[i] = random_state.permutation(n)
# print 'RETURNING', out.shape
......@@ -801,7 +801,7 @@ def multinomial_helper(random_state, n, pvals, size):
# Build the indices over which to loop
# Note that here, the rows (inner-most 1D subtensors) of pvals and out
# are indexed, not their individual elements
out = numpy.ndarray(out_size)
out = np.ndarray(out_size)
broadcast_ind = _generate_broadcasting_indices(size, n.shape,
pvals.shape[:-1])
# Iterate over these indices, drawing from one multinomial at a
......@@ -815,16 +815,16 @@ def multinomial_helper(random_state, n, pvals, size):
# of probabilities meets or exceeds 1.0.
# In perfect arithmetic this would be correct, but in float32 or
# float64 it is too strict.
pisum = numpy.sum(pvi)
pisum = np.sum(pvi)
if 1.0 < pisum < 1.0 + 1e-5: # correct if we went a little over
# because mtrand.pyx has a ValueError that will trigger if
# sum(pvals[:-1]) > 1.0
pvi = pvi * (1.0 - 5e-5)
# pvi = pvi * .9
pisum = numpy.sum(pvi)
pisum = np.sum(pvi)
elif pvi[-1] < 5e-5: # will this even work?
pvi = pvi * (1.0 - 5e-5)
pisum = numpy.sum(pvi)
pisum = np.sum(pvi)
assert pisum <= 1.0, pisum
out[mi] = random_state.multinomial(n=n[ni],
pvals=pvi.astype('float64'))
......
......@@ -7,7 +7,7 @@ from __future__ import absolute_import, print_function, division
import copy
import numpy
import numpy as np
from theano.compile.sharedvalue import (SharedVariable, shared_constructor,
shared)
......@@ -27,7 +27,7 @@ def randomstate_constructor(value, name=None, strict=False,
SharedVariable Constructor for RandomState.
"""
if not isinstance(value, numpy.random.RandomState):
if not isinstance(value, np.random.RandomState):
raise TypeError
if not borrow:
value = copy.deepcopy(value)
......@@ -65,7 +65,7 @@ class RandomStreams(raw_random.RandomStreamsBase):
# random number generator that provides seeds for member streams.
self.default_instance_seed = seed
# numpy.RandomState instance that gen() uses to seed new streams.
self.gen_seedgen = numpy.random.RandomState(seed)
self.gen_seedgen = np.random.RandomState(seed)
def seed(self, seed=None):
"""
......@@ -85,10 +85,10 @@ class RandomStreams(raw_random.RandomStreamsBase):
if seed is None:
seed = self.default_instance_seed
seedgen = numpy.random.RandomState(seed)
seedgen = np.random.RandomState(seed)
for old_r, new_r in self.state_updates:
old_r_seed = seedgen.randint(2 ** 30)
old_r.set_value(numpy.random.RandomState(int(old_r_seed)),
old_r.set_value(np.random.RandomState(int(old_r_seed)),
borrow=True)
def __getitem__(self, item):
......@@ -161,7 +161,7 @@ class RandomStreams(raw_random.RandomStreamsBase):
"""
seed = int(self.gen_seedgen.randint(2 ** 30))
random_state_variable = shared(numpy.random.RandomState(seed))
random_state_variable = shared(np.random.RandomState(seed))
# Add a reference to distinguish from other shared variables
random_state_variable.tag.is_rng = True
new_r, out = op(random_state_variable, *args, **kwargs)
......
from __future__ import absolute_import, print_function, division
import traceback
import numpy
import numpy as np
from six import integer_types
import theano.tensor.basic
......@@ -41,7 +41,7 @@ def tensor_constructor(value, name=None, strict=False, allow_downcast=None,
if target != 'cpu':
raise TypeError('not for cpu')
if not isinstance(value, numpy.ndarray):
if not isinstance(value, np.ndarray):
raise TypeError()
# if no broadcastable is given, then the default is to assume that
......@@ -51,7 +51,7 @@ def tensor_constructor(value, name=None, strict=False, allow_downcast=None,
broadcastable = (False,) * len(value.shape)
type = TensorType(value.dtype, broadcastable=broadcastable)
return TensorSharedVariable(type=type,
value=numpy.array(value, copy=(not borrow)),
value=np.array(value, copy=(not borrow)),
name=name,
strict=strict,
allow_downcast=allow_downcast)
......@@ -86,12 +86,12 @@ def scalar_constructor(value, name=None, strict=False, allow_downcast=None,
if target != 'cpu':
raise TypeError('not for cpu')
if not isinstance(value, (numpy.number, float, integer_types, complex)):
if not isinstance(value, (np.number, float, integer_types, complex)):
raise TypeError()
try:
dtype = value.dtype
except Exception:
dtype = numpy.asarray(value).dtype
dtype = np.asarray(value).dtype
dtype = str(dtype)
value = theano._asarray(value, dtype=dtype)
......@@ -101,7 +101,7 @@ def scalar_constructor(value, name=None, strict=False, allow_downcast=None,
# Do not pass the dtype to asarray because we want this to fail if
# strict is True and the types do not match.
rval = ScalarSharedVariable(type=tensor_type,
value=numpy.array(value, copy=True),
value=np.array(value, copy=True),
name=name,
strict=strict,
allow_downcast=allow_downcast)
......
......@@ -9,7 +9,7 @@ from __future__ import absolute_import, print_function, division
import warnings
import itertools
import numpy
import numpy as np
from six.moves import xrange
import six.moves.builtins as builtins
import theano
......@@ -412,7 +412,7 @@ class Pool(OpenMPOp):
if isinstance(out, theano.Variable):
return tensor.maximum(out, 0)
else:
return numpy.maximum(out, 0)
return np.maximum(out, 0)
else:
if isinstance(v, theano.Variable):
return tensor.switch(tensor.ge(stride, downsample),
......@@ -516,7 +516,7 @@ class Pool(OpenMPOp):
if not self.ignore_border:
assert all(z > 0 for z in z_shape[-nd:])
if (z[0] is None) or (z[0].shape != z_shape):
z[0] = numpy.empty(z_shape, dtype=x.dtype)
z[0] = np.empty(z_shape, dtype=x.dtype)
zz = z[0]
# size of pooling output
pool_out_shp = zz.shape[-nd:]
......@@ -525,16 +525,16 @@ class Pool(OpenMPOp):
# pad the image
if max(pad) != 0:
y = numpy.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
else:
y = x
func = numpy.max
func = np.max
if self.mode == 'sum':
func = numpy.sum
func = np.sum
elif self.mode != 'max':
func = numpy.average
func = np.average
# precompute the region boundaries for each dimension
region_slices = [[] for i in xrange(nd)]
......@@ -548,11 +548,11 @@ class Pool(OpenMPOp):
region_slices[i].append(slice(start, end))
# iterate over non-pooling dimensions
for k in numpy.ndindex(*x.shape[:-nd]):
for k in np.ndindex(*x.shape[:-nd]):
zzk = zz[k]
yk = y[k]
# iterate over pooling regions
for r in numpy.ndindex(*pool_out_shp):
for r in np.ndindex(*pool_out_shp):
zzk[r] = func(
yk[[region_slices[i][r[i]] for i in xrange(nd)]])
......@@ -1020,7 +1020,7 @@ class PoolGrad(OpenMPOp):
if isinstance(out, theano.Variable):
return tensor.maximum(out, 0)
else:
return numpy.maximum(out, 0)
return np.maximum(out, 0)
else:
if isinstance(v, theano.Variable):
return tensor.switch(tensor.ge(stride, downsample),
......@@ -1128,12 +1128,12 @@ class MaxPoolGrad(PoolGrad):
# pad the image
if max(pad) != 0:
y = numpy.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
else:
y = x
gx = numpy.zeros_like(y)
gx = np.zeros_like(y)
# precompute the region boundaries for each dimension
region_ranges = [[] for i in xrange(nd)]
......@@ -1144,13 +1144,13 @@ class MaxPoolGrad(PoolGrad):
region_ranges[i].append(xrange(start, end))
# iterate over non-pooling dimensions
for k in numpy.ndindex(*x.shape[:-nd]):
for k in np.ndindex(*x.shape[:-nd]):
gxk = gx[k]
gzk = gz[k]
yk = y[k]
maxoutk = maxout[k]
# iterate over pooling regions
for r in numpy.ndindex(*pool_out_shp):
for r in np.ndindex(*pool_out_shp):
maxout_value = maxoutk[r]
# iterate inside region
for c in itertools.product(*[region_ranges[i][r[i]]
......@@ -1444,7 +1444,7 @@ class AveragePoolGrad(PoolGrad):
raise NotImplementedError()
z_shape = self.out_shape(x.shape, ws, self.ignore_border, stride, pad, nd)
if (gx_stg[0] is None) or (gx_stg[0].shape != z_shape):
gx_stg[0] = numpy.empty(z_shape, dtype=x.dtype)
gx_stg[0] = np.empty(z_shape, dtype=x.dtype)
zz = gx_stg[0]
# size of pooling output
pool_out_shp = zz.shape[-nd:]
......@@ -1453,7 +1453,7 @@ class AveragePoolGrad(PoolGrad):
sum_mode = self.mode == 'sum'
# initialize the padded output
gx = numpy.zeros((x.shape[:-nd] + img_shp), dtype=x.dtype)
gx = np.zeros((x.shape[:-nd] + img_shp), dtype=x.dtype)
# precompute the region boundaries and sizes for each dimension
region_slices = [[] for i in xrange(nd)]
......@@ -1470,11 +1470,11 @@ class AveragePoolGrad(PoolGrad):
# iterate over non-pooling dimensions
region_slice = [None] * nd
for k in numpy.ndindex(*x.shape[:-nd]):
for k in np.ndindex(*x.shape[:-nd]):
gzk = gz[k]
gxk = gx[k]
# iterate over pooling regions
for r in numpy.ndindex(*pool_out_shp):
for r in np.ndindex(*pool_out_shp):
region_size = 1
for i in xrange(nd):
region_slice[i] = region_slices[i][r[i]]
......@@ -1783,7 +1783,7 @@ class DownsampleFactorMaxGradGrad(OpenMPOp):
'DownsampleFactorMaxGradGrad requires input '
'with {} or more dimensions'.format(nd))
if (z[0] is None) or (z[0].shape != maxout.shape):
z[0] = numpy.zeros(maxout.shape, dtype=x.dtype)
z[0] = np.zeros(maxout.shape, dtype=x.dtype)
ggz = z[0] # grad wrt maxout_grad has the same shape as maxout
# size of pooling output
pool_out_shp = ggz.shape[-nd:]
......@@ -1791,10 +1791,10 @@ class DownsampleFactorMaxGradGrad(OpenMPOp):
# pad the image and its gradients
if max(pad) > 0:
y_padded = numpy.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y_padded = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y_padded[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
ggx_padded = numpy.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
ggx_padded = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
ggx_padded[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = ggx
......@@ -1811,13 +1811,13 @@ class DownsampleFactorMaxGradGrad(OpenMPOp):
region_ranges[i].append(xrange(start, end))
# iterate over non-pooling dimensions
for k in numpy.ndindex(*x.shape[:-nd]):
for k in np.ndindex(*x.shape[:-nd]):
ggxk = ggx_padded[k]
ggzk = ggz[k]
yk = y_padded[k]
maxoutk = maxout[k]
# iterate over pooling regions
for r in numpy.ndindex(*pool_out_shp):
for r in np.ndindex(*pool_out_shp):
# iterate inside region
maxout_value = maxoutk[r]
for c in itertools.product(*[region_ranges[i][r[i]]
......@@ -2113,7 +2113,7 @@ class MaxPoolRop(OpenMPOp):
if not self.ignore_border:
assert all(z > 0 for z in z_shape[-nd:])
if (z[0] is None) or (z[0].shape != z_shape):
z[0] = numpy.empty(z_shape, dtype=x.dtype)
z[0] = np.empty(z_shape, dtype=x.dtype)
zz = z[0]
# size of pooling output
pool_out_shp = zz.shape[-nd:]
......@@ -2122,10 +2122,10 @@ class MaxPoolRop(OpenMPOp):
# pad the image and the eval point
if max(pad) != 0:
y = numpy.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
ey = numpy.zeros(ex.shape[:-nd] + img_shp, dtype=ex.dtype)
ey = np.zeros(ex.shape[:-nd] + img_shp, dtype=ex.dtype)
ey[(slice(None),) * (len(ex.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = ex
else:
......@@ -2144,18 +2144,18 @@ class MaxPoolRop(OpenMPOp):
region_slices[i].append(slice(start, end))
# iterate over non-pooling dimensions
for k in numpy.ndindex(*x.shape[:-nd]):
for k in np.ndindex(*x.shape[:-nd]):
zzk = zz[k]
yk = y[k]
eyk = ey[k]
# iterate over pooling regions
for r in numpy.ndindex(*pool_out_shp):
for r in np.ndindex(*pool_out_shp):
# current slice in padded input
ykslice = yk[[region_slices[i][r[i]] for i in xrange(nd)]]
# current slice in eval points
eykslice = eyk[[region_slices[i][r[i]] for i in xrange(nd)]]
# indices of maximum
idx = numpy.unravel_index(numpy.argmax(ykslice), ykslice.shape)
idx = np.unravel_index(np.argmax(ykslice), ykslice.shape)
zzk[r] = eykslice[idx]
def c_headers(self):
......
......@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
import unittest
from nose.plugins.skip import SkipTest
import numpy
import numpy as np
import theano
import theano.tensor as T
......@@ -41,13 +41,13 @@ class TestSignalConv2D(unittest.TestCase):
theano_conv = theano.function([input, filters], output)
# initialize input and compute result
image_data = numpy.random.random(image_shape)
filter_data = numpy.random.random(filter_shape)
image_data = np.random.random(image_shape)
filter_data = np.random.random(filter_shape)
theano_output = theano_conv(image_data, filter_data)
# REFERENCE IMPLEMENTATION ############
out_shape2d = numpy.array(image_shape[-2:]) - numpy.array(filter_shape[-2:]) + 1
ref_output = numpy.zeros(tuple(out_shape2d))
out_shape2d = np.array(image_shape[-2:]) - np.array(filter_shape[-2:]) + 1
ref_output = np.zeros(tuple(out_shape2d))
# reshape as 3D input tensors to make life easier
image_data3d = image_data.reshape((bsize,) + image_shape[-2:])
......@@ -64,7 +64,7 @@ class TestSignalConv2D(unittest.TestCase):
image2d = image_data3d[b, :, :]
filter2d = filter_data3d[k, :, :]
output2d = numpy.zeros(ref_output.shape)
output2d = np.zeros(ref_output.shape)
for row in range(ref_output.shape[0]):
for col in range(ref_output.shape[1]):
output2d[row, col] += (
......
......@@ -3,7 +3,7 @@ import logging
import warnings
from six.moves import xrange
import numpy
import numpy as np
try:
import scipy.linalg
......@@ -145,7 +145,7 @@ class CholeskyGrad(Op):
dx = outputs[0]
N = x.shape[0]
if self.lower:
F = numpy.tril(dz)
F = np.tril(dz)
for k in xrange(N - 1, -1, -1):
for j in xrange(k + 1, N):
for i in xrange(j, N):
......@@ -156,7 +156,7 @@ class CholeskyGrad(Op):
F[k, k] -= L[j, k] * F[j, k]
F[k, k] /= (2 * L[k, k])
else:
F = numpy.triu(dz)
F = np.triu(dz)
for k in xrange(N - 1, -1, -1):
for j in xrange(k + 1, N):
for i in xrange(j, N):
......@@ -206,8 +206,8 @@ class Solve(Op):
# infer dtype by solving the most simple
# case with (1, 1) matrices
o_dtype = scipy.linalg.solve(
numpy.eye(1).astype(A.dtype),
numpy.eye(1).astype(b.dtype)).dtype
np.eye(1).astype(A.dtype),
np.eye(1).astype(b.dtype)).dtype
x = tensor.tensor(
broadcastable=b.broadcastable,
dtype=o_dtype)
......@@ -370,11 +370,11 @@ class EigvalshGrad(Op):
assert lower in [True, False]
self.lower = lower
if lower:
self.tri0 = numpy.tril
self.tri1 = lambda a: numpy.triu(a, 1)
self.tri0 = np.tril
self.tri1 = lambda a: np.triu(a, 1)
else:
self.tri0 = numpy.triu
self.tri1 = lambda a: numpy.tril(a, -1)
self.tri0 = np.triu
self.tri1 = lambda a: np.tril(a, -1)
def make_node(self, a, b, gw):
assert imported_scipy, (
......@@ -394,14 +394,14 @@ class EigvalshGrad(Op):
def perform(self, node, inputs, outputs):
(a, b, gw) = inputs
w, v = scipy.linalg.eigh(a, b, lower=self.lower)
gA = v.dot(numpy.diag(gw).dot(v.T))
gB = - v.dot(numpy.diag(gw * w).dot(v.T))
gA = v.dot(np.diag(gw).dot(v.T))
gB = - v.dot(np.diag(gw * w).dot(v.T))
# See EighGrad comments for an explanation of these lines
out1 = self.tri0(gA) + self.tri1(gA).T
out2 = self.tri0(gB) + self.tri1(gB).T
outputs[0][0] = numpy.asarray(out1, dtype=node.outputs[0].dtype)
outputs[1][0] = numpy.asarray(out2, dtype=node.outputs[1].dtype)
outputs[0][0] = np.asarray(out1, dtype=node.outputs[0].dtype)
outputs[1][0] = np.asarray(out2, dtype=node.outputs[1].dtype)
def infer_shape(self, node, shapes):
return [shapes[0], shapes[1]]
......@@ -510,13 +510,13 @@ class ExpmGrad(Op):
w, V = scipy.linalg.eig(A, right=True)
U = scipy.linalg.inv(V).T
exp_w = numpy.exp(w)
X = numpy.subtract.outer(exp_w, exp_w) / numpy.subtract.outer(w, w)
numpy.fill_diagonal(X, exp_w)
exp_w = np.exp(w)
X = np.subtract.outer(exp_w, exp_w) / np.subtract.outer(w, w)
np.fill_diagonal(X, exp_w)
Y = U.dot(V.T.dot(gA).dot(U) * X).dot(V.T)
with warnings.catch_warnings():
warnings.simplefilter("ignore", numpy.ComplexWarning)
warnings.simplefilter("ignore", np.ComplexWarning)
out[0] = Y.astype(A.dtype)
......
......@@ -4,7 +4,7 @@ from textwrap import dedent
import warnings
import logging
import numpy
import numpy as np
from six import integer_types
from six.moves import xrange
......@@ -58,7 +58,7 @@ def make_constant(args):
return slice(conv(a.start),
conv(a.stop),
conv(a.step))
elif isinstance(a, (integer_types, numpy.integer)):
elif isinstance(a, (integer_types, np.integer)):
return scal.ScalarConstant(scal.int64, a)
else:
return a
......@@ -355,11 +355,11 @@ class Subtensor(Op):
if (isinstance(entry, gof.Variable) and
entry.type in tensor_types and
numpy.all(entry.type.broadcastable)):
np.all(entry.type.broadcastable)):
return scal.get_scalar_type(entry.type.dtype)
elif (isinstance(entry, gof.Type) and
entry in tensor_types and
numpy.all(entry.broadcastable)):
np.all(entry.broadcastable)):
return scal.get_scalar_type(entry.dtype)
elif slice_ok and isinstance(entry, slice):
a = entry.start
......@@ -385,7 +385,7 @@ class Subtensor(Op):
slice_c = None
return slice(slice_a, slice_b, slice_c)
elif isinstance(entry, (integer_types, numpy.integer)):
elif isinstance(entry, (integer_types, np.integer)):
# Disallow the use of python scalars in idx_list
raise TypeError("Python scalar in idx_list."
"Please report this error to theano-dev.")
......@@ -510,8 +510,8 @@ class Subtensor(Op):
if start is None:
start = 0
if (p.stop is None or
(isinstance(p.stop, (integer_types, numpy.integer,
numpy.ndarray)) and
(isinstance(p.stop, (integer_types, np.integer,
np.ndarray)) and
p.stop > start)):
broadcastable.append(True)
continue
......@@ -531,7 +531,7 @@ class Subtensor(Op):
if len(cdata) == 1:
cdata = cdata[0]
out[0] = numpy.asarray(x.__getitem__(cdata))
out[0] = np.asarray(x.__getitem__(cdata))
def infer_shape(self, node, shapes):
xshp = shapes[0]
......@@ -681,7 +681,7 @@ class Subtensor(Op):
return pos[1]
def init_entry(entry, depth=0):
if isinstance(entry, (numpy.integer, integer_types)):
if isinstance(entry, (np.integer, integer_types)):
init_cmds.append(
"subtensor_spec[%i] = %i;" % (spec_pos(),
entry))
......@@ -1390,8 +1390,8 @@ class IncSubtensor(Op):
op_is_set = 0
fail = sub['fail']
view_ndim = (node.inputs[0].ndim -
numpy.sum([not isinstance(idx, slice)
for idx in self.idx_list]))
np.sum([not isinstance(idx, slice)
for idx in self.idx_list]))
copy_of_x = self.copy_of_x(x)
......@@ -1712,11 +1712,11 @@ class AdvancedSubtensor1(Op):
# We need to check if values in i can fit in numpy.intp, because
# if they don't, that should be an error (no array can have that
# many elements on a 32-bit arch).
if i.dtype != numpy.intp:
i_ = theano._asarray(i, dtype=numpy.intp)
if not numpy.can_cast(i.dtype, numpy.intp):
if i.dtype != np.intp:
i_ = theano._asarray(i, dtype=np.intp)
if not np.can_cast(i.dtype, np.intp):
# Check if there was actually an incorrect conversion
if numpy.any(i != i_):
if np.any(i != i_):
raise IndexError(
'index contains values that are bigger '
'than the maximum array size on this system.', i)
......@@ -1946,7 +1946,7 @@ class AdvancedIncSubtensor1(Op):
return compile_cutils_code()
def c_code(self, node, name, input_names, output_names, sub):
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
numpy_ver = [int(n) for n in np.__version__.split('.')[:2]]
if bool(numpy_ver < [1, 8]):
raise NotImplementedError
x, y, idx = input_names
......@@ -2113,13 +2113,13 @@ def adv_index_broadcastable_pattern(a, idx):
if isinstance(v.type, SliceType):
return slice(None, None)
return numpy.zeros((2,) * v.ndim, int)
return np.zeros((2,) * v.ndim, int)
newidx = tuple(map(replace_slice, idx))
# 2 - True = 1; 2 - False = 2
fakeshape = [2 - bc for bc in a.broadcastable]
retshape = numpy.empty(fakeshape)[newidx].shape
retshape = np.empty(fakeshape)[newidx].shape
return tuple([dim == 1 for dim in retshape])
......
......@@ -129,7 +129,7 @@ class HiddenLayer(object):
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: np.random.RandomState
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
......@@ -176,7 +176,7 @@ class MLP(object):
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: np.random.RandomState
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
......
......@@ -164,8 +164,8 @@ def get_numeric_types(with_int=True, with_float=True, with_complex=False,
# Return True if scalars defined from `cls1` are within the hierarchy
# starting from `cls2`.
# The third test below is to catch for instance the fact that
# one can use ``dtype=np.number`` and obtain a float64 scalar, even
# though `np.number` is not under `np.floating` in the class
# one can use ``dtype=numpy.number`` and obtain a float64 scalar, even
# though `numpy.number` is not under `numpy.floating` in the class
# hierarchy.
return (cls1 is cls2 or
issubclass(cls1, cls2) or
......
from __future__ import absolute_import, print_function, division
import sys
import numpy
import numpy as np
from unittest import TestCase
from nose.plugins.skip import SkipTest
......@@ -44,9 +44,9 @@ class TestCGer(TestCase, TestOptimizationMixin):
self.a = tensor.tensor(dtype=dtype, broadcastable=())
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.Aval = numpy.ones((2, 3), dtype=dtype)
self.xval = numpy.asarray([1, 2], dtype=dtype)
self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
self.Aval = np.ones((2, 3), dtype=dtype)
self.xval = np.asarray([1, 2], dtype=dtype)
self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)
def function(self, inputs, outputs):
return theano.function(inputs, outputs,
......@@ -59,7 +59,7 @@ class TestCGer(TestCase, TestOptimizationMixin):
f(self.Aval[::-1, ::-1], self.xval, self.yval)
def b(self, bval):
return tensor.as_tensor_variable(numpy.asarray(bval, dtype=self.dtype))
return tensor.as_tensor_variable(np.asarray(bval, dtype=self.dtype))
def test_eq(self):
self.assertTrue(CGer(True) == CGer(True))
......@@ -127,13 +127,13 @@ class TestCGemv(TestCase, TestOptimizationMixin):
self.mode = theano.compile.get_default_mode().including('fast_run')
# matrix
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.Aval = numpy.ones((2, 3), dtype=dtype)
self.Aval = np.ones((2, 3), dtype=dtype)
# vector
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.xval = numpy.asarray([1, 2], dtype=dtype)
self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
self.xval = np.asarray([1, 2], dtype=dtype)
self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)
# scalar
self.a = tensor.tensor(dtype=dtype, broadcastable=())
......@@ -144,11 +144,11 @@ class TestCGemv(TestCase, TestOptimizationMixin):
f = theano.function([self.A, self.x, self.y, self.a],
self.a*self.y + theano.dot(self.A, self.x),
mode=mode)
Aval = numpy.ones((3, 1), dtype=self.dtype)
xval = numpy.ones((1,), dtype=self.dtype)
yval = float('NaN') * numpy.ones((3,), dtype=self.dtype)
Aval = np.ones((3, 1), dtype=self.dtype)
xval = np.ones((1,), dtype=self.dtype)
yval = float('NaN') * np.ones((3,), dtype=self.dtype)
zval = f(Aval, xval, yval, 0)
assert not numpy.isnan(zval).any()
assert not np.isnan(zval).any()
def test_optimizations_vm(self):
skip_if_blas_ldflags_empty()
......@@ -165,12 +165,12 @@ class TestCGemv(TestCase, TestOptimizationMixin):
)
# Assert they produce the same output
assert numpy.allclose(f(self.xval, self.Aval),
numpy.dot(self.xval, self.Aval))
assert np.allclose(f(self.xval, self.Aval),
np.dot(self.xval, self.Aval))
# Test with negative strides on 2 dims
assert numpy.allclose(f(self.xval, self.Aval[::-1, ::-1]),
numpy.dot(self.xval, self.Aval[::-1, ::-1]))
assert np.allclose(f(self.xval, self.Aval[::-1, ::-1]),
np.dot(self.xval, self.Aval[::-1, ::-1]))
def test_optimizations_mv(self):
skip_if_blas_ldflags_empty()
......@@ -187,11 +187,11 @@ class TestCGemv(TestCase, TestOptimizationMixin):
)
# Assert they produce the same output
assert numpy.allclose(f(self.Aval, self.yval),
numpy.dot(self.Aval, self.yval))
assert np.allclose(f(self.Aval, self.yval),
np.dot(self.Aval, self.yval))
# Test with negative strides on 2 dims
assert numpy.allclose(f(self.Aval[::-1, ::-1], self.yval),
numpy.dot(self.Aval[::-1, ::-1], self.yval))
assert np.allclose(f(self.Aval[::-1, ::-1], self.yval),
np.dot(self.Aval[::-1, ::-1], self.yval))
def test_force_gemv_init(self):
if check_force_gemv_init():
......@@ -203,20 +203,20 @@ class TestCGemv(TestCase, TestOptimizationMixin):
def t_gemv1(self, m_shp):
''' test vector2 + dot(matrix, vector1) '''
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
v1 = theano.shared(numpy.array(rng.uniform(size=(m_shp[1],)),
rng = np.random.RandomState(unittest_tools.fetch_seed())
v1 = theano.shared(np.array(rng.uniform(size=(m_shp[1],)),
dtype='float32'))
v2_orig = numpy.array(rng.uniform(size=(m_shp[0],)), dtype='float32')
v2_orig = np.array(rng.uniform(size=(m_shp[0],)), dtype='float32')
v2 = theano.shared(v2_orig)
m = theano.shared(numpy.array(rng.uniform(size=m_shp),
m = theano.shared(np.array(rng.uniform(size=m_shp),
dtype='float32'))
f = theano.function([], v2 + tensor.dot(m, v1),
mode=self.mode)
# Assert they produce the same output
assert numpy.allclose(f(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
assert np.allclose(f(),
np.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = [n.op for n in f.maker.fgraph.toposort()]
assert topo == [CGemv(inplace=False)], topo
......@@ -227,8 +227,8 @@ class TestCGemv(TestCase, TestOptimizationMixin):
# Assert they produce the same output
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
assert np.allclose(v2.get_value(),
np.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = [n.op for n in g.maker.fgraph.toposort()]
assert topo == [CGemv(inplace=True)]
......@@ -237,11 +237,11 @@ class TestCGemv(TestCase, TestOptimizationMixin):
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
v2.set_value(v2_orig)
assert numpy.allclose(f(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
assert np.allclose(f(),
np.dot(m.get_value(), v1.get_value()) + v2_orig)
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
assert np.allclose(v2.get_value(),
np.dot(m.get_value(), v1.get_value()) + v2_orig)
def test_gemv1(self):
skip_if_blas_ldflags_empty()
......@@ -265,12 +265,12 @@ class TestCGemv(TestCase, TestOptimizationMixin):
mode=self.mode)
# Matrix value
A_val = numpy.ones((5, 3), dtype=dtype)
A_val = np.ones((5, 3), dtype=dtype)
# Different vector length
ones_3 = numpy.ones(3, dtype=dtype)
ones_4 = numpy.ones(4, dtype=dtype)
ones_5 = numpy.ones(5, dtype=dtype)
ones_6 = numpy.ones(6, dtype=dtype)
ones_3 = np.ones(3, dtype=dtype)
ones_4 = np.ones(4, dtype=dtype)
ones_5 = np.ones(5, dtype=dtype)
ones_6 = np.ones(6, dtype=dtype)
f(A_val, ones_3, ones_5)
f(A_val[::-1, ::-1], ones_3, ones_5)
......@@ -286,12 +286,12 @@ class TestCGemv(TestCase, TestOptimizationMixin):
f = theano.function([x, y, z],
[tensor.dot(y, x), tensor.dot(z,x)],
mode=mode_blas_opt)
vx = numpy.random.rand(3, 3)
vy = numpy.random.rand(3)
vz = numpy.random.rand(3)
vx = np.random.rand(3, 3)
vy = np.random.rand(3)
vz = np.random.rand(3)
out = f(vx, vy, vz)
assert numpy.allclose(out[0], numpy.dot(vy, vx))
assert numpy.allclose(out[1], numpy.dot(vz, vx))
assert np.allclose(out[0], np.dot(vy, vx))
assert np.allclose(out[1], np.dot(vz, vx))
assert len([n for n in f.maker.fgraph.apply_nodes
if isinstance(n.op, tensor.AllocEmpty)]) == 2
......
......@@ -16,7 +16,7 @@ class test_casting(unittest.TestCase):
x = type_fn()
f = function([x], op_fn(x))
xval = theano._asarray(numpy.random.rand(10) * 10,
xval = theano._asarray(np.random.rand(10) * 10,
dtype=type_fn.dtype)
yval = f(xval)
assert (str(yval.dtype) ==
......@@ -25,7 +25,7 @@ class test_casting(unittest.TestCase):
def test_illegal(self):
try:
x = zmatrix()
function([x], cast(x, 'float64'))(numpy.ones((2, 3),
function([x], cast(x, 'float64'))(np.ones((2, 3),
dtype='complex128'))
except TypeError:
return
......@@ -44,13 +44,13 @@ class test_casting(unittest.TestCase):
_convert_to_float64]):
y = converter(x)
f = function([compile.In(x, strict=True)], y)
a = numpy.arange(10, dtype=type1)
a = np.arange(10, dtype=type1)
b = f(a)
self.assertTrue(numpy.all(b == numpy.arange(10, dtype=type2)))
self.assertTrue(np.all(b == np.arange(10, dtype=type2)))
def test_convert_to_complex(self):
val64 = numpy.ones(3, dtype='complex64') + 0.5j
val128 = numpy.ones(3, dtype='complex128') + 0.5j
val64 = np.ones(3, dtype='complex64') + 0.5j
val128 = np.ones(3, dtype='complex128') + 0.5j
vec64 = TensorType('complex64', (False, ))()
vec128 = TensorType('complex128', (False, ))()
......@@ -70,22 +70,22 @@ class test_casting(unittest.TestCase):
# upcasting to complex128
for t in ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']:
a = theano.shared(numpy.ones(3, dtype=t))
b = theano.shared(numpy.ones(3, dtype='complex128'))
a = theano.shared(np.ones(3, dtype=t))
b = theano.shared(np.ones(3, dtype='complex128'))
f = function([], basic._convert_to_complex128(a))
assert a.type.values_eq_approx(b.get_value(), f())
# upcasting to complex64
for t in ['int8', 'int16', 'int32', 'int64', 'float32']:
a = theano.shared(numpy.ones(3, dtype=t))
b = theano.shared(numpy.ones(3, dtype='complex64'))
a = theano.shared(np.ones(3, dtype=t))
b = theano.shared(np.ones(3, dtype='complex64'))
f = function([], basic._convert_to_complex64(a))
assert a.type.values_eq_approx(b.get_value(), f())
# downcast to complex64
for t in ['float64']:
a = theano.shared(numpy.ones(3, dtype=t))
b = theano.shared(numpy.ones(3, dtype='complex64'))
a = theano.shared(np.ones(3, dtype=t))
b = theano.shared(np.ones(3, dtype='complex64'))
f = function([], basic._convert_to_complex64(a))
assert a.type.values_eq_approx(b.get_value(), f())
......@@ -96,5 +96,5 @@ class test_casting(unittest.TestCase):
inputs = [v0]
outputs = [v1]
f = function(inputs, outputs)
i = numpy.zeros((2, 2))
assert (f(i) == numpy.zeros((2, 2))).all()
i = np.zeros((2, 2))
assert (f(i) == np.zeros((2, 2))).all()
......@@ -239,8 +239,8 @@ def test_det_shape():
class test_diag(unittest.TestCase):
"""
Test that linalg.diag has the same behavior as np.diag.
np.diag has two behaviors:
Test that linalg.diag has the same behavior as numpy.diag.
numpy.diag has two behaviors:
(1) when given a vector, it returns a matrix with that vector as the
diagonal.
(2) when given a matrix, returns a vector which is the diagonal of the
......
......@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
import logging
import warnings
import numpy
import numpy as np
import theano
from theano import config
......@@ -50,7 +50,7 @@ class TensorType(Type):
self.broadcastable = tuple(bool(b) for b in broadcastable)
self.dtype_specs() # error checking is done there
self.name = name
self.numpy_dtype = numpy.dtype(self.dtype)
self.numpy_dtype = np.dtype(self.dtype)
self.sparse_grad = sparse_grad
if sparse_grad:
warnings.warn(
......@@ -88,12 +88,12 @@ class TensorType(Type):
'maybe you are trying to call a function on a (possibly '
'shared) variable instead of a numeric array?')
if ((type(data) is numpy.ndarray) and
if ((type(data) is np.ndarray) and
(data.dtype == self.numpy_dtype)):
if data.dtype.num != self.numpy_dtype.num:
data = theano._asarray(data, dtype=self.dtype)
# -- now fall through to ndim check
elif ((type(data) is numpy.memmap) and
elif ((type(data) is np.memmap) and
(data.dtype == self.numpy_dtype)):
# numpy.memmap is a "safe" subclass of ndarray,
# so we can use it whereever we expect a base ndarray.
......@@ -103,7 +103,7 @@ class TensorType(Type):
elif strict:
# If any of the two conditions above was not met,
# we raise a meaningful TypeError.
if not (type(data) is numpy.ndarray):
if not (type(data) is np.ndarray):
raise TypeError("%s expected a ndarray object." % self,
data, type(data))
if data.dtype != self.numpy_dtype:
......@@ -118,7 +118,7 @@ class TensorType(Type):
# TODO: consider to pad shape with ones to make it consistent
# with self.broadcastable... like vector->row type thing
else:
if isinstance(data, numpy.ndarray):
if isinstance(data, np.ndarray):
# Check if self.dtype can accurately represent data
# (do not try to convert the data)
up_dtype = scal.upcast(self.dtype, data.dtype)
......@@ -150,7 +150,7 @@ class TensorType(Type):
converted_data = theano._asarray(data, self.dtype)
# We use the `values_eq` static function from TensorType
# to handle NaN values.
if TensorType.values_eq(numpy.asarray(data),
if TensorType.values_eq(np.asarray(data),
converted_data,
force_same_dtype=False):
data = converted_data
......@@ -195,7 +195,7 @@ class TensorType(Type):
" dimension.", data.shape, self.broadcastable)
i += 1
if (self.filter_checks_isfinite and
not numpy.all(numpy.isfinite(data))):
not np.all(np.isfinite(data))):
raise ValueError("non-finite elements not allowed")
return data
......@@ -294,8 +294,8 @@ class TensorType(Type):
@staticmethod
def may_share_memory(a, b):
# This is a method of TensorType, so both a and b should be ndarrays
if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray):
return numpy.may_share_memory(a, b)
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
return np.may_share_memory(a, b)
else:
return False
......@@ -308,14 +308,14 @@ class TensorType(Type):
if force_same_dtype and a.dtype != b.dtype:
return False
a_eq_b = (a == b)
r = numpy.all(a_eq_b)
r = np.all(a_eq_b)
if r:
return True
# maybe the trouble is that there are NaNs
a_missing = numpy.isnan(a)
a_missing = np.isnan(a)
if a_missing.any():
b_missing = numpy.isnan(b)
return numpy.all(a_eq_b + (a_missing == b_missing))
b_missing = np.isnan(b)
return np.all(a_eq_b + (a_missing == b_missing))
else:
return False
......@@ -553,7 +553,7 @@ class TensorType(Type):
Create an numpy ndarray full of 0 values.
"""
return numpy.zeros(shape, dtype=self.dtype)
return np.zeros(shape, dtype=self.dtype)
def get_shape_info(self, obj):
"""
......@@ -601,9 +601,9 @@ class TensorType(Type):
"""
if shape_info:
return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize
return np.prod(shape_info) * np.dtype(self.dtype).itemsize
else: # a scalar
return numpy.dtype(self.dtype).itemsize
return np.dtype(self.dtype).itemsize
theano.compile.ops.expandable_types += (TensorType,)
......@@ -624,13 +624,13 @@ def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False,
Absolute tolerance, passed to _allclose.
"""
if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray):
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
if a.shape != b.shape:
return False
if a.dtype != b.dtype:
return False
if str(a.dtype) not in theano.tensor.continuous_dtypes:
return numpy.all(a == b)
return np.all(a == b)
else:
cmp = theano.tensor.basic._allclose(a, b, rtol=rtol, atol=atol)
if cmp:
......@@ -644,38 +644,38 @@ def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False,
# core recently, so it may not be available to everyone. Thus,
# for now we use a home-made recipe, that should probably be
# revisited in the future.
a_missing = numpy.isnan(a)
a_inf = numpy.isinf(a)
a_missing = np.isnan(a)
a_inf = np.isinf(a)
if not (a_missing.any() or (allow_remove_inf and a_inf.any())):
# There are no missing values in a, thus this is not the
# reason why numpy.allclose(a, b) returned False.
_logger.info(
'numpy allclose failed for abs_err %f and rel_err %f',
numpy.max(abs(a - b)),
numpy.max(abs(a - b) / (abs(a) + abs(b))))
np.max(abs(a - b)),
np.max(abs(a - b) / (abs(a) + abs(b))))
return False
# The following line is what numpy.allclose bases its decision
# upon, according to its documentation.
rtol = 1.0000000000000001e-05
atol = 1e-8
cmp_elemwise = (numpy.absolute(a - b) <=
(atol + rtol * numpy.absolute(b)))
cmp_elemwise = (np.absolute(a - b) <=
(atol + rtol * np.absolute(b)))
# Find places where both a and b have missing values.
both_missing = a_missing * numpy.isnan(b)
both_missing = a_missing * np.isnan(b)
# Find places where both a and b have inf of the same sign.
both_inf = a_inf * numpy.isinf(b)
both_inf = a_inf * np.isinf(b)
# cmp_elemwise is weird when we have inf and -inf.
# set it to False
cmp_elemwise = numpy.where(
cmp_elemwise = np.where(
both_inf & cmp_elemwise,
a == b,
cmp_elemwise)
# check the sign of the inf
both_inf = numpy.where(both_inf, (a == b), both_inf)
both_inf = np.where(both_inf, (a == b), both_inf)
if allow_remove_inf:
both_inf += a_inf
......
......@@ -3,7 +3,7 @@ from __future__ import absolute_import, print_function, division
# Slice type and Op. None Type and NoneConst.
#
import numpy
import numpy as np
import theano
from theano.gof import Apply, Constant, Generic, Op, Type, hashtype
......@@ -78,15 +78,15 @@ class SliceConstant(Constant):
def __init__(self, type, data, name=None):
assert isinstance(data, slice)
# Numpy ndarray aren't hashable, so get rid of them.
if isinstance(data.start, numpy.ndarray):
if isinstance(data.start, np.ndarray):
assert data.start.ndim == 0
assert str(data.start.dtype) in theano.tensor.integer_dtypes
data = slice(int(data.start), data.stop, data.step)
elif isinstance(data.stop, numpy.ndarray):
elif isinstance(data.stop, np.ndarray):
assert data.stop.ndim == 0
assert str(data.stop.dtype) in theano.tensor.integer_dtypes
data = slice(data.start, int(data.stop), data.step)
elif isinstance(data.step, numpy.ndarray):
elif isinstance(data.step, np.ndarray):
assert data.step.ndim == 0
assert str(data.step.dtype) in theano.tensor.integer_dtypes
data = slice(data.start, int(data.stop), data.step)
......
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import theano
from theano.compat import izip
......@@ -23,7 +23,7 @@ def hash_from_ndarray(data):
# too long hash, I call it again on the concatenation of all parts.
if not data.flags["C_CONTIGUOUS"]:
# hash_from_code needs a C-contiguous array.
data = numpy.ascontiguousarray(data)
data = np.ascontiguousarray(data)
return hash_from_code(hash_from_code(data) +
hash_from_code(str(data.shape)) +
hash_from_code(str(data.strides)) +
......
......@@ -4,7 +4,7 @@ import copy
import traceback as tb
import warnings
import numpy
import numpy as np
from six import integer_types
from six.moves import xrange
......@@ -462,7 +462,7 @@ class _tensor_py_operators(object):
def check_bool(args_el):
try:
if (isinstance(args_el, (numpy.bool_, bool)) or
if (isinstance(args_el, (np.bool_, bool)) or
args_el.dtype == 'bool'):
raise TypeError('TensorType does not support boolean '
'mask for indexing such as tensor[x==0]. '
......@@ -495,7 +495,7 @@ class _tensor_py_operators(object):
elif len(ellipses) == 1:
new_axes = sum(1
for index in args
if index is numpy.newaxis) # numpy.newaxis is None
if index is np.newaxis) # numpy.newaxis is None
ellipsis_at = ellipses[0]
args = list(args)
args[ellipsis_at: ellipsis_at + 1] = (
......@@ -503,7 +503,7 @@ class _tensor_py_operators(object):
# Force input to be int64 datatype if input is an empty list or tuple
# Else leave it as is if it is a real number
args = tuple([numpy.array(inp, dtype=numpy.int64)
args = tuple([np.array(inp, dtype=np.int64)
if(inp == [] or inp == ()) else inp for inp in args])
# Convert python literals to theano constants
args = theano.tensor.subtensor.make_constant(args)
......@@ -515,7 +515,7 @@ class _tensor_py_operators(object):
axis = None
for i, arg in enumerate(args):
try:
if arg is not numpy.newaxis:
if arg is not np.newaxis:
theano.tensor.subtensor.Subtensor.convert(arg)
except theano.tensor.subtensor.AdvancedIndexingError:
if advanced:
......@@ -532,14 +532,14 @@ class _tensor_py_operators(object):
all(isinstance(a, slice) and
equal_slices(a, slice(None)) for a in args[axis + 1:]) and
isinstance(args[axis],
(numpy.ndarray, list,
(np.ndarray, list,
TensorVariable, TensorConstant,
theano.tensor.sharedvar.TensorSharedVariable))):
return self.take(args[axis], axis)
else:
return theano.tensor.subtensor.advanced_subtensor(self, *args)
else:
if numpy.newaxis in args:
if np.newaxis in args:
# None (aka np.newaxis) in numpy indexing means to add a
# broadcastable dimension, which theano traditionally did with
# the dimshuffle op. The following code converts numpy-style
......@@ -550,7 +550,7 @@ class _tensor_py_operators(object):
pattern = []
new_args = []
for arg in args:
if arg == numpy.newaxis:
if arg == np.newaxis:
pattern.append('x')
new_args.append(slice(None, None, None))
else:
......@@ -642,7 +642,7 @@ class _tensor_py_operators(object):
def norm(self, L, axis=None, keepdims=False):
if L == 0:
raise NotImplementedError()
if numpy.isinf(L):
if np.isinf(L):
raise NotImplementedError()
# optimizations will/should catch cases like L=1, L=2
y = theano.tensor.basic.pow(
......@@ -862,7 +862,7 @@ class TensorConstantSignature(tuple):
# (note that if there are NaN values in d1, this will return
# False, which is why we do not bother with testing `other.has_nan`
# here).
return (self.sum == other.sum) and numpy.all(d0 == d1)
return (self.sum == other.sum) and np.all(d0 == d1)
def __hash__(self):
t, d = self
......@@ -880,25 +880,25 @@ class TensorConstantSignature(tuple):
self._sum = self.no_nan.sum()
# The following 2 lines are needede as in Python 3.3 with NumPy
# 1.7.1, numpy.ndarray and numpy.memmap aren't hashable.
if type(self._sum) is numpy.memmap:
self._sum = numpy.asarray(self._sum).item()
if type(self._sum) is np.memmap:
self._sum = np.asarray(self._sum).item()
if self.has_nan and self.no_nan.mask.all():
# In this case the sum is not properly computed by numpy.
self._sum = 0
if numpy.isinf(self._sum) or numpy.isnan(self._sum):
if np.isinf(self._sum) or np.isnan(self._sum):
# NaN may happen when there are both -inf and +inf values.
if self.has_nan:
# Filter both NaN and Inf values.
mask = self.no_nan.mask + numpy.isinf(self[1])
mask = self.no_nan.mask + np.isinf(self[1])
else:
# Filter only Inf values.
mask = numpy.isinf(self[1])
mask = np.isinf(self[1])
if mask.all():
self._sum = 0
else:
self._sum = numpy.ma.masked_array(self[1], mask).sum()
self._sum = np.ma.masked_array(self[1], mask).sum()
# At this point there should be no more NaN.
assert not numpy.isnan(self._sum)
assert not np.isnan(self._sum)
return self._sum
sum = property(_get_sum)
......@@ -906,9 +906,9 @@ class TensorConstantSignature(tuple):
try:
return self._no_nan
except AttributeError:
nan_mask = numpy.isnan(self[1])
nan_mask = np.isnan(self[1])
if nan_mask.any():
self._no_nan = numpy.ma.masked_array(self[1], nan_mask)
self._no_nan = np.ma.masked_array(self[1], nan_mask)
self.has_nan = True
else:
self._no_nan = self[1]
......@@ -926,7 +926,7 @@ class TensorConstant(_tensor_py_operators, Constant):
def __init__(self, type, data, name=None):
Constant.__init__(self, type, data, name)
self.tag.unique_value = None
if isinstance(data, numpy.ndarray) and data.ndim > 0:
if isinstance(data, np.ndarray) and data.ndim > 0:
flat_data = data.ravel()
if flat_data.shape[0]:
if (flat_data == flat_data[0]).all():
......@@ -949,7 +949,7 @@ class TensorConstant(_tensor_py_operators, Constant):
def equals(self, other):
# Override Contant.equals to allow to compare with
# numpy.ndarray, and python type.
if isinstance(other, (numpy.ndarray, int, float)):
if isinstance(other, (np.ndarray, int, float)):
# Make a TensorConstant to be able to compare
other = theano.tensor.basic.constant(other)
return (isinstance(other, TensorConstant) and
......
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
from theano.tensor.elemwise import Elemwise
from theano import scalar
......@@ -15,7 +15,7 @@ class XlogX(scalar.UnaryScalarOp):
def st_impl(x):
if x == 0.0:
return 0.0
return x * numpy.log(x)
return x * np.log(x)
def impl(self, x):
return XlogX.st_impl(x)
......@@ -48,7 +48,7 @@ class XlogY0(scalar.BinaryScalarOp):
def st_impl(x, y):
    """Compute ``x * log(y)``, returning 0.0 whenever ``x == 0``.

    The guard avoids ``0 * -inf == nan`` in the ``x == 0, y == 0`` case.
    """
    if x == 0.0:
        return 0.0
    # Diff resolved: keep the `np` form of the return (the unapplied
    # hunk carried both the removed `numpy.log` and added `np.log` lines).
    return x * np.log(y)
def impl(self, x, y):
    # Python-mode scalar implementation; delegates to the static helper
    # on the class (presumably a @staticmethod — decorator not visible here).
    return XlogY0.st_impl(x, y)
......
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
from .type import TypedListType
import theano
......@@ -500,7 +500,7 @@ class Index(Op):
(out,) = outputs
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
out[0] = numpy.asarray(y, dtype=theano.config.floatX)
out[0] = np.asarray(y, dtype=theano.config.floatX)
break
def __str__(self):
......@@ -530,7 +530,7 @@ class Count(Op):
for y in range(len(x)):
if node.inputs[0].ttype.values_eq(x[y], elem):
out[0] += 1
out[0] = numpy.asarray(out[0], dtype=theano.config.floatX)
out[0] = np.asarray(out[0], dtype=theano.config.floatX)
def __str__(self):
return self.__class__.__name__
......@@ -565,7 +565,7 @@ class Length(Op):
def perform(self, node, x, outputs):
(out,) = outputs
out[0] = numpy.asarray(len(x[0]), 'int64')
out[0] = np.asarray(len(x[0]), 'int64')
def __str__(self):
return self.__class__.__name__
......
......@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
import unittest
from nose.plugins.skip import SkipTest
import numpy
import numpy as np
import theano
import theano.typed_list
......@@ -24,8 +24,8 @@ except ImportError:
# took from tensors/tests/test_basic.py
def rand_ranged_matrix(minimum, maximum, shape, dtype=None):
    """Return a random matrix with entries drawn uniformly from [minimum, maximum).

    Backward-compatible generalization: `dtype` defaults to
    theano.config.floatX (the original behavior) when not given.
    Diff resolved: the unapplied hunk carried both the removed
    `numpy.` and added `np.` return statements; only one is kept.
    """
    if dtype is None:
        dtype = theano.config.floatX
    return np.asarray(np.random.rand(*shape) * (maximum - minimum) + minimum,
                      dtype=dtype)
# took from sparse/tests/test_basic.py
......@@ -34,8 +34,8 @@ def random_lil(shape, dtype, nnz):
huge = 2 ** 30
for k in range(nnz):
# set non-zeros in random locations (row x, col y)
idx = numpy.random.randint(1, huge + 1, size=2) % shape
value = numpy.random.rand()
idx = np.random.randint(1, huge + 1, size=2) % shape
value = np.random.rand()
# if dtype *int*, value will always be zeros!
if dtype in theano.tensor.integer_dtypes:
value = int(value * 100)
......@@ -68,7 +68,7 @@ class test_get_item(unittest.TestCase):
x = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], slice(0, 1, 1)), [x]))
self.assertTrue(np.array_equal(f([x], slice(0, 1, 1)), [x]))
def test_sanity_check_single(self):
......@@ -84,9 +84,9 @@ class test_get_item(unittest.TestCase):
x = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x],
numpy.asarray(0, dtype='int64')),
x))
self.assertTrue(np.array_equal(f([x],
np.asarray(0, dtype='int64')),
x))
def test_interface(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -100,16 +100,16 @@ class test_get_item(unittest.TestCase):
x = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x],
numpy.asarray(0, dtype='int64')),
x))
self.assertTrue(np.array_equal(f([x],
np.asarray(0, dtype='int64')),
x))
z = mySymbolicMatricesList[0]
f = theano.function([mySymbolicMatricesList],
z)
self.assertTrue(numpy.array_equal(f([x]), x))
self.assertTrue(np.array_equal(f([x]), x))
def test_wrong_input(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -130,14 +130,14 @@ class test_get_item(unittest.TestCase):
x = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x]), x))
self.assertTrue(np.array_equal(f([x]), x))
z = GetItem()(mySymbolicMatricesList, slice(0, 1, 1))
f = theano.function([mySymbolicMatricesList],
z)
self.assertTrue(numpy.array_equal(f([x]), [x]))
self.assertTrue(np.array_equal(f([x]), [x]))
class test_append(unittest.TestCase):
......@@ -156,7 +156,7 @@ class test_append(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], y), [x, y]))
self.assertTrue(np.array_equal(f([x], y), [x, y]))
def test_sanity_check(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -171,7 +171,7 @@ class test_append(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], y), [x, y]))
self.assertTrue(np.array_equal(f([x], y), [x, y]))
def test_interfaces(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -186,7 +186,7 @@ class test_append(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], y), [x, y]))
self.assertTrue(np.array_equal(f([x], y), [x, y]))
class test_extend(unittest.TestCase):
......@@ -206,7 +206,7 @@ class test_extend(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], [y]), [x, y]))
self.assertTrue(np.array_equal(f([x], [y]), [x, y]))
def test_sanity_check(self):
mySymbolicMatricesList1 = TypedListType(T.TensorType(
......@@ -223,7 +223,7 @@ class test_extend(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], [y]), [x, y]))
self.assertTrue(np.array_equal(f([x], [y]), [x, y]))
def test_interface(self):
mySymbolicMatricesList1 = TypedListType(T.TensorType(
......@@ -240,7 +240,7 @@ class test_extend(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], [y]), [x, y]))
self.assertTrue(np.array_equal(f([x], [y]), [x, y]))
class test_insert(unittest.TestCase):
......@@ -260,10 +260,10 @@ class test_insert(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x],
numpy.asarray(1, dtype='int64'),
y),
[x, y]))
self.assertTrue(np.array_equal(f([x],
np.asarray(1, dtype='int64'),
y),
[x, y]))
def test_sanity_check(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -279,7 +279,7 @@ class test_insert(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], numpy.asarray(1,
self.assertTrue(np.array_equal(f([x], np.asarray(1,
dtype='int64'), y), [x, y]))
def test_interface(self):
......@@ -296,10 +296,10 @@ class test_insert(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x],
numpy.asarray(1, dtype='int64'),
y),
[x, y]))
self.assertTrue(np.array_equal(f([x],
np.asarray(1, dtype='int64'),
y),
[x, y]))
class test_remove(unittest.TestCase):
......@@ -318,7 +318,7 @@ class test_remove(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x, y], y), [x]))
self.assertTrue(np.array_equal(f([x, y], y), [x]))
def test_sanity_check(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -333,7 +333,7 @@ class test_remove(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x, y], y), [x]))
self.assertTrue(np.array_equal(f([x, y], y), [x]))
def test_interface(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -348,7 +348,7 @@ class test_remove(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x, y], y), [x]))
self.assertTrue(np.array_equal(f([x, y], y), [x]))
class test_reverse(unittest.TestCase):
......@@ -366,7 +366,7 @@ class test_reverse(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x, y]), [y, x]))
self.assertTrue(np.array_equal(f([x, y]), [y, x]))
def test_sanity_check(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -380,7 +380,7 @@ class test_reverse(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x, y]), [y, x]))
self.assertTrue(np.array_equal(f([x, y]), [y, x]))
def test_interface(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -394,7 +394,7 @@ class test_reverse(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x, y]), [y, x]))
self.assertTrue(np.array_equal(f([x, y]), [y, x]))
class test_index(unittest.TestCase):
......@@ -570,10 +570,10 @@ class TestMakeList(unittest.TestCase):
x = T.tensor3()
y = T.tensor3()
A = numpy.cast[theano.config.floatX](numpy.random.rand(5, 3))
B = numpy.cast[theano.config.floatX](numpy.random.rand(7, 2))
X = numpy.cast[theano.config.floatX](numpy.random.rand(5, 6, 1))
Y = numpy.cast[theano.config.floatX](numpy.random.rand(1, 9, 3))
A = np.cast[theano.config.floatX](np.random.rand(5, 3))
B = np.cast[theano.config.floatX](np.random.rand(7, 2))
X = np.cast[theano.config.floatX](np.random.rand(5, 6, 1))
Y = np.cast[theano.config.floatX](np.random.rand(1, 9, 3))
make_list((3., 4.))
c = make_list((a, b))
......
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy as np
import theano
import theano.typed_list
......@@ -14,8 +14,8 @@ from theano import In
# took from tensors/tests/test_basic.py
def rand_ranged_matrix(minimum, maximum, shape, dtype=None):
    """Return a random matrix with entries drawn uniformly from [minimum, maximum).

    Backward-compatible generalization: `dtype` defaults to
    theano.config.floatX (the original behavior) when not given.
    Diff resolved: the unapplied hunk carried both the removed
    `numpy.` and added `np.` return statements; only one is kept.
    """
    if dtype is None:
        dtype = theano.config.floatX
    return np.asarray(np.random.rand(*shape) * (maximum - minimum) + minimum,
                      dtype=dtype)
class test_inplace(unittest.TestCase):
......@@ -34,7 +34,7 @@ class test_inplace(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x, y]), [y, x]))
self.assertTrue(np.array_equal(f([x, y]), [y, x]))
def test_append_inplace(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -52,7 +52,7 @@ class test_inplace(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], y), [x, y]))
self.assertTrue(np.array_equal(f([x], y), [x, y]))
def test_extend_inplace(self):
mySymbolicMatricesList1 = TypedListType(T.TensorType(
......@@ -72,7 +72,7 @@ class test_inplace(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], [y]), [x, y]))
self.assertTrue(np.array_equal(f([x], [y]), [x, y]))
def test_insert_inplace(self):
mySymbolicMatricesList = TypedListType(T.TensorType(
......@@ -92,7 +92,7 @@ class test_inplace(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x], numpy.asarray(1,
self.assertTrue(np.array_equal(f([x], np.asarray(1,
dtype='int64'), y), [x, y]))
def test_remove_inplace(self):
......@@ -110,7 +110,7 @@ class test_inplace(unittest.TestCase):
y = rand_ranged_matrix(-1000, 1000, [100, 101])
self.assertTrue(numpy.array_equal(f([x, y], y), [x]))
self.assertTrue(np.array_equal(f([x, y], y), [x]))
def test_constant_folding():
......
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy as np
import theano
import theano.typed_list
......@@ -12,8 +12,8 @@ from theano.tests import unittest_tools as utt
# took from tensors/tests/test_basic.py
def rand_ranged_matrix(minimum, maximum, shape, dtype=None):
    """Return a random matrix with entries drawn uniformly from [minimum, maximum).

    Backward-compatible generalization: `dtype` defaults to
    theano.config.floatX (the original behavior) when not given.
    Diff resolved: the unapplied hunk carried both the removed
    `numpy.` and added `np.` return statements; only one is kept.
    """
    if dtype is None:
        dtype = theano.config.floatX
    return np.asarray(np.random.rand(*shape) * (maximum - minimum) + minimum,
                      dtype=dtype)
class test_typed_list_type(unittest.TestCase):
......@@ -84,7 +84,7 @@ class test_typed_list_type(unittest.TestCase):
x = rand_ranged_matrix(-1000, 1000, [100, 100])
self.assertTrue(numpy.array_equal(myType.filter([x]), [x]))
self.assertTrue(np.array_equal(myType.filter([x]), [x]))
def test_intern_filter(self):
"""
......@@ -95,9 +95,9 @@ class test_typed_list_type(unittest.TestCase):
myType = TypedListType(T.TensorType('float64',
(False, False)))
x = numpy.asarray([[4, 5], [4, 5]], dtype='float32')
x = np.asarray([[4, 5], [4, 5]], dtype='float32')
self.assertTrue(numpy.array_equal(myType.filter([x]), [x]))
self.assertTrue(np.array_equal(myType.filter([x]), [x]))
# Will fail for unknown reasons
# under search
......@@ -125,7 +125,7 @@ class test_typed_list_type(unittest.TestCase):
x = rand_ranged_matrix(-1000, 1000, [100, 100])
self.assertTrue(numpy.array_equal(myType.filter([[x]]), [[x]]))
self.assertTrue(np.array_equal(myType.filter([[x]]), [[x]]))
def test_comparison_different_depth(self):
"""
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论