Commit 8897859b authored by amrithasuresh

Updated numpy as np

Parent c60f369f
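The change below is a mechanical rename: every "numpy." reference in theano/tensor/basic.py now goes through the conventional "np" alias. A minimal sketch of the pattern (the helper here is a simplified stand-in for the file's check_equal_numpy, not the actual Theano function):

import numpy as np  # was: import numpy

def check_equal(x, y):
    # Same comparison the patched check_equal_numpy performs, via the alias.
    if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
        return (x.dtype == y.dtype and x.shape == y.shape and
                np.all(abs(x - y) < 1e-10))
    return x == y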
@@ -5,7 +5,7 @@ from six.moves import builtins
 import sys
 import warnings
-import numpy
+import numpy as np
 from six import integer_types
 from six.moves import xrange
 import numbers
@@ -72,12 +72,12 @@ def check_equal_numpy(x, y):
     Checks the dtype and shape if x and y are numpy.ndarray instances.

     """
-    if isinstance(x, numpy.ndarray) and isinstance(y, numpy.ndarray):
+    if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
         return (x.dtype == y.dtype and x.shape == y.shape and
-                numpy.all(abs(x - y) < 1e-10))
-    elif (isinstance(x, numpy.random.RandomState) and
-          isinstance(y, numpy.random.RandomState)):
-        return python_all(numpy.all(a == b) for a, b in
+                np.all(abs(x - y) < 1e-10))
+    elif (isinstance(x, np.random.RandomState) and
+          isinstance(y, np.random.RandomState)):
+        return python_all(np.all(a == b) for a, b in
                           izip(x.__getstate__(), y.__getstate__()))
     else:
         return x == y
@@ -348,15 +348,15 @@ def _get_atol_rtol(a, b):


 def _allclose(a, b, rtol=None, atol=None):
-    a = numpy.asarray(a)
-    b = numpy.asarray(b)
+    a = np.asarray(a)
+    b = np.asarray(b)
     atol_, rtol_ = _get_atol_rtol(a, b)
     if rtol is not None:
         rtol_ = rtol
     if atol is not None:
         atol_ = atol

-    return numpy.allclose(a, b, atol=atol_, rtol=rtol_)
+    return np.allclose(a, b, atol=atol_, rtol=rtol_)


 class NotScalarConstantError(Exception):
@@ -387,10 +387,10 @@ def numpy_scalar(data):
     if (data.ndim > 0 and
         (len(data.shape) == 0 or
          builtins.max(data.shape) == 0)):
-        assert numpy.all(numpy.array([]) == data)
+        assert np.all(np.array([]) == data)
         raise EmptyConstantError()
     try:
-        numpy.complex(data)  # works for all numeric scalars
+        np.complex(data)  # works for all numeric scalars
         return data
     except Exception:
         raise NotScalarConstantError(
@@ -444,10 +444,10 @@ def get_scalar_constant_value(orig_v, elemwise=True,
             # to depend on passing it None)
             raise NotScalarConstantError()

-        if isinstance(v, (numpy.integer, integer_types, float)):
-            return numpy.asarray(v)
+        if isinstance(v, (np.integer, integer_types, float)):
+            return np.asarray(v)

-        if isinstance(v, numpy.ndarray):
+        if isinstance(v, np.ndarray):
             return numpy_scalar(v).copy()

         if isinstance(v, Constant):
@@ -470,11 +470,11 @@ def get_scalar_constant_value(orig_v, elemwise=True,
                 i = v.owner.op.i
                 inp = v.owner.inputs[0]
                 if isinstance(inp, Constant):
-                    return numpy.asarray(inp.data.shape[i])
+                    return np.asarray(inp.data.shape[i])
                 # The shape of a broadcastable dimension is 1
                 if (hasattr(inp.type, 'broadcastable') and
                         inp.type.broadcastable[i]):
-                    return numpy.asarray(1)
+                    return np.asarray(1)

             # Don't act as the constant_folding optimization here as this
             # fct is used too early in the optimization phase. This would
@@ -639,7 +639,7 @@ def get_scalar_constant_value(orig_v, elemwise=True,
                     raise ValueError(msg)

                 if gp_broadcastable[idx]:
-                    return numpy.asarray(1)
+                    return np.asarray(1)

         raise NotScalarConstantError(v)
@@ -1002,7 +1002,7 @@ class TensorFromScalar(Op):
     def perform(self, node, inp, out_):
         s, = inp
         out, = out_
-        out[0] = numpy.asarray(s)
+        out[0] = np.asarray(s)

     def infer_shape(self, node, in_shapes):
         return [()]
@@ -1216,23 +1216,23 @@ class MaxAndArgmax(Op):
             axes = tuple(range(x.ndim))
         else:
             axes = tuple(int(ax) for ax in axes)
-        max[0] = theano._asarray(numpy.max(x, axes),
+        max[0] = theano._asarray(np.max(x, axes),
                                  dtype=node.outputs[0].dtype)
         # Numpy does not support multiple axes for argmax
         # Work around
-        keep_axes = numpy.array([i for i in range(x.ndim) if i not in axes],
+        keep_axes = np.array([i for i in range(x.ndim) if i not in axes],
                              dtype='int64')
         # Not-reduced axes in front
-        transposed_x = numpy.transpose(x, numpy.concatenate((keep_axes, axes)))
+        transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))
         kept_shape = transposed_x.shape[:len(keep_axes)]
         reduced_shape = transposed_x.shape[len(keep_axes):]

         # Numpy.prod returns 1.0 when arg is empty, so we cast it to int64
         # Otherwise reshape would complain citing float arg
-        new_shape = kept_shape + (numpy.prod(reduced_shape, dtype='int64'),)
+        new_shape = kept_shape + (np.prod(reduced_shape, dtype='int64'),)
         reshaped_x = transposed_x.reshape(new_shape)
-        max_idx[0] = theano._asarray(numpy.argmax(reshaped_x, axis=-1),
+        max_idx[0] = theano._asarray(np.argmax(reshaped_x, axis=-1),
                                      dtype='int64')

     def c_code(self, node, name, inp, out, sub):
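For reference, the multi-axis argmax workaround this hunk preserves can be reproduced in plain NumPy. A minimal sketch (the array, shapes and axes are illustrative, not taken from the commit):

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
axes = (1, 2)  # axes to reduce over
keep = tuple(i for i in range(x.ndim) if i not in axes)
# Move kept axes to the front, then collapse the reduced axes into one.
t = np.transpose(x, keep + axes)
flat = t.reshape(t.shape[:len(keep)] + (-1,))
idx = np.argmax(flat, axis=-1)  # flat index within each collapsed block
print(idx)  # [11 11]: the max of each 3x4 block sits at its last flat position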
@@ -1399,11 +1399,11 @@ class Argmax(Op):

     def make_node(self, x, axis=None):
         x = _as_tensor_variable(x)
-        if isinstance(axis, (integer_types, numpy.integer)):
+        if isinstance(axis, (integer_types, np.integer)):
             axis = [int(axis)]
-        elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
+        elif isinstance(axis, np.ndarray) and axis.ndim == 0:
             axis = [int(axis)]
-        elif isinstance(axis, (tuple, list, numpy.ndarray)):
+        elif isinstance(axis, (tuple, list, np.ndarray)):
             axis = [int(a) for a in axis]
         if axis == list(range(x.type.ndim)):
             axis = None
@@ -1415,11 +1415,11 @@ class Argmax(Op):
                     "Argmax needs a constant axis. Got %s" % axis)
             else:
                 assert axis.dtype in integer_dtypes
-                if isinstance(axis.data, (integer_types, numpy.integer)) or \
-                   (isinstance(axis.data, numpy.ndarray) and
+                if isinstance(axis.data, (integer_types, np.integer)) or \
+                   (isinstance(axis.data, np.ndarray) and
                         axis.data.ndim == 0):
                     axis = [int(axis.data)]
-                elif isinstance(axis.data, (list, numpy.ndarray)):
+                elif isinstance(axis.data, (list, np.ndarray)):
                     axis = [int(i) for i in axis.data]

         # Make axis entries non-negative, and sort them
@@ -1466,17 +1466,17 @@ class Argmax(Op):

         # Numpy does not support multiple axes for argmax
         # Work around
-        keep_axes = numpy.array([i for i in range(x.ndim) if i not in axes],
+        keep_axes = np.array([i for i in range(x.ndim) if i not in axes],
                              dtype='int64')
         # Not-reduced axes in front
-        transposed_x = numpy.transpose(x, numpy.concatenate((keep_axes,
+        transposed_x = np.transpose(x, np.concatenate((keep_axes,
                                                        axes)))
         kept_shape = transposed_x.shape[:len(keep_axes)]
         reduced_shape = transposed_x.shape[len(keep_axes):]
-        new_shape = kept_shape + (numpy.prod(reduced_shape),)
+        new_shape = kept_shape + (np.prod(reduced_shape),)
         reshaped_x = transposed_x.reshape(new_shape)
-        max_idx[0] = theano._asarray(numpy.argmax(reshaped_x, axis=-1),
+        max_idx[0] = theano._asarray(np.argmax(reshaped_x, axis=-1),
                                      dtype='int64')

     def c_code(self, node, name, inp, out, sub):
@@ -1562,9 +1562,9 @@ def makeKeepDims(x, y, axis):

     if axis is None:
         axis = list(range(x.type.ndim))
-    elif isinstance(axis, (integer_types, numpy.integer)):
+    elif isinstance(axis, (integer_types, np.integer)):
         axis = [axis]
-    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
+    elif isinstance(axis, np.ndarray) and axis.ndim == 0:
         axis = [int(axis)]
     else:
         axis = [int(a) for a in axis]
@@ -1609,10 +1609,10 @@ def max_and_argmax(a, axis=None, keepdims=False):
     a = as_tensor_variable(a)
     if axis is None:
         axis = list(range(a.type.ndim))
-    elif (isinstance(axis, (integer_types, numpy.integer)) or
-          (isinstance(axis, numpy.ndarray) and axis.ndim == 0)):
+    elif (isinstance(axis, (integer_types, np.integer)) or
+          (isinstance(axis, np.ndarray) and axis.ndim == 0)):
         axis = [int(axis)]
-    elif isinstance(axis, (tuple, list, numpy.ndarray)):
+    elif isinstance(axis, (tuple, list, np.ndarray)):
         axis = [int(i) for i in axis]
     elif isinstance(axis, Variable):
         if NoneConst.equals(axis):
@@ -1621,10 +1621,10 @@ def max_and_argmax(a, axis=None, keepdims=False):
            raise TypeError("max and argmax computation needs a constant axis. Got %s" % axis)
         else:
             assert axis.dtype in integer_dtypes
-            if (isinstance(axis.data, (integer_types, numpy.integer)) or
-                    (isinstance(axis.data, numpy.ndarray) and axis.data.ndim == 0)):
+            if (isinstance(axis.data, (integer_types, np.integer)) or
+                    (isinstance(axis.data, np.ndarray) and axis.data.ndim == 0)):
                 axis = [int(axis.data)]
-            elif isinstance(axis.data, (list, numpy.ndarray)):
+            elif isinstance(axis.data, (list, np.ndarray)):
                 axis = [int(i) for i in axis.data]
     if len(axis) == 0:
         axis = list(range(a.type.ndim))
@@ -1838,7 +1838,7 @@ def isnan(a):
     """isnan(a)"""
     a = as_tensor_variable(a)
     if a.dtype in discrete_dtypes:
-        return alloc(numpy.asarray(False, dtype="bool"),
+        return alloc(np.asarray(False, dtype="bool"),
                      *[a.shape[i] for i in range(a.ndim)])
     return isnan_(a)
@@ -1857,7 +1857,7 @@ def isinf(a):
     """isinf(a)"""
     a = as_tensor_variable(a)
     if a.dtype in discrete_dtypes:
-        return alloc(numpy.asarray(False, dtype="bool"),
+        return alloc(np.asarray(False, dtype="bool"),
                      *[a.shape[i] for i in range(a.ndim)])
     return isinf_(a)
@@ -2426,7 +2426,7 @@ def zeros(shape, dtype=None):
         shape = [shape]
     if dtype is None:
         dtype = config.floatX
-    return alloc(numpy.array(0, dtype=dtype), *shape)
+    return alloc(np.array(0, dtype=dtype), *shape)


 def ones(shape, dtype=None):
@@ -2437,7 +2437,7 @@ def ones(shape, dtype=None):
         shape = [shape]
     if dtype is None:
         dtype = config.floatX
-    return alloc(numpy.array(1, dtype=dtype), *shape)
+    return alloc(np.array(1, dtype=dtype), *shape)


 class Nonzero(gof.Op):
@@ -2481,11 +2481,11 @@ class Nonzero(gof.Op):
         a = inp[0]
         out, = out_

-        result_tuple = numpy.nonzero(a)
+        result_tuple = np.nonzero(a)

         if len(result_tuple[0]) > 0:
-            result = numpy.vstack(result_tuple)
+            result = np.vstack(result_tuple)
         else:
-            result = numpy.zeros((len(result_tuple), 0))
+            result = np.zeros((len(result_tuple), 0))

         out[0] = result.astype('int64')
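As context for the hunk above: np.nonzero returns one index array per dimension, and np.vstack stacks them into a (ndim, nnz) matrix, which is the layout Nonzero.perform emits. A small illustration (values are made up):

import numpy as np

a = np.array([[0, 1],
              [2, 0]])
idx = np.vstack(np.nonzero(a))  # shape (a.ndim, number of nonzeros)
print(idx)
# [[0 1]
#  [1 0]]  -> nonzeros at (0, 1) and (1, 0)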
@@ -2627,7 +2627,7 @@ class Tri(gof.Op):
     def perform(self, node, inp, out_):
         N, M, k = inp
         out, = out_
-        out[0] = numpy.tri(N, M, k, dtype=self.dtype)
+        out[0] = np.tri(N, M, k, dtype=self.dtype)

     def infer_shape(self, node, in_shapes):
         out_shape = [node.inputs[0], node.inputs[1]]
@@ -2738,7 +2738,7 @@ class Eye(gof.Op):
     def perform(self, node, inp, out_):
         n, m, k = inp
         out, = out_
-        out[0] = numpy.eye(n, m, k, dtype=self.dtype)
+        out[0] = np.eye(n, m, k, dtype=self.dtype)

     def infer_shape(self, node, in_shapes):
         out_shape = [node.inputs[0], node.inputs[1]]
@@ -2853,9 +2853,9 @@ class Alloc(gof.Op):
         sh = tuple([int(i) for i in inputs[1:]])
         if out[0] is None or out[0].shape != sh:
             if v.size == 1 and v.item() == 0:
-                out[0] = numpy.zeros(sh, dtype=v.dtype)
+                out[0] = np.zeros(sh, dtype=v.dtype)
             else:
-                out[0] = numpy.empty(sh, dtype=v.dtype)
+                out[0] = np.empty(sh, dtype=v.dtype)
                 out[0][...] = v  # broadcast v to fill us up
         else:
             # reuse the allocated memory.
@@ -3139,7 +3139,7 @@ class Mean(elemwise.CAReduce):
             axis = self.axis[0]
         # numpy.asarray is needed as otherwise we can end up with a
         # numpy scalar.
-        output[0] = numpy.asarray(numpy.mean(input, dtype='float64',
+        output[0] = np.asarray(np.mean(input, dtype='float64',
                                        axis=axis))

     def c_code(self, node, name, inames, onames, sub):
@@ -3232,9 +3232,9 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False,

     if axis is None:
         axis = list(range(input.ndim))
-    elif isinstance(axis, (integer_types, numpy.integer)):
+    elif isinstance(axis, (integer_types, np.integer)):
         axis = [axis]
-    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
+    elif isinstance(axis, np.ndarray) and axis.ndim == 0:
         axis = [int(axis)]
     else:
         axis = [int(a) for a in axis]
@@ -3291,9 +3291,9 @@ def var(input, axis=None, ddof=0, keepdims=False, corrected=False):
     input_ndim = input.type.ndim
     if axis is None:
         axis = list(range(input_ndim))
-    elif isinstance(axis, (integer_types, numpy.integer)):
+    elif isinstance(axis, (integer_types, np.integer)):
         axis = [axis]
-    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
+    elif isinstance(axis, np.ndarray) and axis.ndim == 0:
         axis = [int(axis)]
     else:
         axis = [int(a) for a in axis]
@@ -3617,7 +3617,7 @@ def batched_dot(a, b):
         return a * b.dimshuffle(*([0] + ["x"] * (a.ndim - 1)))
     elif a.ndim > 3 or b.ndim > 3:
         return batched_tensordot(
-            a, b, [[a.ndim - 1], [numpy.maximum(1, b.ndim - 2)]])
+            a, b, [[a.ndim - 1], [np.maximum(1, b.ndim - 2)]])
     else:
         # avoid circular import
         return theano.tensor.blas.BatchedDot()(a, b)
@@ -3736,9 +3736,9 @@ class Split(Op):
             raise ValueError('In Split.perform(), len(splits) != len_splits.',
                              (len(splits), self.len_splits))

-        if numpy.sum(splits) != len_along_axis:
+        if np.sum(splits) != len_along_axis:
             raise ValueError('The splits sum to %s, expected %s' %
-                             (numpy.sum(splits), len_along_axis))
+                             (np.sum(splits), len_along_axis))
         if python_any([nb < 0 for nb in splits]):
             raise ValueError('Split: you tried to make an ndarray with a '
                              'negative number of elements.')
@@ -3828,8 +3828,8 @@ class Split(Op):
         outputs_pointers = '&' + (', &'.join(outputs))
         x, axis, splits = inputs
         fail = sub['fail']
-        x_typenum = numpy.dtype(node.inputs[0].dtype).num
-        x_itemsize = numpy.dtype(node.inputs[0].dtype).itemsize
+        x_typenum = np.dtype(node.inputs[0].dtype).num
+        x_itemsize = np.dtype(node.inputs[0].dtype).itemsize
         axis_dtype = node.inputs[1].type.dtype_specs()[1]
         splits_dtype = node.inputs[2].type.dtype_specs()[1]
         expected_splits_count = self.len_splits
@@ -4187,7 +4187,7 @@ class Join(Op):
         view = self.view
         axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
         # we check these tensors for being empty.
-        if (view != -1) and numpy.all(
+        if (view != -1) and np.all(
                 [tensor.shape[axis] == 0 for tensor in
                  tensors[0:view] + tensors[view + 1:]]):
             out[0] = tensors[view]
@@ -4198,7 +4198,7 @@ class Join(Op):
                 raise IndexError("Join axis %d out of bounds [0, %d)" %
                                  (axis, ndim))

-            out[0] = theano._asarray(numpy.concatenate(tensors, axis=axis),
+            out[0] = theano._asarray(np.concatenate(tensors, axis=axis),
                                      dtype=node.outputs[0].type.dtype)

     def c_code_cache_version(self):
@@ -4584,9 +4584,9 @@ def stack(*tensors, **kwargs):
     # And DebugMode can't detect error in this code as it is not in an
     # optimization.
     # See ticket #660
-    if numpy.all(
+    if np.all(
         [  # in case there is direct int in tensors.
-            isinstance(t, (numpy.number, float, integer_types,
+            isinstance(t, (np.number, float, integer_types,
                            python_complex)) or
             (isinstance(t, Variable) and
              isinstance(t.type, TensorType) and
@@ -4669,7 +4669,7 @@ def get_vector_length(v):
             v.owner.inputs, v.owner.op.idx_list)[0].step)
         ndim = v.owner.inputs[0].owner.inputs[0].ndim

-        types = (numbers.Integral, numpy.integer)
+        types = (numbers.Integral, np.integer)
         if start is None:
             start = 0
         elif isinstance(start, types) and start < 0:
@@ -4790,7 +4790,7 @@ class Reshape(Op):
                              ' length %i'
                              ', should be %i' % (len(shp), self.ndim), shp)
         try:
-            out[0] = numpy.reshape(x, shp)
+            out[0] = np.reshape(x, shp)
         except Exception:
             raise ValueError('Cannot reshape input of shape %s to shape %s' %
                              (x.shape, shp))
@@ -4976,12 +4976,12 @@ class Flatten(Op):
             try:
                 out[0] = x.reshape(x.size)
             except AttributeError:
-                out[0] = x.reshape((numpy.prod(x.shape),))
+                out[0] = x.reshape((np.prod(x.shape),))
         elif outdim == len(x.shape):
             out[0] = x
         else:
             newshape = (x.shape[:outdim - 1] +
-                        (numpy.prod(x.shape[outdim - 1:]),))
+                        (np.prod(x.shape[outdim - 1:]),))
             out[0] = x.reshape(newshape)

     def infer_shape(self, node, in_shapes):
@@ -5196,16 +5196,16 @@ class Tile(Op):

     def perform(self, node, inp, out_):
         x, reps = inp
         out, = out_
-        res = numpy.tile(x, reps)
+        res = np.tile(x, reps)
         if res.ndim != self.ndim:
             raise ValueError(
                 'Tile.perform produced incorrect number of dimensions')

-        if (numpy.asarray(reps) == 1).all():
+        if (np.asarray(reps) == 1).all():
             # In that case, some NumPy version return a view!  As this
             # op isn't declared as inplace, we need to check that and
             # copy the data.
-            if numpy.may_share_memory(res, x):
+            if np.may_share_memory(res, x):
                 res = res.copy()
         out[0] = res
@@ -5289,7 +5289,7 @@ def tile(x, reps, ndim=None):
     else:
         if ndim is not None and len(reps) > ndim:
             raise ValueError("len(reps) should be equal or less than ndim")
-        if not numpy.all([isinstance(r, integer_types) or
+        if not np.all([isinstance(r, integer_types) or
                           (isinstance(r, TensorVariable) and
                            r.dtype in theano.tensor.discrete_dtypes) for r in reps]):
             raise ValueError("elements of reps must be scalars of integer dtype")
@@ -5305,7 +5305,7 @@ def tile(x, reps, ndim=None):
     shape = [1] * (ndim - x.ndim) + [x.shape[i] for i in xrange(x.ndim)]
     alloc_shape = reps + shape
     y = alloc(x, *alloc_shape)
-    shuffle_ind = numpy.arange(ndim * 2).reshape(2, ndim)
+    shuffle_ind = np.arange(ndim * 2).reshape(2, ndim)
     shuffle_ind = shuffle_ind.transpose().flatten()
     y = y.dimshuffle(*shuffle_ind)
     new_shapes = [sh * reps[i] for i, sh in enumerate(shape)]
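The tile() implementation touched above builds its result by broadcasting into a (reps + shape) array and interleaving axes with shuffle_ind. The same trick, sketched in plain NumPy (sizes are illustrative):

import numpy as np

x = np.arange(6).reshape(2, 3)
reps = (2, 2)
y = np.broadcast_to(x, reps + x.shape)        # shape (2, 2, 2, 3)
order = np.arange(4).reshape(2, 2).T.ravel()  # [0 2 1 3], like shuffle_ind
y = y.transpose(order).reshape(2 * 2, 2 * 3)
assert (y == np.tile(x, reps)).all()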
@@ -5343,7 +5343,7 @@ class ARange(Op):
         def is_constant_value(var, value):
             try:
                 v = get_scalar_constant_value(var)
-                return numpy.all(v == value)
+                return np.all(v == value)
             except NotScalarConstantError:
                 pass
             return False
@@ -5378,7 +5378,7 @@ class ARange(Op):
         start = start.item()
         stop = stop.item()
         step = step.item()
-        out[0] = numpy.arange(start, stop, step, dtype=self.dtype)
+        out[0] = np.arange(start, stop, step, dtype=self.dtype)

     def connection_pattern(self, node):
@@ -5424,10 +5424,10 @@ def arange(start, stop=None, step=1, dtype=None):
         # As an example, if `start`, `stop` and `step` are all int32,
         # `numpy.arange` returns an int64 array (on 64-bit platforms),
         # while the upcast above returns int32.
-        numpy_dtype = numpy.arange(
-            start=numpy.array(0, dtype=start.dtype),
-            stop=numpy.array(1, dtype=stop.dtype),
-            step=numpy.array(1, dtype=step.dtype)).dtype
+        numpy_dtype = np.arange(
+            start=np.array(0, dtype=start.dtype),
+            stop=np.array(1, dtype=stop.dtype),
+            step=np.array(1, dtype=step.dtype)).dtype
         if numpy_dtype != dtype:
             if (config.cast_policy == 'numpy+floatX' and
                 config.floatX == 'float32' and
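The comment in this hunk can be checked directly: numpy.arange ignores the upcast rule Theano computes and picks its own output dtype. A quick probe (historically prints int64 on 64-bit platforms, though the exact result varies across NumPy versions):

import numpy as np

probe = np.arange(np.array(0, dtype='int32'),
                  np.array(1, dtype='int32'),
                  np.array(1, dtype='int32'))
print(probe.dtype)  # typically int64 on a 64-bit platform, not int32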
@@ -5653,7 +5653,7 @@ class PermuteRowElements(Op):
             out_s.append(outdim)

         if outs[0] is None or outs[0].shape != out_s:
-            outs[0] = numpy.empty(out_s, dtype=x.dtype)
+            outs[0] = np.empty(out_s, dtype=x.dtype)

         self._rec_perform(node, x, y, inverse, outs[0], curdim=0)
@@ -5796,7 +5796,7 @@ class Dot(Op):

         # the asarray is here because dot between two vectors
         # gives a numpy float object but we need to return a 0d
         # ndarray
-        z[0] = numpy.asarray(numpy.dot(x, y))
+        z[0] = np.asarray(np.dot(x, y))

     def grad(self, inp, grads):
@@ -5976,7 +5976,7 @@ def dot(a, b):
     if a.ndim == 0 or b.ndim == 0:
         return a * b
     elif a.ndim > 2 or b.ndim > 2:
-        return tensordot(a, b, [[a.ndim - 1], [numpy.maximum(0, b.ndim - 2)]])
+        return tensordot(a, b, [[a.ndim - 1], [np.maximum(0, b.ndim - 2)]])
     else:
         return _dot(a, b)
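The axis pair used here generalizes matrix multiplication: contract the last axis of a with the second-to-last axis of b (clamped to 0 for vectors). Illustrated with np.tensordot on made-up shapes:

import numpy as np

a = np.random.rand(2, 3, 4)
b = np.random.rand(4, 5)
# Contract a's last axis with b's axis max(0, b.ndim - 2), which is 0 here.
out = np.tensordot(a, b, axes=[[a.ndim - 1], [max(0, b.ndim - 2)]])
print(out.shape)  # (2, 3, 5)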
@@ -6012,14 +6012,14 @@ def _tensordot_as_dot(a, b, axes, dot, batched):
     """
     a, b = as_tensor_variable(a), as_tensor_variable(b)

-    if not numpy.isscalar(axes) and len(axes) != 2:
+    if not np.isscalar(axes) and len(axes) != 2:
         raise ValueError('Axes should be an integer or a '
                          'list/tuple of len 2 (%s was provided)'
                          % str(axes))

     # if 'axes' is a number of axes to multiply and sum over (trailing axes
     # of a, leading axes of b), we can just reshape and use dot.
-    elif numpy.isscalar(axes):
+    elif np.isscalar(axes):
         axes = int(axes)

         for operand_name, operand in (("a", a), ("b", b)):
@@ -6083,12 +6083,12 @@ def _tensordot_as_dot(a, b, axes, dot, batched):
                     'the dimensions of %s (%s.ndim=%i, len(axes[0])=%i).' %
                     (i, operand_name, operand_name, operand.ndim,
                      len(axes[i])))
-            if len(axes[i]) > 0 and numpy.max(axes[i]) >= operand.ndim:
+            if len(axes[i]) > 0 and np.max(axes[i]) >= operand.ndim:
                 raise ValueError(
                     'axes[%i] contains dimensions greater than or equal '
                     'to %s.ndim (%s.ndim=%i, max(axes[0])=%i).' %
                     (i, operand_name, operand_name, operand.ndim,
-                     numpy.max(numpy.array(axes[i]))))
+                     np.max(np.array(axes[i]))))
             if batched and 0 in axes[i]:
                 raise ValueError(
                     'axes to sum over must not contain the batch axis '
@@ -6243,8 +6243,8 @@ def all(x, axis=None, keepdims=False):


 # Some NumPy version like 1.9.2 return a view for numpy.diagonal
-x = numpy.zeros((4, 4))
-numpy_diagonal_return_view = numpy.may_share_memory(numpy.diagonal(x), x)
+x = np.zeros((4, 4))
+numpy_diagonal_return_view = np.may_share_memory(np.diagonal(x), x)
 del x
@@ -6271,7 +6271,7 @@ class ExtractDiag(Op):
                           "set to True but numpy version %s and prior versions of "
                           "numpy.diagonal() do not return a view. Update "
                           "numpy to use ExtractDiag(view=True)" %
-                          numpy.version.version)
+                          np.version.version)
             self.view = False
         if self.view:
             self.view_map = {0: [0]}
@@ -6395,7 +6395,7 @@ class AllocDiag(Op):

     def perform(self, node, inputs, outputs):
         (z,) = outputs
-        z[0] = numpy.diag(inputs[0], self.offset)
+        z[0] = np.diag(inputs[0], self.offset)

     def grad(self, inputs, gout):
         (gz,) = gout
@@ -6629,7 +6629,7 @@ class Choose(Op):
         choice = as_tensor_variable(choices)
         choice_ndim = choice.ndim - 1
         choice_bcast = choice.broadcastable[1:]
-        out_ndim = numpy.max([a.ndim, choice_ndim])
+        out_ndim = np.max([a.ndim, choice_ndim])

         # Make explicit all added broadcastable dimensions.
         a = shape_padleft(a, out_ndim - a.ndim)
@@ -6660,7 +6660,7 @@ class Choose(Op):
         a = inputs[0]
         choice = inputs[1]
         # TODO reuse out?
-        z[0] = numpy.choose(a, choice, mode=self.mode)
+        z[0] = np.choose(a, choice, mode=self.mode)


 class AllocEmpty(gof.Op):
@@ -6699,7 +6699,7 @@ class AllocEmpty(gof.Op):
         out, = out_
         sh = tuple([int(i) for i in inputs])
         if out[0] is None or out[0].shape != sh:
-            out[0] = numpy.empty(sh, dtype=self.dtype)
+            out[0] = np.empty(sh, dtype=self.dtype)

     def c_code(self, node, name, inputs, out_, sub):
         dtype = "NPY_" + self.dtype.upper()