提交 e5d09827 authored 作者: Frédéric Bastien 提交者: GitHub

Merge pull request #5853 from Amrithasuresh/master

Updated numpy as np #4218
......@@ -134,7 +134,7 @@ if (config.device.startswith('cuda') or
import theano.gpuarray
# Use config.numpy to call numpy.seterr
import numpy
import numpy as np
if config.numpy.seterr_all == 'None':
_all = None
......@@ -156,7 +156,7 @@ if config.numpy.seterr_invalid == 'None':
_invalid = None
else:
_invalid = config.numpy.seterr_invalid
numpy.seterr(
np.seterr(
all=_all,
divide=_divide,
over=_over,
......
......@@ -3,7 +3,7 @@ import errno
import os
import sys
import logging
import numpy
import numpy as np
import platform
import textwrap
import re
......@@ -1052,7 +1052,7 @@ AddConfigVar('profiling.ignore_first_call',
AddConfigVar('optdb.position_cutoff',
'Where to stop earlier during optimization. It represents the'
' position of the optimizer where to stop.',
FloatParam(numpy.inf),
FloatParam(np.inf),
in_c_key=False)
AddConfigVar('optdb.max_use_ratio',
......@@ -1106,11 +1106,11 @@ def default_blas_ldflags():
global numpy
warn_record = []
try:
if (hasattr(numpy.distutils, '__config__') and
numpy.distutils.__config__):
if (hasattr(np.distutils, '__config__') and
np.distutils.__config__):
# If the old private interface is available use it as it
# don't print information to the user.
blas_info = numpy.distutils.__config__.blas_opt_info
blas_info = np.distutils.__config__.blas_opt_info
else:
# We do this import only here, as in some setup, if we
# just import theano and exit, with the import at global
......@@ -1494,7 +1494,7 @@ compiledir_format_dict = {
"python_bitwidth": local_bitwidth(),
"python_int_bitwidth": python_int_bitwidth(),
"theano_version": theano.__version__,
"numpy_version": numpy.__version__,
"numpy_version": np.__version__,
"gxx_version": gcc_version_str.replace(" ", "_"),
"hostname": socket.gethostname()}
......
from __future__ import absolute_import, print_function, division
import theano
import numpy
import numpy as np
from unittest import TestCase
from theano.gof import Op, COp, Apply
from theano import Generic
......@@ -121,21 +121,21 @@ class TestParamsType(TestCase):
npy_scalar=TensorType('float64', tuple()))
wp2 = ParamsType(a=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'),
npy_scalar=TensorType('float64', tuple()))
w1 = Params(wp1, a=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
w2 = Params(wp2, a=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
w1 = Params(wp1, a=1, array=np.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=np.asarray(12))
w2 = Params(wp2, a=1, array=np.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=np.asarray(12))
assert w1 == w2
assert not (w1 != w2)
assert hash(w1) == hash(w2)
# Changing attributes names only (a -> other_name).
wp2_other = ParamsType(other_name=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'),
npy_scalar=TensorType('float64', tuple()))
w2 = Params(wp2_other, other_name=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
w2 = Params(wp2_other, other_name=1, array=np.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=np.asarray(12))
assert w1 != w2
# Changing attributes values only (now a=2).
w2 = Params(wp2, a=2, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
w2 = Params(wp2, a=2, array=np.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=np.asarray(12))
assert w1 != w2
# Changing NumPy array values (5 -> -5).
w2 = Params(wp2, a=1, array=numpy.asarray([1, 2, 4, -5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
w2 = Params(wp2, a=1, array=np.asarray([1, 2, 4, -5, 7]), floatting=-4.5, npy_scalar=np.asarray(12))
assert w1 != w2
def test_hash_and_eq_params_type(self):
......@@ -168,7 +168,7 @@ class TestParamsType(TestCase):
def test_params_type_filtering(self):
shape_tensor5 = (1, 2, 2, 3, 2)
size_tensor5 = shape_tensor5[0] * shape_tensor5[1] * shape_tensor5[2] * shape_tensor5[3] * shape_tensor5[4]
random_tensor = numpy.random.normal(size=size_tensor5).reshape(shape_tensor5)
random_tensor = np.random.normal(size=size_tensor5).reshape(shape_tensor5)
w = ParamsType(a1=TensorType('int32', (False, False)),
a2=TensorType('float64', (False, False, False, False, False)),
......@@ -176,7 +176,7 @@ class TestParamsType(TestCase):
# With a value that does not match the params type.
o = Params(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int64'),
a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int64'),
a2=random_tensor.astype('float32'),
a3=2000)
# should fail (o.a1 is not int32, o.a2 is not float64)
......@@ -188,7 +188,7 @@ class TestParamsType(TestCase):
# With a value that matches the params type.
o1 = Params(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a2=random_tensor.astype('float64'),
a3=2000)
# All should pass.
......@@ -198,7 +198,7 @@ class TestParamsType(TestCase):
# Check values_eq and values_eq_approx.
o2 = Params(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a2=random_tensor.astype('float64'),
a3=2000)
assert w.values_eq(o1, o2)
......@@ -208,7 +208,7 @@ class TestParamsType(TestCase):
# NB: I don't know exactly which kind of differences is rejected by values_eq but accepted by values_eq_approx.
# So, I just play a little with float values.
o3 = Params(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a2=(random_tensor.astype('float32') * 10 / 2.2 * 2.19999999999 / 10).astype('float64'),
a3=2000.0 - 0.00000000000000001)
assert w.values_eq_approx(o1, o3)
......@@ -250,7 +250,7 @@ class TestParamsType(TestCase):
f1 = theano.function([x], y1)
f2 = theano.function([x], y2)
shape = (100, 100)
vx = numpy.random.normal(size=shape[0] * shape[1]).astype('float64').reshape(*shape)
vx = np.random.normal(size=shape[0] * shape[1]).astype('float64').reshape(*shape)
vy1 = f1(vx)
vy2 = f2(vx)
ref = a * (vx**2) + b * vx + c
......
......@@ -7,7 +7,7 @@ http://www.iro.umontreal.ca/~simardr/ssj/indexe.html
"""
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
from theano import Apply, tensor
from theano.gof import local_optimizer
......@@ -170,7 +170,7 @@ class GPUA_mrg_uniform(GpuKernelBase, mrg_uniform_base):
o_rstate, o_sample = out
inplace = int(self.inplace)
ndim = self.output_type.ndim
o_type_num = numpy.asarray(0, dtype=self.output_type.dtype).dtype.num
o_type_num = np.asarray(0, dtype=self.output_type.dtype).dtype.num
fail = sub['fail']
ctx = sub['params']
kname = self.gpu_kernels(node, nodename)[0].objvar
......
差异被折叠。
......@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
# Definitions of theano.scalar ops that have their python implementation taken
# from SciPy. As SciPy is not always available, we treat them separately.
import numpy
import numpy as np
import theano
from theano.gradient import grad_not_implemented
......@@ -43,8 +43,8 @@ class Erf(UnaryScalarOp):
else:
return [x.zeros_like()]
cst = numpy.asarray(2. / numpy.sqrt(numpy.pi),
dtype=upcast(x.type.dtype, gz.type.dtype))
cst = np.asarray(2. / np.sqrt(np.pi),
dtype=upcast(x.type.dtype, gz.type.dtype))
return gz * cst * exp(-x * x),
def c_code(self, node, name, inp, out, sub):
......@@ -74,8 +74,8 @@ class Erfc(UnaryScalarOp):
else:
return [x.zeros_like()]
cst = numpy.asarray(2. / numpy.sqrt(numpy.pi),
dtype=upcast(x.type.dtype, gz.type.dtype))
cst = np.asarray(2. / np.sqrt(np.pi),
dtype=upcast(x.type.dtype, gz.type.dtype))
return - gz * cst * exp(-x * x),
def c_code(self, node, name, inp, out, sub):
......@@ -120,8 +120,8 @@ class Erfcx(UnaryScalarOp):
else:
return [x.zeros_like()]
cst = numpy.asarray(2. / numpy.sqrt(numpy.pi),
dtype=upcast(x.type.dtype, gz.type.dtype))
cst = np.asarray(2. / np.sqrt(np.pi),
dtype=upcast(x.type.dtype, gz.type.dtype))
return gz * (-cst + (2. * x) * erfcx(x)),
erfcx = Erfcx(upgrade_to_float_no_complex, name='erfcx')
......@@ -155,8 +155,8 @@ class Erfinv(UnaryScalarOp):
else:
return [x.zeros_like()]
cst = numpy.asarray(numpy.sqrt(numpy.pi) / 2.,
dtype=upcast(x.type.dtype, gz.type.dtype))
cst = np.asarray(np.sqrt(np.pi) / 2.,
dtype=upcast(x.type.dtype, gz.type.dtype))
return gz * cst * exp(erfinv(x) ** 2),
# TODO: erfinv() is not provided by the C standard library
......@@ -188,8 +188,8 @@ class Erfcinv(UnaryScalarOp):
else:
return [x.zeros_like()]
cst = numpy.asarray(numpy.sqrt(numpy.pi) / 2.,
dtype=upcast(x.type.dtype, gz.type.dtype))
cst = np.asarray(np.sqrt(np.pi) / 2.,
dtype=upcast(x.type.dtype, gz.type.dtype))
return - gz * cst * exp(erfcinv(x) ** 2),
# TODO: erfcinv() is not provided by the C standard library
......
......@@ -16,7 +16,7 @@ way (as scan does) to create a shared variable of this kind.
"""
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
from six import integer_types
from theano.compile import SharedVariable
......@@ -48,15 +48,15 @@ def shared(value, name=None, strict=False, allow_downcast=None):
We implement this using 0-d tensors for now.
"""
if not isinstance(value, (numpy.number, float, integer_types, complex)):
if not isinstance(value, (np.number, float, integer_types, complex)):
raise TypeError()
try:
dtype = value.dtype
except AttributeError:
dtype = numpy.asarray(value).dtype
dtype = np.asarray(value).dtype
dtype = str(dtype)
value = getattr(numpy, dtype)(value)
value = getattr(np, dtype)(value)
scalar_type = Scalar(dtype=dtype)
rval = ScalarSharedVariable(
type=scalar_type,
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论