Commit e5d09827, authored by Frédéric Bastien, committed by GitHub

Merge pull request #5853 from Amrithasuresh/master

Updated numpy as np #4218
@@ -134,7 +134,7 @@ if (config.device.startswith('cuda') or
     import theano.gpuarray
 
 # Use config.numpy to call numpy.seterr
-import numpy
+import numpy as np
 if config.numpy.seterr_all == 'None':
     _all = None
@@ -156,7 +156,7 @@ if config.numpy.seterr_invalid == 'None':
     _invalid = None
 else:
     _invalid = config.numpy.seterr_invalid
-numpy.seterr(
+np.seterr(
     all=_all,
     divide=_divide,
     over=_over,
...
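For context on the hunk above: numpy.seterr configures how NumPy reacts to floating-point errors, and Theano forwards the config.numpy.seterr_* values to it unchanged. A minimal sketch of the same call outside Theano (the modes chosen here are illustrative, not Theano's defaults):

    import numpy as np

    # Each category accepts 'ignore', 'warn', 'raise', 'call', 'print' or 'log';
    # None leaves a category unchanged, mirroring the 'None' config values above.
    old = np.seterr(all=None, divide='warn', over='warn', under='ignore', invalid='raise')
    np.seterr(**old)  # restore the previous settings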
@@ -3,7 +3,7 @@ import errno
 import os
 import sys
 import logging
-import numpy
+import numpy as np
 import platform
 import textwrap
 import re
@@ -1052,7 +1052,7 @@ AddConfigVar('profiling.ignore_first_call',
 AddConfigVar('optdb.position_cutoff',
              'Where to stop eariler during optimization. It represent the'
              ' position of the optimizer where to stop.',
-             FloatParam(numpy.inf),
+             FloatParam(np.inf),
              in_c_key=False)
 
 AddConfigVar('optdb.max_use_ratio',
@@ -1106,11 +1106,11 @@ def default_blas_ldflags():
     global numpy
     warn_record = []
     try:
-        if (hasattr(numpy.distutils, '__config__') and
-                numpy.distutils.__config__):
+        if (hasattr(np.distutils, '__config__') and
+                np.distutils.__config__):
             # If the old private interface is available use it as it
             # don't print information to the user.
-            blas_info = numpy.distutils.__config__.blas_opt_info
+            blas_info = np.distutils.__config__.blas_opt_info
         else:
             # We do this import only here, as in some setup, if we
             # just import theano and exit, with the import at global
@@ -1494,7 +1494,7 @@ compiledir_format_dict = {
     "python_bitwidth": local_bitwidth(),
     "python_int_bitwidth": python_int_bitwidth(),
     "theano_version": theano.__version__,
-    "numpy_version": numpy.__version__,
+    "numpy_version": np.__version__,
     "gxx_version": gcc_version_str.replace(" ", "_"),
     "hostname": socket.gethostname()}
...
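As background for the default_blas_ldflags() hunk: numpy.distutils historically exposed the BLAS that NumPy was built against through the private numpy.distutils.__config__ module, and Theano reads blas_opt_info from it when present. A hedged standalone probe (this interface is private and absent in recent NumPy releases, so every access is guarded):

    import numpy as np

    try:
        import numpy.distutils  # subpackage; gone on recent NumPy versions
        blas_info = numpy.distutils.__config__.blas_opt_info
        print(blas_info.get('libraries', []))     # e.g. ['openblas']
        print(blas_info.get('library_dirs', []))
    except (ImportError, AttributeError):
        print('no private BLAS build info available')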
 from __future__ import absolute_import, print_function, division
 import theano
-import numpy
+import numpy as np
 from unittest import TestCase
 from theano.gof import Op, COp, Apply
 from theano import Generic
@@ -121,21 +121,21 @@ class TestParamsType(TestCase):
                          npy_scalar=TensorType('float64', tuple()))
         wp2 = ParamsType(a=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'),
                          npy_scalar=TensorType('float64', tuple()))
-        w1 = Params(wp1, a=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
-        w2 = Params(wp2, a=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
+        w1 = Params(wp1, a=1, array=np.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=np.asarray(12))
+        w2 = Params(wp2, a=1, array=np.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=np.asarray(12))
         assert w1 == w2
         assert not (w1 != w2)
         assert hash(w1) == hash(w2)
         # Changing attributes names only (a -> other_name).
         wp2_other = ParamsType(other_name=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'),
                                npy_scalar=TensorType('float64', tuple()))
-        w2 = Params(wp2_other, other_name=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
+        w2 = Params(wp2_other, other_name=1, array=np.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=np.asarray(12))
         assert w1 != w2
         # Changing attributes values only (now a=2).
-        w2 = Params(wp2, a=2, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
+        w2 = Params(wp2, a=2, array=np.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=np.asarray(12))
         assert w1 != w2
         # Changing NumPy array values (5 -> -5).
-        w2 = Params(wp2, a=1, array=numpy.asarray([1, 2, 4, -5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
+        w2 = Params(wp2, a=1, array=np.asarray([1, 2, 4, -5, 7]), floatting=-4.5, npy_scalar=np.asarray(12))
         assert w1 != w2
 
     def test_hash_and_eq_params_type(self):
@@ -168,7 +168,7 @@ class TestParamsType(TestCase):
     def test_params_type_filtering(self):
         shape_tensor5 = (1, 2, 2, 3, 2)
         size_tensor5 = shape_tensor5[0] * shape_tensor5[1] * shape_tensor5[2] * shape_tensor5[3] * shape_tensor5[4]
-        random_tensor = numpy.random.normal(size=size_tensor5).reshape(shape_tensor5)
+        random_tensor = np.random.normal(size=size_tensor5).reshape(shape_tensor5)
         w = ParamsType(a1=TensorType('int32', (False, False)),
                        a2=TensorType('float64', (False, False, False, False, False)),
@@ -176,7 +176,7 @@ class TestParamsType(TestCase):
         # With a value that does not match the params type.
         o = Params(w,
-                   a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int64'),
+                   a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int64'),
                    a2=random_tensor.astype('float32'),
                    a3=2000)
         # should fail (o.a1 is not int32, o.a2 is not float64)
@@ -188,7 +188,7 @@ class TestParamsType(TestCase):
         # With a value that matches the params type.
         o1 = Params(w,
-                    a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
+                    a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
                     a2=random_tensor.astype('float64'),
                     a3=2000)
         # All should pass.
@@ -198,7 +198,7 @@ class TestParamsType(TestCase):
         # Check values_eq and values_eq_approx.
         o2 = Params(w,
-                    a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
+                    a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
                     a2=random_tensor.astype('float64'),
                     a3=2000)
         assert w.values_eq(o1, o2)
@@ -208,7 +208,7 @@ class TestParamsType(TestCase):
         # NB: I don't know exactly which kind of differences is rejected by values_eq but accepted by values_eq_approx.
         # So, I just play a little with float values.
         o3 = Params(w,
-                    a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
+                    a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
                     a2=(random_tensor.astype('float32') * 10 / 2.2 * 2.19999999999 / 10).astype('float64'),
                     a3=2000.0 - 0.00000000000000001)
         assert w.values_eq_approx(o1, o3)
@@ -250,7 +250,7 @@ class TestParamsType(TestCase):
         f1 = theano.function([x], y1)
         f2 = theano.function([x], y2)
         shape = (100, 100)
-        vx = numpy.random.normal(size=shape[0] * shape[1]).astype('float64').reshape(*shape)
+        vx = np.random.normal(size=shape[0] * shape[1]).astype('float64').reshape(*shape)
         vy1 = f1(vx)
         vy2 = f2(vx)
         ref = a * (vx**2) + b * vx + c
...
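The o3 construction above leans on floating-point round-off: multiplying by 10 / 2.2 * 2.19999999999 / 10 (or round-tripping through float32) perturbs the values enough to break exact equality while keeping them numerically close. A standalone illustration of the effect, with np.allclose standing in for Theano's values_eq_approx:

    import numpy as np

    x = np.random.normal(size=5)
    y = x * 10 / 2.2 * 2.19999999999 / 10  # tiny relative perturbation

    print(np.array_equal(x, y))  # False: bitwise equality is lost
    print(np.allclose(x, y))     # True: values still match approximately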
@@ -7,7 +7,7 @@ http://www.iro.umontreal.ca/~simardr/ssj/indexe.html
 """
 from __future__ import absolute_import, print_function, division
 
-import numpy
+import numpy as np
 from theano import Apply, tensor
 from theano.gof import local_optimizer
@@ -170,7 +170,7 @@ class GPUA_mrg_uniform(GpuKernelBase, mrg_uniform_base):
         o_rstate, o_sample = out
         inplace = int(self.inplace)
         ndim = self.output_type.ndim
-        o_type_num = numpy.asarray(0, dtype=self.output_type.dtype).dtype.num
+        o_type_num = np.asarray(0, dtype=self.output_type.dtype).dtype.num
         fail = sub['fail']
         ctx = sub['params']
         kname = self.gpu_kernels(node, nodename)[0].objvar
...
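The o_type_num generated above is NumPy's internal integer identifier for the output dtype, obtained by building a 0-d array of that dtype; the generated C code uses it to tell the GPU kernel which element type it is writing. The mapping is easy to inspect directly:

    import numpy as np

    for dtype in ('float32', 'float64', 'int64'):
        # dtype.num is the unique type number NumPy assigns to each dtype
        print(dtype, np.asarray(0, dtype=dtype).dtype.num)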
This diff is collapsed.
@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
 
 # Definitions of theano.scalar ops that have their python implementation taken
 # from SciPy. As SciPy is not always available, we treat them separately.
-import numpy
+import numpy as np
 
 import theano
 from theano.gradient import grad_not_implemented
@@ -43,8 +43,8 @@ class Erf(UnaryScalarOp):
         else:
             return [x.zeros_like()]
 
-        cst = numpy.asarray(2. / numpy.sqrt(numpy.pi),
-                            dtype=upcast(x.type.dtype, gz.type.dtype))
+        cst = np.asarray(2. / np.sqrt(np.pi),
+                         dtype=upcast(x.type.dtype, gz.type.dtype))
         return gz * cst * exp(-x * x),
 
     def c_code(self, node, name, inp, out, sub):
@@ -74,8 +74,8 @@ class Erfc(UnaryScalarOp):
         else:
             return [x.zeros_like()]
 
-        cst = numpy.asarray(2. / numpy.sqrt(numpy.pi),
-                            dtype=upcast(x.type.dtype, gz.type.dtype))
+        cst = np.asarray(2. / np.sqrt(np.pi),
+                         dtype=upcast(x.type.dtype, gz.type.dtype))
         return - gz * cst * exp(-x * x),
 
     def c_code(self, node, name, inp, out, sub):
@@ -120,8 +120,8 @@ class Erfcx(UnaryScalarOp):
         else:
             return [x.zeros_like()]
 
-        cst = numpy.asarray(2. / numpy.sqrt(numpy.pi),
-                            dtype=upcast(x.type.dtype, gz.type.dtype))
+        cst = np.asarray(2. / np.sqrt(np.pi),
+                         dtype=upcast(x.type.dtype, gz.type.dtype))
         return gz * (-cst + (2. * x) * erfcx(x)),
 
 erfcx = Erfcx(upgrade_to_float_no_complex, name='erfcx')
@@ -155,8 +155,8 @@ class Erfinv(UnaryScalarOp):
         else:
             return [x.zeros_like()]
 
-        cst = numpy.asarray(numpy.sqrt(numpy.pi) / 2.,
-                            dtype=upcast(x.type.dtype, gz.type.dtype))
+        cst = np.asarray(np.sqrt(np.pi) / 2.,
+                         dtype=upcast(x.type.dtype, gz.type.dtype))
         return gz * cst * exp(erfinv(x) ** 2),
 
 # TODO: erfinv() is not provided by the C standard library
@@ -188,8 +188,8 @@ class Erfcinv(UnaryScalarOp):
         else:
             return [x.zeros_like()]
 
-        cst = numpy.asarray(numpy.sqrt(numpy.pi) / 2.,
-                            dtype=upcast(x.type.dtype, gz.type.dtype))
+        cst = np.asarray(np.sqrt(np.pi) / 2.,
+                         dtype=upcast(x.type.dtype, gz.type.dtype))
         return - gz * cst * exp(erfcinv(x) ** 2),
 
 # TODO: erfcinv() is not provided by the C standard library
...
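Each gradient above hard-codes the analytic derivative constant: d/dx erf(x) = 2/sqrt(pi) * exp(-x**2), erfc negates it, and erfinv/erfcinv use sqrt(pi)/2 * exp(f(x)**2) via the inverse-function rule. A quick finite-difference sanity check of the first identity (it needs SciPy, as the module itself notes):

    import numpy as np
    from scipy.special import erf

    x, h = 0.7, 1e-6
    numeric = (erf(x + h) - erf(x - h)) / (2 * h)    # central difference
    analytic = 2. / np.sqrt(np.pi) * np.exp(-x * x)  # the 'cst' above times exp(-x*x)
    print(abs(numeric - analytic))                   # tiny, i.e. the two agree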
@@ -16,7 +16,7 @@ way (as scan does) to create a shared variable of this kind.
 """
 from __future__ import absolute_import, print_function, division
 
-import numpy
+import numpy as np
 from six import integer_types
 
 from theano.compile import SharedVariable
@@ -48,15 +48,15 @@ def shared(value, name=None, strict=False, allow_downcast=None):
     We implement this using 0-d tensors for now.
     """
-    if not isinstance(value, (numpy.number, float, integer_types, complex)):
+    if not isinstance(value, (np.number, float, integer_types, complex)):
         raise TypeError()
     try:
         dtype = value.dtype
     except AttributeError:
-        dtype = numpy.asarray(value).dtype
+        dtype = np.asarray(value).dtype
     dtype = str(dtype)
-    value = getattr(numpy, dtype)(value)
+    value = getattr(np, dtype)(value)
     scalar_type = Scalar(dtype=dtype)
     rval = ScalarSharedVariable(
         type=scalar_type,
...
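The shared() constructor above normalizes any accepted scalar to a NumPy scalar with an explicit dtype: it reads value.dtype when present, infers it with np.asarray otherwise, then calls getattr(np, dtype), exploiting the fact that each dtype name is also a scalar constructor on the numpy module. A standalone sketch of the same coercion:

    import numpy as np

    for value in (3, 3.5, np.float32(2.0)):
        try:
            dtype = str(value.dtype)              # NumPy scalars carry a dtype
        except AttributeError:
            dtype = str(np.asarray(value).dtype)  # infer it for plain Python numbers
        coerced = getattr(np, dtype)(value)       # e.g. np.float64(3.5)
        print(type(coerced).__name__, coerced)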