提交 28df78fb authored 作者: amrithasuresh's avatar amrithasuresh

Updated the `numpy` import to use the standard `np` alias (`import numpy as np`) and replaced all `numpy.` references accordingly.

上级 85019057
...@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division ...@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
import logging import logging
import warnings import warnings
import numpy import numpy as np
import theano import theano
from theano import config from theano import config
...@@ -50,7 +50,7 @@ class TensorType(Type): ...@@ -50,7 +50,7 @@ class TensorType(Type):
self.broadcastable = tuple(bool(b) for b in broadcastable) self.broadcastable = tuple(bool(b) for b in broadcastable)
self.dtype_specs() # error checking is done there self.dtype_specs() # error checking is done there
self.name = name self.name = name
self.numpy_dtype = numpy.dtype(self.dtype) self.numpy_dtype = np.dtype(self.dtype)
self.sparse_grad = sparse_grad self.sparse_grad = sparse_grad
if sparse_grad: if sparse_grad:
warnings.warn( warnings.warn(
...@@ -88,12 +88,12 @@ class TensorType(Type): ...@@ -88,12 +88,12 @@ class TensorType(Type):
'maybe you are trying to call a function on a (possibly ' 'maybe you are trying to call a function on a (possibly '
'shared) variable instead of a numeric array?') 'shared) variable instead of a numeric array?')
if ((type(data) is numpy.ndarray) and if ((type(data) is np.ndarray) and
(data.dtype == self.numpy_dtype)): (data.dtype == self.numpy_dtype)):
if data.dtype.num != self.numpy_dtype.num: if data.dtype.num != self.numpy_dtype.num:
data = theano._asarray(data, dtype=self.dtype) data = theano._asarray(data, dtype=self.dtype)
# -- now fall through to ndim check # -- now fall through to ndim check
elif ((type(data) is numpy.memmap) and elif ((type(data) is np.memmap) and
(data.dtype == self.numpy_dtype)): (data.dtype == self.numpy_dtype)):
# numpy.memmap is a "safe" subclass of ndarray, # numpy.memmap is a "safe" subclass of ndarray,
# so we can use it wherever we expect a base ndarray. # so we can use it wherever we expect a base ndarray.
...@@ -103,7 +103,7 @@ class TensorType(Type): ...@@ -103,7 +103,7 @@ class TensorType(Type):
elif strict: elif strict:
# If any of the two conditions above was not met, # If any of the two conditions above was not met,
# we raise a meaningful TypeError. # we raise a meaningful TypeError.
if not (type(data) is numpy.ndarray): if not (type(data) is np.ndarray):
raise TypeError("%s expected a ndarray object." % self, raise TypeError("%s expected a ndarray object." % self,
data, type(data)) data, type(data))
if data.dtype != self.numpy_dtype: if data.dtype != self.numpy_dtype:
...@@ -118,7 +118,7 @@ class TensorType(Type): ...@@ -118,7 +118,7 @@ class TensorType(Type):
# TODO: consider to pad shape with ones to make it consistent # TODO: consider to pad shape with ones to make it consistent
# with self.broadcastable... like vector->row type thing # with self.broadcastable... like vector->row type thing
else: else:
if isinstance(data, numpy.ndarray): if isinstance(data, np.ndarray):
# Check if self.dtype can accurately represent data # Check if self.dtype can accurately represent data
# (do not try to convert the data) # (do not try to convert the data)
up_dtype = scal.upcast(self.dtype, data.dtype) up_dtype = scal.upcast(self.dtype, data.dtype)
...@@ -150,7 +150,7 @@ class TensorType(Type): ...@@ -150,7 +150,7 @@ class TensorType(Type):
converted_data = theano._asarray(data, self.dtype) converted_data = theano._asarray(data, self.dtype)
# We use the `values_eq` static function from TensorType # We use the `values_eq` static function from TensorType
# to handle NaN values. # to handle NaN values.
if TensorType.values_eq(numpy.asarray(data), if TensorType.values_eq(np.asarray(data),
converted_data, converted_data,
force_same_dtype=False): force_same_dtype=False):
data = converted_data data = converted_data
...@@ -195,7 +195,7 @@ class TensorType(Type): ...@@ -195,7 +195,7 @@ class TensorType(Type):
" dimension.", data.shape, self.broadcastable) " dimension.", data.shape, self.broadcastable)
i += 1 i += 1
if (self.filter_checks_isfinite and if (self.filter_checks_isfinite and
not numpy.all(numpy.isfinite(data))): not np.all(np.isfinite(data))):
raise ValueError("non-finite elements not allowed") raise ValueError("non-finite elements not allowed")
return data return data
...@@ -294,8 +294,8 @@ class TensorType(Type): ...@@ -294,8 +294,8 @@ class TensorType(Type):
@staticmethod @staticmethod
def may_share_memory(a, b): def may_share_memory(a, b):
# This is a method of TensorType, so both a and b should be ndarrays # This is a method of TensorType, so both a and b should be ndarrays
if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray): if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
return numpy.may_share_memory(a, b) return np.may_share_memory(a, b)
else: else:
return False return False
...@@ -308,14 +308,14 @@ class TensorType(Type): ...@@ -308,14 +308,14 @@ class TensorType(Type):
if force_same_dtype and a.dtype != b.dtype: if force_same_dtype and a.dtype != b.dtype:
return False return False
a_eq_b = (a == b) a_eq_b = (a == b)
r = numpy.all(a_eq_b) r = np.all(a_eq_b)
if r: if r:
return True return True
# maybe the trouble is that there are NaNs # maybe the trouble is that there are NaNs
a_missing = numpy.isnan(a) a_missing = np.isnan(a)
if a_missing.any(): if a_missing.any():
b_missing = numpy.isnan(b) b_missing = np.isnan(b)
return numpy.all(a_eq_b + (a_missing == b_missing)) return np.all(a_eq_b + (a_missing == b_missing))
else: else:
return False return False
...@@ -553,7 +553,7 @@ class TensorType(Type): ...@@ -553,7 +553,7 @@ class TensorType(Type):
Create a numpy ndarray full of 0 values. Create a numpy ndarray full of 0 values.
""" """
return numpy.zeros(shape, dtype=self.dtype) return np.zeros(shape, dtype=self.dtype)
def get_shape_info(self, obj): def get_shape_info(self, obj):
""" """
...@@ -601,9 +601,9 @@ class TensorType(Type): ...@@ -601,9 +601,9 @@ class TensorType(Type):
""" """
if shape_info: if shape_info:
return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize return np.prod(shape_info) * np.dtype(self.dtype).itemsize
else: # a scalar else: # a scalar
return numpy.dtype(self.dtype).itemsize return np.dtype(self.dtype).itemsize
theano.compile.ops.expandable_types += (TensorType,) theano.compile.ops.expandable_types += (TensorType,)
...@@ -624,13 +624,13 @@ def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False, ...@@ -624,13 +624,13 @@ def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False,
Absolute tolerance, passed to _allclose. Absolute tolerance, passed to _allclose.
""" """
if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray): if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
if a.shape != b.shape: if a.shape != b.shape:
return False return False
if a.dtype != b.dtype: if a.dtype != b.dtype:
return False return False
if str(a.dtype) not in theano.tensor.continuous_dtypes: if str(a.dtype) not in theano.tensor.continuous_dtypes:
return numpy.all(a == b) return np.all(a == b)
else: else:
cmp = theano.tensor.basic._allclose(a, b, rtol=rtol, atol=atol) cmp = theano.tensor.basic._allclose(a, b, rtol=rtol, atol=atol)
if cmp: if cmp:
...@@ -644,38 +644,38 @@ def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False, ...@@ -644,38 +644,38 @@ def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False,
# core recently, so it may not be available to everyone. Thus, # core recently, so it may not be available to everyone. Thus,
# for now we use a home-made recipe, that should probably be # for now we use a home-made recipe, that should probably be
# revisited in the future. # revisited in the future.
a_missing = numpy.isnan(a) a_missing = np.isnan(a)
a_inf = numpy.isinf(a) a_inf = np.isinf(a)
if not (a_missing.any() or (allow_remove_inf and a_inf.any())): if not (a_missing.any() or (allow_remove_inf and a_inf.any())):
# There are no missing values in a, thus this is not the # There are no missing values in a, thus this is not the
# reason why numpy.allclose(a, b) returned False. # reason why numpy.allclose(a, b) returned False.
_logger.info( _logger.info(
'numpy allclose failed for abs_err %f and rel_err %f', 'numpy allclose failed for abs_err %f and rel_err %f',
numpy.max(abs(a - b)), np.max(abs(a - b)),
numpy.max(abs(a - b) / (abs(a) + abs(b)))) np.max(abs(a - b) / (abs(a) + abs(b))))
return False return False
# The following line is what numpy.allclose bases its decision # The following line is what numpy.allclose bases its decision
# upon, according to its documentation. # upon, according to its documentation.
rtol = 1.0000000000000001e-05 rtol = 1.0000000000000001e-05
atol = 1e-8 atol = 1e-8
cmp_elemwise = (numpy.absolute(a - b) <= cmp_elemwise = (np.absolute(a - b) <=
(atol + rtol * numpy.absolute(b))) (atol + rtol * np.absolute(b)))
# Find places where both a and b have missing values. # Find places where both a and b have missing values.
both_missing = a_missing * numpy.isnan(b) both_missing = a_missing * np.isnan(b)
# Find places where both a and b have inf of the same sign. # Find places where both a and b have inf of the same sign.
both_inf = a_inf * numpy.isinf(b) both_inf = a_inf * np.isinf(b)
# cmp_elemwise is weird when we have inf and -inf. # cmp_elemwise is weird when we have inf and -inf.
# set it to False # set it to False
cmp_elemwise = numpy.where( cmp_elemwise = np.where(
both_inf & cmp_elemwise, both_inf & cmp_elemwise,
a == b, a == b,
cmp_elemwise) cmp_elemwise)
# check the sign of the inf # check the sign of the inf
both_inf = numpy.where(both_inf, (a == b), both_inf) both_inf = np.where(both_inf, (a == b), both_inf)
if allow_remove_inf: if allow_remove_inf:
both_inf += a_inf both_inf += a_inf
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论