提交 1d3c5b3a authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #5824 from Amrithasuresh/master

Updated numpy as np #4218
...@@ -6,7 +6,7 @@ import logging ...@@ -6,7 +6,7 @@ import logging
import time import time
import warnings import warnings
import numpy # for numeric_grad import numpy as np # for numeric_grad
from six import itervalues from six import itervalues
import theano import theano
...@@ -19,7 +19,6 @@ from theano.gof.null_type import NullType, null_type ...@@ -19,7 +19,6 @@ from theano.gof.null_type import NullType, null_type
from theano.gof.op import get_debug_values from theano.gof.op import get_debug_values
from theano.compile import ViewOp, FAST_RUN, DebugMode from theano.compile import ViewOp, FAST_RUN, DebugMode
np = numpy
__authors__ = "James Bergstra, Razvan Pascanu, Arnaud Bergeron, Ian Goodfellow" __authors__ = "James Bergstra, Razvan Pascanu, Arnaud Bergeron, Ian Goodfellow"
__copyright__ = "(c) 2011, Universite de Montreal" __copyright__ = "(c) 2011, Universite de Montreal"
__license__ = "3-clause BSD License" __license__ = "3-clause BSD License"
...@@ -1374,9 +1373,9 @@ class numeric_grad(object): ...@@ -1374,9 +1373,9 @@ class numeric_grad(object):
type_eps = {'float64': 1e-7, type_eps = {'float64': 1e-7,
'float32': 3e-4, 'float32': 3e-4,
'float16': 1e-1, 'float16': 1e-1,
numpy.dtype('float64'): 1e-7, np.dtype('float64'): 1e-7,
numpy.dtype('float32'): 3e-4, np.dtype('float32'): 3e-4,
numpy.dtype('float16'): 1e-1} np.dtype('float16'): 1e-1}
def __init__(self, f, pt, eps=None, out_type=None): def __init__(self, f, pt, eps=None, out_type=None):
"""Return the gradient of f at pt. """Return the gradient of f at pt.
...@@ -1406,7 +1405,7 @@ class numeric_grad(object): ...@@ -1406,7 +1405,7 @@ class numeric_grad(object):
pt = [pt] pt = [pt]
packed_pt = True packed_pt = True
apt = [numpy.array(p) for p in pt] apt = [np.array(p) for p in pt]
shapes = [p.shape for p in apt] shapes = [p.shape for p in apt]
dtypes = [str(p.dtype) for p in apt] dtypes = [str(p.dtype) for p in apt]
...@@ -1423,12 +1422,12 @@ class numeric_grad(object): ...@@ -1423,12 +1422,12 @@ class numeric_grad(object):
(self.type_eps[dt], dt) for dt in dtypes)[1] (self.type_eps[dt], dt) for dt in dtypes)[1]
# create un-initialized memory # create un-initialized memory
x = numpy.ndarray((total_size,), dtype=working_dtype) x = np.ndarray((total_size,), dtype=working_dtype)
# (not out_type is None) --> (out_type is not None) ??? # (not out_type is None) --> (out_type is not None) ???
if (out_type is not None) and (out_type.startswith('complex')): if (out_type is not None) and (out_type.startswith('complex')):
gx = numpy.ndarray((total_size,), dtype=out_type) gx = np.ndarray((total_size,), dtype=out_type)
else: else:
gx = numpy.ndarray((total_size,), dtype=working_dtype) gx = np.ndarray((total_size,), dtype=working_dtype)
if eps is None: if eps is None:
eps = builtins.max(self.type_eps[dt] for dt in dtypes) eps = builtins.max(self.type_eps[dt] for dt in dtypes)
...@@ -1483,13 +1482,13 @@ class numeric_grad(object): ...@@ -1483,13 +1482,13 @@ class numeric_grad(object):
The tuple (abs_err, rel_err) is returned The tuple (abs_err, rel_err) is returned
""" """
abs_err = abs(a - b) abs_err = abs(a - b)
rel_err = abs_err / numpy.maximum(abs(a) + abs(b), 1e-8) rel_err = abs_err / np.maximum(abs(a) + abs(b), 1e-8)
# The numpy.asarray are needed as if a or b is a sparse matrix # The numpy.asarray are needed as if a or b is a sparse matrix
# this would result in a numpy.matrix and not a numpy.ndarray # this would result in a numpy.matrix and not a numpy.ndarray
# and the behave differently causing problem later. # and the behave differently causing problem later.
# In particular a_npy_matrix.flatten().shape == (1, n_element) # In particular a_npy_matrix.flatten().shape == (1, n_element)
abs_err = numpy.asarray(abs_err) abs_err = np.asarray(abs_err)
rel_err = numpy.asarray(rel_err) rel_err = np.asarray(rel_err)
return (abs_err, rel_err) return (abs_err, rel_err)
def abs_rel_errors(self, g_pt): def abs_rel_errors(self, g_pt):
...@@ -1530,11 +1529,11 @@ class numeric_grad(object): ...@@ -1530,11 +1529,11 @@ class numeric_grad(object):
abs_rel_errs = self.abs_rel_errors(g_pt) abs_rel_errs = self.abs_rel_errors(g_pt)
for abs_err, rel_err in abs_rel_errs: for abs_err, rel_err in abs_rel_errs:
if not numpy.all(numpy.isfinite(abs_err)): if not np.all(np.isfinite(abs_err)):
raise ValueError('abs_err not finite', repr(abs_err)) raise ValueError('abs_err not finite', repr(abs_err))
if not numpy.all(numpy.isfinite(rel_err)): if not np.all(np.isfinite(rel_err)):
raise ValueError('rel_err not finite', repr(rel_err)) raise ValueError('rel_err not finite', repr(rel_err))
scaled_err = numpy.minimum(abs_err / abs_tol, rel_err / rel_tol) scaled_err = np.minimum(abs_err / abs_tol, rel_err / rel_tol)
max_i = scaled_err.argmax() max_i = scaled_err.argmax()
pos.append(max_i) pos.append(max_i)
...@@ -1543,7 +1542,7 @@ class numeric_grad(object): ...@@ -1543,7 +1542,7 @@ class numeric_grad(object):
rel_errs.append(rel_err.flatten()[max_i]) rel_errs.append(rel_err.flatten()[max_i])
# max over the arrays in g_pt # max over the arrays in g_pt
max_arg = numpy.argmax(errs) max_arg = np.argmax(errs)
max_pos = pos[max_arg] max_pos = pos[max_arg]
return (max_arg, max_pos, abs_errs[max_arg], rel_errs[max_arg]) return (max_arg, max_pos, abs_errs[max_arg], rel_errs[max_arg])
...@@ -1564,8 +1563,8 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None, ...@@ -1564,8 +1563,8 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None,
Example: Example:
>>> verify_grad(theano.tensor.tanh, >>> verify_grad(theano.tensor.tanh,
... (numpy.asarray([[2,3,4], [-1, 3.3, 9.9]]),), ... (np.asarray([[2,3,4], [-1, 3.3, 9.9]]),),
... rng=numpy.random) ... rng=np.random)
Raises an Exception if the difference between the analytic gradient and Raises an Exception if the difference between the analytic gradient and
numerical gradient (computed through the Finite Difference Method) of a numerical gradient (computed through the Finite Difference Method) of a
...@@ -1609,7 +1608,7 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None, ...@@ -1609,7 +1608,7 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None,
import theano.tensor import theano.tensor
from theano.tensor import as_tensor_variable, TensorType from theano.tensor import as_tensor_variable, TensorType
assert isinstance(pt, (list, tuple)) assert isinstance(pt, (list, tuple))
pt = [numpy.array(p) for p in pt] pt = [np.array(p) for p in pt]
for i, p in enumerate(pt): for i, p in enumerate(pt):
if p.dtype not in ('float16', 'float32', 'float64'): if p.dtype not in ('float16', 'float32', 'float64'):
...@@ -1672,7 +1671,7 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None, ...@@ -1672,7 +1671,7 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None,
def random_projection(): def random_projection():
plain = rng.rand(*o_fn_out.shape) + 0.5 plain = rng.rand(*o_fn_out.shape) + 0.5
if cast_to_output_type and o_output.dtype == "float32": if cast_to_output_type and o_output.dtype == "float32":
return numpy.array(plain, o_output.dtype) return np.array(plain, o_output.dtype)
return plain return plain
t_r = shared(random_projection()) t_r = shared(random_projection())
......
...@@ -15,7 +15,7 @@ from copy import deepcopy ...@@ -15,7 +15,7 @@ from copy import deepcopy
from theano.compat import izip from theano.compat import izip
import logging import logging
import numpy import numpy as np
import theano.tensor import theano.tensor
from theano.tensor import TensorType from theano.tensor import TensorType
...@@ -259,7 +259,7 @@ class IfElse(Op): ...@@ -259,7 +259,7 @@ class IfElse(Op):
if self.as_view: if self.as_view:
storage_map[out][0] = val storage_map[out][0] = val
# Work around broken numpy deepcopy # Work around broken numpy deepcopy
elif type(val) in (numpy.ndarray, numpy.memmap): elif type(val) in (np.ndarray, np.memmap):
storage_map[out][0] = val.copy() storage_map[out][0] = val.copy()
else: else:
storage_map[out][0] = deepcopy(val) storage_map[out][0] = deepcopy(val)
...@@ -276,7 +276,7 @@ class IfElse(Op): ...@@ -276,7 +276,7 @@ class IfElse(Op):
# improves # improves
# Work around broken numpy deepcopy # Work around broken numpy deepcopy
val = storage_map[f][0] val = storage_map[f][0]
if type(val) in (numpy.ndarray, numpy.memmap): if type(val) in (np.ndarray, np.memmap):
storage_map[out][0] = val.copy() storage_map[out][0] = val.copy()
else: else:
storage_map[out][0] = deepcopy(val) storage_map[out][0] = deepcopy(val)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论