提交 ccf6deb0 authored 作者: ballasn's avatar ballasn 提交者: GitHub

Merge pull request #5452 from bscellier/import_numpy

Update "import numpy" to "import numpy as np" (theano/compile directory)
......@@ -59,11 +59,11 @@ class OpFromGraph(gof.Op):
.. code-block:: python
import numpy
import numpy as np
import theano
from theano import config, function, OpFromGraph, tensor
x, y, z = tensor.scalars('xyz')
s = theano.shared(numpy.random.rand(2, 2).astype(config.floatX))
s = theano.shared(np.random.rand(2, 2).astype(config.floatX))
e = x + y * z + s
op = OpFromGraph([x, y, z], [e])
# op behaves like a normal theano op
......
......@@ -14,7 +14,7 @@ import six.moves.copyreg as copyreg
from itertools import chain, product as itertools_product
from theano.compat import izip
import numpy
import numpy as np
import theano
from theano import gof, config
......@@ -270,15 +270,15 @@ class BadOptimization(DebugModeError):
print(" New Value: ", str(self.new_r_val), file=sio)
try:
ov = numpy.asarray(self.old_r_val)
nv = numpy.asarray(self.new_r_val)
ov = np.asarray(self.old_r_val)
nv = np.asarray(self.new_r_val)
ssio = StringIO()
abs_diff = numpy.absolute(nv - ov)
print(" Max Abs Diff: ", numpy.max(abs_diff), file=ssio)
print(" Mean Abs Diff: ", numpy.mean(abs_diff), file=ssio)
print(" Median Abs Diff: ", numpy.median(abs_diff), file=ssio)
print(" Std Abs Diff: ", numpy.std(abs_diff), file=ssio)
arg_max_val = numpy.argmax(abs_diff)
abs_diff = np.absolute(nv - ov)
print(" Max Abs Diff: ", np.max(abs_diff), file=ssio)
print(" Mean Abs Diff: ", np.mean(abs_diff), file=ssio)
print(" Median Abs Diff: ", np.median(abs_diff), file=ssio)
print(" Std Abs Diff: ", np.std(abs_diff), file=ssio)
arg_max_val = np.argmax(abs_diff)
values_at_max = (nv.flatten()[arg_max_val],
ov.flatten()[arg_max_val])
print(" Value at Max Diff: ", values_at_max, file=ssio)
......@@ -286,13 +286,13 @@ class BadOptimization(DebugModeError):
# N.B. the maximum(..., 1e-8) protects against div by 0 when
# nv == ov == 0
reldiff = (abs_diff /
numpy.maximum(numpy.absolute(nv) + numpy.absolute(ov),
1e-8))
print(" Max Rel Diff: ", numpy.max(reldiff), file=ssio)
print(" Mean Rel Diff: ", numpy.mean(reldiff), file=ssio)
print(" Median Rel Diff: ", numpy.median(reldiff), file=ssio)
print(" Std Rel Diff: ", numpy.std(reldiff), file=ssio)
arg_max_val = numpy.argmax(reldiff)
np.maximum(np.absolute(nv) + np.absolute(ov),
1e-8))
print(" Max Rel Diff: ", np.max(reldiff), file=ssio)
print(" Mean Rel Diff: ", np.mean(reldiff), file=ssio)
print(" Median Rel Diff: ", np.median(reldiff), file=ssio)
print(" Std Rel Diff: ", np.std(reldiff), file=ssio)
arg_max_val = np.argmax(reldiff)
values_at_max = (nv.flatten()[arg_max_val],
ov.flatten()[arg_max_val])
print(" Value at Max Diff: ", values_at_max, file=ssio)
......@@ -342,8 +342,8 @@ class BadDestroyMap(DebugModeError):
print(" repr (old val):", repr(self.old_val), file=sio)
print(" repr (new val):", repr(self.new_val), file=sio)
try:
npy_old_val = numpy.asarray(self.old_val)
npy_new_val = numpy.asarray(self.new_val)
npy_old_val = np.asarray(self.old_val)
npy_new_val = np.asarray(self.new_val)
print(" value dtype (new <space> old):", npy_new_val.dtype,
npy_old_val.dtype, file=sio)
print(" value shape (new <space> old):", npy_new_val.shape,
......@@ -356,13 +356,13 @@ class BadDestroyMap(DebugModeError):
print(" value min (new-old):", delta.min(), file=sio)
print(" value max (new-old):", delta.max(), file=sio)
print(" value argmin (new-old):",
numpy.unravel_index(delta.argmin(), npy_new_val.shape),
np.unravel_index(delta.argmin(), npy_new_val.shape),
file=sio)
print(" value argmax (new-old):",
numpy.unravel_index(delta.argmax(), npy_new_val.shape),
np.unravel_index(delta.argmax(), npy_new_val.shape),
file=sio)
print(" location of first 10 mismatches:",
numpy.transpose(numpy.nonzero(delta))[:10], file=sio)
np.transpose(np.nonzero(delta))[:10], file=sio)
print("", file=sio)
except Exception as e:
print("(Numpy-hints failed with: %s)" % str(e), file=sio)
......@@ -453,7 +453,7 @@ class InvalidValueError(DebugModeError):
v_dtype = v.dtype
v_min = v.min()
v_max = v.max()
v_isfinite = numpy.all(numpy.isfinite(v))
v_isfinite = np.all(np.isfinite(v))
except Exception:
pass
client_node = self.client_node
......@@ -1025,7 +1025,7 @@ def _lessbroken_deepcopy(a):
# this exists because copy.deepcopy on numpy arrays is broken
# This logic is also in link.py
from theano.gof.type import _cdata_type
if type(a) in (numpy.ndarray, numpy.memmap):
if type(a) in (np.ndarray, np.memmap):
rval = a.copy()
elif type(a) is _cdata_type:
# This is not copyable (and should be used for constant data).
......@@ -1034,7 +1034,7 @@ def _lessbroken_deepcopy(a):
rval = copy.deepcopy(a)
assert type(rval) == type(a), (type(rval), type(a))
if isinstance(rval, numpy.ndarray):
if isinstance(rval, np.ndarray):
assert rval.dtype == a.dtype
return rval
......@@ -1241,7 +1241,7 @@ def _get_preallocated_maps(node, thunk, prealloc_modes, def_val,
# There is no risk to overwrite inputs, since r does not work
# inplace.
if isinstance(r.type, (TensorType, CudaNdarrayType)):
reuse_outputs[r][...] = numpy.asarray(
reuse_outputs[r][...] = np.asarray(
def_val).astype(r.type.dtype)
if reuse_outputs:
......@@ -1259,7 +1259,7 @@ def _get_preallocated_maps(node, thunk, prealloc_modes, def_val,
new_buf = r.type.value_zeros(r_vals[r].shape)
# CudaNdarray don't have flags field
# assert new_buf.flags["C_CONTIGUOUS"]
new_buf[...] = numpy.asarray(def_val).astype(r.type.dtype)
new_buf[...] = np.asarray(def_val).astype(r.type.dtype)
c_cont_outputs[r] = new_buf
......@@ -1273,7 +1273,7 @@ def _get_preallocated_maps(node, thunk, prealloc_modes, def_val,
f_cont_outputs = {}
for r in considered_outputs:
if isinstance(r.type, (TensorType, CudaNdarrayType)):
new_buf = numpy.zeros(
new_buf = np.zeros(
shape=r_vals[r].shape,
dtype=r_vals[r].dtype,
order='F')
......@@ -1331,7 +1331,7 @@ def _get_preallocated_maps(node, thunk, prealloc_modes, def_val,
else:
buf_shape.append(s * 2)
new_buf = r.type.value_zeros(buf_shape)
new_buf[...] = numpy.asarray(def_val).astype(r.type.dtype)
new_buf[...] = np.asarray(def_val).astype(r.type.dtype)
init_strided[r] = new_buf
# The number of combinations is exponential in the number of
......@@ -1377,7 +1377,7 @@ def _get_preallocated_maps(node, thunk, prealloc_modes, def_val,
r_buf = r_buf[tuple(strides)][tuple(shapes)]
assert r_buf.shape == r_vals[r].shape
r_buf[...] = numpy.asarray(def_val).astype(r_buf.dtype)
r_buf[...] = np.asarray(def_val).astype(r_buf.dtype)
strided[r] = r_buf
if strided:
......@@ -1405,7 +1405,7 @@ def _get_preallocated_maps(node, thunk, prealloc_modes, def_val,
for s, sd in zip(r_vals[r].shape,
r_shape_diff)]
new_buf = r.type.value_zeros(out_shape)
new_buf[...] = numpy.asarray(
new_buf[...] = np.asarray(
def_val).astype(r.type.dtype)
wrong_size[r] = new_buf
......@@ -2261,7 +2261,7 @@ class _Linker(gof.link.LocalLinker):
# HACK TO LOOK LIKE A REAL DESTRUCTIVE ACTION
# TOOK PLACE
if ((type(dr_vals[r][0]) in
(numpy.ndarray, numpy.memmap)) and
(np.ndarray, np.memmap)) and
(dr_vals[r][0].dtype ==
storage_map[r][0].dtype) and
(dr_vals[r][0].shape ==
......
......@@ -13,7 +13,7 @@ from six import string_types
from theano.compile.io import In
from theano.compile.function_module import orig_function
from theano.compile.pfunc import pfunc
from numpy import any
import numpy as np
import warnings
from theano import compat
......@@ -286,7 +286,7 @@ def function(inputs, outputs=None, mode=None, updates=None, givens=None,
"input.")
# compute some features of the arguments:
uses_tuple = any([isinstance(i, (list, tuple)) for i in inputs])
uses_tuple = np.any([isinstance(i, (list, tuple)) for i in inputs])
uses_updates = bool(updates)
uses_givens = bool(givens)
......
......@@ -12,7 +12,7 @@ import six.moves.cPickle as pickle
from itertools import chain
import time
import warnings
import numpy
import numpy as np
import theano
from theano import config, gof
......@@ -837,9 +837,9 @@ class Function(object):
in args_share_memory[j]],
[self.input_storage[k].storage[0] for k
in args_share_memory[j]])
if numpy.any([(var.type is i_var.type and
var.type.may_share_memory(val, i_val))
for (var, val) in group_j]):
if np.any([(var.type is i_var.type and
var.type.may_share_memory(val, i_val))
for (var, val) in group_j]):
is_aliased = True
args_share_memory[j].append(i)
......@@ -1028,9 +1028,9 @@ def _pickle_Function(f):
all_data = input_storage + inputs_data
for i, d_i in enumerate(all_data):
for j, d_j in enumerate(all_data):
if ((i < j) and isinstance(d_i, numpy.ndarray) and
isinstance(d_j, numpy.ndarray)):
if numpy.may_share_memory(d_i, d_j):
if ((i < j) and isinstance(d_i, np.ndarray) and
isinstance(d_j, np.ndarray)):
if np.may_share_memory(d_i, d_j):
if f.pickle_aliased_memory_strategy == 'warn':
_logger.warning('aliased relationship between '
'Function arguments %s, %s '
......@@ -1050,7 +1050,7 @@ def _constructor_Function(maker, input_storage, inputs_data):
assert len(f.input_storage) == len(inputs_data)
for container, x in zip(f.input_storage, inputs_data):
assert (container.data is x) or \
(isinstance(x, numpy.ndarray) and (container.data == x).all()) or \
(isinstance(x, np.ndarray) and (container.data == x).all()) or \
(container.data == x)
return f
......
from __future__ import absolute_import, print_function, division
# Note: this code was initially copied from the 'pyutools' package by its
# original author, and re-licensed under Theano's license.
import numpy
import numpy as np
import theano
from theano.compile.mode import Mode
......@@ -93,8 +93,8 @@ class MonitorMode(Mode):
def detect_nan(i, node, fn):
for output in fn.outputs:
if (not isinstance(output[0], numpy.random.RandomState) and
numpy.isnan(output[0]).any()):
if (not isinstance(output[0], np.random.RandomState) and
np.isnan(output[0]).any()):
print('*** NaN detected ***')
theano.printing.debugprint(node)
print('Inputs : %s' % [input[0] for input in fn.inputs])
......
......@@ -17,7 +17,7 @@ from six import iteritems, integer_types
from six.moves import xrange
import numpy
import numpy as np
def register_view_op_c_code(type, code, version=()):
......@@ -338,7 +338,7 @@ class Shape_i(gof.Op):
def __init__(self, i):
# As i will be used in the hash and that ndarray are not hashable,
# we need to convert it to an int as it is hashable.
if isinstance(i, numpy.ndarray):
if isinstance(i, np.ndarray):
assert i.dtype in theano.tensor.integer_dtypes
assert i == int(i)
i = int(i)
......@@ -665,11 +665,11 @@ class Rebroadcast(gof.Op):
items = sorted(axis)
self.axis = OrderedDict(items)
for axis, broad in iteritems(self.axis):
if not isinstance(axis, (numpy.integer, integer_types)):
if not isinstance(axis, (np.integer, integer_types)):
raise TypeError("Rebroadcast needs integer axes. "
"Got {}".format(axis))
if not isinstance(broad, (numpy.bool_, bool)):
if not isinstance(broad, (np.bool_, bool)):
raise TypeError("Rebroadcast needs bool for new broadcast "
"pattern. Got {}".format(broad))
......@@ -835,8 +835,8 @@ class SpecifyShape(gof.Op):
x, shape = inp
out, = out_
assert x.ndim == shape.size
assert numpy.all(x.shape == shape), ("got shape", x.shape,
"expected", shape)
assert np.all(x.shape == shape), ("got shape", x.shape,
"expected", shape)
out[0] = x
def infer_shape(self, node, shapes):
......
......@@ -27,7 +27,7 @@ import sys
import time
from collections import defaultdict
import numpy
import numpy as np
import theano
from six import iteritems
......@@ -477,7 +477,7 @@ class ProfileStats(object):
hs += ['<#apply>']
es += [' %4d ']
upto_length = numpy.sum([len(x) for x in hs]) + len(hs)
upto_length = np.sum([len(x) for x in hs]) + len(hs)
maxlen = max(self.line_width - upto_length, 0)
hs += ['<Class name>']
es += ['%s']
......@@ -559,7 +559,7 @@ class ProfileStats(object):
hs += ['<#apply>']
es += [' %4d ']
upto_length = numpy.sum([len(x) for x in hs]) + len(hs)
upto_length = np.sum([len(x) for x in hs]) + len(hs)
maxlen = max(self.line_width - upto_length, 0)
hs += ['<Op name>']
es += ['%s']
......@@ -627,7 +627,7 @@ class ProfileStats(object):
if self.variable_shape:
hs += ['<Mflops>', '<Gflops/s>']
upto_length = numpy.sum([len(x) for x in hs]) + len(hs)
upto_length = np.sum([len(x) for x in hs]) + len(hs)
maxlen = max(self.line_width - upto_length, 0)
hs += ['<Apply name>']
es += ['%s']
......@@ -929,7 +929,7 @@ class ProfileStats(object):
node_list = list(node_list)
mem_count = 0
max_mem_count = 0
mem_bound = numpy.inf
mem_bound = np.inf
# This take only the inputs/outputs dependencies.
dependencies = fgraph.profile.dependencies
done_set = set([])
......
......@@ -9,7 +9,7 @@ import copy
import logging
# Third-party imports
import numpy
import numpy as np
# Theano imports
from theano.gof import Container, Variable, generic, utils
......@@ -187,7 +187,7 @@ class SharedVariable(Variable):
# implemented at all, but with a more explicit error message to help
# Theano users figure out the root of the problem more easily.
value = self.get_value(borrow=True)
if isinstance(value, numpy.ndarray):
if isinstance(value, np.ndarray):
# Array probably had an unknown dtype.
msg = ("a Numpy array with dtype: '%s'. This data type is not "
"currently recognized by Theano tensors: please cast "
......
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
from theano import config, shared
......@@ -23,14 +23,14 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
f = op(x, y, z) - op(y, z, x)
fn = function([x, y, z], f)
xv = numpy.ones((2, 2), dtype=config.floatX)
yv = numpy.ones((2, 2), dtype=config.floatX) * 3
zv = numpy.ones((2, 2), dtype=config.floatX) * 5
xv = np.ones((2, 2), dtype=config.floatX)
yv = np.ones((2, 2), dtype=config.floatX) * 3
zv = np.ones((2, 2), dtype=config.floatX) * 5
# print function, function.__module__
# print fn.maker.fgraph.toposort()
fn(xv, yv, zv)
assert numpy.all(8.0 == fn(xv, yv, zv))
assert numpy.all(8.0 == fn(xv, yv, zv))
assert np.all(8.0 == fn(xv, yv, zv))
assert np.all(8.0 == fn(xv, yv, zv))
def test_size_changes(self):
x, y, z = T.matrices('xyz')
......@@ -38,15 +38,15 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
op = OpFromGraph([x, y], [e])
f = op(x, op(y, z))
fn = function([x, y, z], f)
xv = numpy.ones((2, 3), dtype=config.floatX)
yv = numpy.ones((3, 4), dtype=config.floatX) * 3
zv = numpy.ones((4, 5), dtype=config.floatX) * 5
xv = np.ones((2, 3), dtype=config.floatX)
yv = np.ones((3, 4), dtype=config.floatX) * 3
zv = np.ones((4, 5), dtype=config.floatX) * 5
res = fn(xv, yv, zv)
assert res.shape == (2, 5)
assert numpy.all(180.0 == res)
assert np.all(180.0 == res)
res = fn(xv, yv, zv)
assert res.shape == (2, 5)
assert numpy.all(180.0 == res)
assert np.all(180.0 == res)
def test_grad(self):
x, y, z = T.matrices('xyz')
......@@ -55,10 +55,10 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
f = op(x, y, z)
f = f - T.grad(T.sum(f), y)
fn = function([x, y, z], f)
xv = numpy.ones((2, 2), dtype=config.floatX)
yv = numpy.ones((2, 2), dtype=config.floatX) * 3
zv = numpy.ones((2, 2), dtype=config.floatX) * 5
assert numpy.all(11.0 == fn(xv, yv, zv))
xv = np.ones((2, 2), dtype=config.floatX)
yv = np.ones((2, 2), dtype=config.floatX) * 3
zv = np.ones((2, 2), dtype=config.floatX) * 5
assert np.all(11.0 == fn(xv, yv, zv))
def test_grad_grad(self):
x, y, z = T.matrices('xyz')
......@@ -68,47 +68,47 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
f = f - T.grad(T.sum(f), y)
f = f - T.grad(T.sum(f), y)
fn = function([x, y, z], f)
xv = numpy.ones((2, 2), dtype=config.floatX)
yv = numpy.ones((2, 2), dtype=config.floatX) * 3
zv = numpy.ones((2, 2), dtype=config.floatX) * 5
assert numpy.allclose(6.0, fn(xv, yv, zv))
xv = np.ones((2, 2), dtype=config.floatX)
yv = np.ones((2, 2), dtype=config.floatX) * 3
zv = np.ones((2, 2), dtype=config.floatX) * 5
assert np.allclose(6.0, fn(xv, yv, zv))
def test_shared(self):
x, y, z = T.matrices('xyz')
s = shared(numpy.random.rand(2, 2).astype(config.floatX))
s = shared(np.random.rand(2, 2).astype(config.floatX))
e = x + y * z + s
op = OpFromGraph([x, y, z], [e])
# (1+3*5=array of 16) - (3+1*5=array of 8)
f = op(x, y, z) - op(y, z, x)
fn = function([x, y, z], f)
xv = numpy.ones((2, 2), dtype=config.floatX)
yv = numpy.ones((2, 2), dtype=config.floatX) * 3
zv = numpy.ones((2, 2), dtype=config.floatX) * 5
xv = np.ones((2, 2), dtype=config.floatX)
yv = np.ones((2, 2), dtype=config.floatX) * 3
zv = np.ones((2, 2), dtype=config.floatX) * 5
# print function, function.__module__
# print fn.maker.fgraph.toposort()
assert numpy.allclose(8.0, fn(xv, yv, zv))
assert numpy.allclose(8.0, fn(xv, yv, zv))
assert np.allclose(8.0, fn(xv, yv, zv))
assert np.allclose(8.0, fn(xv, yv, zv))
def test_shared_grad(self):
x, y, z = T.matrices('xyz')
s = shared(numpy.random.rand(2, 2).astype(config.floatX))
s = shared(np.random.rand(2, 2).astype(config.floatX))
e = x + y * z + s
op = OpFromGraph([x, y, z], [e])
f = op(x, y, z)
f = f - T.grad(T.sum(f), y)
fn = function([x, y, z], f)
xv = numpy.ones((2, 2), dtype=config.floatX)
yv = numpy.ones((2, 2), dtype=config.floatX) * 3
zv = numpy.ones((2, 2), dtype=config.floatX) * 5
assert numpy.allclose(11.0 + s.get_value(), fn(xv, yv, zv))
xv = np.ones((2, 2), dtype=config.floatX)
yv = np.ones((2, 2), dtype=config.floatX) * 3
zv = np.ones((2, 2), dtype=config.floatX) * 5
assert np.allclose(11.0 + s.get_value(), fn(xv, yv, zv))
# grad again the shared variable
f = op(x, y, z)
f = f - T.grad(T.sum(f), s)
fn = function([x, y, z], f)
assert numpy.allclose(15.0 + s.get_value(),
fn(xv, yv, zv))
assert np.allclose(15.0 + s.get_value(),
fn(xv, yv, zv))
def test_connection_pattern(self):
# Basic case
......@@ -163,6 +163,6 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
p = T.matrix('p')
self._compile_and_check([q, p],
op_graph(q, p),
[numpy.ones([3, 4], dtype=config.floatX),
numpy.ones([3, 4], dtype=config.floatX)],
[np.ones([3, 4], dtype=config.floatX),
np.ones([3, 4], dtype=config.floatX)],
OpFromGraph)
......@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
import unittest
import numpy
import numpy as np
from theano import config
from theano import gof
......@@ -316,7 +316,7 @@ def test_just_c_code():
x = theano.tensor.dvector()
f = theano.function([x], wb2(x),
mode=debugmode.DebugMode(check_py_code=False))
assert numpy.all(f([1, 2]) == [2, 4])
assert np.all(f([1, 2]) == [2, 4])
def test_baddestroymap():
......@@ -349,7 +349,7 @@ def test_baddestroymap_c():
f = theano.function([x], wb2i(x),
mode=debugmode.DebugMode(check_py_code=False))
try:
assert numpy.all(f([1, 2]) == [2, 4])
assert np.all(f([1, 2]) == [2, 4])
assert False # failed to raise error
except debugmode.BadDestroyMap:
pass
......@@ -445,8 +445,8 @@ class Test_ViewMap(unittest.TestCase):
r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])
assert numpy.all(r0 == [1, 2, 3, 4])
assert numpy.all(r1 == [2, 3, 4])
assert np.all(r0 == [1, 2, 3, 4])
assert np.all(r1 == [2, 3, 4])
def test_aliased_outputs_ok_output(self):
# here aliased outputs is ok because they are both outputs of the
......@@ -470,8 +470,8 @@ class Test_ViewMap(unittest.TestCase):
r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])
assert numpy.all(r0 == [2, 4, 6, 8])
assert numpy.all(r1 == [4, 6, 8])
assert np.all(r0 == [2, 4, 6, 8])
assert np.all(r1 == [4, 6, 8])
def test_aliased_outputs_ok_shadow(self):
# here the alias between outputs is ok because one of them is not used
......@@ -496,7 +496,7 @@ class Test_ViewMap(unittest.TestCase):
r0 = f([1, 2, 3, 4], [5, 6, 7, 8])
assert numpy.all(r0 == [2, 4, 6, 8])
assert np.all(r0 == [2, 4, 6, 8])
def test_aliased_outputs_bad(self):
# here the alias between outputs is not ok because destroying one
......@@ -555,31 +555,31 @@ class Test_check_isfinite(unittest.TestCase):
g = theano.function([x], theano.tensor.log(x), mode='DEBUG_MODE')
# this should work
f(numpy.log([3, 4, 5]).astype(config.floatX))
f(np.log([3, 4, 5]).astype(config.floatX))
# if TensorType.filter_checks_isfinite were true, these would raise
# ValueError
# if not, DebugMode will check internally, and raise InvalidValueError
# passing an invalid value as an input should trigger ValueError
self.assertRaises(debugmode.InvalidValueError, f,
numpy.log([3, -4, 5]).astype(config.floatX))
np.log([3, -4, 5]).astype(config.floatX))
self.assertRaises(debugmode.InvalidValueError, f,
(numpy.asarray([0, 1.0, 0]) / 0).astype(config.floatX))
(np.asarray([0, 1.0, 0]) / 0).astype(config.floatX))
self.assertRaises(debugmode.InvalidValueError, f,
(numpy.asarray([1.0, 1.0, 1.0]) / 0).astype(config.floatX))
(np.asarray([1.0, 1.0, 1.0]) / 0).astype(config.floatX))
# generating an invalid value internally should trigger
# InvalidValueError
self.assertRaises(debugmode.InvalidValueError, g,
numpy.asarray([3, -4, 5], dtype=config.floatX))
np.asarray([3, -4, 5], dtype=config.floatX))
# this should disable the exception
theano.tensor.TensorType.filter_checks_isfinite = False
theano.compile.mode.predefined_modes[
'DEBUG_MODE'].check_isfinite = False
# insert several Inf
f(numpy.asarray(numpy.asarray([1.0, 1.0, 1.0]) / 0,
dtype=config.floatX))
f(np.asarray(np.asarray([1.0, 1.0, 1.0]) / 0,
dtype=config.floatX))
def test_check_isfinite_disabled(self):
x = theano.tensor.dvector()
......@@ -587,10 +587,10 @@ class Test_check_isfinite(unittest.TestCase):
mode=debugmode.DebugMode(check_isfinite=False))
# nan should go through
f(numpy.log([3, -4, 5]))
f(np.log([3, -4, 5]))
# inf should go through
infs = numpy.asarray([1.0, 1., 1.]) / 0
infs = np.asarray([1.0, 1., 1.]) / 0
# print infs
f(infs)
return
......@@ -721,14 +721,14 @@ class VecAsRowAndCol(gof.Op):
class Test_preallocated_output(unittest.TestCase):
def setUp(self):
self.rng = numpy.random.RandomState(seed=utt.fetch_seed())
self.rng = np.random.RandomState(seed=utt.fetch_seed())
def test_f_contiguous(self):
a = theano.tensor.fmatrix('a')
b = theano.tensor.fmatrix('b')
z = BrokenCImplementationAdd()(a, b)
# In this test, we do not want z to be an output of the graph.
out = theano.tensor.dot(z, numpy.eye(7))
out = theano.tensor.dot(z, np.eye(7))
a_val = self.rng.randn(7, 7).astype('float32')
b_val = self.rng.randn(7, 7).astype('float32')
......
......@@ -5,7 +5,7 @@ import shutil
import tempfile
import unittest
import numpy
import numpy as np
import theano
from theano.compile.io import In
......@@ -27,7 +27,7 @@ def test_function_dump():
fct2 = theano.function(**l)
x = [1, 2, 3]
assert numpy.allclose(fct1(x), fct2(x))
assert np.allclose(fct1(x), fct2(x))
class TestFunctionIn(unittest.TestCase):
......@@ -40,14 +40,14 @@ class TestFunctionIn(unittest.TestCase):
f = theano.function([In(a, strict=False)], out)
# works, rand generates float64 by default
f(numpy.random.rand(8))
f(np.random.rand(8))
# works, casting is allowed
f(numpy.array([1, 2, 3, 4], dtype='int32'))
f(np.array([1, 2, 3, 4], dtype='int32'))
f = theano.function([In(a, strict=True)], out)
try:
# fails, f expects float64
f(numpy.array([1, 2, 3, 4], dtype='int32'))
f(np.array([1, 2, 3, 4], dtype='int32'))
except TypeError:
pass
......@@ -70,17 +70,17 @@ class TestFunctionIn(unittest.TestCase):
# using mutable=True will let f change the value in aval
f = theano.function([In(a, mutable=True)], a_out, mode='FAST_RUN')
aval = numpy.random.rand(10)
aval = np.random.rand(10)
aval2 = aval.copy()
assert numpy.all(f(aval) == (aval2 * 2))
assert not numpy.all(aval == aval2)
assert np.all(f(aval) == (aval2 * 2))
assert not np.all(aval == aval2)
# using mutable=False should leave the input untouched
f = theano.function([In(a, mutable=False)], a_out, mode='FAST_RUN')
aval = numpy.random.rand(10)
aval = np.random.rand(10)
aval2 = aval.copy()
assert numpy.all(f(aval) == (aval2 * 2))
assert numpy.all(aval == aval2)
assert np.all(f(aval) == (aval2 * 2))
assert np.all(aval == aval2)
def test_in_update(self):
a = theano.tensor.dscalar('a')
......@@ -115,7 +115,7 @@ class TestFunctionIn(unittest.TestCase):
# changes occur at the same time and one doesn't overwrite the other.
for i in range(5):
f()
assert numpy.allclose(shared_var.get_value(), i % 2)
assert np.allclose(shared_var.get_value(), i % 2)
def test_in_allow_downcast_int(self):
a = theano.tensor.wvector('a') # int16
......@@ -128,16 +128,16 @@ class TestFunctionIn(unittest.TestCase):
# Both values are in range. Since they're not ndarrays (but lists),
# they will be converted, and their value checked.
assert numpy.all(f([3], [6], 1) == 10)
assert np.all(f([3], [6], 1) == 10)
# Values are in range, but a dtype too large has explicitly been given
# For performance reasons, no check of the data is explicitly performed
# (It might be OK to change this in the future.)
self.assertRaises(TypeError, f, [3], numpy.array([6], dtype='int16'),
self.assertRaises(TypeError, f, [3], np.array([6], dtype='int16'),
1)
# Value too big for a, silently ignored
assert numpy.all(f([2 ** 20], numpy.ones(1, dtype='int8'), 1) == 2)
assert np.all(f([2 ** 20], np.ones(1, dtype='int8'), 1) == 2)
# Value too big for b, raises TypeError
self.assertRaises(TypeError, f, [3], [312], 1)
......@@ -156,17 +156,17 @@ class TestFunctionIn(unittest.TestCase):
(a + b + c))
# If the values can be accurately represented, everything is OK
assert numpy.all(f(0, 0, 0) == 0)
assert np.all(f(0, 0, 0) == 0)
# If allow_downcast is True, idem
assert numpy.allclose(f(0.1, 0, 0), 0.1)
assert np.allclose(f(0.1, 0, 0), 0.1)
# If allow_downcast is False, nope
self.assertRaises(TypeError, f, 0, 0.1, 0)
# If allow_downcast is None, it should work iff floatX=float32
if theano.config.floatX == 'float32':
assert numpy.allclose(f(0, 0, 0.1), 0.1)
assert np.allclose(f(0, 0, 0.1), 0.1)
else:
self.assertRaises(TypeError, f, 0, 0, 0.1)
......@@ -182,10 +182,10 @@ class TestFunctionIn(unittest.TestCase):
# If the values can be accurately represented, everything is OK
z = [0]
assert numpy.all(f(z, z, z) == 0)
assert np.all(f(z, z, z) == 0)
# If allow_downcast is True, idem
assert numpy.allclose(f([0.1], z, z), 0.1)
assert np.allclose(f([0.1], z, z), 0.1)
# If allow_downcast is False, nope
self.assertRaises(TypeError, f, z, [0.1], z)
......
from __future__ import absolute_import, print_function, division
import copy
import six.moves.cPickle as pickle
import numpy
import numpy as np
import unittest
......@@ -18,8 +18,6 @@ from theano import tensor
from theano import tensor as T
import theano
import numpy as N
def PatternOptimizer(p1, p2, ign=True):
return gof.OpKeyOptimizer(gof.PatternSub(p1, p2), ignore_newtrees=ign)
......@@ -281,7 +279,7 @@ class T_function(unittest.TestCase):
def test_swap_SharedVariable(self):
i = T.iscalar()
x_list = theano.shared(value=numpy.random.rand(10).astype(config.floatX))
x_list = theano.shared(value=np.random.rand(10).astype(config.floatX))
x = T.scalar('x')
# SharedVariable for tests, one of them has update
......@@ -343,11 +341,11 @@ class T_function(unittest.TestCase):
A special testcase for logistic_sgd.py in Deep Learning Tutorial
This test assert that SharedVariable in different function have same storage
"""
train_x = theano.shared(value=numpy.random.rand(10, 10).astype(config.floatX))
test_x = theano.shared(value=numpy.random.rand(10, 10).astype(config.floatX))
train_x = theano.shared(value=np.random.rand(10, 10).astype(config.floatX))
test_x = theano.shared(value=np.random.rand(10, 10).astype(config.floatX))
train_y = theano.shared(value=numpy.random.rand(10, 1).astype(config.floatX))
test_y = theano.shared(value=numpy.random.rand(10, 1).astype(config.floatX))
train_y = theano.shared(value=np.random.rand(10, 1).astype(config.floatX))
test_y = theano.shared(value=np.random.rand(10, 1).astype(config.floatX))
i = T.iscalar('index')
x = T.vector('x')
......@@ -500,42 +498,42 @@ class T_function(unittest.TestCase):
when borrow=True is implemented.
"""
a = T.dmatrix()
aval = numpy.random.rand(3, 3)
aval = np.random.rand(3, 3)
# when borrow=False, test that a destroy map cannot alias output to input
f = theano.function([In(a, borrow=False)], Out(a + 1, borrow=True))
assert numpy.all(f(aval) == aval + 1)
assert not numpy.may_share_memory(aval, f(aval))
assert np.all(f(aval) == aval + 1)
assert not np.may_share_memory(aval, f(aval))
# when borrow=False, test that a viewmap cannot alias output to input
f = theano.function([In(a, borrow=False)], Out(a[0, :], borrow=True))
assert numpy.all(f(aval) == aval[0, :])
assert not numpy.may_share_memory(aval, f(aval))
assert np.all(f(aval) == aval[0, :])
assert not np.may_share_memory(aval, f(aval))
def test_borrow_output(self):
a = T.dmatrix()
f = function([a], Out(a, borrow=False))
o = N.ones((3, 3))
o = np.ones((3, 3))
assert o is not f(o) # function no longer permits aliasing outputs to inputs
f = function([a], Out(a * 4, borrow=False))
o = N.ones((3, 3))
o = np.ones((3, 3))
four = f(o)
assert numpy.all(four == 4)
assert np.all(four == 4)
f(o + .1) # should not clobber the memory used to store four
assert numpy.all(four == 4)
assert np.all(four == 4)
f = function([a], Out(a * 4, borrow=True), mode=theano.Mode('c|py_nogc', 'fast_run'))
o = N.ones((3, 3))
o = np.ones((3, 3))
four = f(o)
assert numpy.all(four == 4)
assert np.all(four == 4)
f(o + .1) # should clobber the memory used to store four
if theano.config.cxx:
assert not numpy.all(four == 4)
assert not np.all(four == 4)
else:
# The Elemwise.perform method don't reuse memory
# as some numpy version don't support that correctly.
assert numpy.all(four == 4)
assert np.all(four == 4)
def test_disconnected_input(self):
a = T.scalar('a')
......@@ -767,7 +765,7 @@ class T_picklefunction(unittest.TestCase):
assert f2.container[s].storage is f1.container[s].storage
# now put in a function with non-scalar
v_value = numpy.asarray([2, 3, 4.], dtype=config.floatX)
v_value = np.asarray([2, 3, 4.], dtype=config.floatX)
f3 = function([x, In(v, value=v_value)], x + v)
list_of_things.append(f3)
......@@ -814,13 +812,13 @@ class T_picklefunction(unittest.TestCase):
assert nl[5](3) == ol[5](3)
assert nl[4].value[nl[0]] == 6
assert numpy.all(nl[6][nl[2]] == numpy.asarray([2, 3., 4]))
assert np.all(nl[6][nl[2]] == np.asarray([2, 3., 4]))
def test_broken_pickle_with_shared(self):
saves = []
def pers_save(obj):
if isinstance(obj, numpy.ndarray):
if isinstance(obj, np.ndarray):
saves.append(obj)
return len(saves) - 1
else:
......@@ -829,7 +827,7 @@ class T_picklefunction(unittest.TestCase):
def pers_load(id):
return saves[id]
b = numpy.random.rand(5, 4)
b = np.random.rand(5, 4)
x = theano.tensor.matrix()
y = theano.shared(b)
......
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import unittest
from theano.compile.pfunc import pfunc
......@@ -20,8 +20,8 @@ class NNet(object):
self.input = input
self.target = target
self.lr = shared(lr, 'learning_rate')
self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')
self.w1 = shared(np.zeros((n_hidden, n_input)), 'w1')
self.w2 = shared(np.zeros((n_output, n_hidden)), 'w2')
# print self.lr.type
self.hidden = sigmoid(tensor.dot(self.w1, self.input))
......@@ -45,7 +45,7 @@ class NNet(object):
class TestNnet(unittest.TestCase):
def test_nnet(self):
rng = numpy.random.RandomState(1827)
rng = np.random.RandomState(1827)
data = rng.rand(10, 4)
nnet = NNet(n_input=3, n_hidden=10)
for epoch in range(3):
......@@ -60,4 +60,4 @@ class TestNnet(unittest.TestCase):
self.assertTrue(abs(mean_cost - 0.20588975452) < 1e-6)
# Just call functions to make sure they do not crash.
nnet.compute_output(input)
nnet.output_from_hidden(numpy.ones(10))
nnet.output_from_hidden(np.ones(10))
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import theano
......@@ -12,7 +12,7 @@ def test_detect_nan():
def detect_nan(i, node, fn):
for output in fn.outputs:
if numpy.isnan(output[0]).any():
if np.isnan(output[0]).any():
print('*** NaN detected ***')
theano.printing.debugprint(node)
print('Inputs : %s' % [input[0] for input in fn.inputs])
......@@ -36,7 +36,7 @@ def test_optimizer():
def detect_nan(i, node, fn):
for output in fn.outputs:
if numpy.isnan(output[0]).any():
if np.isnan(output[0]).any():
print('*** NaN detected ***')
theano.printing.debugprint(node)
print('Inputs : %s' % [input[0] for input in fn.inputs])
......@@ -65,7 +65,7 @@ def test_not_inplace():
def detect_nan(i, node, fn):
for output in fn.outputs:
if numpy.isnan(output[0]).any():
if np.isnan(output[0]).any():
print('*** NaN detected ***')
theano.printing.debugprint(node)
print('Inputs : %s' % [input[0] for input in fn.inputs])
......
......@@ -6,7 +6,7 @@ from __future__ import absolute_import, print_function, division
import logging
from nose.tools import assert_raises
import numpy
import numpy as np
from theano.compile.nanguardmode import NanGuardMode
import theano
......@@ -18,20 +18,20 @@ def test_NanGuardMode():
# intentionally. A working implementation should be able to capture all
# the abnormalties.
x = T.matrix()
w = theano.shared(numpy.random.randn(5, 7).astype(theano.config.floatX))
w = theano.shared(np.random.randn(5, 7).astype(theano.config.floatX))
y = T.dot(x, w)
fun = theano.function(
[x], y,
mode=NanGuardMode(nan_is_error=True, inf_is_error=True)
)
a = numpy.random.randn(3, 5).astype(theano.config.floatX)
infa = numpy.tile(
(numpy.asarray(100.) ** 1000000).astype(theano.config.floatX), (3, 5))
nana = numpy.tile(
numpy.asarray(numpy.nan).astype(theano.config.floatX), (3, 5))
biga = numpy.tile(
numpy.asarray(1e20).astype(theano.config.floatX), (3, 5))
a = np.random.randn(3, 5).astype(theano.config.floatX)
infa = np.tile(
(np.asarray(100.) ** 1000000).astype(theano.config.floatX), (3, 5))
nana = np.tile(
np.asarray(np.nan).astype(theano.config.floatX), (3, 5))
biga = np.tile(
np.asarray(1e20).astype(theano.config.floatX), (3, 5))
fun(a) # normal values
......@@ -46,14 +46,14 @@ def test_NanGuardMode():
_logger.propagate = True
# slices
a = numpy.random.randn(3, 4, 5).astype(theano.config.floatX)
infa = numpy.tile(
(numpy.asarray(100.) ** 1000000).astype(theano.config.floatX),
a = np.random.randn(3, 4, 5).astype(theano.config.floatX)
infa = np.tile(
(np.asarray(100.) ** 1000000).astype(theano.config.floatX),
(3, 4, 5))
nana = numpy.tile(
numpy.asarray(numpy.nan).astype(theano.config.floatX), (3, 4, 5))
biga = numpy.tile(
numpy.asarray(1e20).astype(theano.config.floatX), (3, 4, 5))
nana = np.tile(
np.asarray(np.nan).astype(theano.config.floatX), (3, 4, 5))
biga = np.tile(
np.asarray(1e20).astype(theano.config.floatX), (3, 4, 5))
x = T.tensor3()
y = x[:, T.arange(2), T.arange(2)]
......
......@@ -9,7 +9,6 @@ from theano.tests import unittest_tools as utt
from theano import function
import theano
from theano.tensor import dmatrix, dvector
from numpy import allclose
from theano.compile import as_op
import pickle
......@@ -34,7 +33,7 @@ class OpDecoratorTests(utt.InferShapeTester):
r = fn([[1.5, 5], [2, 2]])
r0 = np.array([1.5, 7.5, 15., 30.])
assert allclose(r, r0), (r, r0)
assert np.allclose(r, r0), (r, r0)
def test_2arg(self):
x = dmatrix('x')
......@@ -50,7 +49,7 @@ class OpDecoratorTests(utt.InferShapeTester):
r = fn([[1.5, 5], [2, 2]], [1, 100, 2, 200])
r0 = np.array([2.5, 107.5, 17., 230.])
assert allclose(r, r0), (r, r0)
assert np.allclose(r, r0), (r, r0)
def test_infer_shape(self):
x = dmatrix('x')
......
......@@ -6,7 +6,7 @@ from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy as np
import theano
from six.moves import StringIO
......@@ -45,7 +45,7 @@ class Test_profiling(unittest.TestCase):
f = theano.function(x, z, profile=p, name="test_profiling",
mode=m)
inp = [numpy.arange(1024, dtype='float32') + 1 for i in range(len(x))]
inp = [np.arange(1024, dtype='float32') + 1 for i in range(len(x))]
f(*inp)
buf = StringIO()
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论