提交 dded8355 authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #5752 from Amrithasuresh/master

Updated numpy as np #4218 #5640
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import pdb
import theano
......@@ -102,7 +102,7 @@ class PdbBreakpoint(Op):
if condition:
try:
monitored = [numpy.asarray(inp) for inp in inputs[1:]]
monitored = [np.asarray(inp) for inp in inputs[1:]]
except:
raise ValueError("Some of the inputs to the PdbBreakpoint op "
"'%s' could not be casted to NumPy arrays" %
......
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy as np
import numpy.random
import theano
......@@ -28,7 +28,7 @@ class T_scipy(unittest.TestCase):
a = theano.tensor.vector('a') # declare variable
b = a + a**10 # build expression
f = theano.function([a], b) # compile function
assert numpy.all(f([0, 1, 2]) == numpy.array([0, 2, 1026]))
assert np.all(f([0, 1, 2]) == np.array([0, 2, 1026]))
def test_scipy_paper_example2(self):
''' This just sees if things compile well and if they run '''
......@@ -45,7 +45,7 @@ class T_scipy(unittest.TestCase):
x = T.matrix()
y = T.vector()
w = shared(rng.randn(100))
b = shared(numpy.zeros(()))
b = shared(np.zeros(()))
# Construct Theano expression graph
p_1 = 1 / (1 + T.exp(-T.dot(x, w) - b))
......
......@@ -6,7 +6,7 @@ from six.moves import xrange
from theano.tests import unittest_tools as utt
import theano
from theano import tensor
import numpy
import numpy as np
utt.seed_rng()
......@@ -14,27 +14,27 @@ utt.seed_rng()
def test001_jacobian_vector():
x = tensor.vector()
y = x * 2
rng = numpy.random.RandomState(seed=utt.fetch_seed())
rng = np.random.RandomState(seed=utt.fetch_seed())
# test when the jacobian is called with a tensor as wrt
Jx = tensor.jacobian(y, x)
f = theano.function([x], Jx)
vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), numpy.eye(10) * 2)
assert np.allclose(f(vx), np.eye(10) * 2)
# test when the jacobian is called with a tuple as wrt
Jx = tensor.jacobian(y, (x,))
assert isinstance(Jx, tuple)
f = theano.function([x], Jx[0])
vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), numpy.eye(10) * 2)
assert np.allclose(f(vx), np.eye(10) * 2)
# test when the jacobian is called with a list as wrt
Jx = tensor.jacobian(y, [x])
assert isinstance(Jx, list)
f = theano.function([x], Jx[0])
vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), numpy.eye(10) * 2)
assert np.allclose(f(vx), np.eye(10) * 2)
# test when the jacobian is called with a list of two elements
z = tensor.vector()
......@@ -44,19 +44,19 @@ def test001_jacobian_vector():
vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
vz = rng.uniform(size=(10,)).astype(theano.config.floatX)
vJs = f(vx, vz)
evx = numpy.zeros((10, 10))
evz = numpy.zeros((10, 10))
numpy.fill_diagonal(evx, vx)
numpy.fill_diagonal(evz, vz)
assert numpy.allclose(vJs[0], evz)
assert numpy.allclose(vJs[1], evx)
evx = np.zeros((10, 10))
evz = np.zeros((10, 10))
np.fill_diagonal(evx, vx)
np.fill_diagonal(evz, vz)
assert np.allclose(vJs[0], evz)
assert np.allclose(vJs[1], evx)
def test002_jacobian_matrix():
x = tensor.matrix()
y = 2 * x.sum(axis=0)
rng = numpy.random.RandomState(seed=utt.fetch_seed())
ev = numpy.zeros((10, 10, 10))
rng = np.random.RandomState(seed=utt.fetch_seed())
ev = np.zeros((10, 10, 10))
for dx in xrange(10):
ev[dx, :, dx] = 2.
......@@ -64,21 +64,21 @@ def test002_jacobian_matrix():
Jx = tensor.jacobian(y, x)
f = theano.function([x], Jx)
vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), ev)
assert np.allclose(f(vx), ev)
# test when the jacobian is called with a tuple as wrt
Jx = tensor.jacobian(y, (x,))
assert isinstance(Jx, tuple)
f = theano.function([x], Jx[0])
vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), ev)
assert np.allclose(f(vx), ev)
# test when the jacobian is called with a list as wrt
Jx = tensor.jacobian(y, [x])
assert isinstance(Jx, list)
f = theano.function([x], Jx[0])
vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), ev)
assert np.allclose(f(vx), ev)
# test when the jacobian is called with a list of two elements
z = tensor.matrix()
......@@ -88,51 +88,51 @@ def test002_jacobian_matrix():
vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
vz = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
vJs = f(vx, vz)
evx = numpy.zeros((10, 10, 10))
evz = numpy.zeros((10, 10, 10))
evx = np.zeros((10, 10, 10))
evz = np.zeros((10, 10, 10))
for dx in xrange(10):
evx[dx, dx, :] = vx[dx, :]
evz[dx, dx, :] = vz[dx, :]
assert numpy.allclose(vJs[0], evz)
assert numpy.allclose(vJs[1], evx)
assert np.allclose(vJs[0], evz)
assert np.allclose(vJs[1], evx)
def test003_jacobian_scalar():
    """Check tensor.jacobian on scalar graphs (y = x * 2, then y = x * z).

    Exercises the supported forms of the `wrt` argument: a single
    variable, a tuple, a list, and a list of two variables.  For each
    form the compiled Jacobian is compared against the analytic value.
    """
    x = tensor.scalar()
    y = x * 2
    rng = np.random.RandomState(seed=utt.fetch_seed())

    # test when the jacobian is called with a tensor as wrt
    Jx = tensor.jacobian(y, x)
    f = theano.function([x], Jx)
    vx = np.cast[theano.config.floatX](rng.uniform())
    assert np.allclose(f(vx), 2)

    # test when the jacobian is called with a tuple as wrt
    Jx = tensor.jacobian(y, (x,))
    assert isinstance(Jx, tuple)
    f = theano.function([x], Jx[0])
    vx = np.cast[theano.config.floatX](rng.uniform())
    assert np.allclose(f(vx), 2)

    # test when the jacobian is called with a list as wrt
    Jx = tensor.jacobian(y, [x])
    assert isinstance(Jx, list)
    f = theano.function([x], Jx[0])
    vx = np.cast[theano.config.floatX](rng.uniform())
    assert np.allclose(f(vx), 2)

    # test when the jacobian is called with a list of two elements
    z = tensor.scalar()
    y = x * z
    Jx = tensor.jacobian(y, [x, z])
    f = theano.function([x, z], Jx)
    vx = np.cast[theano.config.floatX](rng.uniform())
    vz = np.cast[theano.config.floatX](rng.uniform())
    vJx = f(vx, vz)
    # d(x*z)/dx == z and d(x*z)/dz == x
    assert np.allclose(vJx[0], vz)
    assert np.allclose(vJx[1], vx)
def test004_hessian():
......@@ -140,8 +140,8 @@ def test004_hessian():
y = tensor.sum(x ** 2)
Hx = tensor.hessian(y, x)
f = theano.function([x], Hx)
vx = numpy.arange(10).astype(theano.config.floatX)
assert numpy.allclose(f(vx), numpy.eye(10) * 2)
vx = np.arange(10).astype(theano.config.floatX)
assert np.allclose(f(vx), np.eye(10) * 2)
def test_jacobian_disconnected_inputs():
......@@ -152,12 +152,12 @@ def test_jacobian_disconnected_inputs():
v2 = tensor.vector()
jacobian_v = theano.gradient.jacobian(1 + v1, v2, disconnected_inputs='ignore')
func_v = theano.function([v1, v2], jacobian_v)
val = numpy.arange(4.0).astype(theano.config.floatX)
assert numpy.allclose(func_v(val, val), numpy.zeros((4, 4)))
val = np.arange(4.0).astype(theano.config.floatX)
assert np.allclose(func_v(val, val), np.zeros((4, 4)))
s1 = tensor.scalar()
s2 = tensor.scalar()
jacobian_s = theano.gradient.jacobian(1 + s1, s2, disconnected_inputs='ignore')
func_s = theano.function([s2], jacobian_s)
val = numpy.array(1.0).astype(theano.config.floatX)
assert numpy.allclose(func_s(val), numpy.zeros(1))
val = np.array(1.0).astype(theano.config.floatX)
assert np.allclose(func_s(val), np.zeros(1))
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt
......@@ -30,7 +30,7 @@ class TestPdbBreakpoint(utt.InferShapeTester):
def test_infer_shape(self):
input1_value = numpy.arange(6).reshape(2, 3).astype("float32")
input1_value = np.arange(6).reshape(2, 3).astype("float32")
input2_value = 10.0
self._compile_and_check([self.input1, self.input2],
......@@ -42,7 +42,7 @@ class TestPdbBreakpoint(utt.InferShapeTester):
def test_grad(self):
input1_value = numpy.arange(9).reshape(3, 3).astype("float32")
input1_value = np.arange(9).reshape(3, 3).astype("float32")
input2_value = 10.0
grads = [T.grad(self.monitored_input1.sum(), self.input1),
......@@ -56,22 +56,22 @@ class TestPdbBreakpoint(utt.InferShapeTester):
gradients = fct(input1_value, input2_value)[:-1]
expected_gradients = [numpy.ones((3, 3), dtype="float32"),
numpy.array(1., dtype="float32")]
expected_gradients = [np.ones((3, 3), dtype="float32"),
np.array(1., dtype="float32")]
for i in range(len(gradients)):
numpy.testing.assert_allclose(gradients[i], expected_gradients[i])
np.testing.assert_allclose(gradients[i], expected_gradients[i])
def test_fprop(self):
    """Forward pass through PdbBreakpoint: with the breakpoint condition
    not triggered, the monitored outputs must pass the inputs through
    unchanged."""
    input1_value = np.arange(9).reshape(3, 3).astype("float32")
    input2_value = 10.0
    fct = theano.function([self.input1, self.input2],
                          [self.monitored_input1, self.monitored_input2])
    output = fct(input1_value, input2_value)
    np.testing.assert_allclose(output[0], input1_value)
    np.testing.assert_allclose(output[1], input2_value)
def test_connection_pattern(self):
......
......@@ -14,7 +14,7 @@ The config option is in configdefaults.py
This note is written by Li Yao.
"""
from collections import OrderedDict
import numpy
import numpy as np
import six.moves.cPickle as pickle
import theano
import theano.tensor as T
......@@ -28,8 +28,8 @@ def test_pickle_unpickle_with_reoptimization():
mode = "FAST_RUN"
x1 = T.fmatrix('x1')
x2 = T.fmatrix('x2')
x3 = theano.shared(numpy.ones((10, 10), dtype=floatX))
x4 = theano.shared(numpy.ones((10, 10), dtype=floatX))
x3 = theano.shared(np.ones((10, 10), dtype=floatX))
x4 = theano.shared(np.ones((10, 10), dtype=floatX))
y = T.sum(T.sum(T.sum(x1 ** 2 + x2) + x3) + x4)
updates = OrderedDict()
......@@ -40,8 +40,8 @@ def test_pickle_unpickle_with_reoptimization():
# now pickle the compiled theano fn
string_pkl = pickle.dumps(f, -1)
in1 = numpy.ones((10, 10), dtype=floatX)
in2 = numpy.ones((10, 10), dtype=floatX)
in1 = np.ones((10, 10), dtype=floatX)
in2 = np.ones((10, 10), dtype=floatX)
# test unpickle with optimization
default = theano.config.reoptimize_unpickled_function
......@@ -60,8 +60,8 @@ def test_pickle_unpickle_without_reoptimization():
mode = "FAST_RUN"
x1 = T.fmatrix('x1')
x2 = T.fmatrix('x2')
x3 = theano.shared(numpy.ones((10, 10), dtype=floatX))
x4 = theano.shared(numpy.ones((10, 10), dtype=floatX))
x3 = theano.shared(np.ones((10, 10), dtype=floatX))
x4 = theano.shared(np.ones((10, 10), dtype=floatX))
y = T.sum(T.sum(T.sum(x1**2 + x2) + x3) + x4)
updates = OrderedDict()
......@@ -73,8 +73,8 @@ def test_pickle_unpickle_without_reoptimization():
string_pkl = pickle.dumps(f, -1)
# compute f value
in1 = numpy.ones((10, 10), dtype=floatX)
in2 = numpy.ones((10, 10), dtype=floatX)
in1 = np.ones((10, 10), dtype=floatX)
in2 = np.ones((10, 10), dtype=floatX)
# test unpickle without optimization
default = theano.config.reoptimize_unpickled_function
......
......@@ -5,7 +5,7 @@ from __future__ import absolute_import, print_function, division
import logging
from nose.plugins.skip import SkipTest
import numpy
import numpy as np
from six.moves import StringIO
......@@ -509,8 +509,8 @@ def test_scan_debugprint4():
def fn(a_m2, a_m1, b_m2, b_m1):
return a_m1 + a_m2, b_m1 + b_m2
a0 = theano.shared(numpy.arange(2, dtype='int64'))
b0 = theano.shared(numpy.arange(2, dtype='int64'))
a0 = theano.shared(np.arange(2, dtype='int64'))
b0 = theano.shared(np.arange(2, dtype='int64'))
(a, b), _ = theano.scan(
fn, outputs_info=[{'initial': a0, 'taps': [-2, -1]},
......
......@@ -18,7 +18,7 @@ from theano import function
import theano
from theano import tensor
import itertools
import numpy
import numpy as np
from theano.gof import Op, Apply
from theano.gradient import grad_undefined
from theano.tests.unittest_tools import SkipTest
......@@ -64,7 +64,7 @@ class RopLop_checker(unittest.TestCase):
# computations using scan
self.x = tensor.vector('x')
self.v = tensor.vector('v')
self.rng = numpy.random.RandomState(utt.fetch_seed())
self.rng = np.random.RandomState(utt.fetch_seed())
self.in_shape = (5 + self.rng.randint(3),)
self.mx = tensor.matrix('mx')
self.mv = tensor.matrix('mv')
......@@ -103,10 +103,10 @@ class RopLop_checker(unittest.TestCase):
If you want to test an Op with an output matrix, add a sum
after the Op you want to test.
"""
vx = numpy.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
vv = numpy.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
vx = np.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
yv = tensor.Rop(y, self.mx, self.mv)
rop_f = function([self.mx, self.mv], yv, on_unused_input='ignore')
sy, _ = theano.scan(lambda i, y, x, v:
......@@ -118,11 +118,11 @@ class RopLop_checker(unittest.TestCase):
v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv)
assert numpy.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
assert np.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
self.check_nondiff_rop(theano.clone(y, replace={self.mx: break_op(self.mx)}))
vv = numpy.asarray(self.rng.uniform(size=out_shape), theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=out_shape), theano.config.floatX)
yv = tensor.Lop(y, self.mx, self.v)
lop_f = function([self.mx, self.v], yv)
......@@ -131,7 +131,7 @@ class RopLop_checker(unittest.TestCase):
v1 = lop_f(vx, vv)
v2 = scan_f(vx, vv)
assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
assert np.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
def check_rop_lop(self, y, out_shape):
"""
......@@ -140,10 +140,10 @@ class RopLop_checker(unittest.TestCase):
"""
# TEST ROP
vx = numpy.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = numpy.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vx = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
yv = tensor.Rop(y, self.x, self.v)
rop_f = function([self.x, self.v], yv, on_unused_input='ignore')
......@@ -156,7 +156,7 @@ class RopLop_checker(unittest.TestCase):
v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv)
assert numpy.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
assert np.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
known_fail = False
try:
self.check_nondiff_rop(theano.clone(y, replace={self.x: break_op(self.x)}))
......@@ -165,10 +165,10 @@ class RopLop_checker(unittest.TestCase):
# TEST LOP
vx = numpy.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = numpy.asarray(self.rng.uniform(size=out_shape),
theano.config.floatX)
vx = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=out_shape),
theano.config.floatX)
yv = tensor.Lop(y, self.x, self.v)
lop_f = function([self.x, self.v], yv, on_unused_input='ignore')
......@@ -181,7 +181,7 @@ class RopLop_checker(unittest.TestCase):
v1 = lop_f(vx, vv)
v2 = scan_f(vx, vv)
assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
assert np.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
if known_fail:
raise SkipTest('Rop does not handle non-differentiable inputs '
......@@ -213,22 +213,22 @@ class test_RopLop(RopLop_checker):
self.check_rop_lop(self.x[:4], (4,))
def test_incsubtensor1(self):
    """Rop/Lop through inc_subtensor where a shared vector is added
    to a slice of the differentiated input (x[:3] += t)."""
    tv = np.asarray(self.rng.uniform(size=(3,)),
                    theano.config.floatX)
    t = theano.shared(tv)
    out = tensor.inc_subtensor(self.x[:3], t)
    self.check_rop_lop(out, self.in_shape)
def test_incsubtensor2(self):
    """Rop/Lop through inc_subtensor where a slice of the differentiated
    input is added into a shared vector (t[:4] += x[:4])."""
    tv = np.asarray(self.rng.uniform(size=(10,)),
                    theano.config.floatX)
    t = theano.shared(tv)
    out = tensor.inc_subtensor(t[:4], self.x[:4])
    self.check_rop_lop(out, (10,))
def test_setsubtensor1(self):
    """Rop/Lop through set_subtensor overwriting a slice of the
    differentiated input with a shared vector (x[:3] = t)."""
    tv = np.asarray(self.rng.uniform(size=(3,)),
                    theano.config.floatX)
    t = theano.shared(tv)
    out = tensor.set_subtensor(self.x[:3], t)
    self.check_rop_lop(out, self.in_shape)
......@@ -238,8 +238,8 @@ class test_RopLop(RopLop_checker):
self.check_rop_lop(out, self.in_shape)
def test_setsubtensor2(self):
    """Rop/Lop through set_subtensor writing a slice of the
    differentiated input into a shared vector (t[:4] = x[:4])."""
    tv = np.asarray(self.rng.uniform(size=(10,)),
                    theano.config.floatX)
    t = theano.shared(tv)
    out = tensor.set_subtensor(t[:4], self.x[:4])
    self.check_rop_lop(out, (10,))
......@@ -258,7 +258,7 @@ class test_RopLop(RopLop_checker):
(1,))
def test_downsample(self):
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
# ws, shp
examples = (
((2,), (16,)),
......@@ -296,7 +296,7 @@ class test_RopLop(RopLop_checker):
scan_f = function([], sy, on_unused_input='ignore', mode=mode)
v1 = rop_f()
v2 = scan_f()
assert numpy.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
assert np.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
def test_conv(self):
for conv_op in [conv.conv2d, conv2d]:
......@@ -337,25 +337,25 @@ class test_RopLop(RopLop_checker):
scan_f = function([input, filters, ev_input, ev_filters], sy,
on_unused_input='ignore', mode=mode)
dtype = theano.config.floatX
image_data = numpy.random.random(image_shape).astype(dtype)
filter_data = numpy.random.random(filter_shape).astype(dtype)
ev_image_data = numpy.random.random(image_shape).astype(dtype)
ev_filter_data = numpy.random.random(filter_shape).astype(dtype)
image_data = np.random.random(image_shape).astype(dtype)
filter_data = np.random.random(filter_shape).astype(dtype)
ev_image_data = np.random.random(image_shape).astype(dtype)
ev_filter_data = np.random.random(filter_shape).astype(dtype)
v1 = rop_f(image_data, filter_data, ev_image_data, ev_filter_data)
v2 = scan_f(image_data, filter_data, ev_image_data, ev_filter_data)
assert numpy.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
assert np.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
def test_join(self):
    """Rop/Lop through tensor.join of the differentiated input with a
    shared vector along axis 0."""
    tv = np.asarray(self.rng.uniform(size=(10,)),
                    theano.config.floatX)
    t = theano.shared(tv)
    out = tensor.join(0, self.x, t)
    # joined length = len(x) + len(t)
    self.check_rop_lop(out, (self.in_shape[0] + 10,))
def test_dot(self):
    """Rop/Lop through tensor.dot with a shared square weight matrix."""
    insh = self.in_shape[0]
    vW = np.asarray(self.rng.uniform(size=(insh, insh)),
                    theano.config.floatX)
    W = theano.shared(vW)
    self.check_rop_lop(tensor.dot(self.x, W), self.in_shape)
......@@ -367,7 +367,7 @@ class test_RopLop(RopLop_checker):
self.in_shape)
def test_reshape(self):
new_shape = tensor.constant(numpy.asarray([
new_shape = tensor.constant(np.asarray([
self.mat_in_shape[0] * self.mat_in_shape[1]],
dtype='int64'))
......@@ -435,6 +435,6 @@ class test_RopLop(RopLop_checker):
# one differentiable path (i.e. there is no gradient wrt to one of
# the inputs).
x = tensor.arange(20.0).reshape([1, 20])
v = theano.shared(numpy.ones([20]))
v = theano.shared(np.ones([20]))
d = tensor.dot(x, v).sum()
tensor.Rop(tensor.grad(d, v), v, v)
......@@ -17,7 +17,7 @@ except ImportError:
def func(f):
return f
return func
import numpy
import numpy as np
import theano
import theano.tensor as T
......@@ -48,7 +48,7 @@ def fetch_seed(pseed=None):
None, which is equivalent to seeding with a random seed.
Useful for seeding RandomState objects.
>>> rng = numpy.random.RandomState(unittest_tools.fetch_seed())
>>> rng = np.random.RandomState(unittest_tools.fetch_seed())
"""
seed = pseed or config.unittests.rseed
......@@ -76,7 +76,7 @@ def seed_rng(pseed=None):
seed = fetch_seed(pseed)
if pseed and pseed != seed:
print('Warning: using seed given by config.unittests.rseed=%i' 'instead of seed %i given as parameter' % (seed, pseed), file=sys.stderr)
numpy.random.seed(seed)
np.random.seed(seed)
return seed
......@@ -87,7 +87,7 @@ def verify_grad(op, pt, n_tests=2, rng=None, *args, **kwargs):
"""
if rng is None:
seed_rng()
rng = numpy.random
rng = np.random
T.verify_grad(op, pt, n_tests, rng, *args, **kwargs)
#
......@@ -110,12 +110,12 @@ class MockRandomState:
self.val = val
def rand(self, *shape):
return numpy.zeros(shape, dtype='float64') + self.val
return np.zeros(shape, dtype='float64') + self.val
def randint(self, minval, maxval=None, size=1):
if maxval is None:
minval, maxval = 0, minval
out = numpy.zeros(size, dtype='int64')
out = np.zeros(size, dtype='int64')
if self.val == 0:
return out + minval
else:
......@@ -270,7 +270,7 @@ class InferShapeTester(unittest.TestCase):
numeric_outputs = outputs_function(*numeric_inputs)
numeric_shapes = shapes_function(*numeric_inputs)
for out, shape in zip(numeric_outputs, numeric_shapes):
assert numpy.all(out.shape == shape), (out.shape, shape)
assert np.all(out.shape == shape), (out.shape, shape)
def str_diagnostic(expected, value, rtol, atol):
......@@ -287,8 +287,8 @@ def str_diagnostic(expected, value, rtol, atol):
print(expected.strides, end=' ', file=ssio)
print(expected.min(), end=' ', file=ssio)
print(expected.max(), end=' ', file=ssio)
print(numpy.isinf(expected).sum(), end=' ', file=ssio)
print(numpy.isnan(expected).sum(), end=' ', file=ssio)
print(np.isinf(expected).sum(), end=' ', file=ssio)
print(np.isnan(expected).sum(), end=' ', file=ssio)
# only if all succeeds to we add anything to sio
print(ssio.getvalue(), file=sio)
except Exception:
......@@ -301,8 +301,8 @@ def str_diagnostic(expected, value, rtol, atol):
print(value.strides, end=' ', file=ssio)
print(value.min(), end=' ', file=ssio)
print(value.max(), end=' ', file=ssio)
print(numpy.isinf(value).sum(), end=' ', file=ssio)
print(numpy.isnan(value).sum(), end=' ', file=ssio)
print(np.isinf(value).sum(), end=' ', file=ssio)
print(np.isnan(value).sum(), end=' ', file=ssio)
# only if all succeeds to we add anything to sio
print(ssio.getvalue(), file=sio)
except Exception:
......@@ -312,19 +312,19 @@ def str_diagnostic(expected, value, rtol, atol):
print(" value :", value, file=sio)
try:
ov = numpy.asarray(expected)
nv = numpy.asarray(value)
ov = np.asarray(expected)
nv = np.asarray(value)
ssio = StringIO()
absdiff = numpy.absolute(nv - ov)
print(" Max Abs Diff: ", numpy.max(absdiff), file=ssio)
print(" Mean Abs Diff: ", numpy.mean(absdiff), file=ssio)
print(" Median Abs Diff: ", numpy.median(absdiff), file=ssio)
print(" Std Abs Diff: ", numpy.std(absdiff), file=ssio)
reldiff = numpy.absolute(nv - ov) / numpy.absolute(ov)
print(" Max Rel Diff: ", numpy.max(reldiff), file=ssio)
print(" Mean Rel Diff: ", numpy.mean(reldiff), file=ssio)
print(" Median Rel Diff: ", numpy.median(reldiff), file=ssio)
print(" Std Rel Diff: ", numpy.std(reldiff), file=ssio)
absdiff = np.absolute(nv - ov)
print(" Max Abs Diff: ", np.max(absdiff), file=ssio)
print(" Mean Abs Diff: ", np.mean(absdiff), file=ssio)
print(" Median Abs Diff: ", np.median(absdiff), file=ssio)
print(" Std Abs Diff: ", np.std(absdiff), file=ssio)
reldiff = np.absolute(nv - ov) / np.absolute(ov)
print(" Max Rel Diff: ", np.max(reldiff), file=ssio)
print(" Mean Rel Diff: ", np.mean(reldiff), file=ssio)
print(" Median Rel Diff: ", np.median(reldiff), file=ssio)
print(" Std Rel Diff: ", np.std(reldiff), file=ssio)
# only if all succeeds to we add anything to sio
print(ssio.getvalue(), file=sio)
except Exception:
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论