提交 dded8355 authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #5752 from Amrithasuresh/master

Updated numpy as np #4218 #5640
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import pdb
import theano
......@@ -102,7 +102,7 @@ class PdbBreakpoint(Op):
if condition:
try:
monitored = [numpy.asarray(inp) for inp in inputs[1:]]
monitored = [np.asarray(inp) for inp in inputs[1:]]
except:
raise ValueError("Some of the inputs to the PdbBreakpoint op "
"'%s' could not be casted to NumPy arrays" %
......
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy as np
import numpy.random
import theano
......@@ -28,7 +28,7 @@ class T_scipy(unittest.TestCase):
a = theano.tensor.vector('a') # declare variable
b = a + a**10 # build expression
f = theano.function([a], b) # compile function
assert numpy.all(f([0, 1, 2]) == numpy.array([0, 2, 1026]))
assert np.all(f([0, 1, 2]) == np.array([0, 2, 1026]))
def test_scipy_paper_example2(self):
''' This just sees if things compile well and if they run '''
......@@ -45,7 +45,7 @@ class T_scipy(unittest.TestCase):
x = T.matrix()
y = T.vector()
w = shared(rng.randn(100))
b = shared(numpy.zeros(()))
b = shared(np.zeros(()))
# Construct Theano expression graph
p_1 = 1 / (1 + T.exp(-T.dot(x, w) - b))
......
......@@ -6,7 +6,7 @@ from six.moves import xrange
from theano.tests import unittest_tools as utt
import theano
from theano import tensor
import numpy
import numpy as np
utt.seed_rng()
......@@ -14,27 +14,27 @@ utt.seed_rng()
def test001_jacobian_vector():
x = tensor.vector()
y = x * 2
rng = numpy.random.RandomState(seed=utt.fetch_seed())
rng = np.random.RandomState(seed=utt.fetch_seed())
# test when the jacobian is called with a tensor as wrt
Jx = tensor.jacobian(y, x)
f = theano.function([x], Jx)
vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), numpy.eye(10) * 2)
assert np.allclose(f(vx), np.eye(10) * 2)
# test when the jacobian is called with a tuple as wrt
Jx = tensor.jacobian(y, (x,))
assert isinstance(Jx, tuple)
f = theano.function([x], Jx[0])
vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), numpy.eye(10) * 2)
assert np.allclose(f(vx), np.eye(10) * 2)
# test when the jacobian is called with a list as wrt
Jx = tensor.jacobian(y, [x])
assert isinstance(Jx, list)
f = theano.function([x], Jx[0])
vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), numpy.eye(10) * 2)
assert np.allclose(f(vx), np.eye(10) * 2)
# test when the jacobian is called with a list of two elements
z = tensor.vector()
......@@ -44,19 +44,19 @@ def test001_jacobian_vector():
vx = rng.uniform(size=(10,)).astype(theano.config.floatX)
vz = rng.uniform(size=(10,)).astype(theano.config.floatX)
vJs = f(vx, vz)
evx = numpy.zeros((10, 10))
evz = numpy.zeros((10, 10))
numpy.fill_diagonal(evx, vx)
numpy.fill_diagonal(evz, vz)
assert numpy.allclose(vJs[0], evz)
assert numpy.allclose(vJs[1], evx)
evx = np.zeros((10, 10))
evz = np.zeros((10, 10))
np.fill_diagonal(evx, vx)
np.fill_diagonal(evz, vz)
assert np.allclose(vJs[0], evz)
assert np.allclose(vJs[1], evx)
def test002_jacobian_matrix():
x = tensor.matrix()
y = 2 * x.sum(axis=0)
rng = numpy.random.RandomState(seed=utt.fetch_seed())
ev = numpy.zeros((10, 10, 10))
rng = np.random.RandomState(seed=utt.fetch_seed())
ev = np.zeros((10, 10, 10))
for dx in xrange(10):
ev[dx, :, dx] = 2.
......@@ -64,21 +64,21 @@ def test002_jacobian_matrix():
Jx = tensor.jacobian(y, x)
f = theano.function([x], Jx)
vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), ev)
assert np.allclose(f(vx), ev)
# test when the jacobian is called with a tuple as wrt
Jx = tensor.jacobian(y, (x,))
assert isinstance(Jx, tuple)
f = theano.function([x], Jx[0])
vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), ev)
assert np.allclose(f(vx), ev)
# test when the jacobian is called with a list as wrt
Jx = tensor.jacobian(y, [x])
assert isinstance(Jx, list)
f = theano.function([x], Jx[0])
vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
assert numpy.allclose(f(vx), ev)
assert np.allclose(f(vx), ev)
# test when the jacobian is called with a list of two elements
z = tensor.matrix()
......@@ -88,51 +88,51 @@ def test002_jacobian_matrix():
vx = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
vz = rng.uniform(size=(10, 10)).astype(theano.config.floatX)
vJs = f(vx, vz)
evx = numpy.zeros((10, 10, 10))
evz = numpy.zeros((10, 10, 10))
evx = np.zeros((10, 10, 10))
evz = np.zeros((10, 10, 10))
for dx in xrange(10):
evx[dx, dx, :] = vx[dx, :]
evz[dx, dx, :] = vz[dx, :]
assert numpy.allclose(vJs[0], evz)
assert numpy.allclose(vJs[1], evx)
assert np.allclose(vJs[0], evz)
assert np.allclose(vJs[1], evx)
def test003_jacobian_scalar():
x = tensor.scalar()
y = x * 2
rng = numpy.random.RandomState(seed=utt.fetch_seed())
rng = np.random.RandomState(seed=utt.fetch_seed())
# test when the jacobian is called with a tensor as wrt
Jx = tensor.jacobian(y, x)
f = theano.function([x], Jx)
vx = numpy.cast[theano.config.floatX](rng.uniform())
assert numpy.allclose(f(vx), 2)
vx = np.cast[theano.config.floatX](rng.uniform())
assert np.allclose(f(vx), 2)
# test when the jacobian is called with a tuple as wrt
Jx = tensor.jacobian(y, (x,))
assert isinstance(Jx, tuple)
f = theano.function([x], Jx[0])
vx = numpy.cast[theano.config.floatX](rng.uniform())
assert numpy.allclose(f(vx), 2)
vx = np.cast[theano.config.floatX](rng.uniform())
assert np.allclose(f(vx), 2)
# test when the jacobian is called with a list as wrt
Jx = tensor.jacobian(y, [x])
assert isinstance(Jx, list)
f = theano.function([x], Jx[0])
vx = numpy.cast[theano.config.floatX](rng.uniform())
assert numpy.allclose(f(vx), 2)
vx = np.cast[theano.config.floatX](rng.uniform())
assert np.allclose(f(vx), 2)
# test when the jacobian is called with a list of two elements
z = tensor.scalar()
y = x * z
Jx = tensor.jacobian(y, [x, z])
f = theano.function([x, z], Jx)
vx = numpy.cast[theano.config.floatX](rng.uniform())
vz = numpy.cast[theano.config.floatX](rng.uniform())
vx = np.cast[theano.config.floatX](rng.uniform())
vz = np.cast[theano.config.floatX](rng.uniform())
vJx = f(vx, vz)
assert numpy.allclose(vJx[0], vz)
assert numpy.allclose(vJx[1], vx)
assert np.allclose(vJx[0], vz)
assert np.allclose(vJx[1], vx)
def test004_hessian():
......@@ -140,8 +140,8 @@ def test004_hessian():
y = tensor.sum(x ** 2)
Hx = tensor.hessian(y, x)
f = theano.function([x], Hx)
vx = numpy.arange(10).astype(theano.config.floatX)
assert numpy.allclose(f(vx), numpy.eye(10) * 2)
vx = np.arange(10).astype(theano.config.floatX)
assert np.allclose(f(vx), np.eye(10) * 2)
def test_jacobian_disconnected_inputs():
......@@ -152,12 +152,12 @@ def test_jacobian_disconnected_inputs():
v2 = tensor.vector()
jacobian_v = theano.gradient.jacobian(1 + v1, v2, disconnected_inputs='ignore')
func_v = theano.function([v1, v2], jacobian_v)
val = numpy.arange(4.0).astype(theano.config.floatX)
assert numpy.allclose(func_v(val, val), numpy.zeros((4, 4)))
val = np.arange(4.0).astype(theano.config.floatX)
assert np.allclose(func_v(val, val), np.zeros((4, 4)))
s1 = tensor.scalar()
s2 = tensor.scalar()
jacobian_s = theano.gradient.jacobian(1 + s1, s2, disconnected_inputs='ignore')
func_s = theano.function([s2], jacobian_s)
val = numpy.array(1.0).astype(theano.config.floatX)
assert numpy.allclose(func_s(val), numpy.zeros(1))
val = np.array(1.0).astype(theano.config.floatX)
assert np.allclose(func_s(val), np.zeros(1))
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt
......@@ -30,7 +30,7 @@ class TestPdbBreakpoint(utt.InferShapeTester):
def test_infer_shape(self):
input1_value = numpy.arange(6).reshape(2, 3).astype("float32")
input1_value = np.arange(6).reshape(2, 3).astype("float32")
input2_value = 10.0
self._compile_and_check([self.input1, self.input2],
......@@ -42,7 +42,7 @@ class TestPdbBreakpoint(utt.InferShapeTester):
def test_grad(self):
input1_value = numpy.arange(9).reshape(3, 3).astype("float32")
input1_value = np.arange(9).reshape(3, 3).astype("float32")
input2_value = 10.0
grads = [T.grad(self.monitored_input1.sum(), self.input1),
......@@ -56,22 +56,22 @@ class TestPdbBreakpoint(utt.InferShapeTester):
gradients = fct(input1_value, input2_value)[:-1]
expected_gradients = [numpy.ones((3, 3), dtype="float32"),
numpy.array(1., dtype="float32")]
expected_gradients = [np.ones((3, 3), dtype="float32"),
np.array(1., dtype="float32")]
for i in range(len(gradients)):
numpy.testing.assert_allclose(gradients[i], expected_gradients[i])
np.testing.assert_allclose(gradients[i], expected_gradients[i])
def test_fprop(self):
input1_value = numpy.arange(9).reshape(3, 3).astype("float32")
input1_value = np.arange(9).reshape(3, 3).astype("float32")
input2_value = 10.0
fct = theano.function([self.input1, self.input2],
[self.monitored_input1, self.monitored_input2])
output = fct(input1_value, input2_value)
numpy.testing.assert_allclose(output[0], input1_value)
numpy.testing.assert_allclose(output[1], input2_value)
np.testing.assert_allclose(output[0], input1_value)
np.testing.assert_allclose(output[1], input2_value)
def test_connection_pattern(self):
......
......@@ -4,7 +4,7 @@
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy as np
from nose.plugins.skip import SkipTest
from six.moves import reduce
......@@ -42,16 +42,16 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
c = tensor.iscalar('c')
f = theano.function([c, x, y], ifelse(c, x, y), mode=self.mode)
self.assertFunctionContains1(f, self.get_ifelse(1))
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
xlen = rng.randint(200)
ylen = rng.randint(200)
vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)
vx = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy = np.asarray(rng.uniform(size=(ylen,)), self.dtype)
assert numpy.allclose(vx, f(1, vx, vy))
assert numpy.allclose(vy, f(0, vx, vy))
assert np.allclose(vx, f(1, vx, vy))
assert np.allclose(vy, f(0, vx, vy))
def test_not_lazy_if_inplace(self):
# Tests that if the outputs are scalars and the graph is big,
......@@ -71,16 +71,16 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
if isinstance(n.op, IfElse)]
assert len(ifnode) == 1
assert not ifnode[0].op.as_view
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
xlen = rng.randint(200)
ylen = rng.randint(200)
vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)
vx = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy = np.asarray(rng.uniform(size=(ylen,)), self.dtype)
assert numpy.allclose(vx, f(1, vx, vy))
assert numpy.allclose(vy + sum(range(200)), f(0, vx, vy))
assert np.allclose(vx, f(1, vx, vy))
assert np.allclose(vy + sum(range(200)), f(0, vx, vy))
def test_mixed_dtype(self):
x1 = tensor.vector('x1', dtype='int32')
......@@ -91,23 +91,23 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
f = theano.function([c, x1, x2, y1, y2],
ifelse(c, (x1, x2), (y1, y2)), mode=self.mode)
self.assertFunctionContains1(f, self.get_ifelse(2))
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
xlen = rng.randint(200)
ylen = rng.randint(200)
vx1 = numpy.asarray(rng.uniform(size=(xlen,)) * 3, 'int32')
vx2 = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy1 = numpy.asarray(rng.uniform(size=(ylen,)) * 3, 'int32')
vy2 = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)
vx1 = np.asarray(rng.uniform(size=(xlen,)) * 3, 'int32')
vx2 = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy1 = np.asarray(rng.uniform(size=(ylen,)) * 3, 'int32')
vy2 = np.asarray(rng.uniform(size=(ylen,)), self.dtype)
o1, o2 = f(1, vx1, vx2, vy1, vy2)
assert numpy.allclose(vx1, o1)
assert numpy.allclose(vx2, o2)
assert np.allclose(vx1, o1)
assert np.allclose(vx2, o2)
o1, o2 = f(0, vx1, vx2, vy1, vy2)
assert numpy.allclose(vy1, o1)
assert numpy.allclose(vy2, o2)
assert np.allclose(vy1, o1)
assert np.allclose(vy2, o2)
def test_lazy_if_on_generics(self):
x = theano.generic()
......@@ -134,24 +134,24 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
# There is only 2 of the 3 ifelse that are moved on the GPU.
# The one that stay on the CPU is for the shape.
self.assertFunctionContains(f, self.get_ifelse(1), min=2, max=3)
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
xlen = rng.randint(200)
ylen = rng.randint(200)
vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)
vx = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
vy = np.asarray(rng.uniform(size=(ylen,)), self.dtype)
gx0, gy0 = f(1, vx, vy)
assert numpy.allclose(gx0.shape, vx.shape)
assert numpy.allclose(gy0.shape, vy.shape)
assert numpy.all(numpy.asarray(gx0) == 1.)
assert numpy.all(numpy.asarray(gy0) == 0.)
assert np.allclose(gx0.shape, vx.shape)
assert np.allclose(gy0.shape, vy.shape)
assert np.all(np.asarray(gx0) == 1.)
assert np.all(np.asarray(gy0) == 0.)
gx0, gy0 = f(0, vx, vy)
assert numpy.allclose(gx0.shape, vx.shape)
assert numpy.allclose(gy0.shape, vy.shape)
assert numpy.all(numpy.asarray(gx0) == 0.)
assert numpy.all(numpy.asarray(gy0) == 1.)
assert np.allclose(gx0.shape, vx.shape)
assert np.allclose(gy0.shape, vy.shape)
assert np.all(np.asarray(gx0) == 0.)
assert np.all(np.asarray(gy0) == 1.)
def test_grad_cast_input(self):
# Tests the gradient when both inputs are on the GPU.
......@@ -178,24 +178,24 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
if isinstance(x.op, IfElse)][0]
assert len(ifnode.outputs) == 2
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x1len = rng.randint(200)
x2len = rng.randint(200)
y1len = rng.randint(200)
y2len = rng.randint(200)
vx1 = numpy.asarray(rng.uniform(size=(x1len,)), self.dtype)
vx2 = numpy.asarray(rng.uniform(size=(x2len,)), self.dtype)
vy1 = numpy.asarray(rng.uniform(size=(y1len,)), self.dtype)
vy2 = numpy.asarray(rng.uniform(size=(y2len,)), self.dtype)
vx1 = np.asarray(rng.uniform(size=(x1len,)), self.dtype)
vx2 = np.asarray(rng.uniform(size=(x2len,)), self.dtype)
vy1 = np.asarray(rng.uniform(size=(y1len,)), self.dtype)
vy2 = np.asarray(rng.uniform(size=(y2len,)), self.dtype)
ovx1, ovx2 = f(1, vx1, vx2, vy1, vy2)
ovy1, ovy2 = f(0, vx1, vx2, vy1, vy2)
assert numpy.allclose(vx1, ovx1)
assert numpy.allclose(vy1, ovy1)
assert numpy.allclose(vx2, ovx2)
assert numpy.allclose(vy2, ovy2)
assert np.allclose(vx1, ovx1)
assert np.allclose(vy1, ovy1)
assert np.allclose(vx2, ovx2)
assert np.allclose(vy2, ovy2)
def test_multiple_out_grad(self):
# Tests that we can compute the gradients through lazy if
......@@ -209,35 +209,35 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
[x1, x2, y1, y2])
f = theano.function([c, x1, x2, y1, y2], grads)
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
lens = [rng.randint(200) for i in range(4)]
values = [numpy.asarray(rng.uniform(size=(l,)), theano.config.floatX)
values = [np.asarray(rng.uniform(size=(l,)), theano.config.floatX)
for l in lens]
outs_1 = f(1, *values)
assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
assert numpy.all(outs_1[0] == 1.)
assert numpy.all(outs_1[1] == 1.)
assert numpy.all(outs_1[2] == 0.)
assert numpy.all(outs_1[3] == 0.)
assert np.all(outs_1[0] == 1.)
assert np.all(outs_1[1] == 1.)
assert np.all(outs_1[2] == 0.)
assert np.all(outs_1[3] == 0.)
outs_0 = f(0, *values)
assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
assert numpy.all(outs_0[0] == 0.)
assert numpy.all(outs_0[1] == 0.)
assert numpy.all(outs_0[2] == 1.)
assert numpy.all(outs_0[3] == 1.)
assert np.all(outs_0[0] == 0.)
assert np.all(outs_0[1] == 0.)
assert np.all(outs_0[2] == 1.)
assert np.all(outs_0[3] == 1.)
def test_multiple_out_crash(self):
# This test failed up to commit 2faeb62c38
p0 = self.shared(numpy.asarray(numpy.random.random([4, 8]),
dtype=self.dtype))
p1 = self.shared(numpy.asarray(numpy.random.random(8),
dtype=self.dtype))
p2 = self.shared(numpy.asarray(numpy.random.random([8, 3]),
dtype=self.dtype))
p3 = self.shared(numpy.asarray(numpy.random.random(3),
dtype=self.dtype))
p0 = self.shared(np.asarray(np.random.random([4, 8]),
dtype=self.dtype))
p1 = self.shared(np.asarray(np.random.random(8),
dtype=self.dtype))
p2 = self.shared(np.asarray(np.random.random([8, 3]),
dtype=self.dtype))
p3 = self.shared(np.asarray(np.random.random(3),
dtype=self.dtype))
p = [p0, p1, p2, p3]
# in my code these vars are the result of applying scan
......@@ -264,15 +264,15 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
updates=new_updates, mode=self.mode)
self.assertFunctionContains1(f, self.get_ifelse(4))
i1 = numpy.asarray(numpy.random.random([19, 4, 8]), dtype=self.dtype)
i2 = numpy.asarray(numpy.random.random([19, 8]), dtype=self.dtype)
i3 = numpy.asarray(numpy.random.random([19, 8, 3]), dtype=self.dtype)
i4 = numpy.asarray(numpy.random.random([19, 3]), dtype=self.dtype)
i1 = np.asarray(np.random.random([19, 4, 8]), dtype=self.dtype)
i2 = np.asarray(np.random.random([19, 8]), dtype=self.dtype)
i3 = np.asarray(np.random.random([19, 8, 3]), dtype=self.dtype)
i4 = np.asarray(np.random.random([19, 3]), dtype=self.dtype)
f(i1, i2, i3, i4)
def test_dtype_mismatch(self):
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
data = rng.rand(5).astype(self.dtype)
x = self.shared(data)
y = tensor.cast(x * 10, 'int8')
......@@ -282,7 +282,7 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
self.assertRaises(TypeError, ifelse, cond, y, x)
def test_ndim_mismatch(self):
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
data = rng.rand(5).astype(self.dtype)
x = self.shared(data)
y = tensor.col('y', self.dtype)
......@@ -292,7 +292,7 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
self.assertRaises(TypeError, ifelse, cond, y, x)
def test_broadcast_mismatch(self):
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
data = rng.rand(5).astype(self.dtype)
x = self.shared(data)
# print x.broadcastable
......@@ -307,7 +307,7 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
import theano.sparse
if not theano.sparse.enable_sparse:
raise SkipTest("Optimization temporarily disabled")
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
data = rng.rand(2, 3).astype(self.dtype)
x = self.shared(data)
y = theano.sparse.matrix('csc', dtype=self.dtype, name='y')
......@@ -375,7 +375,7 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
f = theano.function([x1, x2, y1, y2, w1, w2, c], out,
allow_input_downcast=True)
assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
vx1 = rng.uniform()
vx2 = rng.uniform()
vy1 = rng.uniform()
......@@ -383,10 +383,10 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
vw1 = rng.uniform()
vw2 = rng.uniform()
assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
vx1 * vy1 * vw1)
assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
vx2 * vy2 * vw2)
assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
vx1 * vy1 * vw1)
assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
vx2 * vy2 * vw2)
def test_pushout3(self):
raise SkipTest("Optimization temporarily disabled")
......@@ -394,23 +394,23 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
y1 = tensor.scalar('x2')
y2 = tensor.scalar('y2')
c = tensor.iscalar('c')
two = numpy.asarray(2, dtype=theano.config.floatX)
two = np.asarray(2, dtype=theano.config.floatX)
x, y = ifelse(c, (x1, y1), (two, y2), name='f1')
o3 = numpy.asarray(0.3, dtype=theano.config.floatX)
o2 = numpy.asarray(0.2, dtype=theano.config.floatX)
o3 = np.asarray(0.3, dtype=theano.config.floatX)
o2 = np.asarray(0.2, dtype=theano.config.floatX)
z = ifelse(c, o3, o2, name='f2')
out = x * z * y
f = theano.function([x1, y1, y2, c], out,
allow_input_downcast=True)
assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
vx1 = rng.uniform()
vy1 = rng.uniform()
vy2 = rng.uniform()
assert numpy.allclose(f(vx1, vy1, vy2, 1), vx1 * vy1 * 0.3)
assert numpy.allclose(f(vx1, vy1, vy2, 0), 2 * vy2 * 0.2)
assert np.allclose(f(vx1, vy1, vy2, 1), vx1 * vy1 * 0.3)
assert np.allclose(f(vx1, vy1, vy2, 0), 2 * vy2 * 0.2)
def test_pushout2(self):
raise SkipTest("Optimization temporarily disabled")
......@@ -428,7 +428,7 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
f = theano.function([x1, x2, y1, y2, w1, w2, c], out,
allow_input_downcast=True)
assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
vx1 = rng.uniform()
vx2 = rng.uniform()
vy1 = rng.uniform()
......@@ -439,15 +439,15 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
vw = vw1
else:
vw = vw2
assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
vx1 * vy1 * vw)
assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
vx1 * vy1 * vw)
if vx2 > vy2:
vw = vw1
else:
vw = vw2
assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
vx2 * vy2 * vw)
assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
vx2 * vy2 * vw)
def test_merge_ifs_true_false(self):
raise SkipTest("Optimization temporarily disabled")
......@@ -467,17 +467,17 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
assert len([x for x in f.maker.fgraph.toposort()
if isinstance(x.op, IfElse)]) == 1
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
vx1 = rng.uniform()
vx2 = rng.uniform()
vy1 = rng.uniform()
vy2 = rng.uniform()
vw1 = rng.uniform()
vw2 = rng.uniform()
assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
vx1 + vy1 + vw1)
assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
vx2 + vy2 + vw2)
assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
vx1 + vy1 + vw1)
assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
vx2 + vy2 + vw2)
def test_grad_test_values(self):
"""
......@@ -494,8 +494,8 @@ class test_ifelse(unittest.TestCase, utt.TestOptimizationMixin):
theano.config.compute_test_value = backup
def test_grad_int_value(self):
w = theano.shared(numpy.random.rand(10))
b = theano.shared(numpy.random.rand())
w = theano.shared(np.random.rand(10))
b = theano.shared(np.random.rand())
params = [w, b]
x = tensor.vector()
......
......@@ -14,7 +14,7 @@ The config option is in configdefaults.py
This note is written by Li Yao.
"""
from collections import OrderedDict
import numpy
import numpy as np
import six.moves.cPickle as pickle
import theano
import theano.tensor as T
......@@ -28,8 +28,8 @@ def test_pickle_unpickle_with_reoptimization():
mode = "FAST_RUN"
x1 = T.fmatrix('x1')
x2 = T.fmatrix('x2')
x3 = theano.shared(numpy.ones((10, 10), dtype=floatX))
x4 = theano.shared(numpy.ones((10, 10), dtype=floatX))
x3 = theano.shared(np.ones((10, 10), dtype=floatX))
x4 = theano.shared(np.ones((10, 10), dtype=floatX))
y = T.sum(T.sum(T.sum(x1 ** 2 + x2) + x3) + x4)
updates = OrderedDict()
......@@ -40,8 +40,8 @@ def test_pickle_unpickle_with_reoptimization():
# now pickle the compiled theano fn
string_pkl = pickle.dumps(f, -1)
in1 = numpy.ones((10, 10), dtype=floatX)
in2 = numpy.ones((10, 10), dtype=floatX)
in1 = np.ones((10, 10), dtype=floatX)
in2 = np.ones((10, 10), dtype=floatX)
# test unpickle with optimization
default = theano.config.reoptimize_unpickled_function
......@@ -60,8 +60,8 @@ def test_pickle_unpickle_without_reoptimization():
mode = "FAST_RUN"
x1 = T.fmatrix('x1')
x2 = T.fmatrix('x2')
x3 = theano.shared(numpy.ones((10, 10), dtype=floatX))
x4 = theano.shared(numpy.ones((10, 10), dtype=floatX))
x3 = theano.shared(np.ones((10, 10), dtype=floatX))
x4 = theano.shared(np.ones((10, 10), dtype=floatX))
y = T.sum(T.sum(T.sum(x1**2 + x2) + x3) + x4)
updates = OrderedDict()
......@@ -73,8 +73,8 @@ def test_pickle_unpickle_without_reoptimization():
string_pkl = pickle.dumps(f, -1)
# compute f value
in1 = numpy.ones((10, 10), dtype=floatX)
in2 = numpy.ones((10, 10), dtype=floatX)
in1 = np.ones((10, 10), dtype=floatX)
in2 = np.ones((10, 10), dtype=floatX)
# test unpickle without optimization
default = theano.config.reoptimize_unpickled_function
......
......@@ -5,7 +5,7 @@ from __future__ import absolute_import, print_function, division
import logging
from nose.plugins.skip import SkipTest
import numpy
import numpy as np
from six.moves import StringIO
......@@ -509,8 +509,8 @@ def test_scan_debugprint4():
def fn(a_m2, a_m1, b_m2, b_m1):
return a_m1 + a_m2, b_m1 + b_m2
a0 = theano.shared(numpy.arange(2, dtype='int64'))
b0 = theano.shared(numpy.arange(2, dtype='int64'))
a0 = theano.shared(np.arange(2, dtype='int64'))
b0 = theano.shared(np.arange(2, dtype='int64'))
(a, b), _ = theano.scan(
fn, outputs_info=[{'initial': a0, 'taps': [-2, -1]},
......
......@@ -18,7 +18,7 @@ from theano import function
import theano
from theano import tensor
import itertools
import numpy
import numpy as np
from theano.gof import Op, Apply
from theano.gradient import grad_undefined
from theano.tests.unittest_tools import SkipTest
......@@ -64,7 +64,7 @@ class RopLop_checker(unittest.TestCase):
# computations using scan
self.x = tensor.vector('x')
self.v = tensor.vector('v')
self.rng = numpy.random.RandomState(utt.fetch_seed())
self.rng = np.random.RandomState(utt.fetch_seed())
self.in_shape = (5 + self.rng.randint(3),)
self.mx = tensor.matrix('mx')
self.mv = tensor.matrix('mv')
......@@ -103,10 +103,10 @@ class RopLop_checker(unittest.TestCase):
If you want to test an Op with an output matrix, add a sum
after the Op you want to test.
"""
vx = numpy.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
vv = numpy.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
vx = np.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
yv = tensor.Rop(y, self.mx, self.mv)
rop_f = function([self.mx, self.mv], yv, on_unused_input='ignore')
sy, _ = theano.scan(lambda i, y, x, v:
......@@ -118,11 +118,11 @@ class RopLop_checker(unittest.TestCase):
v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv)
assert numpy.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
assert np.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
self.check_nondiff_rop(theano.clone(y, replace={self.mx: break_op(self.mx)}))
vv = numpy.asarray(self.rng.uniform(size=out_shape), theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=out_shape), theano.config.floatX)
yv = tensor.Lop(y, self.mx, self.v)
lop_f = function([self.mx, self.v], yv)
......@@ -131,7 +131,7 @@ class RopLop_checker(unittest.TestCase):
v1 = lop_f(vx, vv)
v2 = scan_f(vx, vv)
assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
assert np.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
def check_rop_lop(self, y, out_shape):
"""
......@@ -140,10 +140,10 @@ class RopLop_checker(unittest.TestCase):
"""
# TEST ROP
vx = numpy.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = numpy.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vx = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
yv = tensor.Rop(y, self.x, self.v)
rop_f = function([self.x, self.v], yv, on_unused_input='ignore')
......@@ -156,7 +156,7 @@ class RopLop_checker(unittest.TestCase):
v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv)
assert numpy.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
assert np.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
known_fail = False
try:
self.check_nondiff_rop(theano.clone(y, replace={self.x: break_op(self.x)}))
......@@ -165,10 +165,10 @@ class RopLop_checker(unittest.TestCase):
# TEST LOP
vx = numpy.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = numpy.asarray(self.rng.uniform(size=out_shape),
theano.config.floatX)
vx = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=out_shape),
theano.config.floatX)
yv = tensor.Lop(y, self.x, self.v)
lop_f = function([self.x, self.v], yv, on_unused_input='ignore')
......@@ -181,7 +181,7 @@ class RopLop_checker(unittest.TestCase):
v1 = lop_f(vx, vv)
v2 = scan_f(vx, vv)
assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
assert np.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
if known_fail:
raise SkipTest('Rop does not handle non-differentiable inputs '
......@@ -213,22 +213,22 @@ class test_RopLop(RopLop_checker):
self.check_rop_lop(self.x[:4], (4,))
def test_incsubtensor1(self):
tv = numpy.asarray(self.rng.uniform(size=(3,)),
theano.config.floatX)
tv = np.asarray(self.rng.uniform(size=(3,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.inc_subtensor(self.x[:3], t)
self.check_rop_lop(out, self.in_shape)
def test_incsubtensor2(self):
tv = numpy.asarray(self.rng.uniform(size=(10,)),
theano.config.floatX)
tv = np.asarray(self.rng.uniform(size=(10,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.inc_subtensor(t[:4], self.x[:4])
self.check_rop_lop(out, (10,))
def test_setsubtensor1(self):
tv = numpy.asarray(self.rng.uniform(size=(3,)),
theano.config.floatX)
tv = np.asarray(self.rng.uniform(size=(3,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.set_subtensor(self.x[:3], t)
self.check_rop_lop(out, self.in_shape)
......@@ -238,8 +238,8 @@ class test_RopLop(RopLop_checker):
self.check_rop_lop(out, self.in_shape)
def test_setsubtensor2(self):
tv = numpy.asarray(self.rng.uniform(size=(10,)),
theano.config.floatX)
tv = np.asarray(self.rng.uniform(size=(10,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.set_subtensor(t[:4], self.x[:4])
self.check_rop_lop(out, (10,))
......@@ -258,7 +258,7 @@ class test_RopLop(RopLop_checker):
(1,))
def test_downsample(self):
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
# ws, shp
examples = (
((2,), (16,)),
......@@ -296,7 +296,7 @@ class test_RopLop(RopLop_checker):
scan_f = function([], sy, on_unused_input='ignore', mode=mode)
v1 = rop_f()
v2 = scan_f()
assert numpy.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
assert np.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
def test_conv(self):
for conv_op in [conv.conv2d, conv2d]:
......@@ -337,25 +337,25 @@ class test_RopLop(RopLop_checker):
scan_f = function([input, filters, ev_input, ev_filters], sy,
on_unused_input='ignore', mode=mode)
dtype = theano.config.floatX
image_data = numpy.random.random(image_shape).astype(dtype)
filter_data = numpy.random.random(filter_shape).astype(dtype)
ev_image_data = numpy.random.random(image_shape).astype(dtype)
ev_filter_data = numpy.random.random(filter_shape).astype(dtype)
image_data = np.random.random(image_shape).astype(dtype)
filter_data = np.random.random(filter_shape).astype(dtype)
ev_image_data = np.random.random(image_shape).astype(dtype)
ev_filter_data = np.random.random(filter_shape).astype(dtype)
v1 = rop_f(image_data, filter_data, ev_image_data, ev_filter_data)
v2 = scan_f(image_data, filter_data, ev_image_data, ev_filter_data)
assert numpy.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
assert np.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
def test_join(self):
    """Check R-op/L-op for ``tensor.join`` of the test input with a shared
    10-element vector along axis 0.

    Diff-residue fix: the stale duplicate ``numpy.asarray`` assignment is
    removed; only the ``np.asarray`` form is kept.
    """
    tv = np.asarray(self.rng.uniform(size=(10,)),
                    theano.config.floatX)
    t = theano.shared(tv)
    out = tensor.join(0, self.x, t)
    # Joined length is the input length plus the 10 shared elements.
    self.check_rop_lop(out, (self.in_shape[0] + 10,))
def test_dot(self):
    """Check R-op/L-op for ``tensor.dot`` of the test input with a shared
    square weight matrix.

    Diff-residue fix: the stale duplicate ``numpy.asarray`` assignment is
    removed; only the ``np.asarray`` form is kept.
    """
    insh = self.in_shape[0]
    vW = np.asarray(self.rng.uniform(size=(insh, insh)),
                    theano.config.floatX)
    W = theano.shared(vW)
    # x @ W with a square W preserves the input shape.
    self.check_rop_lop(tensor.dot(self.x, W), self.in_shape)
......@@ -367,7 +367,7 @@ class test_RopLop(RopLop_checker):
self.in_shape)
def test_reshape(self):
new_shape = tensor.constant(numpy.asarray([
new_shape = tensor.constant(np.asarray([
self.mat_in_shape[0] * self.mat_in_shape[1]],
dtype='int64'))
......@@ -435,6 +435,6 @@ class test_RopLop(RopLop_checker):
# one differentiable path (i.e. there is no gradient wrt to one of
# the inputs).
x = tensor.arange(20.0).reshape([1, 20])
v = theano.shared(numpy.ones([20]))
v = theano.shared(np.ones([20]))
d = tensor.dot(x, v).sum()
tensor.Rop(tensor.grad(d, v), v, v)
......@@ -17,7 +17,7 @@ except ImportError:
def func(f):
return f
return func
import numpy
import numpy as np
import theano
import theano.tensor as T
......@@ -48,7 +48,7 @@ def fetch_seed(pseed=None):
None, which is equivalent to seeding with a random seed.
Useful for seeding RandomState objects.
>>> rng = numpy.random.RandomState(unittest_tools.fetch_seed())
>>> rng = np.random.RandomState(unittest_tools.fetch_seed())
"""
seed = pseed or config.unittests.rseed
......@@ -76,7 +76,7 @@ def seed_rng(pseed=None):
seed = fetch_seed(pseed)
if pseed and pseed != seed:
print('Warning: using seed given by config.unittests.rseed=%i' 'instead of seed %i given as parameter' % (seed, pseed), file=sys.stderr)
numpy.random.seed(seed)
np.random.seed(seed)
return seed
......@@ -87,7 +87,7 @@ def verify_grad(op, pt, n_tests=2, rng=None, *args, **kwargs):
"""
if rng is None:
seed_rng()
rng = numpy.random
rng = np.random
T.verify_grad(op, pt, n_tests, rng, *args, **kwargs)
#
......@@ -110,12 +110,12 @@ class MockRandomState:
self.val = val
def rand(self, *shape):
return numpy.zeros(shape, dtype='float64') + self.val
return np.zeros(shape, dtype='float64') + self.val
def randint(self, minval, maxval=None, size=1):
if maxval is None:
minval, maxval = 0, minval
out = numpy.zeros(size, dtype='int64')
out = np.zeros(size, dtype='int64')
if self.val == 0:
return out + minval
else:
......@@ -270,7 +270,7 @@ class InferShapeTester(unittest.TestCase):
numeric_outputs = outputs_function(*numeric_inputs)
numeric_shapes = shapes_function(*numeric_inputs)
for out, shape in zip(numeric_outputs, numeric_shapes):
assert numpy.all(out.shape == shape), (out.shape, shape)
assert np.all(out.shape == shape), (out.shape, shape)
def str_diagnostic(expected, value, rtol, atol):
......@@ -287,8 +287,8 @@ def str_diagnostic(expected, value, rtol, atol):
print(expected.strides, end=' ', file=ssio)
print(expected.min(), end=' ', file=ssio)
print(expected.max(), end=' ', file=ssio)
print(numpy.isinf(expected).sum(), end=' ', file=ssio)
print(numpy.isnan(expected).sum(), end=' ', file=ssio)
print(np.isinf(expected).sum(), end=' ', file=ssio)
print(np.isnan(expected).sum(), end=' ', file=ssio)
# only if all succeeds to we add anything to sio
print(ssio.getvalue(), file=sio)
except Exception:
......@@ -301,8 +301,8 @@ def str_diagnostic(expected, value, rtol, atol):
print(value.strides, end=' ', file=ssio)
print(value.min(), end=' ', file=ssio)
print(value.max(), end=' ', file=ssio)
print(numpy.isinf(value).sum(), end=' ', file=ssio)
print(numpy.isnan(value).sum(), end=' ', file=ssio)
print(np.isinf(value).sum(), end=' ', file=ssio)
print(np.isnan(value).sum(), end=' ', file=ssio)
# only if all succeeds to we add anything to sio
print(ssio.getvalue(), file=sio)
except Exception:
......@@ -312,19 +312,19 @@ def str_diagnostic(expected, value, rtol, atol):
print(" value :", value, file=sio)
try:
ov = numpy.asarray(expected)
nv = numpy.asarray(value)
ov = np.asarray(expected)
nv = np.asarray(value)
ssio = StringIO()
absdiff = numpy.absolute(nv - ov)
print(" Max Abs Diff: ", numpy.max(absdiff), file=ssio)
print(" Mean Abs Diff: ", numpy.mean(absdiff), file=ssio)
print(" Median Abs Diff: ", numpy.median(absdiff), file=ssio)
print(" Std Abs Diff: ", numpy.std(absdiff), file=ssio)
reldiff = numpy.absolute(nv - ov) / numpy.absolute(ov)
print(" Max Rel Diff: ", numpy.max(reldiff), file=ssio)
print(" Mean Rel Diff: ", numpy.mean(reldiff), file=ssio)
print(" Median Rel Diff: ", numpy.median(reldiff), file=ssio)
print(" Std Rel Diff: ", numpy.std(reldiff), file=ssio)
absdiff = np.absolute(nv - ov)
print(" Max Abs Diff: ", np.max(absdiff), file=ssio)
print(" Mean Abs Diff: ", np.mean(absdiff), file=ssio)
print(" Median Abs Diff: ", np.median(absdiff), file=ssio)
print(" Std Abs Diff: ", np.std(absdiff), file=ssio)
reldiff = np.absolute(nv - ov) / np.absolute(ov)
print(" Max Rel Diff: ", np.max(reldiff), file=ssio)
print(" Mean Rel Diff: ", np.mean(reldiff), file=ssio)
print(" Median Rel Diff: ", np.median(reldiff), file=ssio)
print(" Std Rel Diff: ", np.std(reldiff), file=ssio)
# only if all succeeds to we add anything to sio
print(ssio.getvalue(), file=sio)
except Exception:
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论