Commit 429a8fae authored by Frédéric Bastien, committed by GitHub

Merge pull request #5843 from Amrithasuresh/master

Updated numpy as np #4218
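The diff below is a mechanical rename: every reference to the numpy module now goes through the conventional np alias (issue #4218). Since "import numpy as np" only binds a second name to the same module object, the rename cannot change test behaviour; a minimal sketch of why:

    import numpy
    import numpy as np

    # Both names are bound to the very same module object, so every
    # attribute lookup resolves identically; only the spelling changes.
    assert numpy is np
    assert numpy.zeros((2, 3)).shape == np.zeros((2, 3)).shape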
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy as np
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises, assert_true
@@ -238,8 +237,8 @@ class TestAssertShape(unittest.TestCase):
expected_shape = [None, s1, s2, None]
f = theano.function([x, s1, s2], assert_shape(x, expected_shape))
v = numpy.zeros((3, 5, 7, 11), dtype='float32')
self.assertEqual(0, numpy.sum(f(v, 5, 7)))
v = np.zeros((3, 5, 7, 11), dtype='float32')
self.assertEqual(0, np.sum(f(v, 5, 7)))
assert_raises(AssertionError, f, v, 5, 0)
assert_raises(AssertionError, f, v, 5, 9)
@@ -257,12 +256,12 @@ class TestAssertShape(unittest.TestCase):
f = theano.function([input, filters], out)
# mismatched input_shape
assert_raises(AssertionError, f,
numpy.zeros((3, 5, 9, 11), dtype='float32'),
numpy.zeros((7, 5, 3, 3), dtype='float32'))
np.zeros((3, 5, 9, 11), dtype='float32'),
np.zeros((7, 5, 3, 3), dtype='float32'))
# mismatched filter_shape
assert_raises(AssertionError, f,
numpy.zeros((3, 5, 7, 11), dtype='float32'),
numpy.zeros((7, 5, 2, 2), dtype='float32'))
np.zeros((3, 5, 7, 11), dtype='float32'),
np.zeros((7, 5, 2, 2), dtype='float32'))
@change_flags([("conv.assert_shape", True)])
def test_shape_check_conv3d(self):
@@ -275,12 +274,12 @@ class TestAssertShape(unittest.TestCase):
f = theano.function([input, filters], out)
# mismatched input_shape
assert_raises(AssertionError, f,
numpy.zeros((3, 5, 9, 11, 13), dtype='float32'),
numpy.zeros((7, 5, 3, 3, 3), dtype='float32'))
np.zeros((3, 5, 9, 11, 13), dtype='float32'),
np.zeros((7, 5, 3, 3, 3), dtype='float32'))
# mismatched filter_shape
assert_raises(AssertionError, f,
numpy.zeros((3, 5, 7, 11, 13), dtype='float32'),
numpy.zeros((7, 5, 2, 2, 2), dtype='float32'))
np.zeros((3, 5, 7, 11, 13), dtype='float32'),
np.zeros((7, 5, 2, 2, 2), dtype='float32'))
@change_flags([("conv.assert_shape", True)])
def test_shape_check_conv2d_grad_wrt_inputs(self):
@@ -293,8 +292,8 @@ class TestAssertShape(unittest.TestCase):
f = theano.function([output_grad, filters], out)
# mismatched filter_shape
assert_raises(AssertionError, f,
numpy.zeros((3, 6, 5, 9), dtype='float32'),
numpy.zeros((7, 6, 3, 3), dtype='float32'))
np.zeros((3, 6, 5, 9), dtype='float32'),
np.zeros((7, 6, 3, 3), dtype='float32'))
@change_flags([("conv.assert_shape", True)])
def test_shape_check_conv3d_grad_wrt_inputs(self):
@@ -307,8 +306,8 @@ class TestAssertShape(unittest.TestCase):
f = theano.function([output_grad, filters], out)
# mismatched filter_shape
assert_raises(AssertionError, f,
numpy.zeros((3, 6, 5, 9, 11), dtype='float32'),
numpy.zeros((7, 6, 3, 3, 3), dtype='float32'))
np.zeros((3, 6, 5, 9, 11), dtype='float32'),
np.zeros((7, 6, 3, 3, 3), dtype='float32'))
@change_flags([("conv.assert_shape", True)])
def test_shape_check_conv2d_grad_wrt_weights(self):
@@ -321,8 +320,8 @@ class TestAssertShape(unittest.TestCase):
f = theano.function([input, output_grad], out)
# mismatched filter_shape
assert_raises(AssertionError, f,
numpy.zeros((3, 6, 7, 11), dtype='float32'),
numpy.zeros((3, 7, 5, 9), dtype='float32'))
np.zeros((3, 6, 7, 11), dtype='float32'),
np.zeros((3, 7, 5, 9), dtype='float32'))
@change_flags([("conv.assert_shape", True)])
def test_shape_check_conv3d_grad_wrt_weights(self):
@@ -335,8 +334,8 @@ class TestAssertShape(unittest.TestCase):
f = theano.function([input, output_grad], out)
# mismatched filter_shape
assert_raises(AssertionError, f,
numpy.zeros((3, 6, 7, 11, 13), dtype='float32'),
numpy.zeros((3, 7, 5, 9, 11), dtype='float32'))
np.zeros((3, 6, 7, 11, 13), dtype='float32'),
np.zeros((3, 7, 5, 9, 11), dtype='float32'))
class BaseTestConv(object):
@@ -371,8 +370,8 @@ class BaseTestConv(object):
if filter_dilation is None:
filter_dilation = (1,) * (len(inputs_shape) - 2)
inputs_val = numpy.random.random(inputs_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
inputs_val = np.random.random(inputs_shape).astype('float32')
filters_val = np.random.random(filters_shape).astype('float32')
# scale down values to prevent rounding errors
inputs_val /= 10
@@ -414,8 +413,8 @@ class BaseTestConv(object):
if check_trace:
assert_true(check_stack_trace(f, ops_to_check=target_op))
res_ref = numpy.array(f_ref())
res = numpy.array(f())
res_ref = np.array(f_ref())
res = np.array(f())
utt.assert_allclose(res_ref, res)
if verify_grad and inputs_val.size > 0 and filters_val.size > 0 and res.size > 0:
utt.verify_grad(conv_op(border_mode=border_mode,
@@ -436,8 +435,8 @@ class BaseTestConv(object):
if filter_dilation is None:
filter_dilation = (1,) * (len(inputs_shape) - 2)
inputs_val = numpy.random.random(inputs_shape).astype('float32')
output_val = numpy.random.random(output_shape).astype('float32')
inputs_val = np.random.random(inputs_shape).astype('float32')
output_val = np.random.random(output_shape).astype('float32')
inputs = self.shared(inputs_val)
output = self.shared(output_val)
@@ -473,8 +472,8 @@ class BaseTestConv(object):
if check_trace:
assert_true(check_stack_trace(f, ops_to_check=target_op))
res_ref = numpy.array(f_ref())
res = numpy.array(f())
res_ref = np.array(f_ref())
res = np.array(f())
utt.assert_allclose(res_ref, res)
def abstract_conv_gradweight(inputs_val, output_val):
@@ -499,8 +498,8 @@ class BaseTestConv(object):
if filter_dilation is None:
filter_dilation = (1,) * (len(inputs_shape) - 2)
output_val = numpy.random.random(output_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
output_val = np.random.random(output_shape).astype('float32')
filters_val = np.random.random(filters_shape).astype('float32')
output = self.shared(output_val)
filters = self.shared(filters_val)
@@ -537,10 +536,10 @@ class BaseTestConv(object):
if check_trace:
assert_true(check_stack_trace(f, ops_to_check=target_op))
res = numpy.array(f())
res = np.array(f())
if ref is not None:
res_ref = numpy.array(f_ref())
res_ref = np.array(f_ref())
utt.assert_allclose(res_ref, res)
def abstract_conv_gradinputs(filters_val, output_val):
@@ -1272,7 +1271,7 @@ class TestConvTypes(unittest.TestCase):
self.filters = tensor.ftensor4()
self.topgrad = tensor.ftensor4()
self.constant_tensor = numpy.zeros((3, 5, 7, 11), dtype='float32')
self.constant_tensor = np.zeros((3, 5, 7, 11), dtype='float32')
def test_grad_types(self):
# This function simply tests the behaviour of the AbstractConv
@@ -1582,7 +1581,7 @@ class TestConv2dTranspose(unittest.TestCase):
output_shape=(2, 1, 10, 10),
input_dilation=(2, 2)),
mode=mode)()
expected_output = numpy.array(
expected_output = np.array(
[[[[2, 2, 4, 4, 4, 4, 4, 4, 2, 2],
[2, 2, 4, 4, 4, 4, 4, 4, 2, 2],
[4, 4, 8, 8, 8, 8, 8, 8, 4, 4],
@@ -1593,7 +1592,7 @@ class TestConv2dTranspose(unittest.TestCase):
[4, 4, 8, 8, 8, 8, 8, 8, 4, 4],
[2, 2, 4, 4, 4, 4, 4, 4, 2, 2],
[2, 2, 4, 4, 4, 4, 4, 4, 2, 2]]]] * 2)
numpy.testing.assert_equal(output, expected_output)
np.testing.assert_equal(output, expected_output)
class TestConv2dGrads(unittest.TestCase):
@@ -1604,7 +1603,7 @@ class TestConv2dGrads(unittest.TestCase):
theano.config.mode == "FAST_COMPILE"):
raise SkipTest("Need blas to test conv2d")
self.random_stream = numpy.random.RandomState(utt.fetch_seed())
self.random_stream = np.random.RandomState(utt.fetch_seed())
self.inputs_shapes = [(8, 1, 12, 12), (1, 1, 5, 5), (1, 1, 5, 6), (1, 1, 6, 6)]
self.filters_shapes = [(5, 1, 2, 2), (1, 1, 3, 3)]
......
@@ -3,7 +3,7 @@
"""
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
from numpy.random import randn
import theano
@@ -41,10 +41,10 @@ class BlockSparse_Gemv_and_Outer(utt.InferShapeTester):
batchSize = 2
input = randn(batchSize, inputWindowSize, inputSize).astype('float32')
permutation = numpy.random.permutation
inputIndice = numpy.vstack(permutation(nInputBlock)[:inputWindowSize]
for _ in range(batchSize)).astype('int32')
outputIndice = numpy.vstack(
permutation = np.random.permutation
inputIndice = np.vstack(permutation(nInputBlock)[:inputWindowSize]
for _ in range(batchSize)).astype('int32')
outputIndice = np.vstack(
permutation(nOutputBlock)[:outputWindowSize]
for _ in range(batchSize)).astype('int32')
weight = randn(nInputBlock, nOutputBlock,
@@ -66,11 +66,11 @@ class BlockSparse_Gemv_and_Outer(utt.InferShapeTester):
o = randn(nInputBlock, nOutputBlock, xSize, ySize).astype('float32')
x = randn(batchSize, xWindowSize, xSize).astype('float32')
y = randn(batchSize, yWindowSize, ySize).astype('float32')
randint = numpy.random.randint
xIdx = numpy.vstack(randint(0, nInputBlock, size=xWindowSize)
for _ in range(batchSize)).astype('int32')
yIdx = numpy.vstack(randint(0, nOutputBlock, size=yWindowSize)
for _ in range(batchSize)).astype('int32')
randint = np.random.randint
xIdx = np.vstack(randint(0, nInputBlock, size=xWindowSize)
for _ in range(batchSize)).astype('int32')
yIdx = np.vstack(randint(0, nOutputBlock, size=yWindowSize)
for _ in range(batchSize)).astype('int32')
return o, x, y, xIdx, yIdx
@@ -82,7 +82,7 @@ class BlockSparse_Gemv_and_Outer(utt.InferShapeTester):
for i in range(h.shape[1]):
inputIdx = iIdx[b, i]
w = W[inputIdx, outputIdx]
o[b, j, :] += numpy.dot(h[b, i], w)
o[b, j, :] += np.dot(h[b, i], w)
return o
@staticmethod
@@ -94,7 +94,7 @@ class BlockSparse_Gemv_and_Outer(utt.InferShapeTester):
for b in range(o.shape[0]):
w = W[ix_(iIdx[b], oIdx[b])].swapaxes(1, 2)
w = w.reshape((w.shape[0] * w.shape[1], w.shape[2] * w.shape[3]))
o[b] += numpy.dot(h[b].ravel(), w).reshape(o.shape[1:])
o[b] += np.dot(h[b].ravel(), w).reshape(o.shape[1:])
return o
@staticmethod
@@ -108,8 +108,8 @@ class BlockSparse_Gemv_and_Outer(utt.InferShapeTester):
# The next three lines do the same operation. The last one is the
# fastest
# o[b] += (h[b][:, None, :, None] * w).sum(axis=(0, 2))
# o[b] += numpy.tensordot(h[b], w, [(0,1),(0,2)])
o[b] += numpy.einsum('ik,ijkl', h[b], w)
# o[b] += np.tensordot(h[b], w, [(0,1),(0,2)])
o[b] += np.einsum('ik,ijkl', h[b], w)
return o
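An aside on the comment above: the three formulations really do compute the same tensor contraction and differ only in speed. A standalone sketch with made-up shapes (h_b standing in for h[b]) could check the equivalence:

    import numpy as np

    # hypothetical sizes: h_b is (i, k), w is (i, j, k, l)
    h_b = np.random.rand(3, 4).astype('float32')
    w = np.random.rand(3, 5, 4, 6).astype('float32')

    v1 = (h_b[:, None, :, None] * w).sum(axis=(0, 2))  # broadcast and sum
    v2 = np.tensordot(h_b, w, [(0, 1), (0, 2)])        # contract i and k
    v3 = np.einsum('ik,ijkl', h_b, w)                  # same contraction
    assert np.allclose(v1, v2) and np.allclose(v2, v3)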
@staticmethod
@@ -117,8 +117,8 @@ class BlockSparse_Gemv_and_Outer(utt.InferShapeTester):
for b in range(x.shape[0]):
for i in range(xIdx.shape[1]):
for j in range(yIdx.shape[1]):
o[xIdx[b, i], yIdx[b, j]] += numpy.outer(x[b, i, :],
y[b, j, :])
o[xIdx[b, i], yIdx[b, j]] += np.outer(x[b, i, :],
y[b, j, :])
return o
def test_sparseblockdot(self):
@@ -190,7 +190,7 @@ class BlockSparse_Gemv_and_Outer(utt.InferShapeTester):
W_val, h_val, iIdx_val, b_val, oIdx_val = \
BlockSparse_Gemv_and_Outer.gemv_data()
th_out = f(numpy.swapaxes(W_val, 2, 3), h_val, iIdx_val, b_val,
th_out = f(np.swapaxes(W_val, 2, 3), h_val, iIdx_val, b_val,
oIdx_val)
ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)
@@ -218,8 +218,8 @@ class BlockSparse_Gemv_and_Outer(utt.InferShapeTester):
def test_sparseblockgemv_grad_1(self):
# Test that we correctly handle cases where dimensions are 1.
h_val = randn(1, 1, 1).astype('float32')
iIdx_val = numpy.random.permutation(1)[:1][None, :]
oIdx_val = numpy.random.permutation(1)[:1][None, :]
iIdx_val = np.random.permutation(1)[:1][None, :]
oIdx_val = np.random.permutation(1)[:1][None, :]
W_val = randn(1, 1, 1, 1).astype('float32')
b_val = randn(1, 1).astype('float32')
......
@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt
import numpy
import numpy as np
from theano.tensor.nnet import bn
@@ -17,12 +17,12 @@ def test_BNComposite():
n = (x - M) / V
return n * G + B
numpy.random.seed(1234)
X = 1 + numpy.random.random([10, 20]).astype('float32')
B = 1 + numpy.random.random([20]).astype('float32')
G = 1 + numpy.random.random([20]).astype('float32')
M = 1 + numpy.random.random([20]).astype('float32')
V = 1 + numpy.random.random([20]).astype('float32')
np.random.seed(1234)
X = 1 + np.random.random([10, 20]).astype('float32')
B = 1 + np.random.random([20]).astype('float32')
G = 1 + np.random.random([20]).astype('float32')
M = 1 + np.random.random([20]).astype('float32')
V = 1 + np.random.random([20]).astype('float32')
x = theano.tensor.matrix('x')
b = theano.tensor.vector('b')
@@ -30,11 +30,11 @@ def test_BNComposite():
m = theano.tensor.vector('m')
v = theano.tensor.vector('v')
x.tag.test_value = numpy.random.rand(2, 2).astype(theano.config.floatX)
b.tag.test_value = numpy.random.rand(2).astype(theano.config.floatX)
g.tag.test_value = numpy.random.rand(2).astype(theano.config.floatX)
m.tag.test_value = numpy.random.rand(2).astype(theano.config.floatX)
v.tag.test_value = numpy.random.rand(2).astype(theano.config.floatX)
x.tag.test_value = np.random.rand(2, 2).astype(theano.config.floatX)
b.tag.test_value = np.random.rand(2).astype(theano.config.floatX)
g.tag.test_value = np.random.rand(2).astype(theano.config.floatX)
m.tag.test_value = np.random.rand(2).astype(theano.config.floatX)
v.tag.test_value = np.random.rand(2).astype(theano.config.floatX)
bn_ref_op = bn_ref(x, g, b, m, v)
f_ref = theano.function([x, b, g, m, v], [bn_ref_op])
@@ -54,12 +54,12 @@ def test_batch_normalization():
n = (x - M) / V
return n * G + B
numpy.random.seed(1234)
X = 1 + numpy.random.random([10, 20]).astype('float32')
B = 1 + numpy.random.random([20]).astype('float32')
G = 1 + numpy.random.random([20]).astype('float32')
M = 1 + numpy.random.random([20]).astype('float32')
V = 1 + numpy.random.random([20]).astype('float32')
np.random.seed(1234)
X = 1 + np.random.random([10, 20]).astype('float32')
B = 1 + np.random.random([20]).astype('float32')
G = 1 + np.random.random([20]).astype('float32')
M = 1 + np.random.random([20]).astype('float32')
V = 1 + np.random.random([20]).astype('float32')
x = theano.tensor.matrix('x')
b = theano.tensor.vector('b')
@@ -92,7 +92,7 @@ def test_batch_normalization():
def bn_f(inputs, gamma, beta, mean, std):
return bn.batch_normalization(inputs, gamma, beta, mean, std, mode=mode)
utt.verify_grad(bn_f, [X, G, B,
X.mean(axis=0)[numpy.newaxis], X.std(axis=0)[numpy.newaxis]])
X.mean(axis=0)[np.newaxis], X.std(axis=0)[np.newaxis]])
def test_bn_feature_maps():
@@ -101,12 +101,12 @@ def test_bn_feature_maps():
n = (x - M) / V
return n * G + B
numpy.random.seed(1234)
X = 1 + numpy.random.random([2, 3, 4, 4]).astype('float32')
B = 1 + numpy.random.random([3]).astype('float32')
G = 1 + numpy.random.random([3]).astype('float32')
M = 1 + numpy.random.random([3]).astype('float32')
V = 1 + numpy.random.random([3]).astype('float32')
np.random.seed(1234)
X = 1 + np.random.random([2, 3, 4, 4]).astype('float32')
B = 1 + np.random.random([3]).astype('float32')
G = 1 + np.random.random([3]).astype('float32')
M = 1 + np.random.random([3]).astype('float32')
V = 1 + np.random.random([3]).astype('float32')
x = theano.tensor.tensor4('x')
b = theano.tensor.vector('b')
@@ -205,20 +205,20 @@ def test_batch_normalization_train():
data_shape = data_shape[:ndim]
param_shape = tuple(1 if d in axes2 else s
for d, s in enumerate(data_shape))
X = 4 + 3 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
Dy = -1 + 2 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
Scale = numpy.random.randn(*param_shape).astype(theano.config.floatX)
Bias = numpy.random.randn(*param_shape).astype(theano.config.floatX)
Running_mean = numpy.random.randn(*param_shape).astype(theano.config.floatX)
Running_var = numpy.random.randn(*param_shape).astype(theano.config.floatX)
X = 4 + 3 * np.random.randn(*data_shape).astype(theano.config.floatX)
Dy = -1 + 2 * np.random.randn(*data_shape).astype(theano.config.floatX)
Scale = np.random.randn(*param_shape).astype(theano.config.floatX)
Bias = np.random.randn(*param_shape).astype(theano.config.floatX)
Running_mean = np.random.randn(*param_shape).astype(theano.config.floatX)
Running_var = np.random.randn(*param_shape).astype(theano.config.floatX)
outputs = f(X, Scale, Bias, Running_mean, Running_var, Dy)
# compare outputs
utt.assert_allclose(outputs[0], outputs[0 + 5]) # out
utt.assert_allclose(outputs[1], outputs[1 + 5]) # mean
utt.assert_allclose(outputs[2], outputs[2 + 5]) # invstd
utt.assert_allclose(outputs[3], outputs[3 + 5]) # running_mean
utt.assert_allclose(numpy.nan_to_num(outputs[4]),
numpy.nan_to_num(outputs[4 + 5])) # running_var
utt.assert_allclose(np.nan_to_num(outputs[4]),
np.nan_to_num(outputs[4 + 5])) # running_var
# compare gradients
utt.assert_allclose(outputs[10], outputs[10 + 3], atol=1e-4) # dx
utt.assert_allclose(outputs[11], outputs[11 + 3], rtol=2e-4, atol=1e-4) # dscale
@@ -245,10 +245,10 @@ def test_batch_normalization_train_without_running_averages():
bn.AbstractBatchNormTrainGrad))
for n in f.maker.fgraph.toposort()])
# run
X = 4 + 3 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
Dy = -1 + 2 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
Scale = numpy.random.randn(*param_shape).astype(theano.config.floatX)
Bias = numpy.random.randn(*param_shape).astype(theano.config.floatX)
X = 4 + 3 * np.random.randn(*data_shape).astype(theano.config.floatX)
Dy = -1 + 2 * np.random.randn(*data_shape).astype(theano.config.floatX)
Scale = np.random.randn(*param_shape).astype(theano.config.floatX)
Bias = np.random.randn(*param_shape).astype(theano.config.floatX)
f(X, Scale, Bias, Dy)
@@ -330,7 +330,7 @@ def test_batch_normalization_train_broadcast():
if theano.config.mode != "FAST_COMPILE":
assert len(nodes) == 1
assert isinstance(nodes[0].op, theano.compile.DeepCopyOp)
inputs = [numpy.asarray(numpy.random.rand(*((4,) * n)), x.dtype)
inputs = [np.asarray(np.random.rand(*((4,) * n)), x.dtype)
for n in [x.ndim, scale.ndim, bias.ndim,
running_mean.ndim, running_var.ndim]]
assert 0.0 == f(*inputs)
@@ -381,12 +381,12 @@ def test_batch_normalization_test():
data_shape = data_shape[:ndim]
param_shape = tuple(1 if d in axes2 else s
for d, s in enumerate(data_shape))
X = 4 + 3 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
Dy = -1 + 2 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
Scale = numpy.random.randn(*param_shape).astype(theano.config.floatX)
Bias = numpy.random.randn(*param_shape).astype(theano.config.floatX)
Mean = numpy.random.randn(*param_shape).astype(theano.config.floatX)
Var = numpy.random.rand(*param_shape).astype(theano.config.floatX)
X = 4 + 3 * np.random.randn(*data_shape).astype(theano.config.floatX)
Dy = -1 + 2 * np.random.randn(*data_shape).astype(theano.config.floatX)
Scale = np.random.randn(*param_shape).astype(theano.config.floatX)
Bias = np.random.randn(*param_shape).astype(theano.config.floatX)
Mean = np.random.randn(*param_shape).astype(theano.config.floatX)
Var = np.random.rand(*param_shape).astype(theano.config.floatX)
outputs = f(X, Scale, Bias, Mean, Var, Dy)
# compare outputs
utt.assert_allclose(outputs[0], outputs[1]) # out
......
@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
import time
from nose.plugins.skip import SkipTest
import numpy
import numpy as np
import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt
@@ -79,8 +79,8 @@ class TestConv2D(utt.InferShapeTester):
theano_conv = theano.function([input, filters], output, mode=self.mode)
# initialize input and compute result
image_data = numpy.random.random(N_image_shape).astype(self.dtype)
filter_data = numpy.random.random(N_filter_shape).astype(self.dtype)
image_data = np.random.random(N_image_shape).astype(self.dtype)
filter_data = np.random.random(N_filter_shape).astype(self.dtype)
try:
theano_output = theano_conv(image_data, filter_data)
except ValueError:
@@ -97,20 +97,20 @@ class TestConv2D(utt.InferShapeTester):
orig_image_data = image_data
if border_mode != 'full':
s = -1.
out_shape2d = numpy.array(N_image_shape[-2:]) +\
s * numpy.array(N_filter_shape[-2:]) - s
out_shape2d = numpy.ceil(out_shape2d / numpy.array(subsample))
out_shape2d = np.array(N_image_shape[-2:]) +\
s * np.array(N_filter_shape[-2:]) - s
out_shape2d = np.ceil(out_shape2d / np.array(subsample))
# avoid numpy deprecation
out_shape2d = out_shape2d.astype('int32')
out_shape = (N_image_shape[0], N_filter_shape[0]) + tuple(out_shape2d)
ref_output = numpy.zeros(out_shape)
ref_output = np.zeros(out_shape)
# loop over output feature maps
ref_output.fill(0)
if border_mode == 'full':
image_data2 = numpy.zeros((N_image_shape[0], N_image_shape[1],
N_image_shape[2] + 2 * N_filter_shape[2] - 2,
N_image_shape[3] + 2 * N_filter_shape[3] - 2))
image_data2 = np.zeros((N_image_shape[0], N_image_shape[1],
N_image_shape[2] + 2 * N_filter_shape[2] - 2,
N_image_shape[3] + 2 * N_filter_shape[3] - 2))
image_data2[
:, :, N_filter_shape[2] - 1:N_filter_shape[2] - 1 + N_image_shape[2],
N_filter_shape[3] - 1:N_filter_shape[3] - 1 + N_image_shape[3]] = image_data
@@ -160,17 +160,17 @@ class TestConv2D(utt.InferShapeTester):
def test_uint_image_shape_datatype(self):
"""Tests for uint datatype in image_shape.
"""
self.validate((2, 2, 3, numpy.uint8(3)), (3, 2, 3, 3), 'valid', verify_grad=False)
self.validate((numpy.uint16(2), 2, 3, 3), (3, 2, 3, 3), 'valid', verify_grad=False)
self.validate((2, numpy.uint32(2), 3, 3), (3, 2, 3, 3), 'valid', verify_grad=False)
self.validate((2, 2, 3, np.uint8(3)), (3, 2, 3, 3), 'valid', verify_grad=False)
self.validate((np.uint16(2), 2, 3, 3), (3, 2, 3, 3), 'valid', verify_grad=False)
self.validate((2, np.uint32(2), 3, 3), (3, 2, 3, 3), 'valid', verify_grad=False)
def test_uint_filter_shape_datatype(self):
"""Tests for uint datatype in filter_shape
"""
self.validate((3, 2, 3, 3), (2, 2, 3, numpy.uint8(3)), 'valid', verify_grad=False)
self.validate((3, 2, 3, 3), (numpy.uint16(2), 2, 3, 3), 'valid', verify_grad=False)
self.validate((3, 2, 3, 3), (2, numpy.uint32(2), 3, 3), 'valid', verify_grad=False)
self.validate((3, 2, 3, 3), (2, 2, 3, np.uint8(3)), 'valid', verify_grad=False)
self.validate((3, 2, 3, 3), (np.uint16(2), 2, 3, 3), 'valid', verify_grad=False)
self.validate((3, 2, 3, 3), (2, np.uint32(2), 3, 3), 'valid', verify_grad=False)
def test_img_kernel_same_shape(self):
self.validate((3, 2, 3, 3), (4, 2, 3, 3), 'full')
@@ -474,8 +474,8 @@ class TestConv2D(utt.InferShapeTester):
print("filter_shapes", filter_shapes)
for filter_shape in filter_shapes:
input = theano.shared(numpy.random.random(image_shape))
filters = theano.shared(numpy.random.random(filter_shape))
input = theano.shared(np.random.random(image_shape))
filters = theano.shared(np.random.random(filter_shape))
output = self.conv2d(
input, filters,
@@ -498,7 +498,7 @@ class TestConv2D(utt.InferShapeTester):
# must be provided explicitly
def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
r = np.asarray(np.random.rand(*shape), dtype='float64')
return r * 2 - 1
adtens = T.dtensor4()
......
@@ -3,7 +3,7 @@ import time
from nose.plugins.skip import SkipTest
from nose_parameterized import parameterized
import numpy
import numpy as np
try:
from scipy import ndimage
except ImportError:
@@ -17,14 +17,14 @@ import theano.tests.unittest_tools as utt
def test_get_diagonal_subtensor_view(wrap=lambda a: a):
x = numpy.arange(20).reshape(5, 4).astype('float32')
x = np.arange(20).reshape(5, 4).astype('float32')
x = wrap(x)
xv01 = get_diagonal_subtensor_view(x, 0, 1)
# test that it works in 2d
assert numpy.all(numpy.asarray(xv01) == [[12, 9, 6, 3], [16, 13, 10, 7]])
assert np.all(np.asarray(xv01) == [[12, 9, 6, 3], [16, 13, 10, 7]])
x = numpy.arange(24).reshape(4, 3, 2)
x = np.arange(24).reshape(4, 3, 2)
xv01 = get_diagonal_subtensor_view(x, 0, 1)
xv02 = get_diagonal_subtensor_view(x, 0, 2)
xv12 = get_diagonal_subtensor_view(x, 1, 2)
@@ -32,11 +32,11 @@ def test_get_diagonal_subtensor_view(wrap=lambda a: a):
# print 'x', x
# print 'xv01', xv01
# print 'xv02', xv02
assert numpy.all(numpy.asarray(xv01) == [
assert np.all(np.asarray(xv01) == [
[[12, 13], [8, 9], [4, 5]],
[[18, 19], [14, 15], [10, 11]]])
assert numpy.all(numpy.asarray(xv02) == [
assert np.all(np.asarray(xv02) == [
[[6, 1], [8, 3], [10, 5]],
[[12, 7], [14, 9], [16, 11]],
[[18, 13], [20, 15], [22, 17]],
@@ -45,7 +45,7 @@ def test_get_diagonal_subtensor_view(wrap=lambda a: a):
# the diagonal views of each leading matrix are the same
# as the slices out of the diagonal view of the entire 3d tensor
for xi, xvi in zip(x, xv12):
assert numpy.all(xvi == get_diagonal_subtensor_view(xi, 0, 1))
assert np.all(xvi == get_diagonal_subtensor_view(xi, 0, 1))
def pyconv3d(signals, filters, border_mode='valid'):
@@ -68,8 +68,8 @@ def pyconv3d(signals, filters, border_mode='valid'):
if Tpad > 0 or Hpad > 0 or Wpad > 0:
# zero-pad signals
signals_padded = numpy.zeros((Ns, Ts + 2 * Tpad, C,
Hs + 2 * Hpad, Ws + 2 * Wpad), 'float32')
signals_padded = np.zeros((Ns, Ts + 2 * Tpad, C,
Hs + 2 * Hpad, Ws + 2 * Wpad), 'float32')
signals_padded[:, Tpad:(Ts + Tpad), :, Hpad:(Hs + Hpad),
Wpad:(Ws + Wpad)] = signals
Ns, Ts, C, Hs, Ws = signals_padded.shape
@@ -79,7 +79,7 @@ def pyconv3d(signals, filters, border_mode='valid'):
Hf2 = Hf // 2
Wf2 = Wf // 2
rval = numpy.zeros((Ns, Ts - Tf + 1, Nf, Hs - Hf + 1, Ws - Wf + 1))
rval = np.zeros((Ns, Ts - Tf + 1, Nf, Hs - Hf + 1, Ws - Wf + 1))
for ns in xrange(Ns):
for nf in xrange(Nf):
for c in xrange(C):
@@ -113,8 +113,8 @@ def test_conv3d(border_mode):
Ns, Ts, C, Hs, Ws = 3, 10, 3, 32, 32
Nf, Tf, C, Hf, Wf = 32, 5, 3, 5, 5
signals = numpy.arange(Ns * Ts * C * Hs * Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
signals = np.arange(Ns * Ts * C * Hs * Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
filters = np.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
t0 = time.time()
pyres = pyconv3d(signals, filters, border_mode)
@@ -153,8 +153,8 @@ def test_conv3d(border_mode):
Ns, Ts, C, Hs, Ws = 3, 3, 3, 5, 5
Nf, Tf, C, Hf, Wf = 4, 2, 3, 2, 2
signals = numpy.random.rand(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.random.rand(Nf, Tf, C, Hf, Wf).astype('float32')
signals = np.random.rand(Ns, Ts, C, Hs, Ws).astype('float32')
filters = np.random.rand(Nf, Tf, C, Hf, Wf).astype('float32')
utt.verify_grad(lambda s, f: conv3d(s, f, border_mode=border_mode),
[signals, filters], eps=1e-1, mode=mode)
@@ -162,8 +162,8 @@ def test_conv3d(border_mode):
Ns, Ts, C, Hs, Ws = 3, 10, 3, 32, 32
Nf, Tf, C, Hf, Wf = 32, 1, 3, 5, 5
signals = numpy.arange(Ns * Ts * C * Hs * Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
signals = np.arange(Ns * Ts * C * Hs * Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
filters = np.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
t0 = time.time()
pyres = pyconv3d(signals, filters, border_mode)
@@ -200,7 +200,7 @@ def test_conv3d(border_mode):
Ns, Ts, C, Hs, Ws = 3, 3, 3, 5, 5
Nf, Tf, C, Hf, Wf = 4, 1, 3, 2, 2
signals = numpy.random.rand(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.random.rand(Nf, Tf, C, Hf, Wf).astype('float32')
signals = np.random.rand(Ns, Ts, C, Hs, Ws).astype('float32')
filters = np.random.rand(Nf, Tf, C, Hf, Wf).astype('float32')
utt.verify_grad(lambda s, f: conv3d(s, f, border_mode=border_mode),
[signals, filters], eps=1e-1, mode=mode)
@@ -3,7 +3,7 @@ from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
from nose.tools import assert_equals
import numpy
import numpy as np
from six import integer_types
import theano
@@ -66,15 +66,15 @@ class TestCorr2D(utt.InferShapeTester):
theano_corr = theano.function([input, filters], output, mode=self.mode)
# initialize input and compute result
image_data = numpy.random.random(N_image_shape).astype(self.dtype)
filter_data = numpy.random.random(N_filter_shape).astype(self.dtype)
image_data = np.random.random(N_image_shape).astype(self.dtype)
filter_data = np.random.random(N_filter_shape).astype(self.dtype)
if non_contiguous:
image_data = numpy.transpose(image_data, axes=(0, 1, 3, 2))
image_data = np.transpose(image_data, axes=(0, 1, 3, 2))
image_data = image_data.copy()
image_data = numpy.transpose(image_data, axes=(0, 1, 3, 2))
filter_data = numpy.transpose(filter_data, axes=(0, 1, 3, 2))
image_data = np.transpose(image_data, axes=(0, 1, 3, 2))
filter_data = np.transpose(filter_data, axes=(0, 1, 3, 2))
filter_data = filter_data.copy()
filter_data = numpy.transpose(filter_data, axes=(0, 1, 3, 2))
filter_data = np.transpose(filter_data, axes=(0, 1, 3, 2))
assert not image_data.flags['CONTIGUOUS']
assert not filter_data.flags['CONTIGUOUS']
@@ -82,38 +82,38 @@ class TestCorr2D(utt.InferShapeTester):
# REFERENCE IMPLEMENTATION
# Testing correlation, not convolution. Reverse filters.
filter_data_corr = numpy.array(filter_data[:, :, ::-1, ::-1],
copy=True,
order='C')
filter_data_corr = np.array(filter_data[:, :, ::-1, ::-1],
copy=True,
order='C')
orig_image_data = image_data
img_shape2d = numpy.array(N_image_shape[-2:])
fil_shape2d = numpy.array(N_filter_shape[-2:])
dil_shape2d = numpy.array(filter_dilation)
img_shape2d = np.array(N_image_shape[-2:])
fil_shape2d = np.array(N_filter_shape[-2:])
dil_shape2d = np.array(filter_dilation)
dil_fil_shape2d = (fil_shape2d - 1) * dil_shape2d + 1
subsample2d = numpy.array(subsample)
subsample2d = np.array(subsample)
if border_mode == 'full':
padHW = (dil_fil_shape2d - 1)
elif border_mode == 'valid':
padHW = numpy.array([0, 0])
padHW = np.array([0, 0])
elif border_mode == 'half':
padHW = numpy.floor(dil_fil_shape2d / 2).astype('int32')
padHW = np.floor(dil_fil_shape2d / 2).astype('int32')
elif isinstance(border_mode, tuple):
padHW = numpy.array(border_mode)
padHW = np.array(border_mode)
elif isinstance(border_mode, integer_types):
padHW = numpy.array([border_mode, border_mode])
padHW = np.array([border_mode, border_mode])
else:
raise NotImplementedError('Unsupported border_mode {}'.format(border_mode))
out_shape2d = numpy.floor((img_shape2d + 2 * (padHW) - dil_fil_shape2d) / subsample2d) + 1
out_shape2d = np.floor((img_shape2d + 2 * (padHW) - dil_fil_shape2d) / subsample2d) + 1
# avoid numpy deprecation
out_shape2d = out_shape2d.astype('int32')
out_shape = (N_image_shape[0], N_filter_shape[0]) + tuple(out_shape2d)
ref_output = numpy.zeros(out_shape)
ref_output = np.zeros(out_shape)
# loop over output feature maps
ref_output.fill(0)
image_data2 = numpy.zeros((N_image_shape[0], N_image_shape[1],
N_image_shape[2] + 2 * padHW[0],
N_image_shape[3] + 2 * padHW[1]))
image_data2 = np.zeros((N_image_shape[0], N_image_shape[1],
N_image_shape[2] + 2 * padHW[0],
N_image_shape[3] + 2 * padHW[1]))
image_data2[:, :, padHW[0]:padHW[0] + N_image_shape[2],
padHW[1]:padHW[1] + N_image_shape[3]] = image_data
image_data = image_data2
@@ -265,7 +265,7 @@ class TestCorr2D(utt.InferShapeTester):
Checks dtype upcast for CorrMM methods.
"""
def rand(shape, dtype='float64'):
r = numpy.asarray(numpy.random.rand(*shape), dtype=dtype)
r = np.asarray(np.random.rand(*shape), dtype=dtype)
return r * 2 - 1
if not theano.config.cxx:
raise SkipTest("Need cxx to test conv2d")
@@ -296,7 +296,7 @@ class TestCorr2D(utt.InferShapeTester):
raise SkipTest("Need cxx for this test")
def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
r = np.asarray(np.random.rand(*shape), dtype='float64')
return r * 2 - 1
corrMM = corr.CorrMM
@@ -329,7 +329,7 @@ class TestCorr2D(utt.InferShapeTester):
raise SkipTest("Need cxx for this test")
def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
r = np.asarray(np.random.rand(*shape), dtype='float64')
return r * 2 - 1
corrMM = corr.CorrMM
gradW = corr.CorrMM_gradWeights
@@ -369,7 +369,7 @@ class TestCorr2D(utt.InferShapeTester):
raise SkipTest("Need cxx for this test")
def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
r = np.asarray(np.random.rand(*shape), dtype='float64')
return r * 2 - 1
corrMM = corr.CorrMM
gradI = corr.CorrMM_gradInputs
......
@@ -3,7 +3,7 @@ from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
from nose.tools import assert_equals
import numpy
import numpy as np
from six import integer_types
import theano
@@ -67,17 +67,17 @@ class TestCorr3D(utt.InferShapeTester):
theano_corr = theano.function([input, filters], output, mode=self.mode)
# initialize input and compute result
image_data = numpy.random.random(N_image_shape).astype(self.dtype)
filter_data = numpy.random.random(N_filter_shape).astype(self.dtype)
image_data = np.random.random(N_image_shape).astype(self.dtype)
filter_data = np.random.random(N_filter_shape).astype(self.dtype)
image_data /= 10
filter_data /= 10
if non_contiguous:
image_data = numpy.transpose(image_data, axes=(0, 1, 4, 3, 2))
image_data = np.transpose(image_data, axes=(0, 1, 4, 3, 2))
image_data = image_data.copy()
image_data = numpy.transpose(image_data, axes=(0, 1, 4, 3, 2))
filter_data = numpy.transpose(filter_data, axes=(0, 1, 4, 3, 2))
image_data = np.transpose(image_data, axes=(0, 1, 4, 3, 2))
filter_data = np.transpose(filter_data, axes=(0, 1, 4, 3, 2))
filter_data = filter_data.copy()
filter_data = numpy.transpose(filter_data, axes=(0, 1, 4, 3, 2))
filter_data = np.transpose(filter_data, axes=(0, 1, 4, 3, 2))
assert not image_data.flags['CONTIGUOUS']
assert not filter_data.flags['CONTIGUOUS']
@@ -85,39 +85,39 @@ class TestCorr3D(utt.InferShapeTester):
# REFERENCE IMPLEMENTATION
# Testing correlation, not convolution. Reverse filters.
filter_data_corr = numpy.array(filter_data[:, :, ::-1, ::-1, ::-1],
copy=True,
order='C')
filter_data_corr = np.array(filter_data[:, :, ::-1, ::-1, ::-1],
copy=True,
order='C')
orig_image_data = image_data
img_shape3d = numpy.array(N_image_shape[-3:])
fil_shape3d = numpy.array(N_filter_shape[-3:])
dil_shape3d = numpy.array(filter_dilation)
img_shape3d = np.array(N_image_shape[-3:])
fil_shape3d = np.array(N_filter_shape[-3:])
dil_shape3d = np.array(filter_dilation)
dil_fil_shape3d = (fil_shape3d - 1) * dil_shape3d + 1
subsample3d = numpy.array(subsample)
subsample3d = np.array(subsample)
if border_mode == 'full':
padHWD = (dil_fil_shape3d - 1)
elif border_mode == 'valid':
padHWD = numpy.array([0, 0, 0])
padHWD = np.array([0, 0, 0])
elif border_mode == 'half':
padHWD = numpy.floor(dil_fil_shape3d / 2).astype('int32')
padHWD = np.floor(dil_fil_shape3d / 2).astype('int32')
elif isinstance(border_mode, tuple):
padHWD = numpy.array(border_mode)
padHWD = np.array(border_mode)
elif isinstance(border_mode, integer_types):
padHWD = numpy.array([border_mode, border_mode, border_mode])
padHWD = np.array([border_mode, border_mode, border_mode])
else:
raise NotImplementedError('Unsupported border_mode {}'.format(border_mode))
out_shape3d = numpy.floor((img_shape3d + 2 * (padHWD) - dil_fil_shape3d) / subsample3d) + 1
out_shape3d = np.floor((img_shape3d + 2 * (padHWD) - dil_fil_shape3d) / subsample3d) + 1
# avoid numpy deprecation
out_shape3d = out_shape3d.astype('int32')
out_shape = (N_image_shape[0], N_filter_shape[0]) + tuple(out_shape3d)
ref_output = numpy.zeros(out_shape)
ref_output = np.zeros(out_shape)
# loop over output feature maps
ref_output.fill(0)
image_data2 = numpy.zeros((N_image_shape[0], N_image_shape[1],
N_image_shape[2] + 2 * padHWD[0],
N_image_shape[3] + 2 * padHWD[1],
N_image_shape[4] + 2 * padHWD[2]))
image_data2 = np.zeros((N_image_shape[0], N_image_shape[1],
N_image_shape[2] + 2 * padHWD[0],
N_image_shape[3] + 2 * padHWD[1],
N_image_shape[4] + 2 * padHWD[2]))
image_data2[:, :,
padHWD[0]:padHWD[0] + N_image_shape[2],
padHWD[1]:padHWD[1] + N_image_shape[3],
@@ -283,7 +283,7 @@ class TestCorr3D(utt.InferShapeTester):
raise SkipTest("Need cxx for this test")
def rand(shape, dtype='float64'):
r = numpy.asarray(numpy.random.rand(*shape), dtype=dtype)
r = np.asarray(np.random.rand(*shape), dtype=dtype)
return r * 2 - 1
ops = [corr3d.Corr3dMM, corr3d.Corr3dMM_gradWeights, corr3d.Corr3dMM_gradInputs]
@@ -312,7 +312,7 @@ class TestCorr3D(utt.InferShapeTester):
raise SkipTest("Need cxx for this test")
def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
r = np.asarray(np.random.rand(*shape), dtype='float64')
return r * 2 - 1
corr3dMM = corr3d.Corr3dMM
@@ -345,7 +345,7 @@ class TestCorr3D(utt.InferShapeTester):
raise SkipTest("Need cxx for this test")
def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
r = np.asarray(np.random.rand(*shape), dtype='float64')
return r * 2 - 1
corr3dMM = corr3d.Corr3dMM
gradW = corr3d.Corr3dMM_gradWeights
@@ -386,7 +386,7 @@ class TestCorr3D(utt.InferShapeTester):
raise SkipTest("Need cxx for this test")
def rand(*shape):
r = numpy.asarray(numpy.random.rand(*shape), dtype='float64')
r = np.asarray(np.random.rand(*shape), dtype='float64')
return r * 2 - 1
corr3dMM = corr3d.Corr3dMM
gradI = corr3d.Corr3dMM_gradInputs
......
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import unittest
import theano
@@ -25,7 +25,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
for border in ['valid', 'ignore_borders']:
for dtype in self.dtypes:
images = shared(
numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape))
np.arange(np.prod(shape), dtype=dtype).reshape(shape))
neib_shape = T.as_tensor_variable(pshape)
f = function([],
@@ -42,13 +42,13 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
for node in f.maker.fgraph.toposort()])
# print g()
assert numpy.allclose(images.get_value(borrow=True), g())
assert np.allclose(images.get_value(borrow=True), g())
def test_neibs_manual(self):
shape = (2, 3, 4, 4)
for dtype in self.dtypes:
images = shared(
numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape))
np.arange(np.prod(shape), dtype=dtype).reshape(shape))
neib_shape = T.as_tensor_variable((2, 2))
for border in ['valid', 'ignore_borders']:
@@ -60,7 +60,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
# print images.get_value(borrow=True)
neibs = f()
# print neibs
assert numpy.allclose(neibs, [
assert np.allclose(neibs, [
[0, 1, 4, 5],
[2, 3, 6, 7],
[8, 9, 12, 13],
@@ -88,12 +88,12 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
g = function([], neibs2images(neibs, neib_shape, images.shape),
mode=self.mode)
assert numpy.allclose(images.get_value(borrow=True), g())
assert np.allclose(images.get_value(borrow=True), g())
def test_neibs_manual_step(self):
shape = (2, 3, 5, 5)
for dtype in self.dtypes:
images = shared(numpy.asarray(numpy.arange(numpy.prod(
images = shared(np.asarray(np.arange(np.prod(
shape)).reshape(shape), dtype=dtype))
neib_shape = T.as_tensor_variable((3, 3))
neib_step = T.as_tensor_variable((2, 2))
@@ -107,7 +107,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
assert self.op in [type(node.op)
for node in f.maker.fgraph.toposort()]
assert numpy.allclose(neibs, [
assert np.allclose(neibs, [
[0, 1, 2, 5, 6, 7, 10, 11, 12],
[2, 3, 4, 7, 8, 9, 12, 13, 14],
[10, 11, 12, 15, 16, 17, 20, 21, 22],
@@ -143,8 +143,8 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
def test_neibs_bad_shape(self):
shape = (2, 3, 10, 10)
for dtype in self.dtypes:
images = shared(numpy.arange(
numpy.prod(shape), dtype=dtype).reshape(shape))
images = shared(np.arange(
np.prod(shape), dtype=dtype).reshape(shape))
for neib_shape in [(3, 2), (2, 3)]:
neib_shape = T.as_tensor_variable(neib_shape)
@@ -212,11 +212,11 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
for dtype in self.dtypes:
images = shared(numpy.asarray(numpy.arange(numpy.prod(
images = shared(np.asarray(np.arange(np.prod(
shape)).reshape(shape), dtype=dtype))
neib_shape = T.as_tensor_variable(neib_shape)
neib_step = T.as_tensor_variable(neib_step)
expected = numpy.asarray(expected)
expected = np.asarray(expected)
f = function([], images2neibs(images, neib_shape, neib_step,
mode="wrap_centered"),
@@ -225,7 +225,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
if expected.size > 1:
for i in range(shape[0] * shape[1]):
assert numpy.allclose(
assert np.allclose(
neibs[i * expected.shape[0]:(i + 1) * expected.shape[0], :],
expected + 25 * i), "wrap_centered"
@@ -240,8 +240,8 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
shape = (2, 3, 10, 10)
for dtype in self.dtypes:
images = shared(numpy.arange(
numpy.prod(shape), dtype=dtype
images = shared(np.arange(
np.prod(shape), dtype=dtype
).reshape(shape))
for neib_shape in [(3, 2), (2, 3)]:
@@ -253,7 +253,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
self.assertRaises(TypeError, f)
for shape in [(2, 3, 2, 3), (2, 3, 3, 2)]:
images = shared(numpy.arange(numpy.prod(shape)).reshape(shape))
images = shared(np.arange(np.prod(shape)).reshape(shape))
neib_shape = T.as_tensor_variable((3, 3))
f = function([], images2neibs(images, neib_shape,
mode="wrap_centered"),
@@ -262,7 +262,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
# Test a valid shape
shape = (2, 3, 3, 3)
images = shared(numpy.arange(numpy.prod(shape)).reshape(shape))
images = shared(np.arange(np.prod(shape)).reshape(shape))
neib_shape = T.as_tensor_variable((3, 3))
f = function([],
@@ -273,7 +273,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
def test_grad_wrap_centered(self):
# It is not implemented for now. So test that we raise an error.
shape = (2, 3, 6, 6)
images_val = numpy.random.rand(*shape).astype('float32')
images_val = np.random.rand(*shape).astype('float32')
def fn(images):
return images2neibs(images, (3, 3), mode='wrap_centered')
@@ -283,7 +283,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
def test_grad_valid(self):
shape = (2, 3, 6, 6)
images_val = numpy.random.rand(*shape).astype('float32')
images_val = np.random.rand(*shape).astype('float32')
def fn(images):
return images2neibs(images, (2, 2))
@@ -305,7 +305,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
def test_grad_ignore_border(self):
shape = (2, 3, 5, 5)
images_val = numpy.random.rand(*shape).astype('float32')
images_val = np.random.rand(*shape).astype('float32')
def fn(images):
return images2neibs(images, (2, 2),
@@ -317,7 +317,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
def test_neibs2images_grad(self):
# say we had images of size (2, 3, 10, 10);
# extracting 2x2 neighbors from them gives a (2 * 3 * 5 * 5, 4) result
neibs_val = numpy.random.rand(150, 4)
neibs_val = np.random.rand(150, 4)
def fn(neibs):
return neibs2images(neibs, (2, 2), (2, 3, 10, 10))
@@ -327,8 +327,8 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
def test_neibs_valid_with_inconsistent_borders(self):
shape = (2, 3, 5, 5)
images = T.dtensor4()
images_val = numpy.arange(numpy.prod(shape),
dtype='float32').reshape(shape)
images_val = np.arange(np.prod(shape),
dtype='float32').reshape(shape)
def fn(images):
return T.sum(T.sqr(images2neibs(images, (2, 2), mode='valid')),
@@ -356,7 +356,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
original_size, mode=mode)
f = theano.function([patsRecovery, original_size], out)
im_val = numpy.ones((1, 3, 320, 320), dtype=numpy.float32)
im_val = np.ones((1, 3, 320, 320), dtype=np.float32)
neibs = extractPatches(im_val)
f(neibs, im_val.shape)
# Wrong number of dimensions
@@ -368,8 +368,8 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
def speed_neibs(self):
shape = (100, 40, 18, 18)
images = shared(numpy.arange(numpy.prod(shape),
dtype='float32').reshape(shape))
images = shared(np.arange(np.prod(shape),
dtype='float32').reshape(shape))
neib_shape = T.as_tensor_variable((3, 3))
f = function([], images2neibs(images, neib_shape),
@@ -380,8 +380,8 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
def speed_neibs_wrap_centered(self):
shape = (100, 40, 18, 18)
images = shared(numpy.arange(numpy.prod(shape),
dtype='float32').reshape(shape))
images = shared(np.arange(np.prod(shape),
dtype='float32').reshape(shape))
neib_shape = T.as_tensor_variable((3, 3))
f = function([],
@@ -393,7 +393,7 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
def test_infer_shape(self):
shape = (100, 40, 6, 3)
images = numpy.ones(shape).astype('float32')
images = np.ones(shape).astype('float32')
x = T.ftensor4()
self._compile_and_check(
[x], [images2neibs(x, neib_shape=(2, 1), mode='valid')],
@@ -402,14 +402,14 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
[x], [images2neibs(x, neib_shape=(2, 3), mode='valid')],
[images], Images2Neibs)
shape = (100, 40, 5, 4)
images = numpy.ones(shape).astype('float32')
images = np.ones(shape).astype('float32')
x = T.ftensor4()
self._compile_and_check(
[x], [images2neibs(
x, neib_shape=(2, 1), mode='ignore_borders')],
[images], Images2Neibs)
shape = (100, 40, 5, 3)
images = numpy.ones(shape).astype('float32')
images = np.ones(shape).astype('float32')
x = T.ftensor4()
self._compile_and_check(
[x], [images2neibs(
@@ -417,14 +417,14 @@ class T_Images2Neibs(unittest_tools.InferShapeTester):
[images], Images2Neibs)
shape = (100, 40, 6, 7)
images = numpy.ones(shape).astype('float32')
images = np.ones(shape).astype('float32')
x = T.ftensor4()
self._compile_and_check(
[x], [images2neibs(
x, neib_shape=(2, 2), mode='ignore_borders')],
[images], Images2Neibs)
shape = (100, 40, 5, 10)
images = numpy.ones(shape).astype('float32')
images = np.ones(shape).astype('float32')
x = T.ftensor4()
self._compile_and_check(
[x], [images2neibs(
......
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy as np
from nose.plugins.skip import SkipTest
from six.moves import xrange
@@ -47,7 +47,7 @@ class T_sigmoid(unittest.TestCase):
utt.seed_rng()
def test_elemwise(self):
utt.verify_grad(sigmoid, [numpy.random.rand(3, 4)])
utt.verify_grad(sigmoid, [np.random.rand(3, 4)])
class T_softplus(unittest.TestCase):
@@ -56,7 +56,7 @@ class T_softplus(unittest.TestCase):
utt.seed_rng()
def test_elemwise(self):
utt.verify_grad(softplus, [numpy.random.rand(3, 4)])
utt.verify_grad(softplus, [np.random.rand(3, 4)])
class T_Softmax(utt.InferShapeTester):
@@ -64,26 +64,26 @@ class T_Softmax(utt.InferShapeTester):
def test0(self):
def f(a):
return softmax_op(a)[:, 0]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
utt.verify_grad(f, [np.random.rand(3, 4)])
def test1(self):
def f(a):
return softmax_op(a)[:, 1]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
utt.verify_grad(f, [np.random.rand(3, 4)])
def test2(self):
def f(a):
return softmax_op(a)[:, 2]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
utt.verify_grad(f, [np.random.rand(3, 4)])
def test3(self):
def f(a):
return softmax_op(a)[:, 3]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
utt.verify_grad(f, [np.random.rand(3, 4)])
def test_infer_shape(self):
admat = matrix()
admat_val = numpy.random.rand(3, 4).astype(config.floatX)
admat_val = np.random.rand(3, 4).astype(config.floatX)
self._compile_and_check([admat], [Softmax()(admat)],
[admat_val], Softmax)
@@ -91,13 +91,13 @@ class T_Softmax(utt.InferShapeTester):
x = T.vector()
f = theano.function([x], softmax_op(x))
xv = numpy.random.randn(6).astype(config.floatX)
assert numpy.allclose(f(xv), numpy.exp(xv) / numpy.exp(xv).sum())
xv = np.random.randn(6).astype(config.floatX)
assert np.allclose(f(xv), np.exp(xv) / np.exp(xv).sum())
def test_vector_grad(self):
def f(a):
return softmax_op(a)
utt.verify_grad(f, [numpy.random.rand(4)])
utt.verify_grad(f, [np.random.rand(4)])
class T_SoftmaxWithBias(utt.InferShapeTester):
@@ -105,35 +105,35 @@ class T_SoftmaxWithBias(utt.InferShapeTester):
def test0(self):
def f(a, b):
return softmax_with_bias(a, b)[:, 0]
utt.verify_grad(f, [numpy.random.rand(3, 4),
numpy.random.rand(4)])
utt.verify_grad(f, [np.random.rand(3, 4),
np.random.rand(4)])
def test1(self):
def f(a, b):
return softmax_with_bias(a, b)[:, 1]
utt.verify_grad(f, [numpy.random.rand(3, 4),
numpy.random.rand(4)])
utt.verify_grad(f, [np.random.rand(3, 4),
np.random.rand(4)])
def test2(self):
def f(a, b):
return softmax_with_bias(a, b)[:, 2]
utt.verify_grad(f, [numpy.random.rand(3, 4),
numpy.random.rand(4)])
utt.verify_grad(f, [np.random.rand(3, 4),
np.random.rand(4)])
def test3(self):
def f(a, b):
return softmax_with_bias(a, b)[:, 3]
utt.verify_grad(f, [numpy.random.rand(3, 4),
numpy.random.rand(4)])
utt.verify_grad(f, [np.random.rand(3, 4),
np.random.rand(4)])
def test_broadcast(self):
# test that we don't raise an error during optimization for no good
# reason, as softmax_with_bias does not correctly support some/all
# broadcasted input patterns
initial_W = numpy.asarray([[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1]],
dtype=theano.config.floatX)
initial_W = np.asarray([[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1]],
dtype=theano.config.floatX)
W = theano.shared(value=initial_W, name='W')
vbias = theano.shared(value=0.1, name='vbias') # 0.01
hid = T.vector('hid')
@@ -148,8 +148,8 @@ class T_SoftmaxWithBias(utt.InferShapeTester):
def test_softmax_with_bias_trace(self):
a = theano.shared(
numpy.random.randn(3).astype(config.floatX))
b = theano.shared(numpy.float32(numpy.random.randn()))
np.random.randn(3).astype(config.floatX))
b = theano.shared(np.float32(np.random.randn()))
sm = T.nnet.softmax(a + b)
f = theano.function([], sm)
assert check_stack_trace(f, ops_to_check='last')
@@ -157,8 +157,8 @@ class T_SoftmaxWithBias(utt.InferShapeTester):
def test_infer_shape(self):
admat = matrix()
advec = vector()
admat_val = numpy.random.rand(3, 4).astype(config.floatX)
advec_val = numpy.random.rand(4).astype(config.floatX)
admat_val = np.random.rand(3, 4).astype(config.floatX)
advec_val = np.random.rand(4).astype(config.floatX)
self._compile_and_check([admat, advec],
[SoftmaxWithBias()(admat, advec)],
[admat_val, advec_val], SoftmaxWithBias)
@@ -169,40 +169,40 @@ class T_LogSoftmax(utt.InferShapeTester):
def test0(self):
def f(a):
return logsoftmax_op(a)[:, 0]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
utt.verify_grad(f, [np.random.rand(3, 4)])
def test1(self):
def f(a):
return logsoftmax_op(a)[:, 1]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
utt.verify_grad(f, [np.random.rand(3, 4)])
def test2(self):
def f(a):
return logsoftmax_op(a)[:, 2]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
utt.verify_grad(f, [np.random.rand(3, 4)])
def test3(self):
def f(a):
return logsoftmax_op(a)[:, 3]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
utt.verify_grad(f, [np.random.rand(3, 4)])
def test_matrix(self):
def f(a):
return logsoftmax_op(a)
utt.verify_grad(f, [numpy.random.rand(3, 4)])
utt.verify_grad(f, [np.random.rand(3, 4)])
def test_vector(self):
x = T.vector()
f = theano.function([x], logsoftmax_op(x))
xv = numpy.random.randn(6).astype(config.floatX)
assert numpy.allclose(f(xv),
numpy.log(numpy.exp(xv) / numpy.exp(xv).sum()))
xv = np.random.randn(6).astype(config.floatX)
assert np.allclose(f(xv),
np.log(np.exp(xv) / np.exp(xv).sum()))
def test_vector_grad(self):
def f(a):
return logsoftmax_op(a)
utt.verify_grad(f, [numpy.random.rand(4)])
utt.verify_grad(f, [np.random.rand(4)])
def test_allclose(self):
m = theano.config.mode
@@ -220,9 +220,9 @@ class T_LogSoftmax(utt.InferShapeTester):
grad = tensor.grad(cm2.mean(), x)
# create some large inputs for a softmax, and some labels
a = numpy.exp(10 * numpy.random.rand(5, 10).astype(theano.config.floatX))
a = np.exp(10 * np.random.rand(5, 10).astype(theano.config.floatX))
# create some one-hot coded labels
b = numpy.eye(5, 10).astype(theano.config.floatX)
b = np.eye(5, 10).astype(theano.config.floatX)
# show equivalence of softmax and exponentiated numerically stable
# log-softmax
@@ -241,7 +241,7 @@ class T_LogSoftmax(utt.InferShapeTester):
# while in the log-softmax case they don't
f3 = theano.function([x, y], [grad])
grad_ = f3(a, b)
assert not numpy.any(numpy.isnan(grad_))
assert not np.any(np.isnan(grad_))
def test_isclose(self):
def f(a):
@@ -274,8 +274,8 @@ class T_LogSoftmax(utt.InferShapeTester):
m.check_isfinite = False
# some inputs that are large to make the gradient explode in the non
# optimized case
a = numpy.exp(
10 * numpy.random.rand(5, 10).astype(theano.config.floatX))
a = np.exp(
10 * np.random.rand(5, 10).astype(theano.config.floatX))
def myfunc(x):
sm = tensor.nnet.softmax(x)
@@ -317,8 +317,8 @@ class T_SoftmaxGrad(utt.InferShapeTester):
def test_infer_shape(self):
admat = matrix()
bdmat = matrix()
admat_val = numpy.random.rand(3, 4).astype(config.floatX)
bdmat_val = numpy.random.rand(3, 4).astype(config.floatX)
admat_val = np.random.rand(3, 4).astype(config.floatX)
bdmat_val = np.random.rand(3, 4).astype(config.floatX)
self._compile_and_check([admat, bdmat], [SoftmaxGrad()(admat, bdmat)],
[admat_val, bdmat_val], SoftmaxGrad)
@@ -333,29 +333,29 @@ class T_CrossentropySoftmax1Hot(unittest.TestCase):
def f(a, b):
return crossentropy_softmax_1hot_with_bias(a, b, y_idx)[0]
utt.verify_grad(f, [numpy.random.rand(3, 4),
numpy.random.rand(4)])
utt.verify_grad(f, [np.random.rand(3, 4),
np.random.rand(4)])
def test1(self):
y_idx = [0, 1, 3]
def f(a):
return crossentropy_softmax_1hot(a, y_idx)[0]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
utt.verify_grad(f, [np.random.rand(3, 4)])
def test_vector(self):
y_idx = [3]
def f(a):
return crossentropy_softmax_1hot(T.shape_padleft(a), y_idx)[0]
utt.verify_grad(f, [numpy.random.rand(4)])
utt.verify_grad(f, [np.random.rand(4)])
def test_vectors(self):
y_idx = [3]
def f(a, b):
return crossentropy_softmax_1hot(T.shape_padleft(a) + b, y_idx)[0]
utt.verify_grad(f, [numpy.random.rand(4), numpy.random.rand(4)])
utt.verify_grad(f, [np.random.rand(4), np.random.rand(4)])
class T_CrossentropySoftmax1HotWithBiasDx(utt.InferShapeTester):
@@ -364,20 +364,20 @@ class T_CrossentropySoftmax1HotWithBiasDx(utt.InferShapeTester):
def ff(class_dtype):
def f(sm):
# Class indices
y = numpy.random.randint(low=0, high=5, size=10).astype(class_dtype)
y = np.random.randint(low=0, high=5, size=10).astype(class_dtype)
return theano.tensor.nnet.crossentropy_softmax_1hot_with_bias_dx(
numpy.random.rand(10), # Gradient w.r.t. NLL.
np.random.rand(10), # Gradient w.r.t. NLL.
sm, # Softmax output.
y)
return f
# Build a random softmax output whose rows sum to 1.
softmax_output = numpy.random.rand(10, 5)
softmax_output = np.random.rand(10, 5)
softmax_output /= softmax_output.sum(axis=1).reshape(10, 1)
for dtype in ['uint8', 'int8', 'uint64', 'int64']:
utt.verify_grad(ff(dtype), [softmax_output])
def test1(self):
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
softmax_output = rng.rand(10, 5)
softmax_output /= softmax_output.sum(axis=1).reshape(10, 1)
@@ -392,7 +392,7 @@ class T_CrossentropySoftmax1HotWithBiasDx(utt.InferShapeTester):
admat = matrix()
advec = vector()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(10, 5).astype(config.floatX)
admat_val /= admat_val.sum(axis=1).reshape(10, 1)
advec_val = rng.rand(10).astype(config.floatX)
@@ -407,7 +407,7 @@ class T_CrossentropySoftmax1HotWithBiasDx(utt.InferShapeTester):
admat = matrix()
advec = vector()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(10, 5).astype(config.floatX)
admat_val /= admat_val.sum(axis=1).reshape(10, 1)
advec_val = rng.rand(10).astype(config.floatX)
@@ -431,28 +431,28 @@ class T_CrossentropySoftmaxArgmax1HotWithBias(utt.InferShapeTester):
# First test gradient when getting a gradient on the NLL output.
def grad_on_nll_dtype(dtype):
def grad_on_nll(x, b):
y_idx = numpy.random.randint(low=0, high=n_classes, size=n_samples).astype(dtype)
y_idx = np.random.randint(low=0, high=n_classes, size=n_samples).astype(dtype)
return self.op(x, b, y_idx=y_idx)[0]
return grad_on_nll
for dtype in ['uint8', 'int8', 'uint64', 'int64']:
utt.verify_grad(grad_on_nll_dtype(dtype),
[numpy.random.rand(n_samples, n_classes),
numpy.random.rand(n_classes)])
[np.random.rand(n_samples, n_classes),
np.random.rand(n_classes)])
# Then test gradient when getting a gradient on the softmax output.
def grad_on_softmax(x, b):
return self.op(x, b, y_idx=numpy.random.randint(
return self.op(x, b, y_idx=np.random.randint(
low=0, high=n_classes, size=n_samples))[1]
utt.verify_grad(
grad_on_softmax,
[numpy.random.rand(n_samples, n_classes),
numpy.random.rand(n_classes)])
[np.random.rand(n_samples, n_classes),
np.random.rand(n_classes)])
def test_infer_shape(self):
admat = matrix()
advec = vector()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(3, 5).astype(config.floatX)
advec_val = rng.rand(5).astype(config.floatX)
alvec_val = rng.randint(low=0, high=5, size=3)
......@@ -466,7 +466,7 @@ class T_CrossentropySoftmaxArgmax1HotWithBias(utt.InferShapeTester):
admat = matrix()
advec = vector()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(3, 5).astype(config.floatX)
advec_val = rng.rand(5).astype(config.floatX)
alvec_val = rng.randint(low=0, high=5, size=3)
......@@ -482,27 +482,27 @@ class T_prepend(utt.InferShapeTester):
x = tensor.matrix('x')
y = Prepend_scalar_constant_to_each_row(4.)(x)
f = theano.function([x], y)
m = numpy.random.rand(3, 5).astype(config.floatX)
m = np.random.rand(3, 5).astype(config.floatX)
my = f(m)
self.assertTrue(my.shape == (3, 6), my.shape)
self.assertTrue(numpy.all(my[:, 0] == 4.0))
self.assertTrue(np.all(my[:, 0] == 4.0))
def test1(self):
"basic functionality"
x = tensor.matrix('x')
y = Prepend_scalar_to_each_row()(5., x)
f = theano.function([x], y)
m = numpy.ones((3, 5), dtype="float32")
m = np.ones((3, 5), dtype="float32")
my = f(m)
self.assertTrue(my.shape == (3, 6))
self.assertTrue(numpy.all(my[:, 0] == 5.0))
self.assertTrue(np.all(my[:, 0] == 5.0))
def test_infer_shape(self):
admat = matrix()
adscal = scalar()
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(3, 5).astype(config.floatX)
adscal_val = numpy.asarray(rng.rand(), dtype=config.floatX).item()
adscal_val = np.asarray(rng.rand(), dtype=config.floatX).item()
self._compile_and_check(
[admat],
[Prepend_scalar_constant_to_each_row(adscal_val)(admat)],
......@@ -522,7 +522,7 @@ class T_CrossentropyCategorical1HotGrad(utt.InferShapeTester):
advec = vector()
admat = matrix()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
advec_val = rng.rand(3).astype(config.floatX)
admat_val = rng.rand(3, 2).astype(config.floatX)
alvec_val = [0, 1, 0]
......@@ -541,21 +541,21 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
op = crossentropy_categorical_1hot
xe = op(x, one_of_n)
f = theano.function([x, one_of_n], xe)
x_val = numpy.asarray(
x_val = np.asarray(
[[.4, .6, .0], [.1, .8, .1]],
dtype=config.floatX)
xe_val = f(x_val, [0, 1])
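# With targets [0, 1], the picked row probabilities are .4 and .8, so the
# expected cross-entropy is -log([.4, .8]).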
assert numpy.allclose(xe_val, -numpy.log([.4, .8]))
assert np.allclose(xe_val, -np.log([.4, .8]))
def oplike(x):
return op(x, [0, 1])
tensor.verify_grad(oplike, [x_val], rng=numpy.random)
tensor.verify_grad(oplike, [x_val], rng=np.random)
def test_infer_shape(self):
admat = matrix()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(3, 2).astype(config.floatX)
alvec_val = [0, 1, 0]
self._compile_and_check(
......@@ -775,10 +775,10 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x_val = rng.randn(3, 5).astype(config.floatX)
b_val = rng.randn(5).astype(config.floatX)
y_val = numpy.asarray([2, 4, 1])
y_val = np.asarray([2, 4, 1])
x = T.matrix('x')
b = T.vector('b')
y = T.lvector('y')
......@@ -954,9 +954,9 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x_val = rng.randn(3, 5).astype(config.floatX)
y_val = numpy.asarray([2, 4, 1], dtype='int64')
y_val = np.asarray([2, 4, 1], dtype='int64')
x = T.matrix('x')
y = T.lvector('y')
yi = T.cast(y, 'int32')
......@@ -1002,9 +1002,9 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x_val = rng.randn(5).astype(config.floatX)
y_val = numpy.asarray([2])
y_val = np.asarray([2])
x = T.vector('x')
y = T.lvector('y')
......@@ -1047,10 +1047,10 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x_val = rng.randn(5).astype(config.floatX)
b_val = rng.randn(5).astype(config.floatX)
y_val = numpy.asarray([2])
y_val = np.asarray([2])
x = T.vector('x')
b = T.vector('b')
......@@ -1107,10 +1107,10 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x_val = rng.randn(5).astype(config.floatX)
b_val = rng.randn(5).astype(config.floatX)
y_val = numpy.asarray([2])
y_val = np.asarray([2])
x = T.vector('x')
b = T.vector('b')
......@@ -1169,10 +1169,10 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x_val = rng.randn(5).astype(config.floatX)
b_val = rng.randn(5).astype(config.floatX)
y_val = numpy.asarray([2])
y_val = np.asarray([2])
x = T.vector('x')
b = T.vector('b')
......@@ -1228,9 +1228,9 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x_val = rng.randn(3, 5).astype(config.floatX)
y_val = numpy.asarray([2, 4, 1])
y_val = np.asarray([2, 4, 1])
x = T.matrix('x')
y = T.lvector('y')
a = T.scalar('a')
......@@ -1442,21 +1442,21 @@ def test_asymptotic_32():
f = theano.function([x, y, x2], [c.sum(),
tensor.grad(c.sum(), x)], mode='FAST_RUN')
xval = numpy.zeros((5, 5), dtype=dtype).astype(dtype)
x2val = numpy.zeros(5, dtype=xval.dtype).astype(dtype)
xval = np.zeros((5, 5), dtype=dtype).astype(dtype)
x2val = np.zeros(5, dtype=xval.dtype).astype(dtype)
for i in xrange(100):
cval, gxval = f(xval, numpy.arange(5), x2val)
cval, gxval = f(xval, np.arange(5), x2val)
xval -= 100.3 * gxval
# print cval, gxval
assert cval == 0 # no problem going to zero error
# what about when x gets really big?
xval = numpy.zeros((5, 5), dtype=dtype)
x2val = numpy.zeros(5, dtype=xval.dtype)
xval = np.zeros((5, 5), dtype=dtype)
x2val = np.zeros(5, dtype=xval.dtype)
for i in xrange(100):
cval, gxval = f(xval, numpy.arange(5), x2val)
cval, gxval = f(xval, np.arange(5), x2val)
xval += 100000.3 * gxval
# print cval, gxval
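# The ascent step drives x toward large magnitudes; the point is that the
# stabilized float32 graph should keep the cost and gradient finite rather
# than overflowing to nan or inf.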
......@@ -1477,7 +1477,7 @@ class Test_softmax_opt:
def setUp(self):
utt.seed_rng()
self.rng = numpy.random.RandomState(utt.fetch_seed())
self.rng = np.random.RandomState(utt.fetch_seed())
self.mode = theano.compile.mode.get_default_mode()
self.mode = self.mode.including('canonicalize')
......@@ -1581,7 +1581,7 @@ class Test_softmax_opt:
def test_softmax_graph():
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x = theano.shared(rng.normal(size=(3, 4)))
def f(inputs):
......@@ -1592,7 +1592,7 @@ def test_softmax_graph():
def test_grad_softmax_grad():
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x = theano.shared(rng.normal(size=(3, 4)))
def f(inputs):
......@@ -1618,39 +1618,39 @@ def test_stabilize_log_softmax():
# call the function so debug mode can verify the optimized
# version matches the unoptimized version
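# The stabilization effectively rewrites log(softmax(x)) as x - logsumexp(x),
# so no intermediate softmax value can underflow before the log.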
rng = numpy.random.RandomState([2012, 8, 22])
f(numpy.cast[config.floatX](rng.randn(2, 3)))
rng = np.random.RandomState([2012, 8, 22])
f(np.cast[config.floatX](rng.randn(2, 3)))
def test_relu():
x = matrix('x')
seed = theano.tests.unittest_tools.fetch_seed()
rng = numpy.random.RandomState(seed)
rng = np.random.RandomState(seed)
X = rng.randn(20, 30).astype(config.floatX)
# test the base case, without custom alpha value
y = relu(x).eval({x: X})
assert numpy.allclose(y, numpy.maximum(X, 0))
assert np.allclose(y, np.maximum(X, 0))
# test for different constant alpha values (also outside of [0, 1])
for alpha in 0, 0.3, 1, 2, -0.3, -1, -2:
y = relu(x, alpha).eval({x: X})
assert numpy.allclose(y, numpy.where(X > 0, X, alpha * X))
assert np.allclose(y, np.where(X > 0, X, alpha * X))
# test for variable alpha (scalar, vector and matrix)
for alpha in scalar(), vector(), matrix():
# create a value for alpha (correct ndim, broadcastable against X)
A = numpy.array(rng.randn(*X.shape[::-1][:alpha.ndim][::-1]),
dtype=config.floatX)
A = np.array(rng.randn(*X.shape[::-1][:alpha.ndim][::-1]),
dtype=config.floatX)
y = relu(x, alpha).eval({x: X, alpha: A})
assert numpy.allclose(y, numpy.where(X > 0, X, A * X), rtol=3e-5)
assert np.allclose(y, np.where(X > 0, X, A * X), rtol=3e-5)
# test that an ndarray alpha doesn't cause an upcast.
x = matrix('x', dtype='float32')
rng = numpy.random.RandomState(seed)
rng = np.random.RandomState(seed)
X = rng.randn(20, 30).astype('float32')
alpha = numpy.asarray(.123, dtype='float32')
alpha = np.asarray(.123, dtype='float32')
y = relu(x, alpha).eval({x: X})
assert numpy.allclose(y, numpy.where(X > 0, X, alpha * X))
assert np.allclose(y, np.where(X > 0, X, alpha * X))
assert y.dtype == 'float32'
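# For reference, the expected value asserted above is the leaky rectifier; a
# minimal NumPy sketch (an illustration, not theano.tensor.nnet.relu itself);
# broadcasting makes the same expression cover the tensor-alpha cases:
def _reference_relu(x, alpha=0.0):
    # alpha scales the negative part; alpha=0 gives the plain rectifier,
    # alpha=1 the identity.
    return np.where(x > 0, x, alpha * x)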
......@@ -1678,20 +1678,20 @@ def test_h_softmax():
shared = theano.shared
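# h_softmax factors a softmax over output_size classes into two levels:
# W1/b1 score h_softmax_level1_size groups and W2/b2 score
# h_softmax_level2_size candidates within the chosen group, so only one
# group's output scores need to be computed per target (our reading of the
# parameter shapes below).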
# First level of h_softmax
W1 = numpy.asarray(numpy.random.normal(
W1 = np.asarray(np.random.normal(
size=(input_size, h_softmax_level1_size)), dtype=floatX)
W1 = shared(W1)
b1 = shared(numpy.asarray(numpy.zeros((h_softmax_level1_size,)),
dtype=floatX))
b1 = shared(np.asarray(np.zeros((h_softmax_level1_size,)),
dtype=floatX))
# Second level of h_softmax
W2 = numpy.asarray(numpy.random.normal(
W2 = np.asarray(np.random.normal(
size=(h_softmax_level1_size, input_size, h_softmax_level2_size)),
dtype=floatX)
W2 = shared(W2)
b2 = shared(
numpy.asarray(numpy.zeros((h_softmax_level1_size,
h_softmax_level2_size)), dtype=floatX))
np.asarray(np.zeros((h_softmax_level1_size,
h_softmax_level2_size)), dtype=floatX))
#############
# Build graph
......@@ -1716,8 +1716,8 @@ def test_h_softmax():
#############
# Test
#############
x_mat = numpy.random.normal(size=(batch_size, input_size)).astype(floatX)
y_mat = numpy.random.randint(0, output_size, batch_size).astype('int32')
x_mat = np.random.normal(size=(batch_size, input_size)).astype(floatX)
y_mat = np.random.randint(0, output_size, batch_size).astype('int32')
tg_output = fun_output_tg(x_mat, y_mat)
all_outputs = fun_output(x_mat)
......@@ -1727,23 +1727,23 @@ def test_h_softmax():
# Verifies that the outputs computed by fun_output_tg are the same as those
# computed by fun_output.
utt.assert_allclose(
all_outputs[numpy.arange(0, batch_size), y_mat], tg_output)
all_outputs[np.arange(0, batch_size), y_mat], tg_output)
def test_elu():
x = matrix('x')
seed = theano.tests.unittest_tools.fetch_seed()
rng = numpy.random.RandomState(seed)
rng = np.random.RandomState(seed)
X = rng.randn(20, 30).astype(config.floatX)
# test the base case, without custom alpha value
y = elu(x).eval({x: X})
utt.assert_allclose(y, numpy.where(X > 0, X, numpy.exp(X) - 1))
utt.assert_allclose(y, np.where(X > 0, X, np.exp(X) - 1))
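# With alpha = 1 this is the standard ELU: the identity for positive inputs
# and exp(X) - 1, saturating at -1, for negative ones.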
# test for different constant alpha values
for alpha in 1.5, 2, -1, -1.5, -2:
y = elu(x, alpha).eval({x: X})
utt.assert_allclose(y, numpy.where(X > 0, X, alpha * (numpy.exp(X) - 1)))
utt.assert_allclose(y, np.where(X > 0, X, alpha * (np.exp(X) - 1)))
def test_binary_crossentropy_reshape():
......@@ -1756,13 +1756,13 @@ def test_binary_crossentropy_reshape():
# This only works when "specialize" options are included
mode = theano.compile.get_default_mode().including('fast_run')
fga = theano.function([a], ga, mode=mode)
utt.assert_allclose(fga(numpy.array([[[[30.]]]], dtype=config.floatX)),
numpy.zeros((1, 1, 1, 1), dtype=config.floatX))
utt.assert_allclose(fga(np.array([[[[30.]]]], dtype=config.floatX)),
np.zeros((1, 1, 1, 1), dtype=config.floatX))
SoftsignTester = makeBroadcastTester(
op=softsign,
expected=upcast_int8_nfunc(lambda inputs: check_floatX(
inputs, inputs / (1.0 + numpy.fabs(inputs)))),
inputs, inputs / (1.0 + np.fabs(inputs)))),
good=_good_broadcast_unary_normal_float_no_complex,
name='SoftsignTester',
)
......@@ -1771,13 +1771,13 @@ SoftsignTester = makeBroadcastTester(
def test_confusion_matrix():
# Define a numpy reference implementation of the confusion matrix
def numpy_conf_mat(actual, pred):
order = numpy.union1d(actual, pred)
colA = numpy.matrix(actual).T
colP = numpy.matrix(pred).T
order = np.union1d(actual, pred)
colA = np.matrix(actual).T
colP = np.matrix(pred).T
oneHotA = colA.__eq__(order).astype('int64')
oneHotP = colP.__eq__(order).astype('int64')
conf_mat = numpy.dot(oneHotA.T, oneHotP)
conf_mat = numpy.asarray(conf_mat)
conf_mat = np.dot(oneHotA.T, oneHotP)
conf_mat = np.asarray(conf_mat)
return [conf_mat, order]
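# oneHotA.T.dot(oneHotP)[i, j] counts samples with actual label order[i] and
# predicted label order[j]; 'order' is the sorted union of labels observed in
# either vector.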
x = tensor.vector()
......@@ -1787,8 +1787,8 @@ def test_confusion_matrix():
[[2, 0, 2, 2, 0, 1], [0, 0, 2, 2, 0, 2]]]
for case in list_inputs:
a = numpy.asarray(case[0])
b = numpy.asarray(case[1])
a = np.asarray(case[0])
b = np.asarray(case[1])
out_exp = numpy_conf_mat(a, b)
outs = f(case[0], case[1])
for exp, out in zip(out_exp, outs):
......
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy as np
from theano.compat import imap
import theano.tensor.inplace
......@@ -26,12 +26,12 @@ class T_sigmoid(unittest.TestCase):
utt.seed_rng()
def test_elemwise(self):
utt.verify_grad(sigmoid, [numpy.random.rand(3, 4)])
utt.verify_grad(sigmoid, [np.random.rand(3, 4)])
SigmoidTester = makeBroadcastTester(
op=sigmoid,
expected=upcast_int8_nfunc(lambda inputs: check_floatX(
inputs, 1 / (1 + numpy.exp(-inputs)))),
inputs, 1 / (1 + np.exp(-inputs)))),
good=copymod(_good_broadcast_unary_normal_no_complex,
without=['uint16']), # The reason that 'uint16' is excluded is that
# theano works well but numpy overflows resulting
......@@ -43,7 +43,7 @@ SigmoidTester = makeBroadcastTester(
UltraFastSigmoidTester = makeBroadcastTester(
op=ultra_fast_sigmoid,
expected=upcast_int8_nfunc(lambda inputs: check_floatX(
inputs, 1 / (1 + numpy.exp(-inputs)))),
inputs, 1 / (1 + np.exp(-inputs)))),
good=copymod(_good_broadcast_unary_normal_no_complex,
without=['uint16']), # the numpy function overflows with uint16.
# grad=_grad_broadcast_unary_normal,
......@@ -54,7 +54,7 @@ UltraFastSigmoidTester = makeBroadcastTester(
HardSigmoidTester = makeBroadcastTester(
op=hard_sigmoid,
expected=upcast_int8_nfunc(lambda inputs: check_floatX(
inputs, 1 / (1 + numpy.exp(-inputs)))),
inputs, 1 / (1 + np.exp(-inputs)))),
good=copymod(_good_broadcast_unary_normal_no_complex,
without=['uint16']), # the numpy function overflows with uint16.
# grad=_grad_broadcast_unary_normal,
......@@ -66,11 +66,11 @@ HardSigmoidTester = makeBroadcastTester(
SoftplusTester = makeBroadcastTester(
op=softplus,
expected=upcast_int8_nfunc(lambda inputs: check_floatX(
inputs, numpy.log1p(numpy.exp(inputs)))),
inputs, np.log1p(np.exp(inputs)))),
good=dict(copymod(_good_broadcast_unary_normal_no_complex,
without=['uint8', 'uint16']), # the numpy function overflows with uint16.
uint8=[numpy.arange(0, 89, dtype='uint8')], # the range differs for the newly added uint8 case.
int8=[numpy.arange(-127, 89, dtype='int8')]),
uint8=[np.arange(0, 89, dtype='uint8')], # the range differs for the newly added uint8 case.
int8=[np.arange(-127, 89, dtype='int8')]),
# grad=_grad_broadcast_unary_normal,
name='SoftplusTester',
)
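# The reference expression above, log1p(exp(x)), overflows for large x; a
# numerically stable NumPy sketch of the same function (an illustration, not
# theano's ScalarSoftplus) is:
def _stable_softplus(x):
    # max(x, 0) + log1p(exp(-|x|)) equals log(1 + exp(x)) for all x,
    # without overflowing the exp.
    return np.maximum(x, 0) + np.log1p(np.exp(-np.abs(x)))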
......@@ -81,7 +81,7 @@ class T_softplus(unittest.TestCase):
utt.seed_rng()
def test_elemwise(self):
utt.verify_grad(softplus, [numpy.random.rand(3, 4)])
utt.verify_grad(softplus, [np.random.rand(3, 4)])
class T_sigmoid_opts(unittest.TestCase):
......@@ -112,7 +112,7 @@ class T_sigmoid_opts(unittest.TestCase):
m = self.get_mode(excluding=['local_elemwise_fusion'])
x = T.vector()
data = numpy.random.rand(54).astype(config.floatX)
data = np.random.rand(54).astype(config.floatX)
backup = config.warn.identify_1pexp_bug
config.warn.identify_1pexp_bug = False
......@@ -321,7 +321,7 @@ class T_sigmoid_opts(unittest.TestCase):
if not isinstance(mode, theano.compile.DebugMode):
f = theano.function([x, lr], ux, mode=mode)
ux_v = f([[50]], 0.1)
assert not numpy.isnan(ux_v)
assert not np.isnan(ux_v)
def test_local_ultra_fast_sigmoid(self):
x = tensor.matrix('x')
......@@ -391,7 +391,7 @@ class T_softplus_opts(unittest.TestCase):
assert isinstance(topo[1].op.scalar_op,
theano.tensor.nnet.sigm.ScalarSoftplus)
assert isinstance(topo[2].op.scalar_op, theano.scalar.Neg)
f(numpy.random.rand(54).astype(config.floatX))
f(np.random.rand(54).astype(config.floatX))
def test_log1msigm_to_softplus(self):
x = T.matrix()
......@@ -404,7 +404,7 @@ class T_softplus_opts(unittest.TestCase):
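# Since 1 - sigmoid(x) = sigmoid(-x), log(1 - sigmoid(x)) simplifies to
# -softplus(x), which is why the optimized graph below is a Softplus
# followed by a Neg.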
theano.tensor.nnet.sigm.ScalarSoftplus)
assert isinstance(topo[1].op.scalar_op, theano.scalar.Neg)
# assert check_stack_trace(f, ops_to_check='all')
f(numpy.random.rand(54, 11).astype(config.floatX))
f(np.random.rand(54, 11).astype(config.floatX))
# Same test with a flatten
out = T.log(1 - T.flatten(sigmoid(x)))
......@@ -417,7 +417,7 @@ class T_softplus_opts(unittest.TestCase):
assert isinstance(topo[1].op.scalar_op,
theano.tensor.nnet.sigm.ScalarSoftplus)
assert isinstance(topo[2].op.scalar_op, theano.scalar.Neg)
f(numpy.random.rand(54, 11).astype(config.floatX))
f(np.random.rand(54, 11).astype(config.floatX))
# Same test with a reshape
out = T.log(1 - sigmoid(x).reshape([x.size]))
......@@ -428,7 +428,7 @@ class T_softplus_opts(unittest.TestCase):
assert any(isinstance(getattr(node.op, 'scalar_op', None),
theano.tensor.nnet.sigm.ScalarSoftplus)
for node in topo)
f(numpy.random.rand(54, 11).astype(config.floatX))
f(np.random.rand(54, 11).astype(config.floatX))
def test_log1pexp_to_softplus(self):
m = theano.config.mode
......@@ -446,7 +446,7 @@ class T_softplus_opts(unittest.TestCase):
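# log(1 + exp(x)) is exactly the definition of softplus, so the optimizer
# should collapse the whole expression into a single ScalarSoftplus node.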
assert len(topo) == 1
assert isinstance(topo[0].op.scalar_op,
theano.tensor.nnet.sigm.ScalarSoftplus)
f(numpy.random.rand(54).astype(config.floatX))
f(np.random.rand(54).astype(config.floatX))
class T_sigmoid_utils(unittest.TestCase):
......