提交 bea31470 authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #5782 from Amrithasuresh/master

Updated numpy as np #4218
......@@ -4,7 +4,7 @@ Provides Ops for FFT and DCT.
"""
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import numpy.fft
from six.moves import xrange
......@@ -126,13 +126,13 @@ def dct_matrix(rows, cols, unitary=True):
This algorithm is adapted from Dan Ellis' Rastmat spec2cep.m, lines 15-20.
"""
rval = numpy.zeros((rows, cols))
col_range = numpy.arange(cols)
scale = numpy.sqrt(2.0 / cols)
rval = np.zeros((rows, cols))
col_range = np.arange(cols)
scale = np.sqrt(2.0 / cols)
for i in xrange(rows):
rval[i] = numpy.cos(
i * (col_range * 2 + 1) / (2.0 * cols) * numpy.pi) * scale
rval[i] = np.cos(
i * (col_range * 2 + 1) / (2.0 * cols) * np.pi) * scale
if unitary:
rval[0] *= numpy.sqrt(0.5)
rval[0] *= np.sqrt(0.5)
return rval
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import numpy.linalg
import theano
......@@ -39,9 +39,9 @@ def test_rop_lop():
non_sequences=[y, mx, mv])
scan_f = function([mx, mv], sy)
rng = numpy.random.RandomState(utt.fetch_seed())
vx = numpy.asarray(rng.randn(4, 4), theano.config.floatX)
vv = numpy.asarray(rng.randn(4, 4), theano.config.floatX)
rng = np.random.RandomState(utt.fetch_seed())
vx = np.asarray(rng.randn(4, 4), theano.config.floatX)
vv = np.asarray(rng.randn(4, 4), theano.config.floatX)
v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv)
......@@ -61,7 +61,7 @@ def test_rop_lop():
'Op did not raised an error even though the function'
' is not differentiable'))
vv = numpy.asarray(rng.uniform(size=(4,)), theano.config.floatX)
vv = np.asarray(rng.uniform(size=(4,)), theano.config.floatX)
yv = tensor.Lop(y, mx, v)
lop_f = function([mx, v], yv)
......@@ -75,21 +75,21 @@ def test_rop_lop():
def test_spectral_radius_bound():
tol = 10 ** (-6)
rng = numpy.random.RandomState(utt.fetch_seed())
rng = np.random.RandomState(utt.fetch_seed())
x = theano.tensor.matrix()
radius_bound = spectral_radius_bound(x, 5)
f = theano.function([x], radius_bound)
shp = (3, 4)
m = rng.rand(*shp)
m = numpy.cov(m).astype(config.floatX)
m = np.cov(m).astype(config.floatX)
radius_bound_theano = f(m)
# test the approximation
mm = m
for i in range(5):
mm = numpy.dot(mm, mm)
radius_bound_numpy = numpy.trace(mm) ** (2 ** (-5))
mm = np.dot(mm, mm)
radius_bound_numpy = np.trace(mm) ** (2 ** (-5))
assert abs(radius_bound_numpy - radius_bound_theano) < tol
# test the bound
......
......@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy as np
from theano import gof, tensor, function
from theano.tests import unittest_tools as utt
......@@ -39,11 +39,11 @@ class Minimal(gof.Op):
# but do not modify any of the arguments [inplace].
print("perform got %i arguments" % len(inputs))
print("Max of input[0] is ", numpy.max(inputs[0]))
print("Max of input[0] is ", np.max(inputs[0]))
# return some computed value.
# do not return something that is aliased to one of the inputs.
output[0] = numpy.asarray(0, dtype='int64')
output[0] = np.asarray(0, dtype='int64')
minimal = Minimal()
......@@ -55,7 +55,7 @@ minimal = Minimal()
class T_minimal(unittest.TestCase):
def setUp(self):
self.rng = numpy.random.RandomState(utt.fetch_seed(666))
self.rng = np.random.RandomState(utt.fetch_seed(666))
def test0(self):
A = tensor.matrix()
......@@ -66,6 +66,6 @@ class T_minimal(unittest.TestCase):
print('built')
Aval = self.rng.randn(5, 5)
bval = numpy.arange(5, dtype=float)
bval = np.arange(5, dtype=float)
f(Aval, bval)
print('done')
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import warnings
import theano
......@@ -172,7 +172,7 @@ class MultinomialFromUniform(Op):
raise ValueError("unis.shape[0] != pvals.shape[0] * n_samples",
unis.shape[0], pvals.shape[0], n_samples)
if z[0] is None or z[0].shape != pvals.shape:
z[0] = numpy.zeros(pvals.shape, dtype=node.outputs[0].dtype)
z[0] = np.zeros(pvals.shape, dtype=node.outputs[0].dtype)
else:
z[0].fill(0)
......@@ -209,7 +209,7 @@ class MultinomialFromUniform(Op):
# have the same answer as the c code as in the c code
# the cumul is in double precission.
cumsum = pvals[n].cumsum(dtype='float64')
z[0][n, numpy.searchsorted(cumsum, unis_n)] += 1
z[0][n, np.searchsorted(cumsum, unis_n)] += 1
class ChoiceFromUniform(MultinomialFromUniform):
......@@ -380,8 +380,8 @@ class ChoiceFromUniform(MultinomialFromUniform):
else:
odtype = self.odtype
if (z[0] is None or
not numpy.all(z[0].shape == [pvals.shape[0], n_samples])):
z[0] = -1 * numpy.ones((pvals.shape[0], n_samples), dtype=odtype)
not np.all(z[0].shape == [pvals.shape[0], n_samples])):
z[0] = -1 * np.ones((pvals.shape[0], n_samples), dtype=odtype)
nb_multi = pvals.shape[0]
nb_outcomes = pvals.shape[1]
......
差异被折叠。
......@@ -4,7 +4,7 @@ import sys
from six import reraise
from nose.plugins.skip import SkipTest
import numpy
import numpy as np
import theano
from theano import config, function, tensor
......@@ -40,9 +40,9 @@ def test_n_samples_1():
f = function([p, u, n], m, allow_input_downcast=True)
numpy.random.seed(12345)
np.random.seed(12345)
for i in [1, 5, 10, 100, 1000, 10000]:
uni = numpy.random.rand(2 * i).astype(config.floatX)
uni = np.random.rand(2 * i).astype(config.floatX)
res = f([[1.0, 0.0], [0.0, 1.0]], uni, i)
utt.assert_allclose(res, [[i * 1.0, 0.0], [0.0, i * 1.0]])
......@@ -55,17 +55,17 @@ def test_n_samples_2():
f = function([p, u, n], m, allow_input_downcast=True)
numpy.random.seed(12345)
np.random.seed(12345)
for i in [1, 5, 10, 100, 1000]:
uni = numpy.random.rand(i).astype(config.floatX)
pvals = numpy.random.randint(1, 1000, (1, 1000)).astype(config.floatX)
uni = np.random.rand(i).astype(config.floatX)
pvals = np.random.randint(1, 1000, (1, 1000)).astype(config.floatX)
pvals /= pvals.sum(1)
res = f(pvals, uni, i)
assert res.sum() == i
for i in [1, 5, 10, 100, 1000]:
uni = numpy.random.rand(i).astype(config.floatX)
pvals = numpy.random.randint(
uni = np.random.rand(i).astype(config.floatX)
pvals = np.random.randint(
1, 1000000, (1, 1000000)).astype(config.floatX)
pvals /= pvals.sum(1)
res = f(pvals, uni, i)
......@@ -104,8 +104,8 @@ def test_n_samples_compatibility():
raise
f = theano.function([X], samples)
res = f(numpy.random.randn(20, 10))
assert numpy.all(res.sum(axis=1) == 1)
res = f(np.random.randn(20, 10))
assert np.all(res.sum(axis=1) == 1)
def test_multinomial_0():
......@@ -160,9 +160,9 @@ def test_multinomial_large():
assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
for node in f.maker.fgraph.toposort()])
pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
pval = np.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
uval = numpy.ones_like(pval[:, 0]) * 0.5
uval = np.ones_like(pval[:, 0]) * 0.5
mval = f(pval, uval)
assert mval.shape == pval.shape
......@@ -175,7 +175,7 @@ def test_multinomial_large():
else:
raise NotImplementedError(config.cast_policy)
utt.assert_allclose(mval.sum(axis=1), 2)
asdf = numpy.asarray([0, 0, 2, 0]) + 0 * pval
asdf = np.asarray([0, 0, 2, 0]) + 0 * pval
utt.assert_allclose(mval, asdf) # broadcast over all rows
run_with_c(body)
if cuda.cuda_available:
......@@ -216,9 +216,9 @@ def test_gpu_opt():
f = function([p, u], m_gpu, allow_input_downcast=True, mode=get_mode(True))
assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
for node in f.maker.fgraph.toposort()])
pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
pval = np.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
uval = numpy.ones_like(pval[:, 0]) * 0.5
uval = np.ones_like(pval[:, 0]) * 0.5
f(pval, uval)
# Test with a row, it was failing in the past.
......@@ -230,7 +230,7 @@ def test_gpu_opt():
f = function([r, u], m_gpu, allow_input_downcast=True, mode=get_mode(True))
assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
for node in f.maker.fgraph.toposort()])
pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
pval = np.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
uval = numpy.ones_like(pval[:, 0]) * 0.5
uval = np.ones_like(pval[:, 0]) * 0.5
f(pval, uval)
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import os
from theano import config, function, tensor
from theano.compat import PY3
......@@ -24,25 +24,25 @@ class test_OP(unittest.TestCase):
n_elements = 1000
all_indices = range(n_elements)
numpy.random.seed(12345)
np.random.seed(12345)
expected = [
numpy.asarray([[931, 318, 185, 209, 559]]),
numpy.asarray([[477, 887, 2, 717, 333, 665, 159, 559, 348, 136]]),
numpy.asarray([[546, 28, 79, 665, 295, 779, 433, 531, 411, 716, 244, 234, 70, 88, 612, 639, 383, 335,
451, 100, 175, 492, 848, 771, 559, 214, 568, 596, 370, 486, 855, 925, 138, 300, 528, 507,
730, 199, 882, 357, 58, 195, 705, 900, 66, 468, 513, 410, 816, 672]])]
np.asarray([[931, 318, 185, 209, 559]]),
np.asarray([[477, 887, 2, 717, 333, 665, 159, 559, 348, 136]]),
np.asarray([[546, 28, 79, 665, 295, 779, 433, 531, 411, 716, 244, 234, 70, 88, 612, 639, 383, 335,
451, 100, 175, 492, 848, 771, 559, 214, 568, 596, 370, 486, 855, 925, 138, 300, 528, 507,
730, 199, 882, 357, 58, 195, 705, 900, 66, 468, 513, 410, 816, 672]])]
for i in [5, 10, 50, 100, 500, n_elements]:
uni = numpy.random.rand(i).astype(config.floatX)
pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
uni = np.random.rand(i).astype(config.floatX)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
res = f(pvals, uni, i)
for ii in range(len(expected)):
if expected[ii].shape == res.shape:
assert (expected[ii] == res).all()
res = numpy.squeeze(res)
res = np.squeeze(res)
assert len(res) == i
assert numpy.all(numpy.in1d(numpy.unique(res), all_indices)), res
assert np.all(np.in1d(np.unique(res), all_indices)), res
def test_fail_select_alot(self):
"""
......@@ -58,9 +58,9 @@ class test_OP(unittest.TestCase):
n_elements = 100
n_selected = 200
numpy.random.seed(12345)
uni = numpy.random.rand(n_selected).astype(config.floatX)
pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
np.random.seed(12345)
uni = np.random.rand(n_selected).astype(config.floatX)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
self.assertRaises(ValueError, f, pvals, uni, n_selected)
......@@ -79,18 +79,18 @@ class test_OP(unittest.TestCase):
n_elements = 100
n_selected = 10
mean_rtol = 0.0005
numpy.random.seed(12345)
pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
avg_pvals = numpy.zeros((n_elements,), dtype=config.floatX)
avg_pvals = np.zeros((n_elements,), dtype=config.floatX)
for rep in range(10000):
uni = numpy.random.rand(n_selected).astype(config.floatX)
uni = np.random.rand(n_selected).astype(config.floatX)
res = f(pvals, uni, n_selected)
res = numpy.squeeze(res)
res = np.squeeze(res)
avg_pvals[res] += 1
avg_pvals /= avg_pvals.sum()
avg_diff = numpy.mean(abs(avg_pvals - pvals))
avg_diff = np.mean(abs(avg_pvals - pvals))
assert avg_diff < mean_rtol, avg_diff
......@@ -110,14 +110,14 @@ class test_function(unittest.TestCase):
n_elements = 1000
all_indices = range(n_elements)
numpy.random.seed(12345)
np.random.seed(12345)
for i in [5, 10, 50, 100, 500, n_elements]:
pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
res = f(pvals, i)
res = numpy.squeeze(res)
res = np.squeeze(res)
assert len(res) == i
assert numpy.all(numpy.in1d(numpy.unique(res), all_indices)), res
assert np.all(np.in1d(np.unique(res), all_indices)), res
def test_fail_select_alot(self):
"""
......@@ -134,8 +134,8 @@ class test_function(unittest.TestCase):
n_elements = 100
n_selected = 200
numpy.random.seed(12345)
pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
self.assertRaises(ValueError, f, pvals, n_selected)
......@@ -155,17 +155,17 @@ class test_function(unittest.TestCase):
n_elements = 100
n_selected = 10
mean_rtol = 0.0005
numpy.random.seed(12345)
pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
avg_pvals = numpy.zeros((n_elements,), dtype=config.floatX)
avg_pvals = np.zeros((n_elements,), dtype=config.floatX)
for rep in range(10000):
res = f(pvals, n_selected)
res = numpy.squeeze(res)
res = np.squeeze(res)
avg_pvals[res] += 1
avg_pvals /= avg_pvals.sum()
avg_diff = numpy.mean(abs(avg_pvals - pvals))
avg_diff = np.mean(abs(avg_pvals - pvals))
assert avg_diff < mean_rtol
def test_unpickle_legacy_op(self):
......
......@@ -45,7 +45,7 @@ __contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import logging
import numpy
import numpy as np
import warnings
from collections import OrderedDict
......@@ -488,8 +488,8 @@ def scan(fn,
# a sequence, though is highly unlikely in practice
if 'taps' in seq:
# go through the indicated slice
mintap = numpy.min(seq['taps'])
maxtap = numpy.max(seq['taps'])
mintap = np.min(seq['taps'])
maxtap = np.max(seq['taps'])
for k in seq['taps']:
# create one slice of the input
# Later on, if we decide not to use scan because we are
......@@ -670,15 +670,15 @@ def scan(fn,
elif init_out.get('taps', None):
if numpy.any(numpy.array(init_out.get('taps', [])) > 0):
if np.any(np.array(init_out.get('taps', [])) > 0):
# Make sure we do not have requests for future values of a
# sequence we can not provide such values
raise ValueError('Can not use future taps of outputs',
init_out)
# go through the taps
mintap = abs(numpy.min(init_out['taps']))
mintap = abs(np.min(init_out['taps']))
mit_sot_tap_array.append(init_out['taps'])
idx_offset = abs(numpy.min(init_out['taps']))
idx_offset = abs(np.min(init_out['taps']))
# Sequence
mit_sot_scan_inputs.append(
scan_utils.expand_empty(init_out['initial'][:mintap],
......@@ -725,9 +725,9 @@ def scan(fn,
# a map); in that case we do not have to do anything ..
# Re-order args
max_mit_sot = numpy.max([-1] + mit_sot_rightOrder) + 1
max_sit_sot = numpy.max([-1] + sit_sot_rightOrder) + 1
n_elems = numpy.max([max_mit_sot, max_sit_sot])
max_mit_sot = np.max([-1] + mit_sot_rightOrder) + 1
max_sit_sot = np.max([-1] + sit_sot_rightOrder) + 1
n_elems = np.max([max_mit_sot, max_sit_sot])
_ordered_args = [[] for x in xrange(n_elems)]
offset = 0
for idx in xrange(n_mit_sot):
......@@ -1101,7 +1101,7 @@ def scan(fn,
return out_ls
offset = n_mit_mot
offsets = [abs(numpy.min(x)) for x in mit_sot_tap_array]
offsets = [abs(np.min(x)) for x in mit_sot_tap_array]
mit_sot_outs = remove_dimensions(
scan_outs[offset:offset + n_mit_sot],
mit_sot_return_steps,
......
......@@ -54,7 +54,7 @@ import logging
import time
from collections import OrderedDict
import numpy
import numpy as np
from six import iteritems, integer_types, raise_from
from six.moves import xrange
......@@ -193,7 +193,7 @@ class Scan(PureOp):
self.info['name'] = self.name
# Pre-computing some values to speed up perform
self.mintaps = [numpy.min(x) for x in self.tap_array]
self.mintaps = [np.min(x) for x in self.tap_array]
self.mintaps += [0 for x in xrange(self.n_nit_sot)]
self.seqs_arg_offset = 1 + self.n_seqs
self.shared_arg_offset = (self.seqs_arg_offset +
......@@ -336,7 +336,7 @@ class Scan(PureOp):
the inner function)
"""
assert numpy.all(isinstance(i, gof.Variable) for i in inputs)
assert np.all(isinstance(i, gof.Variable) for i in inputs)
# Check that the number of inputs to the Scan node corresponds to
# the number of inputs of the inner function of scan
n_outer_ins = len(inputs) - len(self.outer_nitsot(inputs)) - 1
......@@ -901,53 +901,53 @@ class Scan(PureOp):
try:
if impl == 'py':
raise theano.gof.cmodule.MissingGXX
cython_mintaps = numpy.asarray(self.mintaps, dtype='int32')
cython_mintaps = np.asarray(self.mintaps, dtype='int32')
cython_tap_array_len = \
numpy.asarray([len(x) for x in self.tap_array],
dtype='int32')
np.asarray([len(x) for x in self.tap_array],
dtype='int32')
if len(self.tap_array) == 0:
d1 = 0
else:
d1 = numpy.max(cython_tap_array_len)
d1 = np.max(cython_tap_array_len)
d0 = len(self.tap_array)
cython_tap_array = numpy.zeros((d0, d1), dtype='int32')
cython_tap_array = np.zeros((d0, d1), dtype='int32')
for _d0 in xrange(d0):
for _d1 in xrange(cython_tap_array_len[_d0]):
cython_tap_array[_d0, _d1] = self.tap_array[_d0][_d1]
cython_mit_mot_out_nslices = \
numpy.asarray([len(x) for x in self.mit_mot_out_slices],
dtype='int32')
np.asarray([len(x) for x in self.mit_mot_out_slices],
dtype='int32')
if len(self.mit_mot_out_slices) == 0:
d1 = 0
else:
d1 = numpy.max(cython_mit_mot_out_nslices)
d1 = np.max(cython_mit_mot_out_nslices)
d0 = len(self.mit_mot_out_slices)
cython_mit_mot_out_slices = numpy.zeros((d0, d1),
dtype='int32')
cython_mit_mot_out_slices = np.zeros((d0, d1),
dtype='int32')
for _d0 in xrange(d0):
for _d1 in xrange(cython_mit_mot_out_nslices[_d0]):
cython_mit_mot_out_slices[_d0, _d1] = \
self.mit_mot_out_slices[_d0][_d1]
cython_vector_seqs = numpy.asarray(self.vector_seqs,
cython_vector_seqs = np.asarray(self.vector_seqs,
dtype='int32')
cython_vector_outs = np.asarray(self.vector_outs,
dtype='int32')
cython_mitmots_preallocated = np.asarray(self.mitmots_preallocated,
dtype='int32')
cython_inps_is_tensor = np.asarray(self.inps_is_tensor,
dtype='int32')
cython_vector_outs = numpy.asarray(self.vector_outs,
cython_outs_is_tensor = np.asarray(self.outs_is_tensor,
dtype='int32')
cython_mitmots_preallocated = numpy.asarray(self.mitmots_preallocated,
dtype='int32')
cython_inps_is_tensor = numpy.asarray(self.inps_is_tensor,
dtype='int32')
cython_outs_is_tensor = numpy.asarray(self.outs_is_tensor,
dtype='int32')
if hasattr(self, 'destroy_map'):
cython_destroy_map = [x in self.destroy_map
for x in xrange(len(node.outputs))]
else:
cython_destroy_map = [0 for x in xrange(len(node.outputs))]
cython_destroy_map = numpy.asarray(cython_destroy_map,
dtype='int32')
cython_destroy_map = np.asarray(cython_destroy_map,
dtype='int32')
from . import scan_perform_ext
def p(node, args, outs):
......@@ -2200,9 +2200,9 @@ class Scan(PureOp):
# Seqs
outer_inp_seqs = [x[::-1] for x in inputs[1:1 + self.n_seqs]]
for idx in xrange(self.n_mit_mot + self.n_mit_sot):
mintap = numpy.min(self.tap_array[idx])
mintap = np.min(self.tap_array[idx])
if idx < self.n_mit_mot:
outmaxtap = numpy.max(self.mitmot_out_taps()[idx])
outmaxtap = np.max(self.mitmot_out_taps()[idx])
else:
outmaxtap = 0
seq = outs[idx]
......@@ -2226,7 +2226,7 @@ class Scan(PureOp):
# that.
for taps, x in zip(self.mitsot_taps(),
self.outer_mitsot_outs(outs)):
mintap = numpy.min(taps)
mintap = np.min(taps)
if hasattr(x[::-1][:mintap], 'test_value'):
assert (x[::-1][:mintap].tag.test_value.shape[0] ==
inputs[0].tag.test_value)
......@@ -2238,7 +2238,7 @@ class Scan(PureOp):
if hasattr(x[::-1].tag, 'test_value'):
assert (x[::-1].tag.test_value.shape[0] ==
inputs[0].tag.test_value)
outer_inp_seqs += [x[::-1][:numpy.min(taps)]
outer_inp_seqs += [x[::-1][:np.min(taps)]
for taps, x in zip(self.mitsot_taps(),
self.outer_mitsot_outs(outs))]
outer_inp_seqs += [x[::-1][:-1] for x in self.outer_sitsot_outs(outs)]
......@@ -2726,8 +2726,8 @@ class Scan(PureOp):
b = e
e = e + self.n_mit_mot
ib = ie
ie = ie + int(numpy.sum([len(x) for x in
self.tap_array[:self.n_mit_mot]]))
ie = ie + int(np.sum([len(x) for x in
self.tap_array[:self.n_mit_mot]]))
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
......@@ -2742,9 +2742,9 @@ class Scan(PureOp):
b = e
e = e + self.n_mit_sot
ib = ie
ie = ie + int(numpy.sum([len(x) for x in
self.tap_array[self.n_mit_mot:
self.n_mit_mot + self.n_mit_sot]]))
ie = ie + int(np.sum([len(x) for x in
self.tap_array[self.n_mit_mot:
self.n_mit_mot + self.n_mit_sot]]))
clean_eval_points = []
for inp, evp in zip(inputs[b:e], eval_points[b:e]):
if evp is not None:
......@@ -2795,8 +2795,8 @@ class Scan(PureOp):
inner_other = self_inputs[ie:] + inner_eval_points[ib:]
# Outputs
n_mit_mot_outs = int(numpy.sum([len(x) for x in
self.mit_mot_out_slices]))
n_mit_mot_outs = int(np.sum([len(x) for x in
self.mit_mot_out_slices]))
info['n_mit_mot_outs'] = n_mit_mot_outs * 2
b = 0
e = n_mit_mot_outs
......
......@@ -54,7 +54,7 @@ import logging
import copy
from sys import maxsize
from collections import OrderedDict
import numpy
import numpy as np
import theano
from theano import tensor, scalar
......@@ -636,7 +636,7 @@ class PushOutSeqScan(gof.Optimizer):
if out in op.inner_mitsot_outs(ls):
odx = op.inner_mitsot_outs(ls).index(out)
inp = op.outer_mitsot(node)[odx]
st = abs(numpy.min(op.mitsot_taps()))
st = abs(np.min(op.mitsot_taps()))
y = tensor.set_subtensor(inp[st:], _y)
elif out in op.inner_sitsot_outs(ls):
odx = op.inner_sitsot_outs(ls).index(out)
......@@ -1373,7 +1373,7 @@ class ScanSaveMem(gof.Optimizer):
# TODO: Simplify the number of steps needed.
# FB: This need good testing, left to later.
# call get_scalar_constant_value()? it can
# return python/numpy scalar or numpy.ndarray
# return python/numpy scalar or np.ndarray
# currently.
# pval = pre_greedy_local_optimizer(list_opt_slice,
# pval)
......
......@@ -12,7 +12,7 @@ import os
import sys
import warnings
import numpy
import numpy as np
import theano
from theano import config
......@@ -103,7 +103,7 @@ except ImportError:
# During scan cython development, it is helpful to keep the old interface, to don't manually edit the c file each time.
preargs.remove('-DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION')
else:
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
numpy_ver = [int(n) for n in np.__version__.split('.')[:2]]
# Add add some macro to lower the number of edit
# needed to the c file.
if bool(numpy_ver >= [1, 7]):
......
......@@ -20,7 +20,7 @@ import logging
import warnings
from collections import OrderedDict
import numpy
import numpy as np
import theano
from theano.compat import izip
......@@ -589,8 +589,8 @@ def get_updates_and_outputs(ls):
def isNaN_or_Inf_or_None(x):
isNone = x is None
try:
isNaN = numpy.isnan(x)
isInf = numpy.isinf(x)
isNaN = np.isnan(x)
isInf = np.isinf(x)
isStr = isinstance(x, string_types)
except Exception:
isNaN = False
......@@ -599,8 +599,8 @@ def isNaN_or_Inf_or_None(x):
if not isNaN and not isInf:
try:
val = get_scalar_constant_value(x)
isInf = numpy.isinf(val)
isNaN = numpy.isnan(val)
isInf = np.isinf(val)
isNaN = np.isnan(val)
except Exception:
isNaN = False
isInf = False
......@@ -959,7 +959,7 @@ def scan_can_remove_outs(op, out_idxs):
added = False
for pos, idx in enumerate(out_idxs):
if (out_idxs_mask[pos] and
numpy.any([x in required_inputs for x in out_ins[idx]])):
np.any([x in required_inputs for x in out_ins[idx]])):
# This output is required ..
out_idxs_mask[pos] = 0
required_inputs += gof.graph.inputs([op.outputs[idx]])
......
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import unittest
import theano
......@@ -39,14 +39,14 @@ class TestScanCheckpoint(unittest.TestCase):
f = theano.function(inputs=[self.A, self.k],
outputs=[self.result, self.result_check])
out, out_check = f(range(10), 101)
assert numpy.allclose(out, out_check)
assert np.allclose(out, out_check)
def test_backward_pass(self):
"""Test gradient computation of A**k."""
f = theano.function(inputs=[self.A, self.k],
outputs=[self.grad_A, self.grad_A_check])
out, out_check = f(range(10), 101)
assert numpy.allclose(out, out_check)
assert np.allclose(out, out_check)
@unittest.skipUnless(PYGPU_AVAILABLE, 'Requires pygpu.')
def test_memory(self):
......@@ -59,7 +59,7 @@ class TestScanCheckpoint(unittest.TestCase):
f_check = theano.function(inputs=[self.A, self.k],
outputs=self.grad_A_check, mode=mode_with_gpu)
free_gmem = theano.gpuarray.type._context_reg[None].free_gmem
data = numpy.ones(free_gmem // 3000, dtype=numpy.float32)
data = np.ones(free_gmem // 3000, dtype=np.float32)
# Check that it works with the checkpoints
f_check(data, 1000)
# Check that the basic scan fails in that case
......
from __future__ import absolute_import, print_function, division
import numpy
import numpy as np
import unittest
import theano
......@@ -18,7 +18,7 @@ class TestGaussNewton(unittest.TestCase):
This test case is based on code by Sigurd Spieckermann.
"""
def setUp(self):
self.rng = numpy.random.RandomState(utt.fetch_seed())
self.rng = np.random.RandomState(utt.fetch_seed())
def _run(self, num_features, num_timesteps, batch_size, mode):
# determine shapes of inputs and targets depending on the batch size
......@@ -58,8 +58,8 @@ class TestGaussNewton(unittest.TestCase):
W_hy = theano.shared(
(0.01 * self.rng.uniform(size=(10, 1))).astype(config.floatX),
borrow=True)
b_h = theano.shared(numpy.zeros(10).astype(config.floatX), borrow=True)
b_y = theano.shared(numpy.zeros(1).astype(config.floatX), borrow=True)
b_h = theano.shared(np.zeros(10).astype(config.floatX), borrow=True)
b_y = theano.shared(np.zeros(1).astype(config.floatX), borrow=True)
params = [W_xh, W_hh, W_hy, b_h, b_y]
......@@ -171,8 +171,8 @@ class TestPushOutScanOutputDot(object):
# Ensure that the function compiled with the optimization produces
# the same results as the function compiled without
v_value = numpy.random.random((4)).astype(config.floatX)
m_value = numpy.random.random((4, 5)).astype(config.floatX)
v_value = np.random.random((4)).astype(config.floatX)
m_value = np.random.random((4, 5)).astype(config.floatX)
output_opt = f_opt(v_value, m_value)
output_no_opt = f_no_opt(v_value, m_value)
......@@ -217,8 +217,8 @@ class TestPushOutScanOutputDot(object):
# Ensure that the function compiled with the optimization produces
# the same results as the function compiled without
a_value = numpy.random.random((3, 4)).astype(config.floatX)
b_value = numpy.random.random((4, 5)).astype(config.floatX)
a_value = np.random.random((3, 4)).astype(config.floatX)
b_value = np.random.random((4, 5)).astype(config.floatX)
output_opt = f_opt(a_value, b_value)
output_no_opt = f_no_opt(a_value, b_value)
......@@ -263,8 +263,8 @@ class TestPushOutScanOutputDot(object):
# Ensure that the function compiled with the optimization produces
# the same results as the function compiled without
a_value = numpy.random.random((3, 4)).astype(config.floatX)
b_value = numpy.random.random((4, 5)).astype(config.floatX)
a_value = np.random.random((3, 4)).astype(config.floatX)
b_value = np.random.random((4, 5)).astype(config.floatX)
output_opt = f_opt(a_value, b_value)
output_no_opt = f_no_opt(a_value, b_value)
......@@ -296,7 +296,7 @@ class TestPushOutSumOfDot():
dim = 5
# Weight matrices
U = theano.shared(numpy.random.normal(size=(dim, dim),
U = theano.shared(np.random.normal(size=(dim, dim),
scale=0.0001).astype(config.floatX))
U.name = 'U'
V = theano.shared(U.get_value())
......@@ -306,7 +306,7 @@ class TestPushOutSumOfDot():
# Variables and their values
x = T.tensor3('x')
x_value = numpy.random.normal(size=(seq_len, batch_size, dim),
x_value = np.random.normal(size=(seq_len, batch_size, dim),
scale=0.0001).astype(config.floatX)
ri = T.tensor3('ri')
......@@ -315,7 +315,7 @@ class TestPushOutSumOfDot():
zi = T.tensor3('zi')
zi_value = x_value
init = T.alloc(numpy.cast[config.floatX](0), batch_size, dim)
init = T.alloc(np.cast[config.floatX](0), batch_size, dim)
def rnn_step1(
# sequences
x, ri, zi,
......@@ -375,8 +375,8 @@ class TestPushOutSumOfDot():
input2 = T.tensor3()
input3 = T.tensor3()
W = theano.shared(numpy.random.normal(size=(4, 5))).astype(config.floatX)
U = theano.shared(numpy.random.normal(size=(6, 7))).astype(config.floatX)
W = theano.shared(np.random.normal(size=(4, 5))).astype(config.floatX)
U = theano.shared(np.random.normal(size=(6, 7))).astype(config.floatX)
def inner_fct(seq1, seq2, seq3, previous_output):
temp1 = T.dot(seq1, W) + seq3
......@@ -384,7 +384,7 @@ class TestPushOutSumOfDot():
dot_output = T.dot(temp1, temp2)
return previous_output + dot_output
init = T.as_tensor_variable(numpy.random.normal(size=(3, 7)))
init = T.as_tensor_variable(np.random.normal(size=(3, 7)))
# Compile the function twice, once with the optimization and once
# without
......@@ -410,9 +410,9 @@ class TestPushOutSumOfDot():
# TODO
# Compare the outputs of the 2 functions
input1_value = numpy.random.random((2, 3, 4)).astype(config.floatX)
input2_value = numpy.random.random((2, 5, 6)).astype(config.floatX)
input3_value = numpy.random.random((2, 3, 5)).astype(config.floatX)
input1_value = np.random.random((2, 3, 4)).astype(config.floatX)
input2_value = np.random.random((2, 5, 6)).astype(config.floatX)
input3_value = np.random.random((2, 3, 5)).astype(config.floatX)
output_opt = f_opt(input1_value, input2_value, input3_value)
output_no_opt = f_no_opt(input1_value, input2_value, input3_value)
......
from __future__ import absolute_import, print_function, division
import itertools
import unittest
import numpy
import numpy as np
import theano
from theano import tensor
from theano.scan_module.scan_utils import equal_computations, map_variables
......@@ -51,8 +51,8 @@ class TestMapVariables(unittest.TestCase):
s2, = map_variables(self.replacer, [s])
f = theano.function([x, y, z], [s, s2])
rval = f(x=numpy.array([1, 2, 3], dtype=numpy.float32), y=1, z=2)
assert numpy.array_equal(rval, [[1, 2, 3], [2, 4, 6]])
rval = f(x=np.array([1, 2, 3], dtype=np.float32), y=1, z=2)
assert np.array_equal(rval, [[1, 2, 3], [2, 4, 6]])
def test_scan(self):
x = tensor.vector('x')
......@@ -64,7 +64,7 @@ class TestMapVariables(unittest.TestCase):
# should do this as well.
outer = tensor.scalar("outer")
shared = theano.shared(
numpy.array(1., dtype=theano.config.floatX),
np.array(1., dtype=theano.config.floatX),
name="shared")
constant = tensor.constant(1, name="constant")
......@@ -77,7 +77,7 @@ class TestMapVariables(unittest.TestCase):
return r
s, _ = theano.scan(step, sequences=x,
outputs_info=[numpy.array(0.)])
outputs_info=[np.array(0.)])
# ensure z is owned by the outer graph so map_variables() will need to
# jump through additional hoops to placate FunctionGraph.
t = z * s
......@@ -85,8 +85,8 @@ class TestMapVariables(unittest.TestCase):
t2 = z * s2
f = theano.function([x, outer], [t, t2])
rval = f(x=numpy.array([1, 2, 3], dtype=numpy.float32), outer=0.5)
assert numpy.array_equal(rval, [[1, 3, 6], [-1, -3, -6]])
rval = f(x=np.array([1, 2, 3], dtype=np.float32), outer=0.5)
assert np.array_equal(rval, [[1, 3, 6], [-1, -3, -6]])
def test_scan_with_shared_update(self):
x = tensor.vector('x')
......@@ -104,7 +104,7 @@ class TestMapVariables(unittest.TestCase):
return r
s, _ = theano.scan(step, sequences=x,
outputs_info=[numpy.array(0.)])
outputs_info=[np.array(0.)])
self.assertRaises(NotImplementedError,
map_variables, self.replacer, [s])
......@@ -128,7 +128,7 @@ class TestMapVariables(unittest.TestCase):
return r + counter
s, _ = theano.scan(step, sequences=x,
outputs_info=[numpy.array(0.)])
outputs_info=[np.array(0.)])
self.assertRaises(NotImplementedError,
map_variables, self.replacer, [s])
......@@ -137,7 +137,7 @@ class TestMapVariables(unittest.TestCase):
# inner graph.
outer = tensor.scalar("outer")
shared = theano.shared(
numpy.array(1., dtype=theano.config.floatX),
np.array(1., dtype=theano.config.floatX),
name="shared")
constant = tensor.constant(1., name="constant")
z = outer * (shared + constant)
......
......@@ -130,10 +130,10 @@ import logging
import os
import time
import numpy
import numpy as np
import numpy.distutils
try:
import numpy.distutils.__config__
import numpy.distutils.__config__ # noqa
except ImportError:
pass
......@@ -166,10 +166,10 @@ try:
# `scipy.linalg.blas.fblas` with `scipy.linalg.blas`.
# See http://github.com/scipy/scipy/pull/358
fblas = scipy.linalg.blas
_blas_gemv_fns = {numpy.dtype('float32'): fblas.sgemv,
numpy.dtype('float64'): fblas.dgemv,
numpy.dtype('complex64'): fblas.cgemv,
numpy.dtype('complex128'): fblas.zgemv}
_blas_gemv_fns = {np.dtype('float32'): fblas.sgemv,
np.dtype('float64'): fblas.dgemv,
np.dtype('complex64'): fblas.cgemv,
np.dtype('complex128'): fblas.zgemv}
except ImportError as e:
have_fblas = False
# This is used in Gemv and ScipyGer. We use CGemv and CGer
......@@ -190,12 +190,12 @@ def check_init_y():
if not have_fblas:
check_init_y._result = False
y = float('NaN') * numpy.ones((2,))
x = numpy.ones((2,))
A = numpy.ones((2, 2))
y = float('NaN') * np.ones((2,))
x = np.ones((2,))
A = np.ones((2, 2))
gemv = _blas_gemv_fns[y.dtype]
gemv(1.0, A.T, x, 0.0, y, overwrite_y=True, trans=True)
check_init_y._result = numpy.isnan(y).any()
check_init_y._result = np.isnan(y).any()
return check_init_y._result
......@@ -269,7 +269,7 @@ class Gemv(Op):
out_storage[0][0] = gemv(alpha, A.T, x, beta, y,
overwrite_y=self.inplace, trans=True)
else:
out = numpy.dot(A, x)
out = np.dot(A, x)
if alpha != 1:
out *= alpha
if beta != 0:
......@@ -277,7 +277,7 @@ class Gemv(Op):
out += beta * y
else:
out += y
out_storage[0][0] = numpy.asarray(out, dtype=y.dtype)
out_storage[0][0] = np.asarray(out, dtype=y.dtype)
def infer_shape(self, node, input_shapes):
    """Output shape of Gemv equals the shape of its first input, y.

    gemv produces alpha*dot(A, x) + beta*y, so the result is shaped
    like y.  `node` is part of the Op interface and is unused here.
    """
    y_shape = input_shapes[0]
    return [y_shape]
......@@ -341,9 +341,9 @@ class Ger(Op):
else:
A = cA.copy()
if calpha != 1:
A += calpha * numpy.outer(cx, cy)
A += calpha * np.outer(cx, cy)
else:
A += numpy.outer(cx, cy)
A += np.outer(cx, cy)
cZ[0] = A
def infer_shape(self, node, input_shapes):
......@@ -900,26 +900,26 @@ class Gemm(GemmRelated):
if not self.inplace:
z = z.copy() # the original z will not be changed
if z.shape == ():
z.itemset(z * a + b * numpy.dot(x, y))
z.itemset(z * a + b * np.dot(x, y))
zout[0] = z
else:
if b == 0.0:
if a == 1.0:
z[:] = numpy.dot(x, y)
z[:] = np.dot(x, y)
elif a == -1.0:
z[:] = -numpy.dot(x, y)
z[:] = -np.dot(x, y)
else:
z[:] = a * numpy.dot(x, y)
z[:] = a * np.dot(x, y)
elif b == 1.0:
if a == 1.0:
z += numpy.dot(x, y)
z += np.dot(x, y)
elif a == -1.0:
z -= numpy.dot(x, y)
z -= np.dot(x, y)
else:
z += a * numpy.dot(x, y)
z += a * np.dot(x, y)
else:
z *= b
z += a * numpy.dot(x, y)
z += a * np.dot(x, y)
zout[0] = z
def infer_shape(self, node, input_shapes):
......@@ -1066,7 +1066,7 @@ def _as_scalar(res, dtype=None):
"""Return None or a TensorVariable whose type is in T.float_scalar_types"""
if dtype is None:
dtype = config.floatX
if numpy.all(res.type.broadcastable):
if np.all(res.type.broadcastable):
while res.owner and isinstance(res.owner.op, T.DimShuffle):
res = res.owner.inputs[0]
# may still have some number of True's
......@@ -1216,7 +1216,7 @@ def _gemm_canonicalize(r, scale, rval, maxclients):
vectors = []
matrices = []
for i in r.owner.inputs:
if numpy.all(i.type.broadcastable):
if np.all(i.type.broadcastable):
while i.owner and isinstance(i.owner.op, T.DimShuffle):
i = i.owner.inputs[0]
if i.type.broadcastable:
......@@ -1539,7 +1539,7 @@ class Dot22(GemmRelated):
x, y = inp
z, = out
try:
z[0] = numpy.asarray(numpy.dot(x, y))
z[0] = np.asarray(np.dot(x, y))
except ValueError as e:
# The error raised by numpy has no shape information, we mean to
# add that
......@@ -1704,8 +1704,8 @@ def local_dot22_to_ger_or_gemv(node):
x, y = node.inputs
xb = x.broadcastable
yb = y.broadcastable
one = T.as_tensor_variable(numpy.asarray(1, dtype=x.dtype))
zero = T.as_tensor_variable(numpy.asarray(0, dtype=x.dtype))
one = T.as_tensor_variable(np.asarray(1, dtype=x.dtype))
zero = T.as_tensor_variable(np.asarray(0, dtype=x.dtype))
if xb[1] and yb[0]:
# x and y are both vectors so this might qualifies for a GER
xv = x.dimshuffle(0)
......@@ -1810,7 +1810,7 @@ class Dot22Scalar(GemmRelated):
x, y, scalar = inp
z, = out
try:
z[0] = numpy.asarray(scalar * numpy.dot(x, y))
z[0] = np.asarray(scalar * np.dot(x, y))
except ValueError as e:
# The error raised by numpy has no shape information, we
# mean to add that
......@@ -2034,9 +2034,9 @@ class BatchedDot(Op):
shape = self.infer_shape(node, [i.shape for i in inp])[0]
dtype = node.outputs[0].dtype
z0 = z[0] = numpy.empty(shape, dtype=dtype)
z0 = z[0] = np.empty(shape, dtype=dtype)
for i in xrange(z0.shape[0]):
z0[i] = numpy.dot(x[i], y[i])
z0[i] = np.dot(x[i], y[i])
def c_support_code(self):
batch_gemm_defn = """
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论