提交 123e1241 authored 作者: Frédéric Bastien's avatar Frédéric Bastien

Merge pull request #1848 from Tanjay94/Swapaxes

Swapaxes
...@@ -1220,3 +1220,66 @@ def matrix_power(M, n): ...@@ -1220,3 +1220,66 @@ def matrix_power(M, n):
for i in xrange(n): for i in xrange(n):
result = theano.dot(result, M) result = theano.dot(result, M)
return result return result
def norm(x, ord):
    """Return the vector or matrix norm of `x`, mirroring numpy.linalg.norm.

    Parameters
    ----------
    x : symbolic vector or matrix (anything `as_tensor_variable` accepts)
    ord : None, 'fro', 'inf', '-inf', or a number
        Order of the norm; the accepted values follow numpy.linalg.norm
        (with 'inf'/'-inf' given as strings).

    Returns
    -------
    A symbolic scalar expression for the requested norm.

    Raises
    ------
    ValueError
        For 0-dimensional input, or an `ord` invalid for the input's
        dimensionality.
    NotImplementedError
        For inputs with more than 2 dimensions.
    """
    x = as_tensor_variable(x)
    ndim = x.ndim
    if ndim == 0:
        raise ValueError("'axis' entry is out of bounds.")
    elif ndim == 1:
        if ord is None:
            return tensor.sum(x ** 2) ** 0.5
        elif ord == 'inf':
            return tensor.max(abs(x))
        elif ord == '-inf':
            return tensor.min(abs(x))
        elif ord == 0:
            return x[x.nonzero()].shape[0]
        else:
            try:
                # abs(x) ** ord, not abs(x ** ord): for negative entries
                # and fractional/negative orders, x ** ord is undefined
                # (NaN), while numpy's definition is sum(abs(x) ** ord).
                z = tensor.sum(abs(x) ** ord) ** (1. / ord)
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            return z
    elif ndim == 2:
        if ord is None or ord == 'fro':
            # Frobenius norm: sqrt of the sum of squared magnitudes.
            return tensor.sum(abs(x) ** 2) ** 0.5
        elif ord == 'inf':
            return tensor.max(tensor.sum(abs(x), 1))
        elif ord == '-inf':
            return tensor.min(tensor.sum(abs(x), 1))
        elif ord == 1:
            return tensor.max(tensor.sum(abs(x), 0))
        elif ord == -1:
            return tensor.min(tensor.sum(abs(x), 0))
        else:
            # Was `raise ValueError(0)` — same exception type, real message.
            raise ValueError("Invalid norm order for matrices.")
    elif ndim > 2:
        raise NotImplementedError("We don't support norm with ndim > 2")
class lstsq(theano.Op):
    """Op wrapping numpy.linalg.lstsq: least-squares solution of x @ b = y.

    Outputs (matching numpy.linalg.lstsq): the solution, the residuals,
    the rank of `x`, and the singular values of `x`.
    """

    def __eq__(self, other):
        # The Op has no parameters, so two instances are interchangeable
        # iff they are the same type.  The original `pass` returned None,
        # making every comparison falsy and breaking the __eq__/__hash__
        # contract Theano's Op merging relies on.
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))

    def __str__(self):
        return self.__class__.__name__

    def make_node(self, x, y, rcond):
        x = theano.tensor.as_tensor_variable(x)
        y = theano.tensor.as_tensor_variable(y)
        rcond = theano.tensor.as_tensor_variable(rcond)
        # Outputs: solution (same type as y), residuals, rank,
        # singular values.
        return theano.Apply(self, [x, y, rcond],
                            [y.type(), theano.tensor.dvector(),
                             theano.tensor.lscalar(),
                             theano.tensor.dvector()])

    def perform(self, node, inputs, outputs):
        x, y, rcond = inputs
        zz = numpy.linalg.lstsq(x, y, rcond)
        outputs[0][0] = zz[0]
        outputs[1][0] = zz[1]
        outputs[2][0] = zz[2]
        outputs[3][0] = zz[3]
...@@ -3,6 +3,8 @@ import unittest ...@@ -3,6 +3,8 @@ import unittest
import numpy import numpy
import numpy.linalg import numpy.linalg
from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_almost_equal
from numpy.testing import dec, assert_array_equal, assert_allclose
from numpy import inf
import theano import theano
from theano import tensor, function from theano import tensor, function
...@@ -30,8 +32,10 @@ from theano.sandbox.linalg.ops import (cholesky, ...@@ -30,8 +32,10 @@ from theano.sandbox.linalg.ops import (cholesky,
spectral_radius_bound, spectral_radius_bound,
imported_scipy, imported_scipy,
Eig, Eig,
inv_as_solve inv_as_solve,
norm
) )
from theano.sandbox.linalg import eig, eigh, eigvalsh from theano.sandbox.linalg import eig, eigh, eigvalsh
from nose.plugins.skip import SkipTest from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr from nose.plugins.attrib import attr
...@@ -631,3 +635,69 @@ class Matrix_power(): ...@@ -631,3 +635,69 @@ class Matrix_power():
f = function([A], [Q]) f = function([A], [Q])
a = rng.rand(4, 3).astype(theano.config.floatX) a = rng.rand(4, 3).astype(theano.config.floatX)
self.assertRaises(ValueError, f, a) self.assertRaises(ValueError, f, a)
class T_NormTests(unittest.TestCase):
    """Tests for theano.sandbox.linalg.ops.norm against numpy.linalg.norm."""

    def test_wrong_type_of_ord_for_vector(self):
        # 'fro' is a matrix-only norm order.
        self.assertRaises(ValueError, norm, [2, 1], 'fro')

    def test_wrong_type_of_ord_for_matrix(self):
        # ord=0 is a vector-only norm order.
        self.assertRaises(ValueError, norm, [[2, 1], [3, 4]], 0)

    def test_non_tensorial_input(self):
        # 0-dimensional input is rejected.
        self.assertRaises(ValueError, norm, 3, None)

    def test_tensor_input(self):
        # ndim > 2 is not implemented.
        self.assertRaises(NotImplementedError, norm,
                          numpy.random.rand(3, 4, 5), None)

    def test_numpy_compare(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        M = tensor.matrix("A", dtype=theano.config.floatX)
        V = tensor.vector("V", dtype=theano.config.floatX)
        a = rng.rand(4, 4).astype(theano.config.floatX)
        b = rng.rand(4).astype(theano.config.floatX)
        # One entry per case: (theano ord, symbolic input, value, numpy ord).
        # Iterating with zip replaces the original hard-coded range(0, 14),
        # which silently skipped cases if a list was extended.
        cases = zip(
            [None, 'fro', 'inf', '-inf', 1, -1,
             None, 'inf', '-inf', 0, 1, -1, 2, -2],
            [M, M, M, M, M, M, V, V, V, V, V, V, V, V],
            [a, a, a, a, a, a, b, b, b, b, b, b, b, b],
            [None, 'fro', inf, -inf, 1, -1,
             None, inf, -inf, 0, 1, -1, 2, -2])
        for t_ord, var, val, n_ord in cases:
            f = function([var], norm(var, t_ord))
            t_n = f(val)
            n_n = numpy.linalg.norm(val, n_ord)
            assert _allclose(n_n, t_n)
class T_lstsq(unittest.TestCase):
    """Tests for the lstsq Op."""

    def test_correct_solution(self):
        x = tensor.lmatrix()
        y = tensor.lmatrix()
        z = tensor.lscalar()
        b = theano.sandbox.linalg.ops.lstsq()(x, y, z)
        # Compile once (the original compiled the identical function twice).
        f = function([x, y, z], b)
        TestMatrix1 = numpy.asarray([[2, 1], [3, 4]])
        TestMatrix2 = numpy.asarray([[17, 20], [43, 50]])
        TestScalar = numpy.asarray(1)
        m = f(TestMatrix1, TestMatrix2, TestScalar)
        # The solution m[0] must satisfy TestMatrix1 @ m[0] == TestMatrix2.
        self.assertTrue(numpy.allclose(TestMatrix2,
                                       numpy.dot(TestMatrix1, m[0])))

    def test_wrong_coefficient_matrix(self):
        # 1-d coefficient "matrix" must be rejected by numpy.linalg.lstsq.
        x = tensor.vector()
        y = tensor.vector()
        z = tensor.scalar()
        b = theano.sandbox.linalg.ops.lstsq()(x, y, z)
        f = function([x, y, z], b)
        self.assertRaises(numpy.linalg.linalg.LinAlgError,
                          f, [2, 1], [2, 1], 1)

    def test_wrong_rcond_dimension(self):
        # rcond must be a scalar, not a vector.
        x = tensor.vector()
        y = tensor.vector()
        z = tensor.vector()
        b = theano.sandbox.linalg.ops.lstsq()(x, y, z)
        f = function([x, y, z], b)
        self.assertRaises(numpy.linalg.LinAlgError,
                          f, [2, 1], [2, 1], [2, 1])
...@@ -5026,3 +5026,12 @@ def ptp(a, axis=None): ...@@ -5026,3 +5026,12 @@ def ptp(a, axis=None):
def power(x, y): def power(x, y):
return x**y return x**y
def swapaxes(y, axis1, axis2):
    """Return a view of `y` with `axis1` and `axis2` interchanged.

    Mirrors numpy.swapaxes: builds the identity permutation of the
    axes, swaps the two requested entries, and applies it with
    dimshuffle.

    Parameters
    ----------
    y : anything `as_tensor_variable` accepts
    axis1, axis2 : int
        The axes to exchange.
    """
    y = as_tensor_variable(y)
    ndim = y.ndim
    # list(range(...)): on Python 3 a bare `range` object does not
    # support item assignment, so the original `li = range(0, ndim)`
    # would raise TypeError there.
    li = list(range(ndim))
    li[axis1], li[axis2] = li[axis2], li[axis1]
    return y.dimshuffle(li)
...@@ -45,7 +45,9 @@ from theano.tensor import (_shared, wvector, bvector, autocast_float_as, ...@@ -45,7 +45,9 @@ from theano.tensor import (_shared, wvector, bvector, autocast_float_as,
dtensor3, SpecifyShape, Mean, dtensor3, SpecifyShape, Mean,
itensor3, Tile, switch, Diagonal, Diag, itensor3, Tile, switch, Diagonal, Diag,
nonzero, flatnonzero, nonzero_values, nonzero, flatnonzero, nonzero_values,
stacklists, DimShuffle, hessian, ptp, power) stacklists, DimShuffle, hessian, ptp, power,
swapaxes
)
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
...@@ -6899,6 +6901,39 @@ if __name__ == '__main__': ...@@ -6899,6 +6901,39 @@ if __name__ == '__main__':
t.test_infer_shape() t.test_infer_shape()
class T_swapaxes(unittest.TestCase):
    """Tests for tensor.swapaxes."""

    def test_no_dimensional_input(self):
        # A scalar has no axes to swap.
        self.assertRaises(IndexError, swapaxes, 2, 0, 1)

    def test_unidimensional_input(self):
        # A vector has only axis 0.
        self.assertRaises(IndexError, swapaxes, [2, 1], 0, 1)

    def test_not_enough_dimension(self):
        # Axis indices beyond ndim are rejected.
        self.assertRaises(IndexError, swapaxes, [[2, 1], [3, 4]], 3, 4)

    def test_doubleswap(self):
        # Swapping the same pair of axes twice is the identity.
        y = matrix()
        f = function([y], swapaxes(y, 0, 1))
        testMatrix = [[2, 1], [3, 4]]
        self.assertTrue(numpy.array_equal(testMatrix, f(f(testMatrix))))

    def test_interface(self):
        # Smoke test: the method form is available on TensorVariable.
        x = theano.tensor.matrix()
        x.swapaxes(0, 1)

    def test_numpy_compare(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        A = tensor.matrix("A", dtype=theano.config.floatX)
        fn = function([A], [swapaxes(A, 0, 1)])
        a = rng.rand(4, 4).astype(theano.config.floatX)
        expected = numpy.swapaxes(a, 0, 1)
        actual = fn(a)
        assert numpy.allclose(expected, actual)
class T_Power(): class T_Power():
def test_numpy_compare(self): def test_numpy_compare(self):
rng = numpy.random.RandomState(utt.fetch_seed()) rng = numpy.random.RandomState(utt.fetch_seed())
...@@ -6925,6 +6960,17 @@ class T_Power(): ...@@ -6925,6 +6960,17 @@ class T_Power():
f = function([x], z) f = function([x], z)
self.assertRaise(ValueError, f, [1, 2, 3, 4]) self.assertRaise(ValueError, f, [1, 2, 3, 4])
def test_numpy_compare(self):
    """Compare tensor.power against numpy.power on random data."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    A = tensor.matrix("A", dtype=theano.config.floatX)
    Q = power(A, 2)
    fn = function([A], [Q])
    a = rng.rand(4, 4).astype(theano.config.floatX)
    n_p = numpy.power(a, 2)
    t_p = fn(a)
    # Bug fix: the original asserted on n_s/t_s, names that are never
    # defined in this method (NameError at runtime).
    assert numpy.allclose(n_p, t_p)
""" """
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -257,6 +257,10 @@ class test_Broadcast(unittest.TestCase): ...@@ -257,6 +257,10 @@ class test_Broadcast(unittest.TestCase):
f(xv, yv) f(xv, yv)
assert (xv == yv).all() assert (xv == yv).all()
def test_fill_var(self):
    # Smoke test: `fill` is callable on a symbolic matrix.  The
    # resulting expression is not compiled or evaluated here.
    x = tensor.matrix()
    x.fill(3)
def test_fill_grad(self): def test_fill_grad(self):
# Fix bug reported at # Fix bug reported at
# https://groups.google.com/d/topic/theano-users/nQshB8gUA6k/discussion # https://groups.google.com/d/topic/theano-users/nQshB8gUA6k/discussion
......
...@@ -557,6 +557,16 @@ class _tensor_py_operators: ...@@ -557,6 +557,16 @@ class _tensor_py_operators:
return theano.tensor.ptp(self, axis) return theano.tensor.ptp(self, axis)
def swapaxes(self, axis1, axis2):
    """Return ``tensor.swapaxes(self, axis1, axis2)``.

    If a matrix is provided with the right axes, its transpose
    will be returned.
    """
    return theano.tensor.basic.swapaxes(self, axis1, axis2)
def fill(self, value):
    """Return ``tensor.basic.fill(self, value)``: a symbolic tensor
    filled with `value` (this does not mutate `self` in place).
    """
    return theano.tensor.basic.fill(self, value)
class TensorVariable(_tensor_py_operators, Variable): class TensorVariable(_tensor_py_operators, Variable):
"""Subclass to add the tensor operators to the basic `Variable` class.""" """Subclass to add the tensor operators to the basic `Variable` class."""
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论