Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
65908b64
提交
65908b64
authored
7月 21, 2014
作者:
Tanjay94
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Separated numpy and scipy function tests into two new file, test_slinalg and test_nlinalg.
上级
a35f1fee
显示空白字符变更
内嵌
并排
正在显示
5 个修改的文件
包含
856 行增加
和
771 行删除
+856
-771
ops.py
theano/sandbox/linalg/ops.py
+4
-216
test_linalg.py
theano/sandbox/linalg/tests/test_linalg.py
+0
-555
nlinalg.py
theano/tensor/nlinalg.py
+233
-0
test_nlinalg.py
theano/tensor/tests/test_nlinalg.py
+434
-0
test_slinalg.py
theano/tensor/tests/test_slinalg.py
+185
-0
没有找到文件。
theano/sandbox/linalg/ops.py
浏览文件 @
65908b64
...
@@ -33,7 +33,10 @@ from theano.tensor.nlinalg import ( MatrixInverse,
...
@@ -33,7 +33,10 @@ from theano.tensor.nlinalg import ( MatrixInverse,
EighGrad
,
EighGrad
,
eigh
,
eigh
,
matrix_dot
,
matrix_dot
,
_zero_disconnected
_zero_disconnected
,
qr
,
svd
,
lstsq
)
)
from
theano.tensor.slinalg
import
(
Cholesky
,
from
theano.tensor.slinalg
import
(
Cholesky
,
...
@@ -377,199 +380,10 @@ def spectral_radius_bound(X, log2_exponent):
...
@@ -377,199 +380,10 @@ def spectral_radius_bound(X, log2_exponent):
2
**
(
-
log2_exponent
))
2
**
(
-
log2_exponent
))
class SVD(Op):
    """Op wrapping ``numpy.linalg.svd``.

    See the docstring of the `svd` function just after this class for
    the user-facing interface.
    """
    # The NumPy routine that performs the actual decomposition.
    _numop = staticmethod(numpy.linalg.svd)

    def __init__(self, full_matrices=True, compute_uv=True):
        """
        inputs :
        --------
        full_matrices : bool, optional
            If True (default), u and v have the shapes (M, M) and (N, N),
            respectively.  Otherwise, the shapes are (M, K) and (K, N),
            respectively, where K = min(M, N).
        compute_uv : bool, optional
            Whether or not to compute u and v in addition to s.
            True by default.
        """
        self.full_matrices = full_matrices
        self.compute_uv = compute_uv

    def props(self):
        # The attributes that define this op's identity, as a tuple.
        return self.full_matrices, self.compute_uv

    def __hash__(self):
        return hash((type(self), self.props()))

    def __eq__(self, other):
        return (type(self) == type(other) and
                self.props() == other.props())

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2, "The input of svd function should be a matrix."
        # NOTE(review): numpy returns only the 1-d singular values when
        # compute_uv=False, yet three matrix outputs are always declared
        # and `perform` always unpacks three results -- presumably this
        # op is only used with compute_uv=True; confirm.
        w = theano.tensor.matrix(dtype=x.dtype)
        u = theano.tensor.matrix(dtype=x.dtype)
        v = theano.tensor.matrix(dtype=x.dtype)
        return Apply(self, [x], [w, u, v])

    def perform(self, node, inputs, outputs):
        (x,) = inputs
        (w, u, v) = outputs
        assert x.ndim == 2, "The input of svd function should be a matrix."
        # Outputs are filled positionally with numpy's (u, s, vh) triple.
        w[0], u[0], v[0] = self._numop(x,
                                       self.full_matrices,
                                       self.compute_uv)

    def __str__(self):
        return self._numop.__name__.capitalize()
def svd(a, full_matrices=1, compute_uv=1):
    """
    This function performs the SVD on CPU.

    Parameters :
    ------------
    a : matrix
        The matrix to be decomposed.
    full_matrices : bool, optional
        If True (default), u and v have the shapes (M, M) and (N, N),
        respectively.  Otherwise, the shapes are (M, K) and (K, N),
        respectively, where K = min(M, N).
    compute_uv : bool, optional
        Whether or not to compute u and v in addition to s.
        True by default.

    Returns :
    -------
    U, V and D matrices.
    """
    return SVD(full_matrices, compute_uv)(a)
class QRFull(Op):
    """
    Full QR Decomposition.

    Computes the QR decomposition of a matrix.
    Factor the matrix a as qr, where q is orthonormal
    and r is upper-triangular.
    """
    # The NumPy routine that performs the actual factorisation.
    _numop = staticmethod(numpy.linalg.qr)

    def __init__(self, mode):
        # `mode` is forwarded verbatim to numpy.linalg.qr; see the `qr`
        # function below for the accepted values.
        self.mode = mode

    def props(self):
        # The attribute that defines this op's identity.
        return self.mode

    def __hash__(self):
        return hash((type(self), self.props()))

    def __eq__(self, other):
        return (type(self) == type(other) and
                self.props() == other.props())

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q = theano.tensor.matrix(dtype=x.dtype)
        r = theano.tensor.matrix(dtype=x.dtype)
        return Apply(self, [x], [q, r])

    def perform(self, node, inputs, outputs):
        (x,) = inputs
        (q, r) = outputs
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q[0], r[0] = self._numop(x, self.mode)

    def __str__(self):
        # BUG FIX: this previously returned self._numop.__class__.__name__,
        # which for a plain function object is always the string
        # "function".  Use the wrapped routine's own name, capitalized,
        # exactly as the SVD op in this file does.
        return self._numop.__name__.capitalize()
class QRIncomplete(Op):
    """
    Incomplete QR Decomposition.

    Computes the QR decomposition of a matrix.
    Factor the matrix a as qr and return a single matrix.
    """
    # The NumPy routine that performs the actual factorisation.
    _numop = staticmethod(numpy.linalg.qr)

    def __init__(self, mode):
        # `mode` is forwarded verbatim to numpy.linalg.qr; only the modes
        # for which numpy returns a single array belong to this op.
        self.mode = mode

    def props(self):
        # The attribute that defines this op's identity.
        return self.mode

    def __hash__(self):
        return hash((type(self), self.props()))

    def __eq__(self, other):
        return (type(self) == type(other) and
                self.props() == other.props())

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q = theano.tensor.matrix(dtype=x.dtype)
        return Apply(self, [x], [q])

    def perform(self, node, inputs, outputs):
        (x,) = inputs
        (q,) = outputs
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q[0] = self._numop(x, self.mode)

    def __str__(self):
        # BUG FIX: this previously returned self._numop.__class__.__name__,
        # which for a plain function object is always the string
        # "function".  Use the wrapped routine's own name, capitalized,
        # exactly as the SVD op in this file does.
        return self._numop.__name__.capitalize()
def qr(a, mode="full"):
    """
    Computes the QR decomposition of a matrix.
    Factor the matrix a as qr, where q
    is orthonormal and r is upper-triangular.

    Parameters :
    ------------
    a : array_like, shape (M, N)
        Matrix to be factored.
    mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
        If K = min(M, N), then
        'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
        'complete' : returns q, r with dimensions (M, M), (M, N)
        'r' : returns r only with dimensions (K, N)
        'raw' : returns h, tau with dimensions (N, M), (K,)
        'full' : alias of 'reduced', deprecated
        'economic' : returns h from 'raw', deprecated.  The options
        'reduced', 'complete', and 'raw' are new in numpy 1.8, see the
        notes for more information.  The default is 'reduced' and to
        maintain backward compatibility with earlier versions of numpy
        both it and the old default 'full' can be omitted.  Note that
        array h returned in 'raw' mode is transposed for calling Fortran.
        The 'economic' mode is deprecated.  The modes 'full' and
        'economic' may be passed using only the first letter for
        backwards compatibility, but all others must be spelled out.
        Default mode is 'full' which is also default for numpy 1.6.1.

        Note: Default mode was left to full as full and reduced are both
        doing the same thing in the new numpy version but only full works
        on the old previous numpy version.

    Returns :
    ---------
    q : matrix of float or complex, optional
        A matrix with orthonormal columns.  When mode = 'complete' the
        result is an orthogonal/unitary matrix depending on whether or
        not a is real/complex.  The determinant may be either +/- 1 in
        that case.
    r : matrix of float or complex, optional
        The upper-triangular matrix.
    """
    # Probe numpy with a tiny constant matrix: depending on the installed
    # numpy version and on `mode`, numpy.linalg.qr returns either a
    # (q, r) tuple or a single array.  The probe's result type tells us
    # which op (two outputs vs one) must be built for the graph.
    probe = [[2, 1],
             [3, 4]]
    if isinstance(numpy.linalg.qr(probe, mode), tuple):
        return QRFull(mode)(a)
    return QRIncomplete(mode)(a)
def
matrix_power
(
M
,
n
):
def
matrix_power
(
M
,
n
):
...
@@ -615,29 +429,3 @@ def norm(x,ord):
...
@@ -615,29 +429,3 @@ def norm(x,ord):
elif
ndim
>
2
:
elif
ndim
>
2
:
raise
NotImplementedError
(
"We don't support norm witn ndim > 2"
)
raise
NotImplementedError
(
"We don't support norm witn ndim > 2"
)
class lstsq(theano.Op):
    """Least-squares solver op wrapping ``numpy.linalg.lstsq``.

    Outputs are, in order: the solution (same type as `y`), the residuals
    (dvector), the rank (lscalar), and the singular values (dvector).
    """

    def __eq__(self, other):
        # The op has no parameters, so all instances are interchangeable.
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))

    def __str__(self):
        return self.__class__.__name__

    def make_node(self, x, y, rcond):
        x = theano.tensor.as_tensor_variable(x)
        y = theano.tensor.as_tensor_variable(y)
        rcond = theano.tensor.as_tensor_variable(rcond)
        return theano.Apply(self, [x, y, rcond],
                            [y.type(),
                             theano.tensor.dvector(),
                             theano.tensor.lscalar(),
                             theano.tensor.dvector()])

    def perform(self, node, inputs, outputs):
        # BUG FIX (cleanup): the previous version unpacked x, y and rcond
        # into locals and then ignored them, indexing `inputs` again.
        x, y, rcond = inputs
        # numpy returns (solution, residuals, rank, singular_values).
        zz = numpy.linalg.lstsq(x, y, rcond)
        outputs[0][0] = zz[0]
        outputs[1][0] = zz[1]
        outputs[2][0] = zz[2]
        outputs[3][0] = zz[3]
theano/sandbox/linalg/tests/test_linalg.py
浏览文件 @
65908b64
...
@@ -44,209 +44,6 @@ from nose.plugins.attrib import attr
...
@@ -44,209 +44,6 @@ from nose.plugins.attrib import attr
from
nose.tools
import
assert_raises
from
nose.tools
import
assert_raises
def check_lower_triangular(pd, ch_f):
    """Check that ch_f(pd) is a lower-triangular Cholesky factor of pd."""
    chol = ch_f(pd)
    # Top-right corner must be exactly zero, bottom-left corner nonzero.
    assert chol[0, pd.shape[1] - 1] == 0
    assert chol[pd.shape[0] - 1, 0] != 0
    # L.dot(L.T) reconstructs pd; the transposed product must not.
    assert numpy.allclose(numpy.dot(chol, chol.T), pd)
    assert not numpy.allclose(numpy.dot(chol.T, chol), pd)
def check_upper_triangular(pd, ch_f):
    """Check that ch_f(pd) is an upper-triangular Cholesky factor of pd.

    NOTE(review): unlike check_lower_triangular above, the corner
    indices are hard-coded for a 5x5 input -- confirm callers always
    pass a 5x5 matrix.
    """
    chol = ch_f(pd)
    # Bottom-left corner must be exactly zero, top-right corner nonzero.
    assert chol[4, 0] == 0
    assert chol[0, 4] != 0
    # U.T.dot(U) reconstructs pd; the transposed product must not.
    assert numpy.allclose(numpy.dot(chol.T, chol), pd)
    assert not numpy.allclose(numpy.dot(chol, chol.T), pd)
def
test_cholesky
():
if
not
imported_scipy
:
raise
SkipTest
(
"Scipy needed for the Cholesky op."
)
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
r
=
rng
.
randn
(
5
,
5
)
.
astype
(
config
.
floatX
)
pd
=
numpy
.
dot
(
r
,
r
.
T
)
x
=
tensor
.
matrix
()
chol
=
cholesky
(
x
)
# Check the default.
ch_f
=
function
([
x
],
chol
)
yield
check_lower_triangular
,
pd
,
ch_f
# Explicit lower-triangular.
chol
=
Cholesky
(
lower
=
True
)(
x
)
ch_f
=
function
([
x
],
chol
)
yield
check_lower_triangular
,
pd
,
ch_f
# Explicit upper-triangular.
chol
=
Cholesky
(
lower
=
False
)(
x
)
ch_f
=
function
([
x
],
chol
)
yield
check_upper_triangular
,
pd
,
ch_f
def
test_cholesky_grad
():
if
not
imported_scipy
:
raise
SkipTest
(
"Scipy needed for the Cholesky op."
)
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
r
=
rng
.
randn
(
5
,
5
)
.
astype
(
config
.
floatX
)
pd
=
numpy
.
dot
(
r
,
r
.
T
)
eps
=
None
if
config
.
floatX
==
"float64"
:
eps
=
2e-8
# Check the default.
yield
(
lambda
:
utt
.
verify_grad
(
cholesky
,
[
pd
],
3
,
rng
,
eps
=
eps
))
# Explicit lower-triangular.
yield
(
lambda
:
utt
.
verify_grad
(
Cholesky
(
lower
=
True
),
[
pd
],
3
,
rng
,
eps
=
eps
))
# Explicit upper-triangular.
yield
(
lambda
:
utt
.
verify_grad
(
Cholesky
(
lower
=
False
),
[
pd
],
3
,
rng
,
eps
=
eps
))
@attr
(
'slow'
)
def
test_cholesky_and_cholesky_grad_shape
():
if
not
imported_scipy
:
raise
SkipTest
(
"Scipy needed for the Cholesky op."
)
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
x
=
tensor
.
matrix
()
for
l
in
(
cholesky
(
x
),
Cholesky
(
lower
=
True
)(
x
),
Cholesky
(
lower
=
False
)(
x
)):
f_chol
=
theano
.
function
([
x
],
l
.
shape
)
g
=
tensor
.
grad
(
l
.
sum
(),
x
)
f_cholgrad
=
theano
.
function
([
x
],
g
.
shape
)
topo_chol
=
f_chol
.
maker
.
fgraph
.
toposort
()
topo_cholgrad
=
f_cholgrad
.
maker
.
fgraph
.
toposort
()
if
config
.
mode
!=
'FAST_COMPILE'
:
assert
sum
([
node
.
op
.
__class__
==
Cholesky
for
node
in
topo_chol
])
==
0
assert
sum
([
node
.
op
.
__class__
==
CholeskyGrad
for
node
in
topo_cholgrad
])
==
0
for
shp
in
[
2
,
3
,
5
]:
m
=
numpy
.
cov
(
rng
.
randn
(
shp
,
shp
+
10
))
.
astype
(
config
.
floatX
)
yield
numpy
.
testing
.
assert_equal
,
f_chol
(
m
),
(
shp
,
shp
)
yield
numpy
.
testing
.
assert_equal
,
f_cholgrad
(
m
),
(
shp
,
shp
)
def
test_inverse_correctness
():
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
r
=
rng
.
randn
(
4
,
4
)
.
astype
(
theano
.
config
.
floatX
)
x
=
tensor
.
matrix
()
xi
=
matrix_inverse
(
x
)
ri
=
function
([
x
],
xi
)(
r
)
assert
ri
.
shape
==
r
.
shape
assert
ri
.
dtype
==
r
.
dtype
rir
=
numpy
.
dot
(
ri
,
r
)
rri
=
numpy
.
dot
(
r
,
ri
)
assert
_allclose
(
numpy
.
identity
(
4
),
rir
),
rir
assert
_allclose
(
numpy
.
identity
(
4
),
rri
),
rri
def
test_pseudoinverse_correctness
():
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
d1
=
rng
.
randint
(
4
)
+
2
d2
=
rng
.
randint
(
4
)
+
2
r
=
rng
.
randn
(
d1
,
d2
)
.
astype
(
theano
.
config
.
floatX
)
x
=
tensor
.
matrix
()
xi
=
pinv
(
x
)
ri
=
function
([
x
],
xi
)(
r
)
assert
ri
.
shape
[
0
]
==
r
.
shape
[
1
]
assert
ri
.
shape
[
1
]
==
r
.
shape
[
0
]
assert
ri
.
dtype
==
r
.
dtype
# Note that pseudoinverse can be quite unprecise so I prefer to compare
# the result with what numpy.linalg returns
assert
_allclose
(
ri
,
numpy
.
linalg
.
pinv
(
r
))
def
test_matrix_dot
():
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
n
=
rng
.
randint
(
4
)
+
2
rs
=
[]
xs
=
[]
for
k
in
xrange
(
n
):
rs
+=
[
rng
.
randn
(
4
,
4
)
.
astype
(
theano
.
config
.
floatX
)]
xs
+=
[
tensor
.
matrix
()]
sol
=
matrix_dot
(
*
xs
)
theano_sol
=
function
(
xs
,
sol
)(
*
rs
)
numpy_sol
=
rs
[
0
]
for
r
in
rs
[
1
:]:
numpy_sol
=
numpy
.
dot
(
numpy_sol
,
r
)
assert
_allclose
(
numpy_sol
,
theano_sol
)
def
test_qr_modes
():
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
A
=
tensor
.
matrix
(
"A"
,
dtype
=
theano
.
config
.
floatX
)
a
=
rng
.
rand
(
4
,
4
)
.
astype
(
theano
.
config
.
floatX
)
f
=
function
([
A
],
qr
(
A
))
t_qr
=
f
(
a
)
n_qr
=
numpy
.
linalg
.
qr
(
a
)
assert
_allclose
(
n_qr
,
t_qr
)
for
mode
in
[
"reduced"
,
"r"
,
"raw"
,
"full"
,
"economic"
]:
f
=
function
([
A
],
qr
(
A
,
mode
))
t_qr
=
f
(
a
)
n_qr
=
numpy
.
linalg
.
qr
(
a
,
mode
)
if
isinstance
(
n_qr
,
(
list
,
tuple
)):
assert
_allclose
(
n_qr
[
0
],
t_qr
[
0
])
assert
_allclose
(
n_qr
[
1
],
t_qr
[
1
])
else
:
assert
_allclose
(
n_qr
,
t_qr
)
try
:
n_qr
=
numpy
.
linalg
.
qr
(
a
,
"complete"
)
f
=
function
([
A
],
qr
(
A
,
"complete"
))
t_qr
=
f
(
a
)
assert
_allclose
(
n_qr
,
t_qr
)
except
TypeError
,
e
:
assert
"name 'complete' is not defined"
in
str
(
e
)
def
test_svd
():
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
A
=
tensor
.
matrix
(
"A"
,
dtype
=
theano
.
config
.
floatX
)
U
,
V
,
T
=
svd
(
A
)
fn
=
function
([
A
],
[
U
,
V
,
T
])
a
=
rng
.
rand
(
4
,
4
)
.
astype
(
theano
.
config
.
floatX
)
n_u
,
n_v
,
n_t
=
numpy
.
linalg
.
svd
(
a
)
t_u
,
t_v
,
t_t
=
fn
(
a
)
assert
_allclose
(
n_u
,
t_u
)
assert
_allclose
(
n_v
,
t_v
)
assert
_allclose
(
n_t
,
t_t
)
def test_inverse_singular():
    """matrix_inverse must raise LinAlgError on a singular matrix."""
    # Rows 2 and 3 are identical, so the matrix is singular.
    singular = numpy.array([[1, 0, 0]] + [[0, 1, 0]] * 2,
                           dtype=theano.config.floatX)
    a = tensor.matrix()
    f = function([a], matrix_inverse(a))
    # CONSISTENCY: use assert_raises (already imported at the top of
    # this file) instead of the manual try/except/assert-False pattern.
    assert_raises(numpy.linalg.LinAlgError, f, singular)
def test_inverse_grad():
    """Check the gradient of matrix_inverse by finite differences."""
    # BUG FIX (cleanup): the body previously repeated this identical
    # three-line check twice verbatim; one run with the same seed and
    # the same input covers exactly the same ground.
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(4, 4)
    tensor.verify_grad(matrix_inverse, [r], rng=numpy.random)
def
test_rop_lop
():
def
test_rop_lop
():
mx
=
tensor
.
matrix
(
'mx'
)
mx
=
tensor
.
matrix
(
'mx'
)
mv
=
tensor
.
matrix
(
'mv'
)
mv
=
tensor
.
matrix
(
'mv'
)
...
@@ -295,189 +92,6 @@ def test_rop_lop():
...
@@ -295,189 +92,6 @@ def test_rop_lop():
assert
_allclose
(
v1
,
v2
),
(
'LOP mismatch:
%
s
%
s'
%
(
v1
,
v2
))
assert
_allclose
(
v1
,
v2
),
(
'LOP mismatch:
%
s
%
s'
%
(
v1
,
v2
))
def
test_det
():
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
r
=
rng
.
randn
(
5
,
5
)
.
astype
(
config
.
floatX
)
x
=
tensor
.
matrix
()
f
=
theano
.
function
([
x
],
det
(
x
))
assert
numpy
.
allclose
(
numpy
.
linalg
.
det
(
r
),
f
(
r
))
def
test_det_grad
():
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
r
=
rng
.
randn
(
5
,
5
)
.
astype
(
config
.
floatX
)
tensor
.
verify_grad
(
det
,
[
r
],
rng
=
numpy
.
random
)
def
test_det_shape
():
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
r
=
rng
.
randn
(
5
,
5
)
.
astype
(
config
.
floatX
)
x
=
tensor
.
matrix
()
f
=
theano
.
function
([
x
],
det
(
x
))
f_shape
=
theano
.
function
([
x
],
det
(
x
)
.
shape
)
assert
numpy
.
all
(
f
(
r
)
.
shape
==
f_shape
(
r
))
class
test_diag
(
unittest
.
TestCase
):
"""
Test that linalg.diag has the same behavior as numpy.diag.
numpy.diag has two behaviors:
(1) when given a vector, it returns a matrix with that vector as the
diagonal.
(2) when given a matrix, returns a vector which is the diagonal of the
matrix.
(1) and (2) are tested by test_alloc_diag and test_extract_diag
respectively.
test_diag test makes sure that linalg.diag instantiates
the right op based on the dimension of the input.
"""
def
__init__
(
self
,
name
,
mode
=
None
,
shared
=
tensor
.
_shared
,
floatX
=
None
,
type
=
tensor
.
TensorType
):
self
.
mode
=
mode
self
.
shared
=
shared
if
floatX
is
None
:
floatX
=
config
.
floatX
self
.
floatX
=
floatX
self
.
type
=
type
super
(
test_diag
,
self
)
.
__init__
(
name
)
def
test_alloc_diag
(
self
):
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
x
=
theano
.
tensor
.
vector
()
g
=
alloc_diag
(
x
)
f
=
theano
.
function
([
x
],
g
)
# test "normal" scenario (5x5 matrix) and special cases of 0x0 and 1x1
for
shp
in
[
5
,
0
,
1
]:
m
=
rng
.
rand
(
shp
)
.
astype
(
self
.
floatX
)
v
=
numpy
.
diag
(
m
)
r
=
f
(
m
)
# The right matrix is created
assert
(
r
==
v
)
.
all
()
# Test we accept only vectors
xx
=
theano
.
tensor
.
matrix
()
ok
=
False
try
:
alloc_diag
(
xx
)
except
TypeError
:
ok
=
True
assert
ok
# Test infer_shape
f
=
theano
.
function
([
x
],
g
.
shape
)
topo
=
f
.
maker
.
fgraph
.
toposort
()
if
config
.
mode
!=
'FAST_COMPILE'
:
assert
sum
([
node
.
op
.
__class__
==
AllocDiag
for
node
in
topo
])
==
0
for
shp
in
[
5
,
0
,
1
]:
m
=
rng
.
rand
(
shp
)
.
astype
(
self
.
floatX
)
assert
(
f
(
m
)
==
m
.
shape
)
.
all
()
def
test_alloc_diag_grad
(
self
):
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
x
=
rng
.
rand
(
5
)
tensor
.
verify_grad
(
alloc_diag
,
[
x
],
rng
=
rng
)
def
test_diag
(
self
):
# test that it builds a matrix with given diagonal when using
# vector inputs
x
=
theano
.
tensor
.
vector
()
y
=
diag
(
x
)
assert
y
.
owner
.
op
.
__class__
==
AllocDiag
# test that it extracts the diagonal when using matrix input
x
=
theano
.
tensor
.
matrix
()
y
=
extract_diag
(
x
)
assert
y
.
owner
.
op
.
__class__
==
ExtractDiag
# other types should raise error
x
=
theano
.
tensor
.
tensor3
()
ok
=
False
try
:
y
=
extract_diag
(
x
)
except
TypeError
:
ok
=
True
assert
ok
# not testing the view=True case since it is not used anywhere.
def
test_extract_diag
(
self
):
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
m
=
rng
.
rand
(
2
,
3
)
.
astype
(
self
.
floatX
)
x
=
self
.
shared
(
m
)
g
=
extract_diag
(
x
)
f
=
theano
.
function
([],
g
)
assert
[
isinstance
(
node
.
inputs
[
0
]
.
type
,
self
.
type
)
for
node
in
f
.
maker
.
fgraph
.
toposort
()
if
isinstance
(
node
.
op
,
ExtractDiag
)]
==
[
True
]
for
shp
in
[(
2
,
3
),
(
3
,
2
),
(
3
,
3
),
(
1
,
1
),
(
0
,
0
)]:
m
=
rng
.
rand
(
*
shp
)
.
astype
(
self
.
floatX
)
x
.
set_value
(
m
)
v
=
numpy
.
diag
(
m
)
r
=
f
()
# The right diagonal is extracted
assert
(
r
==
v
)
.
all
()
# Test we accept only matrix
xx
=
theano
.
tensor
.
vector
()
ok
=
False
try
:
extract_diag
(
xx
)
except
TypeError
:
ok
=
True
assert
ok
# Test infer_shape
f
=
theano
.
function
([],
g
.
shape
)
topo
=
f
.
maker
.
fgraph
.
toposort
()
if
config
.
mode
!=
'FAST_COMPILE'
:
assert
sum
([
node
.
op
.
__class__
==
ExtractDiag
for
node
in
topo
])
==
0
for
shp
in
[(
2
,
3
),
(
3
,
2
),
(
3
,
3
)]:
m
=
rng
.
rand
(
*
shp
)
.
astype
(
self
.
floatX
)
x
.
set_value
(
m
)
assert
f
()
==
min
(
shp
)
def
test_extract_diag_grad
(
self
):
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
x
=
rng
.
rand
(
5
,
4
)
.
astype
(
self
.
floatX
)
tensor
.
verify_grad
(
extract_diag
,
[
x
],
rng
=
rng
)
@attr
(
'slow'
)
def
test_extract_diag_empty
(
self
):
c
=
self
.
shared
(
numpy
.
array
([[],
[]],
self
.
floatX
))
f
=
theano
.
function
([],
extract_diag
(
c
),
mode
=
self
.
mode
)
assert
[
isinstance
(
node
.
inputs
[
0
]
.
type
,
self
.
type
)
for
node
in
f
.
maker
.
fgraph
.
toposort
()
if
isinstance
(
node
.
op
,
ExtractDiag
)]
==
[
True
]
def
test_trace
():
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
x
=
theano
.
tensor
.
matrix
()
g
=
trace
(
x
)
f
=
theano
.
function
([
x
],
g
)
for
shp
in
[(
2
,
3
),
(
3
,
2
),
(
3
,
3
)]:
m
=
rng
.
rand
(
*
shp
)
.
astype
(
config
.
floatX
)
v
=
numpy
.
trace
(
m
)
assert
v
==
f
(
m
)
xx
=
theano
.
tensor
.
vector
()
ok
=
False
try
:
trace
(
xx
)
except
TypeError
:
ok
=
True
assert
ok
def
test_spectral_radius_bound
():
def
test_spectral_radius_bound
():
tol
=
10
**
(
-
6
)
tol
=
10
**
(
-
6
)
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
...
@@ -525,143 +139,6 @@ def test_spectral_radius_bound():
...
@@ -525,143 +139,6 @@ def test_spectral_radius_bound():
assert
ok
assert
ok
class
test_Solve
(
utt
.
InferShapeTester
):
def
setUp
(
self
):
super
(
test_Solve
,
self
)
.
setUp
()
self
.
op_class
=
Solve
self
.
op
=
Solve
()
def
test_infer_shape
(
self
):
if
not
imported_scipy
:
raise
SkipTest
(
"Scipy needed for the Cholesky op."
)
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
A
=
theano
.
tensor
.
matrix
()
b
=
theano
.
tensor
.
matrix
()
self
.
_compile_and_check
([
A
,
b
],
# theano.function inputs
[
self
.
op
(
A
,
b
)],
# theano.function outputs
# A must be square
[
numpy
.
asarray
(
rng
.
rand
(
5
,
5
),
dtype
=
config
.
floatX
),
numpy
.
asarray
(
rng
.
rand
(
5
,
1
),
dtype
=
config
.
floatX
)],
self
.
op_class
,
warn
=
False
)
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
A
=
theano
.
tensor
.
matrix
()
b
=
theano
.
tensor
.
vector
()
self
.
_compile_and_check
([
A
,
b
],
# theano.function inputs
[
self
.
op
(
A
,
b
)],
# theano.function outputs
# A must be square
[
numpy
.
asarray
(
rng
.
rand
(
5
,
5
),
dtype
=
config
.
floatX
),
numpy
.
asarray
(
rng
.
rand
(
5
),
dtype
=
config
.
floatX
)],
self
.
op_class
,
warn
=
False
)
class
test_Eig
(
utt
.
InferShapeTester
):
op_class
=
Eig
op
=
eig
dtype
=
'float64'
def
setUp
(
self
):
super
(
test_Eig
,
self
)
.
setUp
()
self
.
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
self
.
A
=
theano
.
tensor
.
matrix
(
dtype
=
self
.
dtype
)
X
=
numpy
.
asarray
(
self
.
rng
.
rand
(
5
,
5
),
dtype
=
self
.
dtype
)
self
.
S
=
X
.
dot
(
X
.
T
)
def
test_infer_shape
(
self
):
A
=
self
.
A
S
=
self
.
S
self
.
_compile_and_check
([
A
],
# theano.function inputs
self
.
op
(
A
),
# theano.function outputs
# S must be square
[
S
],
self
.
op_class
,
warn
=
False
)
def
test_eval
(
self
):
A
=
theano
.
tensor
.
matrix
(
dtype
=
self
.
dtype
)
self
.
assertEquals
([
e
.
eval
({
A
:
[[
1
]]})
for
e
in
self
.
op
(
A
)],
[[
1.0
],
[[
1.0
]]])
x
=
[[
0
,
1
],
[
1
,
0
]]
w
,
v
=
[
e
.
eval
({
A
:
x
})
for
e
in
self
.
op
(
A
)]
assert_array_almost_equal
(
numpy
.
dot
(
x
,
v
),
w
*
v
)
class
test_Eigh
(
test_Eig
):
op
=
staticmethod
(
eigh
)
def
test_uplo
(
self
):
S
=
self
.
S
a
=
theano
.
tensor
.
matrix
(
dtype
=
self
.
dtype
)
wu
,
vu
=
[
out
.
eval
({
a
:
S
})
for
out
in
self
.
op
(
a
,
'U'
)]
wl
,
vl
=
[
out
.
eval
({
a
:
S
})
for
out
in
self
.
op
(
a
,
'L'
)]
assert_array_almost_equal
(
wu
,
wl
)
assert_array_almost_equal
(
vu
*
numpy
.
sign
(
vu
[
0
,
:]),
vl
*
numpy
.
sign
(
vl
[
0
,
:]))
def
test_grad
(
self
):
S
=
self
.
S
utt
.
verify_grad
(
lambda
x
:
self
.
op
(
x
)[
0
],
[
S
],
rng
=
self
.
rng
)
utt
.
verify_grad
(
lambda
x
:
self
.
op
(
x
)[
1
],
[
S
],
rng
=
self
.
rng
)
utt
.
verify_grad
(
lambda
x
:
self
.
op
(
x
,
'U'
)[
0
],
[
S
],
rng
=
self
.
rng
)
utt
.
verify_grad
(
lambda
x
:
self
.
op
(
x
,
'U'
)[
1
],
[
S
],
rng
=
self
.
rng
)
class
test_Eigh_float32
(
test_Eigh
):
dtype
=
'float32'
def
test_matrix_inverse_solve
():
if
not
imported_scipy
:
raise
SkipTest
(
"Scipy needed for the Solve op."
)
A
=
theano
.
tensor
.
dmatrix
(
'A'
)
b
=
theano
.
tensor
.
dmatrix
(
'b'
)
node
=
matrix_inverse
(
A
)
.
dot
(
b
)
.
owner
[
out
]
=
inv_as_solve
.
transform
(
node
)
assert
isinstance
(
out
.
owner
.
op
,
Solve
)
def
test_eigvalsh
():
if
not
imported_scipy
:
raise
SkipTest
(
"Scipy needed for the geigvalsh op."
)
import
scipy.linalg
A
=
theano
.
tensor
.
dmatrix
(
'a'
)
B
=
theano
.
tensor
.
dmatrix
(
'b'
)
f
=
function
([
A
,
B
],
eigvalsh
(
A
,
B
))
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
a
=
rng
.
randn
(
5
,
5
)
a
=
a
+
a
.
T
for
b
in
[
10
*
numpy
.
eye
(
5
,
5
)
+
rng
.
randn
(
5
,
5
)]:
w
=
f
(
a
,
b
)
refw
=
scipy
.
linalg
.
eigvalsh
(
a
,
b
)
numpy
.
testing
.
assert_array_almost_equal
(
w
,
refw
)
# We need to test None separatly, as otherwise DebugMode will
# complain, as this isn't a valid ndarray.
b
=
None
B
=
theano
.
tensor
.
NoneConst
f
=
function
([
A
],
eigvalsh
(
A
,
B
))
w
=
f
(
a
)
refw
=
scipy
.
linalg
.
eigvalsh
(
a
,
b
)
numpy
.
testing
.
assert_array_almost_equal
(
w
,
refw
)
def
test_eigvalsh_grad
():
rng
=
numpy
.
random
.
RandomState
(
utt
.
fetch_seed
())
a
=
rng
.
randn
(
5
,
5
)
a
=
a
+
a
.
T
b
=
10
*
numpy
.
eye
(
5
,
5
)
+
rng
.
randn
(
5
,
5
)
tensor
.
verify_grad
(
lambda
a
,
b
:
eigvalsh
(
a
,
b
)
.
dot
([
1
,
2
,
3
,
4
,
5
]),
[
a
,
b
],
rng
=
numpy
.
random
)
class
Matrix_power
():
class
Matrix_power
():
def
test_numpy_compare
(
self
):
def
test_numpy_compare
(
self
):
...
@@ -716,35 +193,3 @@ class T_NormTests(unittest.TestCase):
...
@@ -716,35 +193,3 @@ class T_NormTests(unittest.TestCase):
t_n
=
f
(
A
[
2
][
i
])
t_n
=
f
(
A
[
2
][
i
])
n_n
=
numpy
.
linalg
.
norm
(
A
[
2
][
i
],
A
[
3
][
i
])
n_n
=
numpy
.
linalg
.
norm
(
A
[
2
][
i
],
A
[
3
][
i
])
assert
_allclose
(
n_n
,
t_n
)
assert
_allclose
(
n_n
,
t_n
)
class T_lstsq(unittest.TestCase):
    """Tests for the `lstsq` op in theano.sandbox.linalg.ops."""

    def test_correct_solution(self):
        # Solve x.dot(m) = y for a known integer system and verify the
        # solution reconstructs y.
        x = tensor.lmatrix()
        y = tensor.lmatrix()
        z = tensor.lscalar()
        b = theano.sandbox.linalg.ops.lstsq()(x, y, z)
        # BUG FIX (cleanup): `f` was previously compiled twice with
        # identical arguments; compile it once.
        f = function([x, y, z], b)
        TestMatrix1 = numpy.asarray([[2, 1], [3, 4]])
        TestMatrix2 = numpy.asarray([[17, 20], [43, 50]])
        TestScalar = numpy.asarray(1)
        m = f(TestMatrix1, TestMatrix2, TestScalar)
        self.assertTrue(numpy.allclose(TestMatrix2,
                                       numpy.dot(TestMatrix1, m[0])))

    def test_wrong_coefficient_matrix(self):
        # A 1-d coefficient "matrix" must make numpy's lstsq raise.
        x = tensor.vector()
        y = tensor.vector()
        z = tensor.scalar()
        b = theano.sandbox.linalg.ops.lstsq()(x, y, z)
        f = function([x, y, z], b)
        self.assertRaises(numpy.linalg.linalg.LinAlgError,
                          f, [2, 1], [2, 1], 1)

    def test_wrong_rcond_dimension(self):
        # A vector rcond (instead of a scalar) must make lstsq raise.
        x = tensor.vector()
        y = tensor.vector()
        z = tensor.vector()
        b = theano.sandbox.linalg.ops.lstsq()(x, y, z)
        f = function([x, y, z], b)
        self.assertRaises(numpy.linalg.LinAlgError,
                          f, [2, 1], [2, 1], [2, 1])
theano/tensor/nlinalg.py
浏览文件 @
65908b64
...
@@ -496,3 +496,235 @@ class EighGrad(Op):
...
@@ -496,3 +496,235 @@ class EighGrad(Op):
def eigh(a, UPLO='L'):
    """
    Build and apply an ``Eigh`` op on `a`.

    `UPLO` (default ``'L'``) is forwarded verbatim to the ``Eigh``
    constructor.
    """
    return Eigh(UPLO)(a)
class QRFull(Op):
    """
    Full QR Decomposition.

    Computes the QR decomposition of a matrix.
    Factor the matrix a as qr, where q is orthonormal
    and r is upper-triangular.
    """
    # The numeric work is delegated to numpy at perform() time.
    _numop = staticmethod(numpy.linalg.qr)

    def __init__(self, mode):
        # `mode` is forwarded verbatim to numpy.linalg.qr.
        self.mode = mode

    def __hash__(self):
        # Must agree with __eq__: both key on (type, props()).
        return hash((type(self), self.props()))

    def __eq__(self, other):
        return (type(self) == type(other) and
                self.props() == other.props())

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q = theano.tensor.matrix(dtype=x.dtype)
        r = theano.tensor.matrix(dtype=x.dtype)
        return Apply(self, [x], [q, r])

    def props(self):
        """Return the parameters that determine this op's identity."""
        return self.mode

    def perform(self, node, inputs, outputs):
        # Rewritten from the Python-2-only tuple-parameter form
        # ``def perform(self, node, (x,), (q, r))``; behavior unchanged.
        (x,) = inputs
        (q, r) = outputs
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q[0], r[0] = self._numop(x, self.mode)

    def __str__(self):
        # Bug fix: the original returned self._numop.__class__.__name__,
        # which is the type of the wrapped callable ("function"), not a
        # meaningful op name.
        return self.__class__.__name__
class QRIncomplete(Op):
    """
    Incomplete QR Decomposition.

    Computes the QR decomposition of a matrix.
    Factor the matrix a as qr and return a single matrix.
    """
    # The numeric work is delegated to numpy at perform() time.
    _numop = staticmethod(numpy.linalg.qr)

    def __init__(self, mode):
        # `mode` is forwarded verbatim to numpy.linalg.qr; for this op it
        # is a mode for which numpy returns a single array, not a tuple.
        self.mode = mode

    def __hash__(self):
        # Must agree with __eq__: both key on (type, props()).
        return hash((type(self), self.props()))

    def __eq__(self, other):
        return (type(self) == type(other) and
                self.props() == other.props())

    def props(self):
        """Return the parameters that determine this op's identity."""
        return self.mode

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q = theano.tensor.matrix(dtype=x.dtype)
        return Apply(self, [x], [q])

    def perform(self, node, inputs, outputs):
        # Rewritten from the Python-2-only tuple-parameter form
        # ``def perform(self, node, (x,), (q,))``; behavior unchanged.
        (x,) = inputs
        (q,) = outputs
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q[0] = self._numop(x, self.mode)

    def __str__(self):
        # Bug fix: the original returned self._numop.__class__.__name__,
        # which is the type of the wrapped callable ("function"), not a
        # meaningful op name.
        return self.__class__.__name__
def qr(a, mode="full"):
    """
    Computes the QR decomposition of a matrix.
    Factor the matrix a as qr, where q
    is orthonormal and r is upper-triangular.

    Parameters :
    ------------
    a : array_like, shape (M, N)
        Matrix to be factored.
    mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
        If K = min(M, N), then
        'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
        'complete' : returns q, r with dimensions (M, M), (M, N)
        'r' : returns r only with dimensions (K, N)
        'raw' : returns h, tau with dimensions (N, M), (K,)
        'full' : alias of 'reduced', deprecated
        'economic' : returns h from 'raw', deprecated. The options 'reduced',
        'complete', and 'raw' are new in numpy 1.8, see the notes for more
        information. The default is 'reduced' and to maintain backward
        compatibility with earlier versions of numpy both it and the old
        default 'full' can be omitted. Note that array h returned in 'raw'
        mode is transposed for calling Fortran. The 'economic' mode is
        deprecated. The modes 'full' and 'economic' may be passed using only
        the first letter for backwards compatibility, but all others
        must be spelled out.
        Default mode is 'full' which is also default for numpy 1.6.1.

        Note: Default mode was left to full as full and reduced are both doing
        the same thing in the new numpy version but only full works on the old
        previous numpy version.

    Returns :
    ---------
    q : matrix of float or complex, optional
        A matrix with orthonormal columns. When mode = 'complete'
        the result is an orthogonal/unitary matrix depending on whether
        or not a is real/complex. The determinant may be either +/- 1 in that case.
    r : matrix of float or complex, optional
        The upper-triangular matrix.
    """
    # Probe numpy with a small dummy matrix to find out whether, for this
    # numpy version and `mode`, qr returns a tuple (q, r) or a single
    # array, and build the matching op.  NOTE(review): this runs a real QR
    # at graph-construction time on every call.
    x = [[2, 1], [3, 4]]
    if isinstance(numpy.linalg.qr(x, mode), tuple):
        return QRFull(mode)(a)
    else:
        return QRIncomplete(mode)(a)
class SVD(Op):
    # See doc in the docstring of the function just after this class.

    # numpy performs the actual decomposition at perform() time.
    _numop = staticmethod(numpy.linalg.svd)

    def __init__(self, full_matrices=True, compute_uv=True):
        """
        inputs :
        --------
        full_matrices : bool, optional
            If True (default), u and v have the shapes (M, M) and (N, N),
            respectively.
            Otherwise, the shapes are (M, K) and (K, N), respectively,
            where K = min(M, N).
        compute_uv : bool, optional
            Whether or not to compute u and v in addition to s.
            True by default.
        """
        self.full_matrices = full_matrices
        self.compute_uv = compute_uv

    def props(self):
        """The parameters that define this op instance's identity."""
        return (self.full_matrices, self.compute_uv)

    def __hash__(self):
        # Keyed on the same data as __eq__.
        return hash((type(self), self.props()))

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.props() == other.props()

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2, "The input of svd function should be a matrix."
        # Three matrix outputs, all with the input's dtype.
        result_vars = [theano.tensor.matrix(dtype=x.dtype)
                       for _ in range(3)]
        return Apply(self, [x], result_vars)

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        w, u, v = output_storage
        assert x.ndim == 2, "The input of svd function should be a matrix."
        w[0], u[0], v[0] = self._numop(x,
                                       self.full_matrices,
                                       self.compute_uv)

    def __str__(self):
        return self._numop.__name__.capitalize()
def svd(a, full_matrices=1, compute_uv=1):
    """
    This function performs the SVD on CPU.

    Parameters :
    ------------
    full_matrices : bool, optional
        If True (default), u and v have the shapes (M, M) and (N, N),
        respectively.
        Otherwise, the shapes are (M, K) and (K, N), respectively,
        where K = min(M, N).
    compute_uv : bool, optional
        Whether or not to compute u and v in addition to s.
        True by default.

    Returns :
    -------
    U, V and D matrices.
    """
    return SVD(full_matrices, compute_uv)(a)
def test_matrix_inverse_solve():
    """Check that the inv_as_solve rewrite turns inv(A).dot(b) into Solve."""
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Solve op.")
    A = theano.tensor.dmatrix('A')
    b = theano.tensor.dmatrix('b')
    # Apply the optimization directly to the dot node.
    node = matrix_inverse(A).dot(b).owner
    [out] = inv_as_solve.transform(node)
    assert isinstance(out.owner.op, Solve)
class lstsq(theano.Op):
    """
    Least-squares solver op wrapping ``numpy.linalg.lstsq``.

    Inputs are the coefficient matrix `x`, the target values `y`, and the
    cut-off ratio `rcond`; the four outputs mirror numpy's: least-squares
    solution, residuals, rank, and singular values.
    """

    def __eq__(self, other):
        # The op holds no parameters, so instances of the same type are
        # interchangeable.
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))

    def __str__(self):
        return self.__class__.__name__

    def make_node(self, x, y, rcond):
        x = theano.tensor.as_tensor_variable(x)
        y = theano.tensor.as_tensor_variable(y)
        rcond = theano.tensor.as_tensor_variable(rcond)
        # Outputs: solution (same type as y), residuals, rank,
        # singular values.
        return theano.Apply(self, [x, y, rcond],
                            [y.type(), theano.tensor.dvector(),
                             theano.tensor.lscalar(),
                             theano.tensor.dvector()])

    def perform(self, node, inputs, outputs):
        # Fix: the original assigned x/y/rcond and then ignored them,
        # re-indexing `inputs` directly; use the unpacked names.
        x, y, rcond = inputs
        zz = numpy.linalg.lstsq(x, y, rcond)
        outputs[0][0] = zz[0]  # least-squares solution
        outputs[1][0] = zz[1]  # residuals
        outputs[2][0] = zz[2]  # rank
        outputs[3][0] = zz[3]  # singular values
\ No newline at end of file
theano/tensor/tests/test_nlinalg.py
0 → 100644
浏览文件 @
65908b64
import
unittest
import
numpy
import
numpy.linalg
from
numpy.testing
import
assert_array_almost_equal
from
numpy.testing
import
dec
,
assert_array_equal
,
assert_allclose
from
numpy
import
inf
import
theano
from
theano
import
tensor
,
function
from
theano.tensor.basic
import
_allclose
from
theano.tests.test_rop
import
break_op
from
theano.tests
import
unittest_tools
as
utt
from
theano
import
config
from theano.tensor.nlinalg import (MatrixInverse,
                                   matrix_inverse,
                                   MatrixPinv,
                                   pinv,
                                   AllocDiag,
                                   alloc_diag,
                                   ExtractDiag,
                                   extract_diag,
                                   diag,
                                   trace,
                                   Det,
                                   det,
                                   Eig,
                                   eig,
                                   Eigh,
                                   EighGrad,
                                   eigh,
                                   matrix_dot,
                                   _zero_disconnected,
                                   qr,
                                   svd,
                                   )
from
nose.plugins.skip
import
SkipTest
from
nose.plugins.attrib
import
attr
from
nose.tools
import
assert_raises
def test_pseudoinverse_correctness():
    """Compare pinv against numpy.linalg.pinv on a random rectangular matrix."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    # Random rectangular shape, each dimension in [2, 5].
    d1 = rng.randint(4) + 2
    d2 = rng.randint(4) + 2
    r = rng.randn(d1, d2).astype(theano.config.floatX)

    x = tensor.matrix()
    xi = pinv(x)

    ri = function([x], xi)(r)
    # The pseudoinverse of an (d1, d2) matrix is (d2, d1).
    assert ri.shape[0] == r.shape[1]
    assert ri.shape[1] == r.shape[0]
    assert ri.dtype == r.dtype
    # Note that pseudoinverse can be quite imprecise so I prefer to compare
    # the result with what numpy.linalg returns
    assert _allclose(ri, numpy.linalg.pinv(r))
def test_inverse_correctness():
    """Check that matrix_inverse(r) actually inverts r (both products ~ I)."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(4, 4).astype(theano.config.floatX)
    x = tensor.matrix()
    xi = matrix_inverse(x)
    ri = function([x], xi)(r)
    assert ri.shape == r.shape
    assert ri.dtype == r.dtype

    # Both left and right products must be close to the identity.
    rir = numpy.dot(ri, r)
    rri = numpy.dot(r, ri)
    assert _allclose(numpy.identity(4), rir), rir
    assert _allclose(numpy.identity(4), rri), rri
def test_matrix_dot():
    """Compare matrix_dot over n matrices against chained numpy.dot."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    # Random chain length in [2, 5].
    n = rng.randint(4) + 2
    rs = []
    xs = []
    for k in xrange(n):
        rs += [rng.randn(4, 4).astype(theano.config.floatX)]
        xs += [tensor.matrix()]
    sol = matrix_dot(*xs)

    theano_sol = function(xs, sol)(*rs)
    # Reference: left-to-right chained numpy.dot.
    numpy_sol = rs[0]
    for r in rs[1:]:
        numpy_sol = numpy.dot(numpy_sol, r)

    assert _allclose(numpy_sol, theano_sol)
def test_qr_modes():
    """Compare the qr op against numpy.linalg.qr for every supported mode."""
    rng = numpy.random.RandomState(utt.fetch_seed())

    A = tensor.matrix("A", dtype=theano.config.floatX)
    a = rng.rand(4, 4).astype(theano.config.floatX)

    # Default mode.
    f = function([A], qr(A))
    t_qr = f(a)
    n_qr = numpy.linalg.qr(a)
    assert _allclose(n_qr, t_qr)

    for mode in ["reduced", "r", "raw", "full", "economic"]:
        f = function([A], qr(A, mode))
        t_qr = f(a)
        n_qr = numpy.linalg.qr(a, mode)
        if isinstance(n_qr, (list, tuple)):
            # Modes that return a (q, r) pair.
            assert _allclose(n_qr[0], t_qr[0])
            assert _allclose(n_qr[1], t_qr[1])
        else:
            # Modes that return a single array.
            assert _allclose(n_qr, t_qr)

    # 'complete' only exists on newer numpy; older versions raise
    # TypeError, which is accepted here.
    try:
        n_qr = numpy.linalg.qr(a, "complete")
        f = function([A], qr(A, "complete"))
        t_qr = f(a)
        assert _allclose(n_qr, t_qr)
    except TypeError, e:
        assert "name 'complete' is not defined" in str(e)
def test_svd():
    """Compare the svd op's three outputs against numpy.linalg.svd."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    A = tensor.matrix("A", dtype=theano.config.floatX)
    # NOTE(review): `svd` does not appear in this module's import list
    # from theano.tensor.nlinalg -- verify it is imported.
    U, V, T = svd(A)
    fn = function([A], [U, V, T])
    a = rng.rand(4, 4).astype(theano.config.floatX)
    n_u, n_v, n_t = numpy.linalg.svd(a)
    t_u, t_v, t_t = fn(a)

    assert _allclose(n_u, t_u)
    assert _allclose(n_v, t_v)
    assert _allclose(n_t, t_t)
def test_inverse_singular():
    """Inverting a singular matrix must raise numpy.linalg.LinAlgError."""
    # Rows 1 and 2 are identical, so the matrix is singular.
    singular = numpy.array([[1, 0, 0]] + [[0, 1, 0]] * 2,
                           dtype=theano.config.floatX)
    a = tensor.matrix()
    f = function([a], matrix_inverse(a))
    try:
        f(singular)
    except numpy.linalg.LinAlgError:
        return
    # Reaching this point means no exception was raised: fail.
    assert False
def test_inverse_grad():
    """Gradient check for matrix_inverse via verify_grad."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(4, 4)
    tensor.verify_grad(matrix_inverse, [r], rng=numpy.random)

    # NOTE(review): the block below repeats the check above verbatim
    # (same seed, same shape) -- presumably unintentional; confirm.
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(4, 4)
    tensor.verify_grad(matrix_inverse, [r], rng=numpy.random)
def test_det():
    """Compare the det op against numpy.linalg.det."""
    rng = numpy.random.RandomState(utt.fetch_seed())

    r = rng.randn(5, 5).astype(config.floatX)
    x = tensor.matrix()
    f = theano.function([x], det(x))
    assert numpy.allclose(numpy.linalg.det(r), f(r))
def test_det_grad():
    """Gradient check for the det op via verify_grad."""
    rng = numpy.random.RandomState(utt.fetch_seed())

    r = rng.randn(5, 5).astype(config.floatX)
    tensor.verify_grad(det, [r], rng=numpy.random)
def test_det_shape():
    """The computed determinant's shape must match its inferred shape."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(5, 5).astype(config.floatX)

    x = tensor.matrix()
    f = theano.function([x], det(x))
    f_shape = theano.function([x], det(x).shape)
    assert numpy.all(f(r).shape == f_shape(r))
class test_diag(unittest.TestCase):
    """
    Test that linalg.diag has the same behavior as numpy.diag.
    numpy.diag has two behaviors:
    (1) when given a vector, it returns a matrix with that vector as the
    diagonal.
    (2) when given a matrix, returns a vector which is the diagonal of the
    matrix.

    (1) and (2) are tested by test_alloc_diag and test_extract_diag
    respectively.

    test_diag test makes sure that linalg.diag instantiates
    the right op based on the dimension of the input.
    """
    def __init__(self, name, mode=None, shared=tensor._shared,
                 floatX=None, type=tensor.TensorType):
        # `mode`, `shared` and `type` are parameterized so subclasses can
        # re-run the same tests with different backends.
        self.mode = mode
        self.shared = shared
        if floatX is None:
            floatX = config.floatX
        self.floatX = floatX
        self.type = type
        super(test_diag, self).__init__(name)

    def test_alloc_diag(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        x = theano.tensor.vector()
        g = alloc_diag(x)
        f = theano.function([x], g)

        # test "normal" scenario (5x5 matrix) and special cases of 0x0 and 1x1
        for shp in [5, 0, 1]:
            m = rng.rand(shp).astype(self.floatX)
            v = numpy.diag(m)
            r = f(m)
            # The right matrix is created
            assert (r == v).all()

        # Test we accept only vectors
        xx = theano.tensor.matrix()
        ok = False
        try:
            alloc_diag(xx)
        except TypeError:
            ok = True
        assert ok

        # Test infer_shape
        f = theano.function([x], g.shape)
        topo = f.maker.fgraph.toposort()
        if config.mode != 'FAST_COMPILE':
            # With optimizations on, the shape should be computed without
            # the AllocDiag op itself appearing in the graph.
            assert sum([node.op.__class__ == AllocDiag
                        for node in topo]) == 0
        for shp in [5, 0, 1]:
            m = rng.rand(shp).astype(self.floatX)
            assert (f(m) == m.shape).all()

    def test_alloc_diag_grad(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        x = rng.rand(5)
        tensor.verify_grad(alloc_diag, [x], rng=rng)

    def test_diag(self):
        # test that it builds a matrix with given diagonal when using
        # vector inputs
        x = theano.tensor.vector()
        y = diag(x)
        assert y.owner.op.__class__ == AllocDiag

        # test that it extracts the diagonal when using matrix input
        x = theano.tensor.matrix()
        y = extract_diag(x)
        assert y.owner.op.__class__ == ExtractDiag

        # other types should raise error
        x = theano.tensor.tensor3()
        ok = False
        try:
            y = extract_diag(x)
        except TypeError:
            ok = True
        assert ok

    # not testing the view=True case since it is not used anywhere.
    def test_extract_diag(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        m = rng.rand(2, 3).astype(self.floatX)
        x = self.shared(m)
        g = extract_diag(x)
        f = theano.function([], g)
        # Every ExtractDiag node must consume an input of the expected type.
        assert [isinstance(node.inputs[0].type, self.type)
                for node in f.maker.fgraph.toposort()
                if isinstance(node.op, ExtractDiag)] == [True]
        for shp in [(2, 3), (3, 2), (3, 3), (1, 1), (0, 0)]:
            m = rng.rand(*shp).astype(self.floatX)
            x.set_value(m)
            v = numpy.diag(m)
            r = f()
            # The right diagonal is extracted
            assert (r == v).all()

        # Test we accept only matrix
        xx = theano.tensor.vector()
        ok = False
        try:
            extract_diag(xx)
        except TypeError:
            ok = True
        assert ok

        # Test infer_shape
        f = theano.function([], g.shape)
        topo = f.maker.fgraph.toposort()
        if config.mode != 'FAST_COMPILE':
            assert sum([node.op.__class__ == ExtractDiag
                        for node in topo]) == 0
        for shp in [(2, 3), (3, 2), (3, 3)]:
            m = rng.rand(*shp).astype(self.floatX)
            x.set_value(m)
            # The diagonal of an (r, c) matrix has min(r, c) entries.
            assert f() == min(shp)

    def test_extract_diag_grad(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        x = rng.rand(5, 4).astype(self.floatX)
        tensor.verify_grad(extract_diag, [x], rng=rng)

    @attr('slow')
    def test_extract_diag_empty(self):
        # Extracting the diagonal of a (2, 0) matrix must still compile.
        c = self.shared(numpy.array([[], []], self.floatX))
        f = theano.function([], extract_diag(c), mode=self.mode)
        assert [isinstance(node.inputs[0].type, self.type)
                for node in f.maker.fgraph.toposort()
                if isinstance(node.op, ExtractDiag)] == [True]
def test_trace():
    """Compare the trace op against numpy.trace; non-matrices must raise."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    x = theano.tensor.matrix()
    g = trace(x)
    f = theano.function([x], g)

    for shp in [(2, 3), (3, 2), (3, 3)]:
        m = rng.rand(*shp).astype(config.floatX)
        v = numpy.trace(m)
        assert v == f(m)

    # A vector input must be rejected with TypeError.
    xx = theano.tensor.vector()
    ok = False
    try:
        trace(xx)
    except TypeError:
        ok = True
    assert ok
class test_Eig(utt.InferShapeTester):
    """Shape-inference and evaluation tests for the Eig op."""
    # Subclasses override these to re-run the suite on related ops/dtypes.
    op_class = Eig
    op = eig
    dtype = 'float64'

    def setUp(self):
        super(test_Eig, self).setUp()
        self.rng = numpy.random.RandomState(utt.fetch_seed())
        self.A = theano.tensor.matrix(dtype=self.dtype)
        X = numpy.asarray(self.rng.rand(5, 5),
                          dtype=self.dtype)
        # X.dot(X.T) is symmetric, usable by the Eigh subclass too.
        self.S = X.dot(X.T)

    def test_infer_shape(self):
        A = self.A
        S = self.S
        self._compile_and_check([A],  # theano.function inputs
                                self.op(A),  # theano.function outputs
                                # S must be square
                                [S],
                                self.op_class,
                                warn=False)

    def test_eval(self):
        A = theano.tensor.matrix(dtype=self.dtype)
        # A 1x1 matrix: eigenvalue [1.], eigenvector [[1.]].
        self.assertEquals([e.eval({A: [[1]]}) for e in self.op(A)],
                          [[1.0], [[1.0]]])
        x = [[0, 1], [1, 0]]
        w, v = [e.eval({A: x}) for e in self.op(A)]
        # Eigen equation: x v = w v (column-wise).
        assert_array_almost_equal(numpy.dot(x, v), w * v)
class test_Eigh(test_Eig):
    """Re-run the Eig tests with eigh, plus UPLO and gradient checks."""
    op = staticmethod(eigh)

    def test_uplo(self):
        # 'U' and 'L' triangles of a symmetric matrix must give the same
        # eigenvalues, and eigenvectors equal up to a per-column sign.
        S = self.S
        a = theano.tensor.matrix(dtype=self.dtype)
        wu, vu = [out.eval({a: S}) for out in self.op(a, 'U')]
        wl, vl = [out.eval({a: S}) for out in self.op(a, 'L')]
        assert_array_almost_equal(wu, wl)
        assert_array_almost_equal(vu * numpy.sign(vu[0, :]),
                                  vl * numpy.sign(vl[0, :]))

    def test_grad(self):
        # Gradient of each output, for both default and 'U' triangles.
        S = self.S
        utt.verify_grad(lambda x: self.op(x)[0], [S], rng=self.rng)
        utt.verify_grad(lambda x: self.op(x)[1], [S], rng=self.rng)
        utt.verify_grad(lambda x: self.op(x, 'U')[0], [S], rng=self.rng)
        utt.verify_grad(lambda x: self.op(x, 'U')[1], [S], rng=self.rng)
class test_Eigh_float32(test_Eigh):
    # Same suite as test_Eigh, run in single precision.
    dtype = 'float32'
class T_lstsq(unittest.TestCase):
    """Tests for the least-squares op ``theano.sandbox.linalg.ops.lstsq``."""

    def test_correct_solution(self):
        # Solve TestMatrix1 . m = TestMatrix2 and check that the returned
        # solution (first output of lstsq) reproduces TestMatrix2.
        x = tensor.lmatrix()
        y = tensor.lmatrix()
        z = tensor.lscalar()
        b = theano.sandbox.linalg.ops.lstsq()(x, y, z)
        f = function([x, y, z], b)
        TestMatrix1 = numpy.asarray([[2, 1], [3, 4]])
        TestMatrix2 = numpy.asarray([[17, 20], [43, 50]])
        TestScalar = numpy.asarray(1)
        # NOTE(review): this second compilation is redundant -- `f` was
        # already built four lines above with the same inputs/outputs.
        f = function([x, y, z], b)
        m = f(TestMatrix1, TestMatrix2, TestScalar)
        self.assertTrue(numpy.allclose(TestMatrix2,
                                       numpy.dot(TestMatrix1, m[0])))

    def test_wrong_coefficient_matrix(self):
        # 1-d (vector) coefficient input must raise LinAlgError at run time.
        x = tensor.vector()
        y = tensor.vector()
        z = tensor.scalar()
        b = theano.sandbox.linalg.ops.lstsq()(x, y, z)
        f = function([x, y, z], b)
        self.assertRaises(numpy.linalg.linalg.LinAlgError,
                          f, [2, 1], [2, 1], 1)

    def test_wrong_rcond_dimension(self):
        # A vector-valued rcond must raise LinAlgError at run time.
        x = tensor.vector()
        y = tensor.vector()
        z = tensor.vector()
        b = theano.sandbox.linalg.ops.lstsq()(x, y, z)
        f = function([x, y, z], b)
        self.assertRaises(numpy.linalg.LinAlgError,
                          f, [2, 1], [2, 1], [2, 1])
theano/tensor/tests/test_slinalg.py
0 → 100644
浏览文件 @
65908b64
import
unittest
import
numpy
import
numpy.linalg
from
numpy.testing
import
assert_array_almost_equal
from
numpy.testing
import
dec
,
assert_array_equal
,
assert_allclose
from
numpy
import
inf
import
theano
from
theano
import
tensor
,
function
from
theano.tensor.basic
import
_allclose
from
theano.tests.test_rop
import
break_op
from
theano.tests
import
unittest_tools
as
utt
from
theano
import
config
from
theano.tensor.slinalg
import
(
Cholesky
,
cholesky
,
CholeskyGrad
,
Solve
,
solve
,
Eigvalsh
,
EigvalshGrad
,
eigvalsh
)
from
nose.plugins.skip
import
SkipTest
from
nose.plugins.attrib
import
attr
from
nose.tools
import
assert_raises
def check_lower_triangular(pd, ch_f):
    """
    Assert that `ch_f` returns a lower-triangular Cholesky factor of the
    positive-definite matrix `pd`.

    Checks the two corners for triangularity and that L . L^T (but not
    L^T . L) reconstructs `pd`.
    """
    factor = ch_f(pd)
    last_row = pd.shape[0] - 1
    last_col = pd.shape[1] - 1
    # Lower-triangular: zero upper-right corner, non-zero lower-left.
    assert factor[0, last_col] == 0
    assert factor[last_row, 0] != 0
    # L L^T must reconstruct pd; the transposed product must not.
    assert numpy.allclose(numpy.dot(factor, factor.T), pd)
    assert not numpy.allclose(numpy.dot(factor.T, factor), pd)
def check_upper_triangular(pd, ch_f):
    """
    Assert that `ch_f` returns an upper-triangular Cholesky factor of the
    positive-definite matrix `pd`.

    Checks the two corners for triangularity and that U^T . U (but not
    U . U^T) reconstructs `pd`.
    """
    ch = ch_f(pd)
    # Generalized from hard-coded indices [4, 0] / [0, 4], which assumed
    # a 5x5 input: use the actual corners of `pd` instead.
    assert ch[pd.shape[0] - 1, 0] == 0
    assert ch[0, pd.shape[1] - 1] != 0
    # U^T U must reconstruct pd; the transposed product must not.
    assert numpy.allclose(numpy.dot(ch.T, ch), pd)
    assert not numpy.allclose(numpy.dot(ch, ch.T), pd)
def test_cholesky():
    """Generator test: lower/upper Cholesky factors of a random PD matrix."""
    # NOTE(review): `imported_scipy` is not in this module's import list --
    # verify it is imported, otherwise this raises NameError.
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Cholesky op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(5, 5).astype(config.floatX)
    # r . r^T is positive-definite (for full-rank r).
    pd = numpy.dot(r, r.T)
    x = tensor.matrix()
    chol = cholesky(x)
    # Check the default.
    ch_f = function([x], chol)
    yield check_lower_triangular, pd, ch_f
    # Explicit lower-triangular.
    chol = Cholesky(lower=True)(x)
    ch_f = function([x], chol)
    yield check_lower_triangular, pd, ch_f
    # Explicit upper-triangular.
    chol = Cholesky(lower=False)(x)
    ch_f = function([x], chol)
    yield check_upper_triangular, pd, ch_f
def test_cholesky_grad():
    """Generator test: gradient of Cholesky (default, lower, upper)."""
    # NOTE(review): `imported_scipy` is not in this module's import list --
    # verify it is imported, otherwise this raises NameError.
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Cholesky op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(5, 5).astype(config.floatX)
    pd = numpy.dot(r, r.T)
    eps = None
    if config.floatX == "float64":
        eps = 2e-8
    # Check the default.
    yield (lambda: utt.verify_grad(cholesky, [pd], 3, rng, eps=eps))
    # Explicit lower-triangular.
    yield (lambda: utt.verify_grad(Cholesky(lower=True), [pd], 3, rng,
                                   eps=eps))
    # Explicit upper-triangular.
    yield (lambda: utt.verify_grad(Cholesky(lower=False), [pd], 3, rng,
                                   eps=eps))
@attr('slow')
def test_cholesky_and_cholesky_grad_shape():
    """Shape graphs of Cholesky and its gradient must avoid the ops themselves."""
    # NOTE(review): `imported_scipy` is not in this module's import list --
    # verify it is imported, otherwise this raises NameError.
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Cholesky op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    x = tensor.matrix()
    for l in (cholesky(x), Cholesky(lower=True)(x), Cholesky(lower=False)(x)):
        f_chol = theano.function([x], l.shape)
        g = tensor.grad(l.sum(), x)
        f_cholgrad = theano.function([x], g.shape)
        topo_chol = f_chol.maker.fgraph.toposort()
        topo_cholgrad = f_cholgrad.maker.fgraph.toposort()
        if config.mode != 'FAST_COMPILE':
            # With optimizations on, the shape-only graphs should not
            # contain Cholesky / CholeskyGrad nodes at all.
            assert sum([node.op.__class__ == Cholesky
                        for node in topo_chol]) == 0
            assert sum([node.op.__class__ == CholeskyGrad
                        for node in topo_cholgrad]) == 0
        for shp in [2, 3, 5]:
            # numpy.cov of (shp, shp + 10) samples gives a (shp, shp) matrix.
            m = numpy.cov(rng.randn(shp, shp + 10)).astype(config.floatX)
            yield numpy.testing.assert_equal, f_chol(m), (shp, shp)
            yield numpy.testing.assert_equal, f_cholgrad(m), (shp, shp)
def test_eigvalsh():
    """Compare the eigvalsh op against scipy.linalg.eigvalsh, incl. B=None."""
    # NOTE(review): `imported_scipy` is not in this module's import list --
    # verify it is imported, otherwise this raises NameError.
    if not imported_scipy:
        raise SkipTest("Scipy needed for the geigvalsh op.")
    import scipy.linalg

    A = theano.tensor.dmatrix('a')
    B = theano.tensor.dmatrix('b')
    f = function([A, B], eigvalsh(A, B))

    rng = numpy.random.RandomState(utt.fetch_seed())
    a = rng.randn(5, 5)
    a = a + a.T  # symmetrize
    for b in [10 * numpy.eye(5, 5) + rng.randn(5, 5)]:
        w = f(a, b)
        refw = scipy.linalg.eigvalsh(a, b)
        numpy.testing.assert_array_almost_equal(w, refw)

    # We need to test None separately, as otherwise DebugMode will
    # complain, as this isn't a valid ndarray.
    b = None
    B = theano.tensor.NoneConst
    f = function([A], eigvalsh(A, B))
    w = f(a)
    refw = scipy.linalg.eigvalsh(a, b)
    numpy.testing.assert_array_almost_equal(w, refw)
def test_eigvalsh_grad():
    """Gradient check for eigvalsh w.r.t. both input matrices."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    a = rng.randn(5, 5)
    a = a + a.T  # symmetrize
    b = 10 * numpy.eye(5, 5) + rng.randn(5, 5)
    # dot([1..5]) collapses the eigenvalue vector to a scalar so
    # verify_grad can differentiate it.
    tensor.verify_grad(lambda a, b: eigvalsh(a, b).dot([1, 2, 3, 4, 5]),
                       [a, b],
                       rng=numpy.random)
class test_Solve(utt.InferShapeTester):
    """Shape-inference tests for the Solve op (matrix and vector rhs)."""

    def setUp(self):
        super(test_Solve, self).setUp()
        self.op_class = Solve
        self.op = Solve()

    def test_infer_shape(self):
        # NOTE(review): `imported_scipy` is not in this module's import
        # list -- verify it is imported, otherwise this raises NameError.
        if not imported_scipy:
            raise SkipTest("Scipy needed for the Cholesky op.")
        rng = numpy.random.RandomState(utt.fetch_seed())
        # Matrix right-hand side.
        A = theano.tensor.matrix()
        b = theano.tensor.matrix()
        self._compile_and_check([A, b],  # theano.function inputs
                                [self.op(A, b)],  # theano.function outputs
                                # A must be square
                                [numpy.asarray(rng.rand(5, 5),
                                               dtype=config.floatX),
                                 numpy.asarray(rng.rand(5, 1),
                                               dtype=config.floatX)],
                                self.op_class,
                                warn=False)
        rng = numpy.random.RandomState(utt.fetch_seed())
        # Vector right-hand side.
        A = theano.tensor.matrix()
        b = theano.tensor.vector()
        self._compile_and_check([A, b],  # theano.function inputs
                                [self.op(A, b)],  # theano.function outputs
                                # A must be square
                                [numpy.asarray(rng.rand(5, 5),
                                               dtype=config.floatX),
                                 numpy.asarray(rng.rand(5),
                                               dtype=config.floatX)],
                                self.op_class,
                                warn=False)
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论