testgroup / pytensor / Commits / e5e60a01

Commit e5e60a01, authored Oct 05, 2011 by Ian Goodfellow, committed by Olivier Delalleau on Oct 06, 2011
Parent: 965afb68

    fixed up imports, changed comment

Showing 1 changed file with 103 additions and 86 deletions:

    theano/tensor/tests/test_basic.py  (+103 / -86)
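The change repeated throughout the diff below replaces names that previously arrived through `from theano.tensor import *` with names that are either imported explicitly or qualified through the `T` and `basic` module references. A minimal sketch of the resulting style, assuming a Theano of that era is importable (the variable names are illustrative, not taken from the commit):

    import numpy
    import theano
    import theano.tensor as T
    from theano.tensor import basic

    # Symbols are reached through the module alias instead of a bare name
    # pulled in by a wildcard import.
    x = T.dvector('x')
    y = T.dvector('y')
    f = theano.function([x, y], T.true_div(x, y))
    print f(numpy.array([1.0, 2.0]), numpy.array([2.0, 4.0]))   # [ 0.5  0.5]

    # Internal symbols are reached through the basic submodule.
    assert isinstance(x[0:1].owner.op, basic.Subtensor)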
@@ -9,10 +9,24 @@ import numpy
 from numpy.testing import dec
 from numpy.testing.noseclasses import KnownFailureTest

-from theano.tensor import *
 from theano.tensor import _shared
 from theano.tensor import basic as tensor # for hidden symbols
 from theano.tensor import inplace
+import theano.tensor as T
+from theano.tensor import (wvector, bvector, autocast_float_as, argmin,
+        max_and_argmax, cscalar, Subtensor, ctensor3, join, horizontal_stack,
+        vertical_stack, argmax, get_vector_length, fscalar, zeros_like, sum,
+        tensor3, vector, izip, add, addbroadcast, alloc, as_tensor_variable,
+        tensor_from_scalar, ARange, autocast_float, basic, clip, constant,
+        default, dot, inc_subtensor, set_subtensor, dmatrix, dscalar, dvector,
+        eq, eye, fill, flatten, inverse_permutation, tensor4,
+        permute_row_elements, Flatten, fmatrix, fscalars, grad, inplace,
+        iscalar, matrix, minimum, matrices, maximum, mul, neq, Reshape, row,
+        scalar, scalars, second, smallest, stack, sub, Tensor, tensor_copy,
+        tensordot, tensordot_grad, TensorType, unbroadcast, var, value, Join,
+        shape, MaxAndArgmax, lscalar, zvector, exp, get_constant_value,
+        ivector, reshape, scalar_from_tensor, scal, iscalars, arange,
+        dscalars, fvector, imatrix, numeric_grad, opt, ComplexError,
+        TensorDot, lvector, true_div, max, min)

 import warnings
 from copy import copy
 from theano import compile, config
@@ -22,8 +36,9 @@ from theano.gof.python25 import any, all, combinations
 from theano.compile.mode import get_default_mode
 from theano import function
 from theano.tests import unittest_tools as utt
-import theano.tensor as T
+import theano
+import logging

 imported_scipy_special = False
 mode_no_scipy = get_default_mode()
@@ -498,7 +513,7 @@ _good_broadcast_div_mod_normal_float = dict(empty2 = (numpy.asarray([0]), numpy.
 def no_complex(d):
     """Remove pairs from dictionary d when the value contains complex data."""
     return dict((k, v) for k, v in d.iteritems()
-                if all(str(x.dtype) not in tensor.complex_dtypes for x in v))
+                if all(str(x.dtype) not in basic.complex_dtypes for x in v))

 # 'No-complex' versions.

@@ -526,7 +541,7 @@ if config.floatX=='float32':
     # float32.
     # This is probably caused by our way of computing the gradient error.
     div_grad_rtol = 0.025
-TrueDivTester = makeBroadcastTester(op = true_div,
+TrueDivTester = makeBroadcastTester(op = T.true_div,
                                     expected = lambda x, y: check_floatX((x, y), x / y),
                                     good = _good_broadcast_div_mod_normal_float,
                                     # integers = (randint(2, 3), randint_nonzero(2, 3)),
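Each *Tester assignment touched below is produced by this file's makeBroadcastTester helper, which pairs a Theano op with a reference NumPy callable (`expected`) and a dictionary of valid inputs (`good`); the commit only changes how the op is spelled, not what is tested. A rough sketch of the kind of check such a tester performs, not the helper's actual implementation:

    import numpy
    import theano
    import theano.tensor as T

    def check_against_reference(op, expected, good):
        # Compile the op over symbolic vectors and compare with the reference.
        for name, inputs in good.items():
            sym = [T.dvector() for _ in inputs]
            f = theano.function(sym, op(*sym))
            assert numpy.allclose(f(*inputs), expected(*inputs)), name

    # Mirrors the TrueDivTester entry; the sample inputs are made up here.
    check_against_reference(T.true_div,
                            lambda x, y: x / y,
                            dict(normal=(numpy.array([1., 2.]),
                                         numpy.array([2., 4.]))))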
@@ -542,7 +557,7 @@ TrueDivInplaceTester = makeBroadcastTester(op = inplace.true_div_inplace,
                                            grad_rtol=div_grad_rtol,
                                            inplace = True)

-ModTester = makeBroadcastTester(op = mod,
+ModTester = makeBroadcastTester(op = T.mod,
                                 expected = lambda x, y: numpy.asarray(x % y, dtype=theano.scalar.basic.upcast(x.dtype, y.dtype)),
                                 good = _good_broadcast_div_mod_normal_float_no_complex,
                                 # integers = (randint(2, 3), randint_nonzero(2, 3)),

@@ -625,7 +640,7 @@ _grad_broadcast_unary_normal = dict(normal = (numpy.asarray(rand_ranged(-5, 5, (

-AbsTester = makeBroadcastTester(op = tensor.abs_,
+AbsTester = makeBroadcastTester(op = basic.abs_,
                                 expected = lambda x: abs(x),
                                 good = _good_broadcast_unary_normal,
                                 grad = _grad_broadcast_unary_normal)

@@ -638,7 +653,7 @@ AbsInplaceTester = makeBroadcastTester(op = inplace.abs__inplace,
                                        grad = _grad_broadcast_unary_normal,
                                        inplace = True)

-NegTester = makeBroadcastTester(op = neg,
+NegTester = makeBroadcastTester(op = T.neg,
                                 expected = lambda x: -x,
                                 good = _good_broadcast_unary_normal,
                                 grad = _grad_broadcast_unary_normal)

@@ -648,7 +663,7 @@ NegInplaceTester = makeBroadcastTester(op = inplace.neg_inplace,
                                        grad = _grad_broadcast_unary_normal,
                                        inplace = True)

-SgnTester = makeBroadcastTester(op = sgn,
+SgnTester = makeBroadcastTester(op = T.sgn,
                                 expected = numpy.sign,
                                 good = _good_broadcast_unary_normal_no_complex,
                                 grad = _grad_broadcast_unary_normal,)

@@ -657,7 +672,7 @@ SgnInplaceTester = makeBroadcastTester(op = inplace.sgn_inplace,
                                        good = _good_broadcast_unary_normal_no_complex,
                                        grad = _grad_broadcast_unary_normal,
                                        inplace = True)

-CeilTester = makeBroadcastTester(op = ceil,
+CeilTester = makeBroadcastTester(op = T.ceil,
                                  expected = lambda a: numpy.asarray(numpy.ceil(a), a.dtype),
                                  good = _good_broadcast_unary_normal_no_complex,
                                  grad = _grad_broadcast_unary_normal)

@@ -667,7 +682,7 @@ CeilInplaceTester = makeBroadcastTester(op = inplace.ceil_inplace,
                                         grad = _grad_broadcast_unary_normal,
                                         inplace = True)

-FloorTester = makeBroadcastTester(op = floor,
+FloorTester = makeBroadcastTester(op = T.floor,
                                   expected = lambda a: numpy.asarray(numpy.floor(a), a.dtype),
                                   good = _good_broadcast_unary_normal_no_complex,
                                   grad = _grad_broadcast_unary_normal)

@@ -677,7 +692,7 @@ FloorInplaceTester = makeBroadcastTester(op = inplace.floor_inplace,
                                          grad = _grad_broadcast_unary_normal,
                                          inplace = True)

-RoundHalfToEvenTester = makeBroadcastTester(op = round_half_to_even,
+RoundHalfToEvenTester = makeBroadcastTester(op = T.round_half_to_even,
                                             expected = numpy.round,
                                             good = _good_broadcast_unary_normal_float_no_complex)

 # TODO: Why complex are accepted in the next one?

@@ -689,7 +704,7 @@ RoundHalfToEvenInplaceTester = makeBroadcastTester(op = inplace.round_half_to_ev
 #numpy.vectorize don't handle correctly empty ndarray.
 #see in their file numpy/lib/function_base.py in class vectorize.__call__
 #This happen in float32 mode.
-RoundHalfAwayFromZeroTester = makeBroadcastTester(op = round_half_away_from_zero,
+RoundHalfAwayFromZeroTester = makeBroadcastTester(op = T.round_half_away_from_zero,
                                                   expected = theano.scalar.basic.round_half_away_from_zero_vec,
                                                   good = _good_broadcast_unary_normal_float_no_empty_no_complex)
                                                   #_good_broadcast_unary_normal_float)
 RoundHalfAwayFromZeroInplaceTester = makeBroadcastTester(op = inplace.round_half_away_from_zero_inplace,

@@ -697,7 +712,7 @@ RoundHalfAwayFromZeroInplaceTester = makeBroadcastTester(op = inplace.round_half
                                                          good = _good_broadcast_unary_normal_float_no_empty_no_complex,
                                                          inplace = True)

-SqrTester = makeBroadcastTester(op = sqr,
+SqrTester = makeBroadcastTester(op = T.sqr,
                                 expected = numpy.square,
                                 good = _good_broadcast_unary_normal,
                                 grad = _grad_broadcast_unary_normal)

@@ -707,7 +722,7 @@ SqrInplaceTester = makeBroadcastTester(op = inplace.sqr_inplace,
                                        grad = _grad_broadcast_unary_normal,
                                        inplace = True)

-ExpTester = makeBroadcastTester(op = exp,
+ExpTester = makeBroadcastTester(op = T.exp,
                                 expected = numpy.exp,
                                 good = _good_broadcast_unary_normal,
                                 grad = _grad_broadcast_unary_normal)

@@ -729,7 +744,7 @@ _grad_broadcast_unary_positive = dict(normal = (rand_ranged(0.001, 5, (2, 3)),),
                                       #empty = (numpy.asarray([]),),
                                       )

-LogTester = makeBroadcastTester(op = log,
+LogTester = makeBroadcastTester(op = T.log,
                                 expected = numpy.log,
                                 good = _good_broadcast_unary_positive,
                                 grad = _grad_broadcast_unary_positive)

@@ -739,7 +754,7 @@ LogInplaceTester = makeBroadcastTester(op = inplace.log_inplace,
                                        grad = _grad_broadcast_unary_positive,
                                        inplace = True)

-Log2Tester = makeBroadcastTester(op = log2,
+Log2Tester = makeBroadcastTester(op = T.log2,
                                  expected = numpy.log2,
                                  good = _good_broadcast_unary_positive,
                                  grad = _grad_broadcast_unary_positive)

@@ -749,7 +764,7 @@ Log2InplaceTester = makeBroadcastTester(op = inplace.log2_inplace,
                                         grad = _grad_broadcast_unary_positive,
                                         inplace = True)

-Log10Tester = makeBroadcastTester(op = log10,
+Log10Tester = makeBroadcastTester(op = T.log10,
                                   expected = numpy.log10,
                                   good = _good_broadcast_unary_positive,
                                   grad = _grad_broadcast_unary_positive)

@@ -759,7 +774,7 @@ Log10InplaceTester = makeBroadcastTester(op = inplace.log10_inplace,
                                          grad = _grad_broadcast_unary_positive,
                                          inplace = True)

-Log1pTester = makeBroadcastTester(op = log1p,
+Log1pTester = makeBroadcastTester(op = T.log1p,
                                   expected = numpy.log1p,
                                   good = _good_broadcast_unary_positive,
                                   grad = _grad_broadcast_unary_positive)

@@ -770,7 +785,7 @@ Log1pInplaceTester = makeBroadcastTester(op = inplace.log1p_inplace,
                                          inplace = True)

-SqrtTester = makeBroadcastTester(op = sqrt,
+SqrtTester = makeBroadcastTester(op = T.sqrt,
                                  expected = numpy.sqrt,
                                  good = _good_broadcast_unary_positive,
                                  grad = _grad_broadcast_unary_positive)

@@ -803,7 +818,7 @@ _grad_broadcast_unary_arccos = dict(normal = (rand_ranged(-1.+1e-7, 1-1e-7, (2,
                                     )

-SinTester = makeBroadcastTester(op = sin,
+SinTester = makeBroadcastTester(op = T.sin,
                                 expected = numpy.sin,
                                 good = _good_broadcast_unary_wide,
                                 grad = _grad_broadcast_unary_wide)

@@ -813,7 +828,7 @@ SinInplaceTester = makeBroadcastTester(op = inplace.sin_inplace,
                                        grad = _grad_broadcast_unary_wide,
                                        inplace = True)

-CosTester = makeBroadcastTester(op = cos,
+CosTester = makeBroadcastTester(op = T.cos,
                                 expected = numpy.cos,
                                 good = _good_broadcast_unary_wide,
                                 grad = _grad_broadcast_unary_wide)

@@ -822,7 +837,7 @@ CosInplaceTester = makeBroadcastTester(op = inplace.cos_inplace,
                                        good = _good_broadcast_unary_wide,
                                        grad = _grad_broadcast_unary_wide,
                                        inplace = True)

-ArccosTester = makeBroadcastTester(op = arccos,
+ArccosTester = makeBroadcastTester(op = T.arccos,
                                    expected = numpy.arccos,
                                    good = _good_broadcast_unary_arccos,
                                    grad = _grad_broadcast_unary_arccos)

@@ -837,7 +852,7 @@ if config.floatX=='float32':
     #We raise the relative tolerence for the grad as their is error in float32
     #This is probably caused by our way of computing the gradient error.
     tan_grad_rtol = 0.052
-TanTester = makeBroadcastTester(op = tan,
+TanTester = makeBroadcastTester(op = T.tan,
                                 expected = numpy.tan,
                                 good = dict(normal = (rand_ranged(-3.14, 3.14, (2, 3)),),
                                             shifted = (rand_ranged(3.15, 6.28, (2, 3)),)),

@@ -854,7 +869,7 @@ TanInplaceTester = makeBroadcastTester(op = inplace.tan_inplace,
                                        inplace = True)

-CoshTester = makeBroadcastTester(op = cosh,
+CoshTester = makeBroadcastTester(op = T.cosh,
                                  expected = numpy.cosh,
                                  good = _good_broadcast_unary_normal,
                                  grad = _grad_broadcast_unary_normal)

@@ -864,7 +879,7 @@ CoshInplaceTester = makeBroadcastTester(op = inplace.cosh_inplace,
                                         grad = _grad_broadcast_unary_normal,
                                         inplace = True)

-SinhTester = makeBroadcastTester(op = sinh,
+SinhTester = makeBroadcastTester(op = T.sinh,
                                  expected = numpy.sinh,
                                  good = _good_broadcast_unary_normal,
                                  grad = _grad_broadcast_unary_normal)

@@ -874,7 +889,7 @@ SinhInplaceTester = makeBroadcastTester(op = inplace.sinh_inplace,
                                         grad = _grad_broadcast_unary_normal,
                                         inplace = True)

-TanhTester = makeBroadcastTester(op = tanh,
+TanhTester = makeBroadcastTester(op = T.tanh,
                                  expected = numpy.tanh,
                                  good = _good_broadcast_unary_normal,
                                  grad = _grad_broadcast_unary_normal)

@@ -904,7 +919,7 @@ else:
     expected_erfc = []
     skip_scipy = "scipy is not present"

-ErfTester = makeBroadcastTester(op = erf,
+ErfTester = makeBroadcastTester(op = T.erf,
                                 expected = expected_erf,
                                 good = _good_broadcast_unary_normal,
                                 grad = _grad_broadcast_unary_normal,

@@ -920,7 +935,7 @@ ErfInplaceTester = makeBroadcastTester(op = inplace.erf_inplace,
                                        inplace = True,
                                        skip = skip_scipy)

-ErfcTester = makeBroadcastTester(op = erfc,
+ErfcTester = makeBroadcastTester(op = T.erfc,
                                  expected = expected_erfc,
                                  good = _good_broadcast_unary_normal_no_int_no_complex,
                                  grad = _grad_broadcast_unary_normal,

@@ -936,12 +951,12 @@ ErfcInplaceTester = makeBroadcastTester(op = inplace.erfc_inplace,
                                         inplace = True,
                                         skip = skip_scipy)

-ZerosLikeTester = makeBroadcastTester(op = zeros_like,
+ZerosLikeTester = makeBroadcastTester(op = T.zeros_like,
                                       expected = numpy.zeros_like,
                                       good = _good_broadcast_unary_normal,
                                       grad = _grad_broadcast_unary_normal)

-OnesLikeTester = makeBroadcastTester(op = ones_like,
+OnesLikeTester = makeBroadcastTester(op = T.ones_like,
                                      expected = numpy.ones_like,
                                      good = _good_broadcast_unary_normal,
                                      grad = _grad_broadcast_unary_normal)

@@ -1082,16 +1097,16 @@ Alloc13GradTester = makeBroadcastTester(
 def test_eye():
     def check(dtype, N, M_=None, k=0):
-        # Theano does not accept None as a tensor.
+        # Theano does not accept None as a basic.
         # So we must use a real value.
         M = M_
         # Currently DebugMode does not support None as inputs even if this is
         # allowed.
         if M is None and theano.config.mode in ['DebugMode', 'DEBUG_MODE']:
             M = N
-        N_symb = tensor.iscalar()
-        M_symb = tensor.iscalar()
-        k_symb = tensor.iscalar()
+        N_symb = basic.iscalar()
+        M_symb = basic.iscalar()
+        k_symb = basic.iscalar()
         f = function([N_symb, M_symb, k_symb],
                      eye(N_symb, M_symb, k_symb, dtype=dtype))
         result = f(N, M, k)

@@ -1115,7 +1130,7 @@ def test_eye():
 def test_identity():
     def check(dtype):
         obj = rand_of_dtype((2,), dtype)
-        sym = tensor.vector(dtype=dtype)
+        sym = basic.vector(dtype=dtype)
         f = function([sym], tensor_copy(sym))
         assert numpy.all(obj == f(obj))
         assert obj.dtype == f(obj).dtype

@@ -1137,16 +1152,16 @@ class CastTester(unittest.TestCase):
                      (rand_of_dtype((2,), dtype), dtype))
                     for dtype in ALL_DTYPES])
         for testname, (obj, dtype) in good:
-            inp = tensor.vector(dtype=obj.dtype)
-            out = tensor.cast(inp, dtype=dtype)
+            inp = basic.vector(dtype=obj.dtype)
+            out = basic.cast(inp, dtype=dtype)
             f = function([inp], out)
             assert f(obj).dtype == numpy.dtype(dtype)

     def test_cast_from_real_to_complex(self):
         for real_dtype in REAL_DTYPES:
             for complex_dtype in COMPLEX_DTYPES:
-                inp = tensor.vector(dtype=real_dtype)
-                out = tensor.cast(inp, dtype=complex_dtype)
+                inp = basic.vector(dtype=real_dtype)
+                out = basic.cast(inp, dtype=complex_dtype)
                 f = function([inp], out)
                 obj = rand_of_dtype((2, ), real_dtype)
                 assert f(obj).dtype == numpy.dtype(complex_dtype)

@@ -1154,8 +1169,8 @@ class CastTester(unittest.TestCase):
     def test_cast_from_complex_to_real_raises_error(self):
         for real_dtype in REAL_DTYPES:
             for complex_dtype in COMPLEX_DTYPES:
-                inp = tensor.vector(dtype=real_dtype)
-                self.assertRaises(TypeError, tensor.cast(inp, dtype=complex_dtype))
+                inp = basic.vector(dtype=real_dtype)
+                self.assertRaises(TypeError, basic.cast(inp, dtype=complex_dtype))

 ClipTester = makeTester(name='ClipTester',
                         op=clip,

@@ -1189,9 +1204,9 @@ ClipTester = makeTester(name='ClipTester',
 class T_Clip(unittest.TestCase):
     def test_complex_value(self):
         for dtype in ['complex64', 'complex128']:
-            a = tensor.vector(dtype=dtype)
-            b = tensor.scalar()
-            c = tensor.scalar()
+            a = basic.vector(dtype=dtype)
+            b = basic.scalar()
+            c = basic.scalar()
             self.assertRaises(TypeError, clip, a, b, c)

 #TODO: consider moving this function / functionality to gradient.py

@@ -1289,7 +1304,7 @@ def test_nan_inf_constant_signature():
             assert (x.signature() == y.signature()) == (i == j)

     # Also test that nan !=0 and nan != nan.
-    x = tensor.scalar()
+    x = basic.scalar()

     mode = get_default_mode()
     if isinstance(mode, theano.compile.debugmode.DebugMode):
         # Disable the check preventing usage of NaN / Inf values.

@@ -1818,10 +1833,10 @@ class T_subtensor(unittest.TestCase):
     This is build in a way that allow to reuse it to test the equivalent gpu op.
     """
     def __init__(self, name, shared=_shared,
-                 sub=theano.tensor.basic.Subtensor,
-                 inc_sub=theano.tensor.basic.IncSubtensor,
-                 adv_sub1=theano.tensor.basic.AdvancedSubtensor1,
-                 adv_incsub1=theano.tensor.basic.AdvancedIncSubtensor1,
+                 sub=basic.Subtensor,
+                 inc_sub=basic.IncSubtensor,
+                 adv_sub1=basic.AdvancedSubtensor1,
+                 adv_incsub1=basic.AdvancedIncSubtensor1,
                  mode=None,
                  dtype=theano.config.floatX,
                  ignore_topo=(theano.compile.function_module.DeepCopyOp)):
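As its docstring notes, T_subtensor is built so the same tests can be reused against the equivalent GPU ops: the op classes are injected through the constructor, and the hunk above only shortens their default spellings to the already-imported `basic` module. A minimal sketch of that reuse pattern, with hypothetical GPU class names for illustration:

    import unittest
    from theano.tensor import basic

    class SubtensorChecks(unittest.TestCase):
        # The op classes are constructor arguments so a subclass can swap in
        # another backend's equivalents without rewriting the test bodies.
        def __init__(self, name, sub=basic.Subtensor,
                     adv_sub1=basic.AdvancedSubtensor1):
            super(SubtensorChecks, self).__init__(name)
            self.sub = sub
            self.adv_sub1 = adv_sub1

    # Hypothetical GPU variant: only the injected classes change.
    # class GpuSubtensorChecks(SubtensorChecks):
    #     def __init__(self, name):
    #         super(GpuSubtensorChecks, self).__init__(
    #             name, sub=GpuSubtensor, adv_sub1=GpuAdvancedSubtensor1)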
@@ -2125,7 +2140,7 @@ class T_subtensor(unittest.TestCase):
         self.assertTrue(numpy.allclose(val, good), (val, good))

         # Test reuse of output memory
-        if isinstance(self.adv_sub1, tensor.AdvancedSubtensor1):
+        if isinstance(self.adv_sub1, basic.AdvancedSubtensor1):
             op = self.adv_sub1()
             # When idx is a TensorConstant.
             if hasattr(idx, "data"):

@@ -2164,7 +2179,7 @@ class T_subtensor(unittest.TestCase):
     def test_adv_sub1_broadcast(self):
         ones = numpy.ones((1, 3), dtype=self.dtype)
         n = self.shared(ones * 5, broadcastable=(True, False))
-        idx = tensor.lvector()
+        idx = basic.lvector()
         t = n[idx]
         self.assertTrue(isinstance(t.owner.op, theano.tensor.basic.AdvancedSubtensor1))

@@ -2566,8 +2581,8 @@ class T_Join_and_Split(unittest.TestCase):
     def test_stack_scalar_make_vector(self):
         '''Test that calling stack() on scalars instantiates MakeVector,
         not Join. Test that the floatX dtype stay floatX, not downcasted to int64'''
-        a = tensor.scalar('a')
-        b = tensor.scalar('b')
+        a = basic.scalar('a')
+        b = basic.scalar('b')
         s = stack(a, b, a, b)
         f = function([a, b], s)
         val = f(1, 2)

@@ -2581,8 +2596,8 @@ class T_Join_and_Split(unittest.TestCase):
     def test_stack_scalar_make_vector_dtype(self):
         '''Test that calling stack() on scalars instantiates MakeVector,
         event when the scalar don't have the same dtype.'''
-        a = tensor.iscalar('a')
-        b = tensor.lscalar('b')
+        a = basic.iscalar('a')
+        b = basic.lscalar('b')
         s = stack(a, b, a, b)
         f = function([a, b], s)
         val = f(1, 2)

@@ -2595,8 +2610,8 @@ class T_Join_and_Split(unittest.TestCase):
     def test_stack_scalar_make_vector_constant(self):
         '''Test that calling stack() on scalars instantiates MakeVector,
         event when the scalar are simple int type.'''
-        a = tensor.iscalar('a')
-        b = tensor.lscalar('b')
+        a = basic.iscalar('a')
+        b = basic.lscalar('b')
         #test when the constant is the first element.
         #The first element is used in a special way
         s = stack(10, a, b, numpy.int8(3))

@@ -2860,7 +2875,7 @@ class T_Join_and_Split(unittest.TestCase):
         if theano.config.mode != 'FAST_COMPILE':
             for node in f.maker.env.toposort():
-                assert not isinstance(node.op, tensor.Join)
+                assert not isinstance(node.op, basic.Join)

         # Test dim 1
         z = join(1, x1, x2, x3)

@@ -2870,7 +2885,7 @@ class T_Join_and_Split(unittest.TestCase):
         if theano.config.mode != 'FAST_COMPILE':
             for node in f.maker.env.toposort():
-                assert not isinstance(node.op, tensor.Join)
+                assert not isinstance(node.op, basic.Join)

         # Test hide error
         if theano.config.mode in ['DebugMode', 'DEBUG_MODE', 'FAST_COMPILE']:

@@ -3020,7 +3035,7 @@ class T_add(unittest.TestCase):
 class T_ceil(unittest.TestCase):
     def test_complex(self):
-        self.assertRaises(TypeError, ceil, zvector())
+        self.assertRaises(TypeError, T.ceil, T.zvector())


 class T_exp(unittest.TestCase):
     def test_grad_0(self):

@@ -3653,7 +3668,7 @@ class test_grad(unittest.TestCase):
         """grad: Test passing a single variable param"""
         o = test_grad.O()
         a1 = o.make_node()
-        self.assertTrue(o.gval0 is grad(a1.outputs[0], a1.inputs[0]))
+        self.assertTrue(o.gval0 is T.grad(a1.outputs[0], a1.inputs[0]))

     def test_Nparam(self):
         """grad: Test passing multiple variable params"""
@@ -3667,8 +3682,10 @@ class test_grad(unittest.TestCase):
     def test_grad_keep_type(self):
         """Tests that the theano grad method returns a list if it is passed a list
         and a single variable if it is passed a single variable.
-        pylearn2 depends on theano behaving this way but theano developers have
-        repeatedly changed it """
+        pylearn2 depends on theano behaving this way. This functionality has been
+        added three times and erroneously removed twice. If you do anything that
+        requires changing this test or making it fail you are almost certainly
+        making a common mistake, NOT fixing something. """

         X = T.matrix()
         y = X.sum()
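The expanded docstring above pins down a contract of theano's grad: the gradient mirrors the container type of the `wrt` argument. A minimal sketch of that behaviour (the variable names are illustrative):

    import theano.tensor as T

    X = T.matrix('X')
    y = X.sum()

    g_single = T.grad(y, X)      # wrt a single variable -> a single variable
    g_list = T.grad(y, [X])      # wrt a list -> a list of gradients

    assert not isinstance(g_single, list)
    assert isinstance(g_list, list) and len(g_list) == 1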
@@ -3838,7 +3855,7 @@ class T_reshape(unittest.TestCase):
 def test_make_column_matrix_broadcastable():
     # The goal of the operation made by `b` is to ensure the second dimension
     # of the column matrix is broadcastable.
-    a = dmatrix()
+    a = T.dmatrix()
     b = a.reshape((a.shape[0], )).dimshuffle(0, 'x')
     f = function([a], b)
     assert (f(numpy.zeros((3, 1))) + numpy.ones(2) == numpy.ones((3, 2))).all()

@@ -4774,7 +4791,7 @@ def _test_autocast_numpy():
     assert config.cast_policy == 'numpy'
     # Go through some typical scalar values.
     def ok(z):
-        assert tensor.constant(z).dtype == numpy.asarray(z).dtype
+        assert basic.constant(z).dtype == numpy.asarray(z).dtype
     for x in ([2 ** i for i in xrange(63)] + [0] + [0., 1., 1.1, 1.5]):

@@ -4796,9 +4813,9 @@ def _test_autocast_numpy_floatX():
                 floatX == 'float32' and not hasattr(z, 'dtype')):
             # Special case where we use 'float32' instead of 'float64'.
-            assert tensor.constant(z).dtype == 'float32'
+            assert basic.constant(z).dtype == 'float32'
         else:
-            assert tensor.constant(z).dtype == numpy.asarray(z).dtype
+            assert basic.constant(z).dtype == numpy.asarray(z).dtype
     try:
         # Test with various values of `config.floatX`.
         for floatX in ('float32', 'float64'):

@@ -4837,9 +4854,9 @@ class test_arithmetic_cast(unittest.TestCase):
         # scalar == scalar stored as a 0d array
         # array == 1d array
         # i_scalar == scalar type used internally by Theano
-        theano_scalar = lambda dtype: tensor.scalar(dtype=str(dtype))
+        theano_scalar = lambda dtype: basic.scalar(dtype=str(dtype))
         numpy_scalar = lambda dtype: numpy.array(1, dtype=dtype)
-        theano_array = lambda dtype: tensor.vector(dtype=str(dtype))
+        theano_array = lambda dtype: basic.vector(dtype=str(dtype))
         numpy_array = lambda dtype: numpy.array([1], dtype=dtype)
         theano_i_scalar = lambda dtype: theano.scalar.Scalar(str(dtype))()
         numpy_i_scalar = numpy_scalar

@@ -4860,8 +4877,8 @@ class test_arithmetic_cast(unittest.TestCase):
                         # special way (depending on `config.int_division`).
                         is_int_division = (
                                 op is operator.div and
-                                a_type in tensor.discrete_dtypes and
-                                b_type in tensor.discrete_dtypes)
+                                a_type in basic.discrete_dtypes and
+                                b_type in basic.discrete_dtypes)
                         # We will test all meaningful combinations of
                         # scalar and array operations.
                         for combo in (

@@ -5076,10 +5093,10 @@ def test_mod_compile():
     The c_code generated is not compiling as of 30 June 2010. I fix the compilation in the same commit.
     """
-    x = tensor.vector()
-    y = tensor.vector()
+    x = basic.vector()
+    y = basic.vector()
     shape = x.shape
-    out = tensor.switch(tensor.eq(3 % x.shape[0], 0), y, y[:-1])
+    out = basic.switch(basic.eq(3 % x.shape[0], 0), y, y[:-1])

     f = theano.function([x, y], out)

@@ -5097,7 +5114,7 @@ def test_unalign():
     b[:] = numpy.random.rand(len(b))
     out_numpy = 2 * a + 3 * b

-    av, bv = tensor.vectors('ab')
+    av, bv = basic.vectors('ab')
     f = theano.function([av, bv], 2 * av + 3 * bv)
     f.maker.env.toposort()
     # FAST_COMPILE use the python code that support unaligned data

@@ -5130,28 +5147,28 @@ def test_dimshuffle_duplicate():
 class T_get_constant_value(unittest.TestCase):
     def test_get_constant_value(self):
-        a = tensor.stack(1, 2, 3)
+        a = basic.stack(1, 2, 3)
         assert get_constant_value(a[0]) == 1
         assert get_constant_value(a[1]) == 2
         assert get_constant_value(a[2]) == 3

-        b = tensor.iscalar()
-        a = tensor.stack(b, 2, 3)
+        b = basic.iscalar()
+        a = basic.stack(b, 2, 3)
         self.assertRaises(TypeError, get_constant_value, a[0])
         assert get_constant_value(a[1]) == 2
         assert get_constant_value(a[2]) == 3

         # For now get_constant_value goes through only MakeVector and Join of
         # scalars.
-        v = tensor.ivector()
-        a = tensor.stack(v, 2, 3)
+        v = basic.ivector()
+        a = basic.stack(v, 2, 3)
         self.assertRaises(TypeError, get_constant_value, a[0])
         self.assertRaises(TypeError, get_constant_value, a[1])
         self.assertRaises(TypeError, get_constant_value, a[2])

         # Test the case SubTensor(Shape(v)) when the dimensions
         # is broadcastable.
-        v = tensor.row()
+        v = basic.row()
         assert get_constant_value(v.shape[0]) == 1

     def test_subtensor_of_constant(self):

@@ -5198,17 +5215,17 @@ class test_size(unittest.TestCase):
     """
     def test_matrix(self):
-        x = tensor.matrix()
+        x = basic.matrix()
         y = numpy.zeros((5, 7), dtype=config.floatX)
         assert y.size == function([x], x.size)(y)

     def test_vector(self):
-        x = tensor.vector()
+        x = basic.vector()
         y = numpy.zeros(7, dtype=config.floatX)
         assert y.size == function([x], x.size)(y)

     def test_scalar(self):
-        x = tensor.scalar()
+        x = basic.scalar()
         y = numpy.array(7, dtype=config.floatX)
         assert y.size == function([x], x.size)(y)