testgroup / pytensor · Commit 93b50c70
authored Feb 05, 2016 by Vincent Michalski

added unittests checking for stack trace for nnet.py optimizations

Parent: 8ed70923
Showing 1 changed file with 21 additions and 1 deletion.

theano/tensor/nnet/tests/test_nnet.py  (+21 −1)
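All of the added assertions exercise the same pattern: after Theano's graph optimizer has rewritten an expression, the output variable of the compiled function should still carry a `trace` attribute on its tag (the stack trace recorded when the variable was created), so failures in optimized graphs can be traced back to user code. A minimal standalone sketch of that pattern, assuming a working Theano install (the variable names below are illustrative and not part of the commit):

    import theano
    import theano.tensor as T

    x = T.matrix('x')
    logsm = T.log(T.nnet.softmax(x))       # the log-softmax optimization should rewrite this
    f = theano.function([x], logsm)

    out_var = f.maker.fgraph.outputs[0]    # output variable of the optimized graph
    assert hasattr(out_var.tag, 'trace')   # the rewrite should have kept the stack trace
    print(type(out_var.owner.op))          # theano.tensor.nnet.nnet.LogSoftmax, per the diff below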
@@ -139,6 +139,14 @@ class T_SoftmaxWithBias(utt.InferShapeTester):
         f([0, 1, 0])
         # print f.maker.fgraph.toposort()
 
+    def test_softmax_with_bias_trace(self):
+        a = theano.shared(numpy.random.randn(3).astype(config.floatX))
+        b = theano.shared(numpy.float32(numpy.random.randn()))
+        sm = T.softmax(a + b)
+        f = theano.function([], sm)
+        self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
+
     def test_infer_shape(self):
         admat = matrix()
         advec = vector()

@@ -242,6 +250,7 @@ class T_LogSoftmax(utt.InferShapeTester):
         sm = tensor.nnet.softmax(x)
         logsm = tensor.log(sm)
         f = theano.function([x], logsm)
+        self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
         assert isinstance(f.maker.fgraph.outputs[0].owner.op,
                           theano.tensor.nnet.nnet.LogSoftmax)

@@ -265,6 +274,8 @@ class T_LogSoftmax(utt.InferShapeTester):
             return logsm
         # We set step to 0.1 because for big values we need a big epsilon
         utt.verify_grad(myfunc, [a], eps=0.1, mode=m)
+        f = theano.function([], myfunc(a))
+        self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
 
 
 class T_SoftmaxGrad(utt.InferShapeTester):

@@ -642,6 +653,7 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
         fgraph = gof.FunctionGraph(
             [x, one_of_n],
             [g_x])
+        self.assertTrue(hasattr(fgraph.outputs[0].tag, 'trace'))
 
         # print 'BEFORE'
         # for node in fgraph.toposort():

@@ -737,6 +749,7 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
         for expr in expressions:
             # Verify the optimizer worked on the expressions
             f = theano.function([x, y], expr, mode=mode)
+            self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
             if verbose:
                 theano.printing.debugprint(f)
             try:

@@ -752,6 +765,7 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
 
             # Also verify the gradient wrt x
             g = theano.function([x, y], T.grad(expr, x), mode=mode)
+            self.assertTrue(hasattr(g.maker.fgraph.outputs[0].tag, 'trace'))
             if verbose:
                 theano.printing.debugprint(g)
             try:

@@ -774,6 +788,7 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
 
         for expr in bias_expressions:
             f = theano.function([x, b, y], expr, mode=mode)
+            self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
             if verbose:
                 theano.printing.debugprint(f)
             try:

@@ -785,6 +800,7 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
                 theano.printing.debugprint(f)
                 raise
             g = theano.function([x, b, y], T.grad(expr, x), mode=mode)
+            self.assertTrue(hasattr(g.maker.fgraph.outputs[0].tag, 'trace'))
             if verbose:
                 theano.printing.debugprint(g)
             try:

@@ -807,6 +823,7 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
 
         for expr in mean_expressions:
             f = theano.function([x, y], expr, mode=mode)
+            self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
             if verbose:
                 theano.printing.debugprint(f)
             try:

@@ -821,6 +838,7 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
                 raise
 
             g = theano.function([x, y], T.grad(expr, x), mode=mode)
+            self.assertTrue(hasattr(g.maker.fgraph.outputs[0].tag, 'trace'))
             if verbose:
                 theano.printing.debugprint(g)
             try:

@@ -844,6 +862,7 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
 
         for expr in mean_bias_expressions:
             f = theano.function([x, b, y], expr, mode=mode)
+            self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
             if verbose:
                 theano.printing.debugprint(f)
             try:

@@ -856,6 +875,7 @@ class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
                 theano.printing.debugprint(f)
                 raise
             g = theano.function([x, b, y], T.grad(expr, x), mode=mode)
+            self.assertTrue(hasattr(g.maker.fgraph.outputs[0].tag, 'trace'))
             if verbose:
                 theano.printing.debugprint(g)
             try:

@@ -1269,6 +1289,7 @@ def test_argmax_pushdown():
     fgraph = gof.FunctionGraph(
         [x],
         [out])
+    assert hasattr(fgraph.outputs[0].tag, 'trace')
 
     backup = config.warn.argmax_pushdown_bug
     config.warn.argmax_pushdown_bug = False

@@ -1621,7 +1642,6 @@ def test_h_softmax():
 
     #############
     x_mat = numpy.random.normal(size=(batch_size, input_size)).astype(floatX)
     y_mat = numpy.random.randint(0, output_size, batch_size).astype('int32')
-
     tg_output = fun_output_tg(x_mat, y_mat)
     all_outputs = fun_output(x_mat)
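Each new check follows the conventions already used in the surrounding file: self.assertTrue(...) inside the utt.InferShapeTester test classes, and a bare assert in the module-level test_argmax_pushdown. To run just this file, one likely invocation (assuming the nose runner that the Theano test suite of this era relied on is installed) is:

    nosetests theano/tensor/nnet/tests/test_nnet.py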