testgroup / pytensor

Commit 461b7e02
Authored Nov 13, 2014 by Frederic
Make opt local_adv_sub1_adv_inc_sub1 add assert to don't loose error and document how to get rid of them
Parent: ad4c3a84

Showing 3 changed files with 67 additions and 19 deletions (+67 -19):

    doc/faq.txt                        +21  -1
    theano/tensor/opt.py               +22  -14
    theano/tensor/tests/test_opt.py    +24  -4
doc/faq.txt

@@ -72,13 +72,33 @@ and use directly the optimized graph from the pickled file.
 Faster Theano function
 ----------------------
 
-You can set the Theano flag `allow_gc` to `False` to get a speed-up by using
+You can set the Theano flag ``allow_gc`` to ``False`` to get a speed-up by using
 more memory. By default, Theano frees intermediate results when we don't need
 them anymore. Doing so prevents us from reusing this memory. So disabling the
 garbage collection will keep all intermediate results' memory space to allow to
 reuse them during the next call to the same Theano function, if they are of the
 correct shape. The shape could change if the shapes of the inputs change.
 
+.. _unsafe_optimization:
+
+Unsafe optimization
+===================
+
+Some Theano optimizations assume that the user's inputs are valid. This means
+that if the user provides invalid values (such as incompatible shapes or
+out-of-bound indexing values) and such an optimization is applied, the user's
+error gets lost. Most of the time the inputs are indeed valid, so applying
+the optimization is good, but losing the error is bad. The newest
+optimizations in Theano that make this kind of assumption add assertions to
+the graph so that the user's error message is kept, but computing those
+assertions can take some time. If you are sure everything in your graph is
+valid and want the fastest possible Theano, you can enable an optimization
+that removes those assertions:
+
+``optimizer_including=local_remove_all_assert``
+
 Faster Small Theano function
 ----------------------------
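Taken together, the two knobs documented in the new FAQ text can be exercised with a few lines of user code. The following is a minimal sketch, not part of the commit: the placeholder graph (x * 2) and the script name are made up, while allow_gc, optimizer_including and local_remove_all_assert are the names used in the FAQ text and tests above.

    # Environment-variable form (assumed typical THEANO_FLAGS usage):
    #   THEANO_FLAGS='allow_gc=False,optimizer_including=local_remove_all_assert' python my_script.py

    # Programmatic form, before compiling any function:
    import theano
    import theano.tensor as T

    theano.config.allow_gc = False  # keep intermediate buffers alive between calls

    x = T.vector("x")
    # Build a mode that also runs local_remove_all_assert, stripping the Assert
    # nodes that error-preserving rewrites (like the one in this commit) insert.
    mode = theano.compile.mode.get_default_mode().including("local_remove_all_assert")
    f = theano.function([x], x * 2, mode=mode)  # x * 2 is just a placeholder graph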
theano/tensor/opt.py
@@ -2502,39 +2502,47 @@ def local_setsubtensor_of_constants(node):
         return False
 
 
-@register_canonicalize("rm_idx_err", "rm_shape_err")
-@register_stabilize("rm_idx_err", "rm_shape_err")
+@register_canonicalize
+@register_stabilize
 @gof.local_optimizer([AdvancedSubtensor1])
 def local_adv_sub1_adv_inc_sub1(node):
-    """
-    Optimize the possible AdvSub1(AdvIncSub1(...), ...)
+    """Optimize the possible AdvSub1(AdvIncSub1(...), ...)
 
     AdvancedSubtensor1(AdvancedIncSubtensor1(0s, y, idx), idx) -> y
     AdvancedSubtensor1(AdvancedSetSubtensor1(x, y, idx), idx) -> y
 
-    :note: This opt can remove index errors. We should assert that idx
-        values are in range and that x and y have compatible shapes.
+    :note: This opt adds an AssertOp. Otherwise, it would remove shape and
+        index errors. If you want to get rid of the asserts, see the
+        :ref:`unsafe_optimization` section.
 
-    :todo: add AssertOp to do not remove shape error.
     """
     if not isinstance(node.op, AdvancedSubtensor1):
         return
-    x = node.inputs[0]
-    if (not x.owner or not isinstance(x.owner.op, AdvancedIncSubtensor1)):
+    inp = node.inputs[0]
+    if (not inp.owner or
+            not isinstance(inp.owner.op, AdvancedIncSubtensor1)):
         return
     idx = node.inputs[1]
-    idx2 = x.owner.inputs[2]
-    y = x.owner.inputs[1]
+    idx2 = inp.owner.inputs[2]
+    x = inp.owner.inputs[0]
+    y = inp.owner.inputs[1]
     if idx is not idx2:
         return
-    if (not x.owner.op.set_instead_of_inc and
-            T.extract_constant(x.owner.inputs[0]) != 0):
+    if (not inp.owner.op.set_instead_of_inc and
+            T.extract_constant(x) != 0):
         return
+    cond = [T.all(T.and_(T.lt(idx, x.shape[0]),
+                         T.ge(idx, -x.shape[0])))]
+    if not node.fgraph.shape_feature.same_shape(idx, y, 0, 0):
+        cond.append(T.eq(idx.shape[0], y.shape[0]))
+    y = Assert("Bad indexing or shapes in a AdvancedIncSubtensor1 "
+               "that was optimized away")(y, *cond)
+
     if y.dtype == node.outputs[0].dtype:
         return [y]
     # It is possible that y is upcast or downcast to x.dtype.
     # In all case, as we set or add with 0, we can just cast y.
-    return [T.cast(y, x.dtype)]
+    return [T.cast(y, node.outputs[0].dtype)]
 
 
 ####################
 # Rebroadcast opts #
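For orientation, the pattern this rewrite targets, and the effect of the new Assert, can be reproduced from user code. The sketch below simply mirrors the new tests further down; the explicit mode, the variable names and the concrete index values are illustrative only.

    import numpy
    import theano
    import theano.tensor as tensor

    x = tensor.matrix("x")
    y = tensor.matrix("y")
    idx = tensor.ivector("idx")

    # set_subtensor(x[idx], y)[idx] builds AdvancedSubtensor1(AdvancedIncSubtensor1(...)),
    # which local_adv_sub1_adv_inc_sub1 replaces by y, now wrapped in an Assert.
    out = tensor.set_subtensor(x[idx], y)[idx]
    mode = theano.compile.mode.get_default_mode().including("local_adv_sub1_adv_inc_sub1")
    f = theano.function([x, y, idx], out, mode=mode)

    dx = numpy.random.rand(4, 5).astype(theano.config.floatX)
    dy = numpy.random.rand(2, 5).astype(theano.config.floatX)
    didx = numpy.asarray([1, 3], "int32")

    assert numpy.allclose(f(dx, dy, didx), dy)  # the copy of x was optimized away
    f(dx, dy, [4, 3])  # index 4 is out of range: the new Assert raises AssertionError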
theano/tensor/tests/test_opt.py
@@ -2421,7 +2421,8 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
     def setUp(self):
         utt.seed_rng()
         mode = theano.compile.mode.get_default_mode()
-        self.mode = mode.including("local_adv_sub1_adv_inc_sub1")
+        self.mode = mode.including("local_adv_sub1_adv_inc_sub1").excluding("fusion")
+        self.mode_no_assert = self.mode.including("local_remove_all_assert")
 
     def test0(self):
         for dtype1, dtype2 in [("float32", "float32"),
@@ -2439,7 +2440,7 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
         # set_subtensor
         inc = tensor.set_subtensor(x[idx], y)
         o = inc[idx]
-        f = theano.function([x, y, idx], o, self.mode)
+        f = theano.function([x, y, idx], o, self.mode_no_assert)
 
         res = f(dx, dy, didx)
         assert numpy.allclose(dy, res)
@@ -2453,7 +2454,7 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
         # inc_subtensor(data[idx], y)
         inc = tensor.inc_subtensor(x[idx], y)
         o = inc[idx]
-        f = theano.function([x, y, idx], o, self.mode)
+        f = theano.function([x, y, idx], o, self.mode_no_assert)
 
         res = f(dx, dy, didx)
         assert numpy.allclose((dx[didx] + dy), res)
@@ -2463,7 +2464,7 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
         # inc_subtensor(0[idx], y)
         inc = tensor.inc_subtensor(x.zeros_like()[idx], y)
         o = inc[idx]
-        f = theano.function([x, y, idx], o, self.mode)
+        f = theano.function([x, y, idx], o, self.mode_no_assert)
 
         res = f(dx, dy, didx)
         assert numpy.allclose(dy, res)
@@ -2474,6 +2475,25 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
         else:
             assert len(topo) > 2
 
+    def test_assert(self):
+        x = tensor.matrix("x")
+        y = tensor.matrix("y")
+        idx = tensor.ivector()
+
+        dx = numpy.random.rand(4, 5).astype(config.floatX)
+        dy = numpy.random.rand(2, 5).astype(config.floatX)
+        didx = numpy.asarray([1, 3], "int32")
+
+        # set_subtensor
+        inc = tensor.set_subtensor(x[idx], y)
+        o = inc[idx]
+        f = theano.function([x, y, idx], o, self.mode)
+        # test wrong index
+        for i in [dx.shape[0], -dx.shape[0] - 1]:
+            self.assertRaises(AssertionError, f, dx, dy, [i, i])
+        # test wrong shape
+        self.assertRaises(AssertionError, f, dx, dy, [1])
+
 
 class Test_alloc_zero(unittest.TestCase):
     def setUp(self):
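The split between self.mode and self.mode_no_assert in setUp also suggests a quick way to see the trade-off in a compiled graph. This is a sketch, not part of the commit; checking ops by class name is only an illustration of what the tests verify through len(topo).

    import theano
    import theano.tensor as tensor

    x = tensor.matrix("x")
    y = tensor.matrix("y")
    idx = tensor.ivector("idx")
    out = tensor.set_subtensor(x[idx], y)[idx]

    base = theano.compile.mode.get_default_mode().including("local_adv_sub1_adv_inc_sub1")
    f_safe = theano.function([x, y, idx], out, mode=base)
    f_fast = theano.function([x, y, idx], out,
                             mode=base.including("local_remove_all_assert"))

    # The safe graph should still contain an Assert node guarding idx and the
    # shapes; the fast graph should not, so invalid inputs go undetected.
    for f in (f_safe, f_fast):
        print([type(n.op).__name__ for n in f.maker.fgraph.toposort()])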