testgroup / pytensor · Commits · 21471c14

Merge pull request #3077 from julianser/new_stacktrace_fix

Further work on issue #3018.

Authored Jul 24, 2015 by Pascal Lamblin
Parents: d7d722fa, 1b6e3b78

Showing 5 changed files with 162 additions and 36 deletions:
- theano/gof/fg.py (+8, -6)
- theano/gof/link.py (+9, -7)
- theano/gof/op.py (+9, -6)
- theano/gof/utils.py (+1, -1)
- theano/tensor/opt.py (+135, -16)
theano/gof/fg.py

```diff
@@ -331,15 +331,17 @@ class FunctionGraph(utils.object2):
                     # if there is no path then r isn't really a graph input so we shouldn't be running error
                     # handler code in the first place
                     assert path is not None
-                    tr = getattr(r.tag, 'trace', None)
+                    tr = getattr(r.tag, 'trace', [])
                     detailed_err_msg = ""
-                    if tr:
-                        sio = StringIO()
-                        traceback.print_list(tr, sio)
-                        tr = sio.getvalue()
-                        detailed_err_msg += "\nBacktrace when the variable is created:\n"
-                        detailed_err_msg += str(tr)
+                    if len(tr) > 0:
+                        detailed_err_msg += "\nBacktrace when the variable is created:\n"
+                        # Print separate message for each element in
+                        # the list of batcktraces
+                        sio = StringIO()
+                        for subtr in tr:
+                            traceback.print_list(subtr, sio)
+                        detailed_err_msg += str(sio.getvalue())
                     raise MissingInputError(
                         'A variable that is an input to the graph was '
                         'neither provided as an input to the function '
```
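The rewritten loop is easy to exercise in isolation. This standalone snippet (not part of the commit) pushes two fabricated backtraces through the same `StringIO`/`traceback.print_list` pattern to show what the `MissingInputError` message now contains:

```python
import traceback
from io import StringIO  # the original code uses Theano's Python-2 StringIO compat

# Two fabricated backtraces: lists of (filename, lineno, function, text) tuples.
tr = [
    [('model.py', 10, 'build', 'x = T.vector()')],
    [('opt.py', 99, 'rewrite', 'y = lift(x)')],
]

detailed_err_msg = ""
if len(tr) > 0:
    detailed_err_msg += "\nBacktrace when the variable is created:\n"
    sio = StringIO()
    for subtr in tr:
        traceback.print_list(subtr, sio)  # one formatted block per backtrace
    detailed_err_msg += str(sio.getvalue())
print(detailed_err_msg)
```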
theano/gof/link.py

```diff
@@ -156,14 +156,16 @@ def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
                 "HINT: Use another linker then the c linker to"
                 " have the inputs shapes and strides printed.")

-        # Print node backtrace
-        tr = getattr(node.outputs[0].tag, 'trace', None)
-        if tr:
-            sio = StringIO()
-            traceback.print_list(tr, sio)
-            tr = sio.getvalue()
-            detailed_err_msg += "\nBacktrace when the node is created:\n"
-            detailed_err_msg += str(tr)
+        # Print node backtraces
+        tr = getattr(node.outputs[0].tag, 'trace', [])
+        if len(tr) > 0:
+            detailed_err_msg += "\nBacktrace when the node is created:\n"
+            # Print separate message for each element in the list of batcktraces
+            sio = StringIO()
+            for subtr in tr:
+                traceback.print_list(subtr, sio)
+            detailed_err_msg += str(sio.getvalue())
     else:
         hints.append(
             "HINT: Re-running with most Theano optimization disabled could"
```
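The same two-part pattern appears in fg.py and link.py above and in op.py below: the `getattr` default changes from `None` to `[]`, and the guard from `if tr:` to `if len(tr) > 0:`. The point of the list default is that a missing trace and an empty trace now take the same code path; a tiny illustration with a hypothetical bare `Tag` object:

```python
class Tag(object):
    pass


tag = Tag()  # a tag that was never given a trace

tr = getattr(tag, 'trace', [])  # new style: always a list
assert len(tr) == 0             # safe; len(None) would raise TypeError

tr = getattr(tag, 'trace', None)  # old style: may be None
assert tr is None                 # every consumer had to special-case this
```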
theano/gof/op.py

```diff
@@ -458,14 +458,17 @@ class PureOp(object):
                     detailed_err_msg = (
                         "For compute_test_value, one input test value does not"
                         " have the requested type.\n")
-                    tr = getattr(v.tag, 'trace', None)
-                    if tr:
-                        sio = StringIO()
-                        traceback.print_list(tr, sio)
-                        tr = sio.getvalue()
-                        detailed_err_msg += (
-                            "\nBacktrace when that variable is created:\n")
-                        detailed_err_msg += str(tr)
+                    tr = getattr(v.tag, 'trace', [])
+                    if len(tr) > 0:
+                        detailed_err_msg += (
+                            "\nBacktrace when that variable is created:\n")
+                        # Print separate message for each element in the list
+                        # of batcktraces
+                        sio = StringIO()
+                        for subtr in tr:
+                            traceback.print_list(subtr, sio)
+                        detailed_err_msg += str(sio.getvalue())
                     detailed_err_msg += (
                         "\nThe error when converting the test value to that"
                         " variable type:")
```
theano/gof/utils.py

```diff
@@ -94,7 +94,7 @@ def add_tag_trace(thing, user_line=1):
     # The order is from the oldest to the newest
     if len(tr) > user_line:
         tr = tr[-user_line:]
-    thing.tag.trace = tr
+    thing.tag.trace = [tr]
     return thing
```
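theano/gof/utils.py is the producer side of the format change: `add_tag_trace` now wraps the freshly extracted backtrace in a list. A minimal sketch of the function's shape, assuming the trace comes from `traceback.extract_stack` (the real function also trims Theano-internal frames, which this sketch omits):

```python
import traceback


def add_tag_trace(thing, user_line=1):
    # Sketch of theano.gof.utils.add_tag_trace after this commit.
    tr = traceback.extract_stack()
    # The order is from the oldest to the newest
    if len(tr) > user_line:
        tr = tr[-user_line:]
    # Wrap the single backtrace in a list, matching the new
    # list-of-backtraces format consumed by fg.py, link.py and op.py.
    thing.tag.trace = [tr]
    return thing
```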
theano/tensor/opt.py

```diff
@@ -63,6 +63,54 @@ theano.configparser.AddConfigVar('on_shape_error',
 # Utilities


+def copy_stack_trace(from_var, to_var):
+    """
+    Copies the stack trace from one or more tensor variables to
+    one or more tensor variables.
+
+    :param from_var: tensor variable or list of tensor variables to
+        copy stack traces from.
+    :param to_var: tensor variable or list of tensor variables to
+        copy stack traces to.
+
+    .. note:: The stacktrace is assumed to be of the form of a list of lists
+        of tuples. Each tuple contains the filename, line number, function name
+        and so on. Each list of tuples contains the truples belonging to a
+        particular variable.
+    """
+
+    # Store stack traces from from_var
+    tr = []
+    if type(from_var) is list:
+        # If from_var is a list, store concatenated stack traces
+        if len(from_var) > 0:
+            for v in from_var:
+                if hasattr(v.tag, 'trace') and len(v.tag.trace) > 0:
+                    tr = tr + v.tag.trace
+    else:
+        # If from_var is not a list, it must be a single tensor
+        # variable, so just store that particular stack trace
+        if hasattr(from_var.tag, 'trace'):
+            tr = from_var.tag.trace
+
+    # Copy over stack traces to to_var
+    if type(to_var) is list:
+        # Copy over stack traces from from_var to each variable in
+        # to_var, including the stack_trace of the to_var before
+        for v in to_var:
+            if hasattr(v.tag, 'trace'):
+                v.tag.trace = v.tag.trace + tr
+            else:
+                v.tag.trace = tr
+    else:
+        # Copy over stack traces from from_var to each variable to
+        # to_var, including the stack_trace of the to_var before
+        if hasattr(to_var.tag, 'trace'):
+            to_var.tag.trace = to_var.tag.trace + tr
+        else:
+            to_var.tag.trace = tr
+
+
 def out2in(*local_opts, **kwargs):
     """WRITEME """
     name = (kwargs and kwargs.pop('name', None))
```
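To make the semantics of the new `copy_stack_trace` concrete, the snippet below exercises it with stand-in objects. `Var` and `Tag` are hypothetical classes, not Theano's; the import path assumes a Theano checkout containing this commit:

```python
from theano.tensor.opt import copy_stack_trace  # assumes this commit's Theano


class Tag(object):
    pass


class Var(object):
    """Hypothetical stand-in for a Theano variable with a .tag."""
    def __init__(self, trace=None):
        self.tag = Tag()
        if trace is not None:
            self.tag.trace = trace


old_out = Var(trace=[[('model.py', 3, '<module>', 'y = x * 2')]])
new_out = Var(trace=[[('opt.py', 50, 'local_opt', 'ret = rewrite(y)')]])

copy_stack_trace(old_out, new_out)

# The destination keeps its own backtrace first; the source's is appended.
assert len(new_out.tag.trace) == 2
assert new_out.tag.trace[1] == old_out.tag.trace[0]
```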
```diff
@@ -480,6 +528,7 @@ def local_dimshuffle_lift(node):
                                       op.new_order,
                                       op.inplace)(inp)
             new_inputs.append(apply_local_dimshuffle_lift(new_inp))
+        copy_stack_trace(node.outputs[0], new_inputs)
         ret = inode.op(*new_inputs, **dict(return_list=True))
         return ret
     if inode and isinstance(inode.op, DimShuffle):
@@ -487,6 +536,7 @@ def local_dimshuffle_lift(node):
                               op.new_order]
         inplace = op.inplace and inode.op.inplace
         iinput = inode.inputs[0]
         # remove useless dimshuffle
         if (new_order == list(range(len(new_order))) and
                 len(new_order) == iinput.type.ndim):
@@ -494,7 +544,9 @@ def local_dimshuffle_lift(node):
         else:
             ret = op.__class__(iinput.type.broadcastable, new_order,
                                inplace)(iinput)
-        return [apply_local_dimshuffle_lift(ret)]
+        ret = apply_local_dimshuffle_lift(ret)
+        copy_stack_trace(node.outputs[0], ret)
+        return [ret]


 @register_canonicalize
@@ -519,7 +571,12 @@ def local_lift_transpose_through_dot(node):
         x, y = node.inputs[0].owner.inputs
         if x.ndim == y.ndim == 2:
-            return [T.dot(y.T, x.T)]
+            # Output is dot product of transposed inputs in reverse order
+            ret = [T.dot(y.T, x.T)]
+
+            # Copy over stack trace to output from result of dot-product
+            copy_stack_trace(node.inputs[0], ret)
+            return ret


 @gof.local_optimizer([DimShuffle])
```
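The transpose-through-dot rewrite rests on the matrix identity (x . y)^T = y^T . x^T; a quick numpy check:

```python
import numpy

rng = numpy.random.RandomState(0)
x = rng.rand(3, 4)
y = rng.rand(4, 5)

# The rewrite replaces dot(x, y).T with dot(y.T, x.T); both give shape (5, 3).
assert numpy.allclose(numpy.dot(x, y).T, numpy.dot(y.T, x.T))
```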
```diff
@@ -528,7 +585,9 @@ def dimshuffle_as_view(node):
     if not isinstance(op, DimShuffle) or op.inplace:
         return False
     new_op = op.__class__(op.input_broadcastable, op.new_order, inplace=True)
-    return [new_op(*node.inputs)]
+    v = new_op(*node.inputs)
+    copy_stack_trace(node.outputs[0], v)
+    return [v]

 # Step 60 is the inplace optimization stage.
 compile.optdb.register('dimshuffle_as_view',
@@ -562,6 +621,8 @@ def local_tensor_scalar_tensor(node):
         s = node.inputs[0]
         if s.owner and isinstance(s.owner.op, T.ScalarFromTensor):
             t = s.owner.inputs[0]
+
+            # We don't need to copy over any stack traces here
             return [t]
@@ -574,6 +635,8 @@ def local_scalar_tensor_scalar(node):
         t = node.inputs[0]
         if t.owner and isinstance(t.owner.op, T.TensorFromScalar):
             s = t.owner.inputs[0]
+
+            # We don't need to copy over any stack traces here
             return [s]


 #####################################
@@ -1290,7 +1353,9 @@ def local_fill_to_alloc(node):
         rval = [T.cast(v, node.outputs[0].type.dtype)]
     elif r.type.broadcastable == node.outputs[0].type.broadcastable:
         # we are broadcasting v somehow, but not r
-        rval = [broadcast_like(v, r, node.fgraph, dtype=v.dtype)]
+        o = broadcast_like(v, r, node.fgraph, dtype=v.dtype)
+        copy_stack_trace(node.outputs[0], o)
+        rval = [o]
     else:
         # we are broadcasting both v and r,
         # the output shape must be computed
```
```diff
@@ -1323,6 +1388,7 @@ def local_useless_fill(node):
         r, v = node.inputs
         if v.type == node.outputs[0].type:
             # this is a useless fill, erase it.
+            # also, we don't need to copy over any stack traces here
             return [v]

 compile.optdb['canonicalize'].register('local_useless_fill',
                                        in2out(local_useless_fill),
@@ -1341,6 +1407,7 @@ def local_useless_alloc(node):
     """
     if node.op == T.alloc:
         if node.inputs[0].type == node.outputs[0].type:
+            # We don't need to copy over any stack traces here
             return [node.inputs[0]]
@@ -1353,7 +1420,11 @@ def local_shape_to_shape_i(node):
     if not hasattr(node.fgraph, 'shape_feature'):
         return
     shape_feature = node.fgraph.shape_feature
-    return [shape_feature.make_vector_shape(node.inputs[0])]
+    ret = shape_feature.make_vector_shape(node.inputs[0])
+
+    # We need to copy over stack trace from input to output
+    copy_stack_trace(node.outputs[0], ret)
+    return [ret]


 # TODO: Not sure what type of node we are expecting here
@@ -1411,6 +1482,7 @@ def local_subtensor_make_vector(node):
         return
     if isinstance(idx, (int, numpy.integer)):
+        # We don't need to copy over any stack traces here
         return [x.owner.inputs[idx]]
     elif isinstance(idx, Variable):
         if idx.ndim == 0:
@@ -1420,12 +1492,17 @@ def local_subtensor_make_vector(node):
                 if isinstance(v, numpy.integer):
                     # Python 2.4 wants to index only with Python integers
                     v = int(v)
+                # We don't need to copy over any stack traces here
                 return [x.owner.inputs[v]]
             except NotScalarConstantError:
                 pass
         elif idx.ndim == 1 and isinstance(idx, T.Constant):
             values = list(map(int, list(idx.value)))
-            return [make_vector(*[x.owner.inputs[v] for v in values])]
+            ret = [make_vector(*[x.owner.inputs[v] for v in values])]
+
+            # Copy over stack trace from previous output to new output
+            copy_stack_trace(node.outputs[0], ret)
+            return ret
         else:
             raise TypeError('case not expected')
     elif isinstance(idx, slice):
```
```diff
@@ -1461,22 +1538,33 @@ def local_useless_elemwise(node):
         if node.op.scalar_op == theano.scalar.eq and len(node.inputs) == 2:
             if node.inputs[0] == node.inputs[1]:
                 # it is the same var in the graph. That will always be true
-                return [T.fill(node.inputs[0],
-                               T.constant(1.0,
-                                          dtype=node.outputs[0].type.dtype))]
+                ret = [T.fill(node.inputs[0],
+                              T.constant(1.0,
+                                         dtype=node.outputs[0].type.dtype))]
+
+                # Copy stack trace from input to constant output
+                copy_stack_trace(node.outputs[0], ret)
+                return ret
         elif node.op.scalar_op == theano.scalar.neq and len(node.inputs) == 2:
             if node.inputs[0] == node.inputs[1]:
                 # it is the same var in the graph. That will always be false
-                return [T.fill(node.inputs[0],
-                               T.constant(0.0,
-                                          dtype=node.outputs[0].type.dtype))]
+                ret = [T.fill(node.inputs[0],
+                              T.constant(0.0,
+                                         dtype=node.outputs[0].type.dtype))]
+
+                # Copy stack trace from input to constant output
+                copy_stack_trace(node.outputs[0], ret)
+                return ret
         elif node.op.scalar_op == theano.scalar.mul and len(node.inputs) == 1:
+            # No need to copy over any stack trace
             return [node.inputs[0]]
         elif node.op.scalar_op == theano.scalar.add and len(node.inputs) == 1:
+            # No need to copy over any stack trace
             return [node.inputs[0]]
         elif (node.op.scalar_op == theano.scalar.identity and
                 len(node.inputs) == 1):
+            # No need to copy over any stack trace
             return [node.inputs[0]]
```
```diff
@@ -1491,7 +1579,14 @@ def local_alloc_unary(node):
         x = a.owner.inputs[0]
         shp = a.owner.inputs[1:]
         v = node.op(x)
-        return [T.alloc(T.cast(v, node.outputs[0].dtype), *shp)]
+        copy_stack_trace(node.outputs[0], v)
+        ret = T.alloc(T.cast(v, node.outputs[0].dtype), *shp)
+
+        # Is it really necessary to copy over stack trace here?
+        # after all, T.alloc and T.cast should preserve the stack trace from x,
+        # but perhaps the trace is lost in "v = node.op(x)"?
+        copy_stack_trace(node.outputs[0], ret)
+        return [ret]


 @register_canonicalize
@@ -1514,6 +1609,7 @@ def local_cast_cast(node):
             not isinstance(x.owner.op.scalar_op, scalar.Cast)):
         return
     if node.op.scalar_op.o_type == x.owner.op.scalar_op.o_type:
+        # We don't need to copy over any stack traces here
         return [x]
@@ -1547,6 +1643,8 @@ def local_func_inv(node):
     for inv_pair in inv_pairs:
         if is_inverse_pair(node_op, prev_op, inv_pair):
+            # We don't need to copy stack trace, because the optimization
+            # is trivial and maintains the earlier stack trace
             return x.owner.inputs

     return
@@ -1669,9 +1767,14 @@ def local_remove_useless_assert(node):
             cond.append(c)

     if len(cond) == 0:
+        # We don't need to copy over any stack traces here
         return [node.inputs[0]]
     if len(cond) != len(node.inputs) - 1:
-        return [assert_(node.inputs[0], *cond)]
+        ret = assert_(node.inputs[0], *cond)
+
+        # We copy over stack trace from the output of the original assert
+        copy_stack_trace(node.outputs[0], ret)
+        return [ret]


 @gof.local_optimizer([Assert])
```
```diff
@@ -1685,6 +1788,7 @@ def local_remove_all_assert(node):
     if not isinstance(node.op, Assert):
         return

+    # We don't need to copy over any stack traces here
     return [node.inputs[0]]

 # Disabled by default
 compile.optdb['canonicalize'].register('local_remove_all_assert',
@@ -1819,12 +1923,20 @@ def local_elemwise_alloc_op(ElemwiseOP, AllocOP, DimShuffleOP):
                 # We need to keep the dimshuffle. It could swap axes or
                 # add dimensions anywhere.
-                new_i.append(i.owner.op(alloc_input))
+                r_i = i.owner.op(alloc_input)
+
+                # Copy stack trace from i to new_i
+                copy_stack_trace(i, r_i)
+                new_i.append(r_i)
             else:
                 new_i.append(i)
         new_i[assert_op_idx] = assert_op
-        return node.op(*new_i, return_list=True)
+
+        ret = node.op(*new_i, return_list=True)
+
+        # Copy over stack trace from previous outputs to new outputs.
+        copy_stack_trace(node.outputs, ret)
+        return ret

     return local_elemwise_alloc
```
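`local_elemwise_alloc_op` is notable for passing lists on both sides of `copy_stack_trace` (`node.outputs` and `ret` are both lists). Using the same kind of hypothetical stand-ins as before, the list-to-list behaviour is: the from-side traces are concatenated, then handed to each to-side variable:

```python
from theano.tensor.opt import copy_stack_trace  # assumes this commit's Theano


class Tag(object):
    pass


class Var(object):
    """Hypothetical stand-in for a Theano variable with a .tag."""
    def __init__(self, trace=None):
        self.tag = Tag()
        if trace is not None:
            self.tag.trace = trace


a = Var(trace=[[('f.py', 1, 'f', 'a = ...')]])
b = Var(trace=[[('g.py', 2, 'g', 'b = ...')]])
out = Var()  # no trace attribute yet

copy_stack_trace([a, b], [out])

# out receives the concatenation of a's and b's backtraces.
assert out.tag.trace == a.tag.trace + b.tag.trace
```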
```diff
@@ -1857,7 +1969,7 @@ theano.configparser.AddConfigVar(
     theano.configparser.BoolParam(True),
     in_c_key=False)

-#######################
+############################
 # Constant Canonicalization
 ############################
@@ -4974,7 +5086,11 @@ def constant_folding(node):
             constant = output.type.Constant
         except AttributeError:
             constant = Constant
-        rval.append(constant(output.type, storage_map[output][0]))
+
+        v = constant(output.type, storage_map[output][0])
+        copy_stack_trace(output, v)
+        rval.append(v)

     return rval
@@ -5854,7 +5970,10 @@ def local_add_mul_fusion(node):
                 isinstance(inp.owner.op.scalar_op, s_op)):
             l = list(node.inputs)
             l.remove(inp)
-            return [node.op(*(l + inp.owner.inputs))]
+            output_node = node.op(*(l + inp.owner.inputs))
+
+            copy_stack_trace(node.outputs[0], output_node)
+            return [output_node]

 if config.tensor.local_elemwise_fusion:
     _logger.debug("enabling optimization fusion elemwise in fast_run")
```
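An end-to-end way to observe the change (a sketch against the Theano 0.7-era API from memory; the exact attribute path to the optimized graph may differ between versions): compile a trivial function and inspect the trace tag on the optimized output, which after this commit holds a list of backtraces rather than a single one.

```python
import theano
import theano.tensor as T

x = T.vector('x')
f = theano.function([x], x * 2)

out = f.maker.fgraph.outputs[0]
# With this commit, a present tag.trace is a list of backtraces.
print(getattr(out.tag, 'trace', []))
```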