testgroup / pytensor · Commits

Commit ce1eeab9, authored Apr 09, 2014 by abergeron

Merge pull request #1797 from nouiz/fast_opt

    Make the slow scan test fast!

Parents: 4a77221b, be6c8bc0

Showing 4 changed files, with 235 additions and 219 deletions (+235 −219).
Changed files:

    theano/compile/function_module.py    +2    −0
    theano/tensor/basic.py               +167  −160
    theano/tensor/opt.py                 +9    −5
    theano/tensor/tests/test_opt.py      +57   −54
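At a glance, the diff makes two mechanical changes to `get_scalar_constant_value` in theano/tensor/basic.py: the recursive walk through wrapper ops (`DimShuffle`, `Alloc`, `Rebroadcast`, ...) becomes a `while True:` loop, and a new `elemwise=True` flag lets hot callers in the optimizer refuse to dig into `Elemwise` subgraphs. A minimal sketch of that pattern (plain Python, not the Theano code itself; `Node`, `WRAPPER_OPS` and the attribute names are hypothetical stand-ins):

```python
from collections import namedtuple

# Hypothetical stand-in for a Theano graph node: op name, inputs, value.
Node = namedtuple('Node', 'op inputs value')
WRAPPER_OPS = ('DimShuffle', 'Alloc', 'Rebroadcast')

def constant_value_recursive(node):
    # One Python stack frame per wrapper op (the old shape of the code).
    if node.op in WRAPPER_OPS:
        return constant_value_recursive(node.inputs[0])
    return node.value

def constant_value_iterative(node, elemwise=True):
    # Same walk expressed as a loop (the new shape of the code).
    while True:
        if node.op in WRAPPER_OPS:
            node = node.inputs[0]
            continue
        if node.op == 'Elemwise' and not elemwise:
            # The new flag: refuse to descend into elemwise graphs when the
            # caller only wants plain constants, so the query stays cheap.
            raise ValueError('not a plain constant')
        return node.value

const = Node('Constant', (), 5)
wrapped = Node('DimShuffle', (Node('Alloc', (const,), None),), None)
assert constant_value_iterative(wrapped) == 5
```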
theano/compile/function_module.py  (view file @ ce1eeab9)
```diff
@@ -1077,6 +1077,7 @@ class FunctionMaker(object):
         self.mode = mode
         self.accept_inplace = accept_inplace
         self.function_builder = function_builder
+        self.on_unused_input = on_unused_input
         # Used only for the pickling
         self.required = [(i.value is None) for i in self.inputs]
         self.refeed = [
@@ -1215,6 +1216,7 @@ def _pickle_FunctionMaker(self):
         accept_inplace=self.accept_inplace,
         function_builder=self.function_builder,
         profile=self.profile,
+        on_unused_input=self.on_unused_input,
         )
     return (_constructor_FunctionMaker, (kwargs,))
```
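The two added lines make `on_unused_input` survive pickling: the setting is stored on the maker and fed back through the reconstruction kwargs. A minimal sketch of that reduce-style round trip (`Maker` and `_constructor` are hypothetical names, not the Theano classes):

```python
import pickle

def _constructor(kwargs):
    # Module-level reconstructor, as pickle requires.
    return Maker(**kwargs)

class Maker(object):
    def __init__(self, on_unused_input='raise'):
        self.on_unused_input = on_unused_input  # stored, so it can round-trip

    def __reduce__(self):
        # Mirror of _pickle_FunctionMaker: every setting that should survive
        # unpickling must be put into the kwargs dict here.
        kwargs = dict(on_unused_input=self.on_unused_input)
        return (_constructor, (kwargs,))

m = pickle.loads(pickle.dumps(Maker(on_unused_input='ignore')))
assert m.on_unused_input == 'ignore'
```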
theano/tensor/basic.py  (view file @ ce1eeab9)
```diff
@@ -508,13 +508,31 @@ class EmptyConstantError(NotScalarConstantError):
     """


+def numpy_scalar(data):
+    """ Return a scalar stored in a numpy ndarray, or raise
+    NotScalarConstantError if the numpy ndarray is not a scalar
+    """
+    # handle case where data is numpy.array([])
+    if data.ndim > 0 and (len(data.shape) == 0 or
+                          __builtins__['max'](data.shape) == 0):
+        assert numpy.all(numpy.array([]) == data)
+        raise EmptyConstantError()
+    try:
+        numpy.complex(data)  # works for all numeric scalars
+        return data
+    except Exception:
+        raise NotScalarConstantError(
+            'v.data is non-numeric, non-scalar, or has more than one'
+            ' unique value', data)
+
+
+get_scalar_constant_value_elemwises = (
+    scal.Cast, scal.Switch, scal.NEQ, scal.EQ, scal.LT, scal.GT, scal.LE,
+    scal.GE, scal.Sub, scal.Add, scal.Mod, scal.Mul, scal.IntDiv,
+    scal.TrueDiv)
+
+
-def get_scalar_constant_value(v):
+def get_scalar_constant_value(orig_v, elemwise=True):
     """return the constant scalar(0-D) value underlying variable `v`

     If v is the output of dimshuffles, fills, allocs, rebroadcasts, cast
```

(`numpy_scalar` appears at module level here; its previous definition, nested inside `get_scalar_constant_value`, is removed in the next hunk.)
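For reference, the helper's contract (a minimal illustration in the codebase's Python 2 style, assuming the module-level helper is importable as shown):

```python
import numpy
from theano.tensor.basic import (numpy_scalar, EmptyConstantError,
                                 NotScalarConstantError)

print numpy_scalar(numpy.asarray(5))       # array(5): a true 0-d scalar
try:
    numpy_scalar(numpy.array([]))          # empty array -> EmptyConstantError
except EmptyConstantError:
    pass
try:
    numpy_scalar(numpy.zeros((2, 2)))      # non-scalar -> NotScalarConstantError
except NotScalarConstantError:
    pass
```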
```diff
@@ -523,169 +541,158 @@ def get_scalar_constant_value(v):
     If `v` is not some view of constant scalar data, then raise a
     NotScalarConstantError.

+    :param elemwise: If False, we won't try to go into elemwise.
+                     So this call is faster.
+
     :note: There may be another function similar to this one in the
            code, but I'm not sure where it is.
     """
```

Removed (the old body unwrapped the graph by calling itself recursively):

```python
    if v is None:
        # None is not a scalar (and many uses of this function seem to depend
        # on passing it None)
        raise NotScalarConstantError()

    if isinstance(v, (numpy.integer, int, float)):
        return numpy.asarray(v)

    def numpy_scalar(data):
        """ Return a scalar stored in a numpy ndarray, or raise
        NotScalarConstantError if the numpy ndarray is not a scalar
        """
        # handle case where data is numpy.array([])
        if data.ndim > 0 and (len(data.shape) == 0 or
                              __builtins__['max'](data.shape) == 0):
            assert numpy.all(numpy.array([]) == data)
            raise EmptyConstantError()
        try:
            numpy.complex(data)  # works for all numeric scalars
            return data
        except Exception:
            raise NotScalarConstantError(
                'v.data is non-numeric, non-scalar, or has more than one'
                ' unique value', data)

    if isinstance(v, numpy.ndarray):
        return numpy_scalar(v)

    if isinstance(v, Constant):
        if getattr(v.tag, 'unique_value', None) is not None:
            data = v.tag.unique_value
        else:
            data = v.data
        return numpy_scalar(data)

    if getattr(v, 'owner', None):
        if isinstance(v.owner.op, (Alloc, DimShuffle, Rebroadcast,
                                   compile.ops.OutputGuard,
                                   compile.DeepCopyOp)):
            return get_scalar_constant_value(v.owner.inputs[0])
        elif (isinstance(v.owner.op, theano.compile.ops.Shape_i) and
              isinstance(v.owner.inputs[0], Constant)):
            return v.owner.inputs[0].data.shape[v.owner.op.i]
        # Don't act as the constant_folding optimization here as this
        # fct is used too early in the optimization phase.  This would
        # mess with the stabilization optimization and be too slow.
        # We put all the scalar Ops used by get_canonical_form_slice()
        # to allow it to determine the broadcast pattern correctly.
        elif isinstance(v.owner.op, scal.ScalarOp):
            if isinstance(v.owner.op, scal.Second):
                # We don't need both input to be constant for second
                shape, val = v.owner.inputs
                return get_scalar_constant_value(val)
            if isinstance(v.owner.op, get_scalar_constant_value_elemwises):
                const = [get_scalar_constant_value(i) for i in v.owner.inputs]
                ret = [[None]]
                v.owner.op.perform(v.owner, const, ret)
                return ret[0][0]
        elif isinstance(v.owner.op, Elemwise):
            if isinstance(v.owner.op.scalar_op, scal.Second):
                # We don't need both input to be constant for second
                shape, val = v.owner.inputs
                return get_scalar_constant_value(val)
            elif isinstance(v.owner.op.scalar_op,
                            get_scalar_constant_value_elemwises):
                const = [get_scalar_constant_value(i) for i in v.owner.inputs]
                ret = [[None]]
                v.owner.op.perform(v.owner, const, ret)
                return ret[0][0]
        elif (isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
              v.ndim == 0):
            if isinstance(v.owner.inputs[0], TensorConstant):
                cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs))
                try:
                    return v.owner.inputs[0].data.__getitem__(cdata)
                except IndexError:
                    raise IndexError(
                        str(tuple(v.owner.op.idx_list)) +
                        " is not a valid index into " +
                        str(v.owner.inputs[0].data))

            # The index list 'idx_list' should have length the same
            # shape as the input.
            # TODO: implement the case where we take a scalar in a matrix
            assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim

            # Needed to make better graph in this test in theano/tensor/tests:
            # test_sharedvar.py:test_shared_options.test_specify_shape_partial
            if (v.owner.inputs[0].owner and
                isinstance(v.owner.inputs[0].owner.op, Join) and
                # Ensure the Join is joining only scalar variables (so that
                # the constant value can be found at the same index as the one
                # used in the sub-tensor).
                python_all(var.ndim == 0 for var in
                           v.owner.inputs[0].owner.inputs) and
                len(v.owner.op.idx_list) == 1):

                idx = v.owner.op.idx_list[0]
                if isinstance(idx, gof.Type):
                    idx = get_scalar_constant_value(v.owner.inputs[1])
                # Note the '+ 1' is because the first argument to Join is the
                # axis.
                ret = v.owner.inputs[0].owner.inputs[idx + 1]
                ret = get_scalar_constant_value(ret)
                # join can cast implicitly its input in some case.
                return theano._asarray(ret, dtype=v.type.dtype)

            elif (v.owner.inputs[0].owner and
                  isinstance(v.owner.inputs[0].owner.op,
                             theano.tensor.opt.MakeVector) and
                  # MakeVector normally accept only scalar as input.
                  # We put this check in case there is change in the future
                  python_all(var.ndim == 0 for var in
                             v.owner.inputs[0].owner.inputs) and
                  len(v.owner.op.idx_list) == 1):

                idx = v.owner.op.idx_list[0]
                if isinstance(idx, gof.Type):
                    idx = get_scalar_constant_value(v.owner.inputs[1])
                # Python 2.4 does not support indexing with numpy.integer
                # So we cast it.
                idx = int(idx)
                ret = v.owner.inputs[0].owner.inputs[idx]
                ret = get_scalar_constant_value(ret)
                # MakeVector can cast implicitly its input in some case.
                return theano._asarray(ret, dtype=v.type.dtype)

            # This is needed when we take the grad as the Shape op
            # are not already changed into MakeVector
            owner = v.owner
            leftmost_parent = owner.inputs[0]
            if (leftmost_parent.owner and
                isinstance(leftmost_parent.owner.op, theano.tensor.Shape)):
                op = owner.op
                idx_list = op.idx_list
                idx = idx_list[0]
                if isinstance(idx, gof.Type):
                    idx = get_scalar_constant_value(owner.inputs[1])
                grandparent = leftmost_parent.owner.inputs[0]
                gp_broadcastable = grandparent.type.broadcastable
                ndim = grandparent.type.ndim

                assert ndim == len(gp_broadcastable)

                if not (idx < len(gp_broadcastable)):
                    msg = ("get_scalar_constant_value detected " +
                           "deterministic IndexError: x.shape[%d] " +
                           "when x.ndim=%d.") % (ndim, idx)
                    if config.exception_verbosity == 'high':
                        msg += 'x=%s' % min_informative_str(v)
                    else:
                        msg += 'x=%s' % str(v)
                    raise ValueError(msg)

                if gp_broadcastable[idx]:
                    return numpy.asarray(1)

    raise NotScalarConstantError(v)
```

Added (the new body does the same walk inside a `while True:` loop, replacing the tail-recursive calls with `v = ...; continue`, and only descends into `Elemwise` nodes when `elemwise` is true):

```python
    v = orig_v
    while True:
        if v is None:
            # None is not a scalar (and many uses of this function seem to
            # depend on passing it None)
            raise NotScalarConstantError()

        if isinstance(v, (numpy.integer, int, float)):
            return numpy.asarray(v)

        if isinstance(v, numpy.ndarray):
            return numpy_scalar(v)

        if isinstance(v, Constant):
            if getattr(v.tag, 'unique_value', None) is not None:
                data = v.tag.unique_value
            else:
                data = v.data
            return numpy_scalar(data)

        if getattr(v, 'owner', None):
            if isinstance(v.owner.op, (Alloc, DimShuffle, Rebroadcast,
                                       compile.ops.OutputGuard,
                                       compile.DeepCopyOp)):
                v = v.owner.inputs[0]
                continue
            elif isinstance(v.owner.op, theano.compile.ops.Shape_i):
                if isinstance(v.owner.inputs[0], Constant):
                    return v.owner.inputs[0].data.shape[v.owner.op.i]
            # Don't act as the constant_folding optimization here as this
            # fct is used too early in the optimization phase.  This would
            # mess with the stabilization optimization and be too slow.
            # We put all the scalar Ops used by get_canonical_form_slice()
            # to allow it to determine the broadcast pattern correctly.
            elif isinstance(v.owner.op, scal.ScalarOp):
                if isinstance(v.owner.op, scal.Second):
                    # We don't need both input to be constant for second
                    shape, val = v.owner.inputs
                    v = val
                    continue
                if isinstance(v.owner.op, get_scalar_constant_value_elemwises):
                    const = [get_scalar_constant_value(i)
                             for i in v.owner.inputs]
                    ret = [[None]]
                    v.owner.op.perform(v.owner, const, ret)
                    return ret[0][0]
            elif elemwise and isinstance(v.owner.op, Elemwise):
                if isinstance(v.owner.op.scalar_op, scal.Second):
                    # We don't need both input to be constant for second
                    shape, val = v.owner.inputs
                    v = val
                    continue
                elif isinstance(v.owner.op.scalar_op,
                                get_scalar_constant_value_elemwises):
                    const = [get_scalar_constant_value(i)
                             for i in v.owner.inputs]
                    ret = [[None]]
                    v.owner.op.perform(v.owner, const, ret)
                    return ret[0][0]
            elif (isinstance(v.owner.op, theano.tensor.subtensor.Subtensor)
                  and v.ndim == 0):
                if isinstance(v.owner.inputs[0], TensorConstant):
                    cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs))
                    try:
                        return v.owner.inputs[0].data.__getitem__(cdata)
                    except IndexError:
                        raise IndexError(
                            str(tuple(v.owner.op.idx_list)) +
                            " is not a valid index into " +
                            str(v.owner.inputs[0].data))

                # The index list 'idx_list' should have length the same
                # shape as the input.
                # TODO: implement the case where we take a scalar in a matrix
                assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim

                # Needed to make better graph in this test in
                # theano/tensor/tests:
                # test_sharedvar.py:test_shared_options.test_specify_shape_partial
                if (v.owner.inputs[0].owner and
                    isinstance(v.owner.inputs[0].owner.op, Join) and
                    # Ensure the Join is joining only scalar variables (so
                    # that the constant value can be found at the same index
                    # as the one used in the sub-tensor).
                    python_all(var.ndim == 0 for var in
                               v.owner.inputs[0].owner.inputs) and
                    len(v.owner.op.idx_list) == 1):

                    idx = v.owner.op.idx_list[0]
                    if isinstance(idx, gof.Type):
                        idx = get_scalar_constant_value(v.owner.inputs[1])
                    # Note the '+ 1' is because the first argument to Join
                    # is the axis.
                    ret = v.owner.inputs[0].owner.inputs[idx + 1]
                    ret = get_scalar_constant_value(ret)
                    # join can cast implicitly its input in some case.
                    return theano._asarray(ret, dtype=v.type.dtype)

                elif (v.owner.inputs[0].owner and
                      isinstance(v.owner.inputs[0].owner.op,
                                 theano.tensor.opt.MakeVector) and
                      # MakeVector normally accept only scalar as input.
                      # We put this check in case there is change in the
                      # future
                      python_all(var.ndim == 0 for var in
                                 v.owner.inputs[0].owner.inputs) and
                      len(v.owner.op.idx_list) == 1):

                    idx = v.owner.op.idx_list[0]
                    if isinstance(idx, gof.Type):
                        idx = get_scalar_constant_value(v.owner.inputs[1])
                    # Python 2.4 does not support indexing with numpy.integer
                    # So we cast it.
                    idx = int(idx)
                    ret = v.owner.inputs[0].owner.inputs[idx]
                    ret = get_scalar_constant_value(ret)
                    # MakeVector can cast implicitly its input in some case.
                    return theano._asarray(ret, dtype=v.type.dtype)

                # This is needed when we take the grad as the Shape op
                # are not already changed into MakeVector
                owner = v.owner
                leftmost_parent = owner.inputs[0]
                if (leftmost_parent.owner and
                    isinstance(leftmost_parent.owner.op,
                               theano.tensor.Shape)):
                    op = owner.op
                    idx_list = op.idx_list
                    idx = idx_list[0]
                    if isinstance(idx, gof.Type):
                        idx = get_scalar_constant_value(owner.inputs[1])
                    grandparent = leftmost_parent.owner.inputs[0]
                    gp_broadcastable = grandparent.type.broadcastable
                    ndim = grandparent.type.ndim

                    assert ndim == len(gp_broadcastable)

                    if not (idx < len(gp_broadcastable)):
                        msg = ("get_scalar_constant_value detected " +
                               "deterministic IndexError: x.shape[%d] " +
                               "when x.ndim=%d.") % (ndim, idx)
                        if config.exception_verbosity == 'high':
                            msg += 'x=%s' % min_informative_str(v)
                        else:
                            msg += 'x=%s' % str(v)
                        raise ValueError(msg)

                    if gp_broadcastable[idx]:
                        return numpy.asarray(1)

        raise NotScalarConstantError(v)
```

(Context following the hunk: the `# Easy constructors` section marker.)
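To see what the new flag changes in practice, here is a minimal, hypothetical Python 2 session (assuming a Theano build from around this commit; `T.fill` produces the `Elemwise{second}` graph this function digs through):

```python
import theano.tensor as T
from theano.tensor.basic import (get_scalar_constant_value,
                                 NotScalarConstantError)

x = T.scalar('x')
v = T.fill(x, 5)  # Elemwise{second}(x, 5): the constant hides in an Elemwise

print get_scalar_constant_value(v)  # digs through Second -> array(5)
try:
    get_scalar_constant_value(v, elemwise=False)  # skips the Elemwise branch
except NotScalarConstantError:
    print 'not a plain constant (but the refusal is much cheaper)'
```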
```diff
@@ -3075,7 +3082,7 @@ pprint.assign(pow, printing.OperatorPrinter('**', 1, 'right'))
 ##########################

-def extract_constant(x):
+def extract_constant(x, elemwise=True):
     '''
     This function is basically a call to tensor.get_scalar_constant_value. The
     main difference is the behaviour in case of failure. While
@@ -3085,7 +3092,7 @@ def extract_constant(x):
     ScalarVariable, we convert it to a tensor with tensor_from_scalar.
     '''
     try:
-        x = get_scalar_constant_value(x)
+        x = get_scalar_constant_value(x, elemwise=elemwise)
     except NotScalarConstantError:
         pass
     if (isinstance(x, scal.ScalarVariable) or
```
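`extract_constant` now just forwards the flag; on failure it hands its argument back untouched. A small hypothetical illustration (same assumptions as the sketch above):

```python
import theano.tensor as T
from theano.tensor.basic import extract_constant

c = T.constant(2) * T.constant(3)          # an Elemwise{mul} over constants
print extract_constant(c)                  # folded through Elemwise -> array(6)
print extract_constant(c, elemwise=False)  # Elemwise not followed: graph back
x = T.scalar('x')
print extract_constant(x)                  # not constant at all: returned as-is
```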
theano/tensor/opt.py  (view file @ ce1eeab9)
```diff
@@ -1581,7 +1581,7 @@ def local_upcast_elemwise_constant_inputs(node):
             else:
                 try:
                     # works only for scalars
-                    cval_i = get_scalar_constant_value(i)
+                    cval_i = get_scalar_constant_value(i, elemwise=False)
                     if all(i.broadcastable):
                         new_inputs.append(T.shape_padleft(
                             T.cast(cval_i, output_dtype),
@@ -2327,7 +2327,7 @@ def local_remove_switch_const_cond(node):
     """
     if (isinstance(node.op, T.Elemwise) and
         isinstance(node.op.scalar_op, scalar.basic.Switch)):
-        cond = T.extract_constant(node.inputs[0])
+        cond = T.extract_constant(node.inputs[0], elemwise=False)
         if type(cond) is numpy.ndarray and cond.ndim == 0:
             if cond == 0:
                 out = node.inputs[2]
@@ -2377,7 +2377,8 @@ def local_mul_switch_sink(node):
         if i.owner and i.owner.op == T.switch:
             switch = i.owner
             try:
-                if get_scalar_constant_value(switch.inputs[1]) == 0.:
+                if (isinstance(switch.inputs[0], Constant) and
+                    get_scalar_constant_value(switch.inputs[1]) == 0.):
                     listmul = node.inputs[:idx] + node.inputs[idx + 1:]
                     fct = [T.switch(switch.inputs[0], 0,
                                     T.mul(*(listmul + [switch.inputs[2]])))]
@@ -2387,7 +2388,8 @@ def local_mul_switch_sink(node):
             except NotScalarConstantError:
                 pass
             try:
-                if get_scalar_constant_value(switch.inputs[2]) == 0.:
+                if (isinstance(switch.inputs[2], Constant) and
+                    get_scalar_constant_value(switch.inputs[2]) == 0.):
                     listmul = node.inputs[:idx] + node.inputs[idx + 1:]
                     fct = [T.switch(switch.inputs[0],
                                     T.mul(*(listmul + [switch.inputs[1]])), 0)]
@@ -3784,7 +3786,7 @@ def local_abs_merge(node):
     for i in node.inputs:
         if i.owner and i.owner.op == T.abs_:
             inputs.append(i.owner.inputs[0])
-        else:
+        elif isinstance(i, Constant):
             try:
                 const = get_scalar_constant_value(i)
             except NotScalarConstantError:
@@ -3792,6 +3794,8 @@ def local_abs_merge(node):
             if not (const >= 0).all():
                 return False
             inputs.append(i)
+        else:
+            return False
     return [T.abs_(T.mul(*inputs))]
     if node.op == T.true_div and sum([i.owner.op == T.abs_ for i in
                                       node.inputs if i.owner]) == 2:
```
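The pattern in these hunks is the same throughout: guard the (now explicitly costly) `get_scalar_constant_value` calls, either with `elemwise=False` or with an `isinstance(i, Constant)` pre-check, and bail out early (`return False`) on inputs that cannot qualify. For context, a hypothetical sketch of what `local_abs_merge` itself does when it applies (assuming a Theano-era build where the optimization is active in the default mode):

```python
import theano
import theano.tensor as T

a = T.vector('a')
b = T.vector('b')
f = theano.function([a, b], abs(a) * abs(b))
# When local_abs_merge fires, the compiled graph computes |a * b| instead of
# |a| * |b| (one abs and one mul); debugprint shows the merged graph.
theano.printing.debugprint(f)
```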
theano/tensor/tests/test_opt.py  (view file @ ce1eeab9)
Every hunk in this file is a whitespace / line-wrapping cleanup that came in with the merge: the removed and added sides contain the same tokens, re-wrapped to fit 80 columns. The resulting code for each hunk:

@@ -1495,11 +1495,11 @@ def test_log1p():
```python
    f = function([x], T.log(1 + (x)), mode=m)
    assert [node.op for node in f.maker.fgraph.toposort()] == [T.log1p]
    f = function([x], T.log(1 + (-x)), mode=m)
    assert [node.op for node in f.maker.fgraph.toposort()] == \
        [T.neg, inplace.log1p_inplace]
    f = function([x], -T.log(1 + (-x)), mode=m)
    assert [node.op for node in f.maker.fgraph.toposort()] == \
        [T.neg, inplace.log1p_inplace, inplace.neg_inplace]

    # check trickier cases (and use different dtype)
    y = fmatrix()
```

@@ -1507,12 +1507,12 @@ def test_log1p():
```python
    print f.maker.fgraph.toposort()
    # the first three ops are Shape_i, Shape_i, and Dimshuffle
    theano.printing.debugprint(f)
    assert [node.op for node in f.maker.fgraph.toposort()][3:] \
        == [T.log1p, tensor.alloc]
    f = function([x, y], T.log(0 + (x) + tensor.fill(y, 1.0)), mode=m)
    theano.printing.debugprint(f)
    assert [node.op for node in f.maker.fgraph.toposort()][3:] \
        == [T.log1p, tensor.alloc]
    f = function([x, y], T.log(2 + (x) - tensor.fill(y, 1.0)), mode=m)
    theano.printing.debugprint(f)
    assert [node.op for node in f.maker.fgraph.toposort()][3:] \
```

@@ -1611,7 +1611,7 @@ def test_local_useless_subtensor():
```python
        prog = f.maker.fgraph.toposort()
        if res:
            assert isinstance(prog[0].op,
                              theano.tensor.basic.SpecifyShape), dims
            assert prog[1].op == tensor.exp, dims
            assert len(prog) == 2, dims
        else:
```

@@ -1628,7 +1628,7 @@ def test_local_useless_subtensor():
```python
        ((slice(0, x.shape[1]), slice(0, x.shape[1]),), False),
        ((slice(0, x.shape[1]), 2), False),
        ((slice(0, x.shape[1]), slice(x.shape[0] - x.shape[0],
                                      x.shape[1]),), False),
        ((slice(0, T.scalar_from_tensor(x.shape[0])),), True),
    ]):
        f = function([x], tensor.exp(x).__getitem__(dims), mode=mode_opt)
```

@@ -1710,7 +1710,7 @@ class test_local_subtensor_lift(unittest.TestCase):
```python
        assert isinstance(prog[1].op, tensor.Subtensor)  # first subtensor
        assert isinstance(prog[2].op, tensor.Subtensor)  # first subtensor
        assert isinstance(prog[3].op.scalar_op,
                          theano.scalar.Composite)  # Composite{add,add}
        assert len(prog) == 4
        f([[0, 1], [2, 3]], 4, [[4, 5], [6, 7]])  # let debugmode test something
```

@@ -1727,7 +1727,7 @@ class test_local_subtensor_lift(unittest.TestCase):
(identical re-wrap to the previous hunk, in the following test method)

@@ -1767,12 +1767,12 @@ class test_local_subtensor_lift(unittest.TestCase):
```python
        x = tensor.matrix('x')
        y = tensor.vector('y')
        f = function([x, y], [tensor.exp(x + y)[0], tensor.exp(x + y) + x],
                     mode=mode_opt)
        prog = f.maker.fgraph.toposort()
        assert isinstance(prog[0].op, tensor.DimShuffle)
        assert isinstance(prog[1].op.scalar_op,
                          theano.scalar.Composite)  # Composite{add,exp}
        assert prog[2].op == tensor.add
        assert isinstance(prog[3].op, tensor.Subtensor)  # first subtensor
        assert len(prog) == 4
```

@@ -2039,10 +2039,10 @@ class test_local_subtensor_merge(unittest.TestCase):
```python
        # Some cases of merge: shape, (start, stop, step) of first,
        # (start, stop, step) of second subtensor
        cases = [((2, 3), (None, None, None), (None, None, -1)),
                 ((12, 1), (None, None, -4), (None, None, 1)),
                 ((5, 3), (1, 4, 2), (None, None, -1)),
                 ]
        x = tensor.matrix('x')
        for shape, sl1, sl2 in cases:
```

@@ -2063,13 +2063,13 @@ class test_local_subtensor_merge(unittest.TestCase):
```python
        e2 = tensor.iscalar('e2')
        s2 = tensor.iscalar('s2')
        f = function([x, b1, e1, s1, b2, e2, s2],
                     x[b1:e1:s1][b2:e2:s2],
                     mode=mode_opt)
        #theano.printing.debugprint(f, print_type=True)
        topo = f.maker.fgraph.toposort()
        #print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
        assert len([t for t in topo if isinstance(t.op,
                                                  tensor.Subtensor)]) == 1
        #print topo[-1].op
        assert isinstance(topo[-1].op, DeepCopyOp)
```

@@ -2079,9 +2079,9 @@ class test_local_subtensor_merge(unittest.TestCase):
```python
        e2r = self.rng.permutation(range(-8, 8))[:2]
        s1r = self.rng.permutation([-7, -6, -5, -4, -3, -2, -1, 1,
                                    2, 3, 4, 5, 6, 7])[:2]
        s2r = self.rng.permutation([-7, -6, -5, -4, -3, -2, -1, 1,
                                    2, 3, 4, 5, 6, 7])[:2]

        for x_s in self.x_shapes:
            x_val = self.rng.uniform(size=x_s).astype(config.floatX)
```

@@ -2152,7 +2152,7 @@ class test_local_subtensor_merge(unittest.TestCase):
```python
        topo = f.maker.fgraph.toposort()
        #print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
        assert len([t for t in topo if isinstance(t.op,
                                                  tensor.Subtensor)]) == 1
        #print topo[-1].op
        assert isinstance(topo[-1].op, DeepCopyOp)
```

@@ -2178,7 +2178,7 @@ class test_local_subtensor_merge(unittest.TestCase):
```python
            except IndexError:
                n_index_err += 1
                self.assertRaises(IndexError,
                                  f, x_val, b_v, e_v, s_v, i_v)
            else:
                # Executed if the "try" clause did not raise
                # any exception
```

@@ -2241,7 +2241,7 @@ class test_local_subtensor_merge(unittest.TestCase):
```python
        topo = f.maker.fgraph.toposort()
        #print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
        assert len([t for t in topo if isinstance(t.op,
                                                  tensor.Subtensor)]) <= 1
        assert isinstance(topo[-1].op, DeepCopyOp)
        for x_s in self.x_shapes:
```

@@ -2298,7 +2298,7 @@ class test_local_subtensor_merge(unittest.TestCase):
(identical re-wrap to the previous hunk, later in the class)

@@ -2333,7 +2333,8 @@ class Test_alloc_zero(unittest.TestCase):
```python
    def setUp(self):
        mode = theano.compile.mode.get_default_mode()
        self.mode = mode.including("local_incsubtensor_of_allocs",
                                   "local_setsubtensor_of_allocs",
                                   "local_0_dot_x")

    def test_setsubtensor_allocs0(self):
        x = tensor.matrix()
```

@@ -2343,32 +2344,32 @@ class Test_alloc_zero(unittest.TestCase):
```python
        z = tensor.set_subtensor(x0[:4], y0)
        f = theano.function([x, y], z, mode=self.mode)
        assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
                          f.maker.fgraph.toposort()])

    def test_setsubtensor_allocs1(self):
        y = tensor.matrix()
        x0 = tensor.constant(numpy.asarray(numpy.zeros((4, 4)),
                                           dtype=config.floatX))
        y0 = tensor.zeros_like(y)
        z = tensor.set_subtensor(x0[:4], y0)
        f = theano.function([y], z, mode=self.mode)
        assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
                          f.maker.fgraph.toposort()])

    def test_setsubtensor_allocs1t(self):
        y = tensor.matrix()
        x0 = tensor.constant(numpy.asarray(numpy.zeros((4, 4)),
                                           dtype=config.floatX))
        y0 = tensor.zeros_like(y)
        z = tensor.set_subtensor(x0[:4], y0.T)
        f = theano.function([y], z, mode=mode_opt)
        assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
                          f.maker.fgraph.toposort()])

    def test_setsubtensor_allocs2(self):
        x = tensor.matrix()
        y0 = tensor.constant(numpy.asarray(numpy.zeros_like((4, 4)),
                                           dtype=config.floatX))
        x0 = tensor.zeros_like(x)
        z = tensor.set_subtensor(x0[:4], y0)
        f = theano.function([x], z, mode=self.mode)
```

@@ -2382,7 +2383,7 @@ class Test_alloc_zero(unittest.TestCase):
```python
        z = tensor.inc_subtensor(x[:4], y0)
        f = theano.function([x, y], z, mode=self.mode)
        assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
                          f.maker.fgraph.toposort()])

    def test_incsubtensor_allocs0t(self):
        x = tensor.matrix()
```

@@ -2391,12 +2392,12 @@ class Test_alloc_zero(unittest.TestCase):
```python
        z = tensor.inc_subtensor(x[:4], y0.T)
        f = theano.function([x, y], z, mode=mode_opt)
        assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
                          f.maker.fgraph.toposort()])

    def test_incsubtensor_allocs1(self):
        x = tensor.matrix()
        y0 = tensor.constant(numpy.asarray(numpy.zeros_like((4, 4)),
                                           dtype=config.floatX))
        z = tensor.inc_subtensor(x[:4], y0)
        f = theano.function([x], z, mode=self.mode)
        assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
```

@@ -2427,7 +2428,7 @@ class Test_alloc_zero(unittest.TestCase):
```python
        f(_e1[1], _e2[1])
        f(_e1[2], _e2[2])
        assert numpy.all([not isinstance(x.op, tensor.Dot) for x in
                          f.maker.fgraph.toposort()])

        #test that we don't remove shape errors
        self.assertRaises((ValueError, AssertionError), f,
```

@@ -2475,7 +2476,7 @@ def test_local_subtensor_of_alloc():
```python
        (slice(1, 3), slice(None, -1)),
        (slice(None, None, 2)),
        (slice(1, None, 2)),
        ]
    for slices in slicess:
        z = yx.__getitem__(slices)
        f = theano.function([x], z)
```

@@ -2809,8 +2810,8 @@ class test_assert(utt.InferShapeTester):
```python
        x = T.scalar()
        y = T.scalar()
        f = theano.function([x, y], theano.tensor.opt.assert_(x, y, 1),
                            mode=mode)
        assert f(1, 1) == 1
        assert f(5, 1) == 5
        topo = f.maker.fgraph.toposort()
```

@@ -2827,8 +2828,8 @@ class test_assert(utt.InferShapeTester):
```python
        x = T.scalar()
        y = T.scalar()
        f = theano.function([x, y], theano.tensor.opt.assert_(x, y, 0),
                            mode=mode)
        self.assertRaises(AssertionError, f, 1, 0)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
```

@@ -2843,14 +2844,14 @@ class test_assert(utt.InferShapeTester):
```python
        bdscal_val = numpy.random.rand() + 1
        out = theano.tensor.opt.assert_(adscal, bdscal)
        self._compile_and_check([adscal, bdscal], [out],
                                [adscal_val, bdscal_val], Assert)

        admat = dmatrix()
        admat_val = numpy.random.rand(3, 4)
        adscal_val += 1
        out = theano.tensor.opt.assert_(admat, adscal, bdscal)
        self._compile_and_check([admat, adscal, bdscal], [out],
                                [admat_val, adscal_val, bdscal_val], Assert)


def test_local_mul_specialize():
```

@@ -3177,8 +3178,9 @@ def test_constant_get_stabilized():
```python
class T_local_switch_sink(unittest.TestCase):
    def setUp(self):
        # condition values
        self.condm = numpy.asarray([[0.1, 0, 1, -1],
                                    [0., 0., 0., 0.],
                                    [1, 1, 1, 1]])
        self.condv = numpy.asarray([0.1, 0, 1, -1])
        self.conds = [0.1, 0, 1, -1]
```

@@ -3256,14 +3258,14 @@ class T_local_erf(unittest.TestCase):
```python
        f = theano.function([x], 1 + T.erf(x), mode=self.mode)
        print f.maker.fgraph.toposort()
        assert [n.op for n in f.maker.fgraph.toposort()] == [T.mul, T.erfc], \
            f.maker.fgraph.toposort()
        f(val)
        f = theano.function([x], T.erf(x) + 1, mode=self.mode)
        print f.maker.fgraph.toposort()
        assert [n.op for n in f.maker.fgraph.toposort()] == [T.mul, T.erfc], \
            f.maker.fgraph.toposort()
        f(val)
        f = theano.function([x], T.erf(x) + 2, mode=self.mode)
```

@@ -3277,7 +3279,7 @@ class T_local_erf(unittest.TestCase):
```python
    def test_local_one_minus_erf(self):
        val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30],
                            dtype=config.floatX)
        x = T.vector()
        f = theano.function([x], 1 - T.erf(x), mode=self.mode)
```

@@ -3305,7 +3307,7 @@ class T_local_erf(unittest.TestCase):
```python
        assert topo[0].op == T.erf, f.maker.fgraph.toposort()
        assert isinstance(topo[1].op, T.Elemwise), f.maker.fgraph.toposort()
        assert (isinstance(topo[1].op.scalar_op, scal.Add)
                or isinstance(topo[1].op.scalar_op, scal.Sub)), \
            f.maker.fgraph.toposort()
        print f(val)

    def test_local_erf_minus_one(self):
```

@@ -3342,10 +3344,11 @@ class T_local_erfc(unittest.TestCase):
```python
class T_local_erfc(unittest.TestCase):
    def setUp(self):
        self.mode_fusion = theano.compile.mode.get_default_mode().including(
            'canonicalize').including('fast_run').excluding('gpu')
        self.mode = self.mode_fusion.excluding('fusion')
        self.mode._optimizer.position_cutoff = 1.50001
        if (theano.config.cxx == '' and
                not theano.scalar.basic_scipy.imported_scipy_special):
            raise SkipTest("erfc need a c++ compiler or scipy")

    def test_local_one_minus_erfc(self):
```