testgroup / pytensor · Commits

Commit 9ca0033f, authored Feb 29, 2012 by nouiz

Merge pull request #493 from lamblin/function_unused_inputs

Detect unused inputs to theano.function

Parents: e152e17b, 8401a3f8

Showing 20 changed files with 201 additions and 88 deletions (+201 −88).
  NEWS.txt                                        +14  −4
  theano/compile/builders.py                       +4  −1
  theano/compile/debugmode.py                      +8  −1
  theano/compile/function.py                       +6  −1
  theano/compile/function_module.py               +54  −4
  theano/compile/module.py                         +3  −3
  theano/compile/pfunc.py                          +8  −2
  theano/compile/tests/test_function_module.py    +36  −3
  theano/compile/tests/test_pfunc.py               +4  −10
  theano/gradient.py                               +3  −2
  theano/sandbox/cuda/tests/test_nnet.py           +4  −4
  theano/sandbox/cuda/tests/test_opt.py            +4  −4
  theano/sandbox/cuda/tests/test_var.py            +1  −1
  theano/scan_module/scan.py                       +2  −1
  theano/scan_module/scan_op.py                    +2  −1
  theano/scan_module/tests/test_scan.py            +3  −3
  theano/tensor/tests/test_basic.py               +18  −18
  theano/tensor/tests/test_blas.py                 +8  −6
  theano/tensor/tests/test_opt.py                 +14  −14
  theano/tests/test_rop.py                         +5  −5
NEWS.txt

@@ -2,10 +2,20 @@
 Update in the Trunk since the last release:
 
+Documentation
  * Added in the tutorial documentation on how to extend Theano.
    This explains how to make a Theano Op from a Python function.
    http://deeplearning.net/software/theano/tutorial/extending_theano.html
+   (Frédéric B.)
+
+Interface change
+ * theano.function does not accept duplicate inputs, so function([x, x], ...)
+   does not work anymore. (Pascal L.)
+ * theano.function now raises an error if some of the provided inputs are
+   not part of the computational graph needed to compute the output, for
+   instance, function([x, y], [y]). You can use the kwarg
+   ``on_unused_input={'raise', 'warn', 'ignore'}`` to control this.
+   (Pascal L.)
 
 =============
 Release Notes
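To illustrate the interface change announced above, here is a minimal sketch of the new ``on_unused_input`` keyword in use. This is a hypothetical session, not part of the commit; the variables ``x`` and ``y`` are illustrative only:

```python
import theano
import theano.tensor as T

x = T.scalar('x')
y = T.scalar('y')

# y does not appear in the output expression, so it is an unused input.
# Under the new default (on_unused_input='raise') this now fails:
#     theano.function([x, y], [x * 2])   # raises ValueError

# 'warn' compiles the function but emits a warning instead:
f_warn = theano.function([x, y], [x * 2], on_unused_input='warn')

# 'ignore' restores the old silent behaviour:
f_ok = theano.function([x, y], [x * 2], on_unused_input='ignore')
assert f_ok(3.0, 99.0)[0] == 6.0  # the value passed for y is simply unused
```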
theano/compile/builders.py

@@ -59,9 +59,12 @@ class OpFromGraph(gof.Op):
         if g is None:
             self.grad_ops.append(lambda *args: None)
         else:
+            # It is normal if some inputs are not needed in order
+            # to compute the gradient, so we ignore them.
             self.grad_ops.append(OpFromGraph(inputs + output_grads,
                                              [g],
-                                             grad_depth=grad_depth - 1))
+                                             grad_depth=grad_depth - 1,
+                                             on_unused_input='ignore'))
 
     def __eq__(self, other):
         #TODO: recognize a copy
         return self is other
theano/compile/debugmode.py

@@ -1735,7 +1735,8 @@ class _Maker(FunctionMaker):  # inheritance buys a few helper functions
     def __init__(self, inputs, outputs, optimizer, mode,
                  accept_inplace=False,
                  function_builder=Function,
-                 profile=None):
+                 profile=None,
+                 on_unused_input='raise'):
         """
         :type inputs: a list of SymbolicInput instances
@@ -1748,6 +1749,9 @@ class _Maker(FunctionMaker):  # inheritance buys a few helper functions
            inplace operations in the graph from the inputs to
            the outputs
 
+        :param on_unused_input: What to do if a variable in the 'inputs' list is
+            not used in the graph. Possible values are 'raise', 'warn', and 'ignore'.
+
         :note: this function sets TensorType.filter_checks_isfinite
            when `mode.check_isfinite` is True
@@ -1772,6 +1776,9 @@ class _Maker(FunctionMaker):  # inheritance buys a few helper functions
                                     [i.update for i in inputs
                                      if getattr(i, 'update', False)])
 
+        # Check if some input variables are unused
+        self._check_unused_inputs(inputs, outputs, on_unused_input)
+
         #TODO: REMOVE THIS CRUFT - it's complicated for SymbolicInputKits
         indices = [[input] + self.expand_in(input, _inputs)
                    for input in inputs]
         expanded_inputs = reduce(list.__add__, [list(z)
theano/compile/function.py

@@ -13,7 +13,8 @@ from numpy import any #for to work in python 2.4
 def function(inputs, outputs=None, mode=None, updates=[], givens=[],
              no_default_updates=False, accept_inplace=False, name=None,
-             rebuild_strict=True, allow_input_downcast=None, profile=None):
+             rebuild_strict=True, allow_input_downcast=None, profile=None,
+             on_unused_input='raise'):
     """
     Return a callable object that will calculate `outputs` from `inputs`.
@@ -68,6 +69,9 @@ def function(inputs, outputs=None, mode=None, updates=[], givens=[],
        instance. If argument is `True` then a new ProfileStats instance will be
        used. This profiling object will be available via self.profile.
 
+    :param on_unused_input: What to do if a variable in the 'inputs' list is
+        not used in the graph. Possible values are 'raise', 'warn', and 'ignore'.
+
     :note: Regarding givens: Be careful to make sure that these substitutions are
        independent--behaviour when Var1 of one pair appears in the graph leading to Var2 in
        another expression is undefined. Replacements specified with givens are different from
@@ -111,6 +115,7 @@ def function(inputs, outputs=None, mode=None, updates=[], givens=[],
                 accept_inplace=accept_inplace, name=name,
                 rebuild_strict=rebuild_strict,
                 allow_input_downcast=allow_input_downcast,
+                on_unused_input=on_unused_input,
                 profile=profile)
     # We need to add the flag check_aliased inputs if we have any mutable or
     # borrowed used defined inputs
theano/compile/function_module.py

@@ -958,7 +958,7 @@ class FunctionMaker(object):
     def __init__(self, inputs, outputs,
                  mode=None, accept_inplace=False, function_builder=Function,
-                 profile=None):
+                 profile=None, on_unused_input='raise'):
         """
         :type inputs: a list of SymbolicInput instances
@@ -972,6 +972,12 @@ class FunctionMaker(object):
         :param accept_inplace: True iff it is acceptable to have inplace operations
             in the graph from the inputs to the outputs
 
+        :param on_unused_input: What to do if a variable in the 'inputs' list
+            is not used in the graph. Possible values are:
+            - 'raise' (default): raise an error
+            - 'warn': log a warning
+            - 'ignore': do not do anything
+
         """
         mode = mode_module.get_mode(mode)
@@ -1005,6 +1011,10 @@ class FunctionMaker(object):
         _inputs = gof.graph.inputs([o.variable for o in outputs] + [i.update
                                     for i in inputs if getattr(i, 'update', False)])
 
+        # Check if some input variables are unused
+        self._check_unused_inputs(inputs, outputs, on_unused_input)
+
         #TODO: REMOVE THIS CRUFT - it's complicated for SymbolicInputKits
         indices = [[input] + self.expand_in(input, _inputs)
                    for input in inputs]
         expanded_inputs = reduce(list.__add__, [list(z)
                                                 for x, y, z in indices], [])
@@ -1072,6 +1082,41 @@ class FunctionMaker(object):
                 (i.value != None and not isinstance(i.value, gof.Container)
                  and i.update == None)
                 for i in self.inputs]
 
+    def _check_unused_inputs(self, inputs, outputs, on_unused_input):
+        if on_unused_input == 'ignore':
+            return
+
+        # There should be two categories of variables in inputs:
+        #  - variables that have to be provided (used_inputs)
+        #  - shared variables that will be updated
+        used_inputs = gof.graph.ancestors(
+                ([o.variable for o in outputs]
+                 + [i.update for i in inputs if getattr(i, 'update', False)]),
+                blockers=[i.variable for i in inputs])
+
+        msg = ("theano.function was asked to create a function computing "
+               "outputs given certain inputs, but one of the provided "
+               "input variables is not part of the computational graph "
+               "needed to compute the outputs: %s.\n%s")
+        warn_msg = ("To make this warning into an error, you can pass the "
+                    "parameter on_unused_input='raise' to theano.function. "
+                    "To disable it completely, use on_unused_input='ignore'.")
+        err_msg = ("To make this error into a warning, you can pass the "
+                   "parameter on_unused_input='warn' to theano.function. "
+                   "To disable it completely, use on_unused_input='ignore'.")
+
+        for i in inputs:
+            if ((i.variable not in used_inputs) and (i.update is None)):
+                if on_unused_input == 'warn':
+                    warnings.warn(msg % (i.variable, warn_msg), stacklevel=5)
+                elif on_unused_input == 'raise':
+                    raise ValueError(msg % (i.variable, err_msg))
+                else:
+                    raise ValueError("Invalid value for keyword "
+                            "on_unused_input of theano.function: '%s'. "
+                            "valid values are 'raise', 'warn', and 'ignore'."
+                            % on_unused_input)
+
     def create(self, input_storage=None, trustme=False):
         """
         Create a function.
@@ -1202,7 +1247,8 @@ def check_equal(x, y):
 def register_checker(checker):
     __checkers.insert(0, checker)
 
 def orig_function(inputs, outputs, mode=None, accept_inplace=False, name=None,
-                  profile=None):
+                  profile=None, on_unused_input='raise'):
     """
     Return a Function that will calculate the outputs from the inputs.
@@ -1232,6 +1278,8 @@ def orig_function(inputs, outputs, mode=None, accept_inplace=False, name=None,
     :param profile: None or ProfileStats instance
 
+    :param on_unused_input: What to do if a variable in the 'inputs' list is
+        not used in the graph. Possible values are 'raise', 'warn', and 'ignore'.
+
     """
     #Every element of the input list will be upgraded to an `In` instance if necessary,
@@ -1262,7 +1310,8 @@ def orig_function(inputs, outputs, mode=None, accept_inplace=False, name=None,
                    outputs,
                    mode[0],
                    accept_inplace=accept_inplace,
-                   profile=profile).create(
+                   profile=profile,
+                   on_unused_input=on_unused_input).create(
                        defaults)
     else:
         if profile:
@@ -1292,7 +1341,8 @@ def orig_function(inputs, outputs, mode=None, accept_inplace=False, name=None,
                    outputs,
                    mode,
                    accept_inplace=accept_inplace,
-                   profile=profile).create(
+                   profile=profile,
+                   on_unused_input=on_unused_input).create(
                        defaults)
     t2 = time.time()
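The core of ``_check_unused_inputs`` above is a reachability test: ``gof.graph.ancestors`` walks backward from the outputs (and from the updates of shared inputs), treating the declared inputs as blockers, and any declared input that is never reached is unused. A minimal pure-Python sketch of that idea, assuming Theano-style variables with an ``owner`` Apply node (this is not Theano's actual implementation):

```python
def reachable_from(outputs, blockers):
    """Return the set of variables reachable by walking backward from
    `outputs`, stopping whenever a variable in `blockers` is reached."""
    blockers = set(blockers)
    seen = set()
    stack = list(outputs)
    while stack:
        var = stack.pop()
        if var in seen:
            continue
        seen.add(var)
        if var in blockers:
            continue  # do not look past a provided input
        if var.owner is not None:  # the Apply node that produced `var`
            stack.extend(var.owner.inputs)
    return seen

# An input i is then "unused" when i.variable is not in the returned set
# and i carries no update expression.
```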
theano/compile/module.py

@@ -473,9 +473,9 @@ class Method(Component):
         else:
             effective_mode = self.mode
-        # backport
+        # We ignore unused inputs, since all the inputs are passed
         #effective_mode = mode if self.mode is None else self.mode
-        rval = F.orig_function(inputs, outputs, effective_mode)
+        rval = F.orig_function(inputs, outputs, effective_mode,
+                               on_unused_input='ignore')
         memo[self] = rval
         return rval
theano/compile/pfunc.py

@@ -324,7 +324,7 @@ class Param(object):
 def pfunc(params, outputs=None, mode=None, updates=[], givens=[],
           no_default_updates=False, accept_inplace=False, name=None,
           rebuild_strict=True, allow_input_downcast=None,
-          profile=None):
+          profile=None, on_unused_input='raise'):
     """Function-constructor for graphs with shared variables.
 
     :type params: list of either Variable or Param instances.
@@ -372,6 +372,11 @@ def pfunc(params, outputs=None, mode=None, updates=[], givens=[],
        with that string as its `message` attribute. This profiling object will be
        available via self.profile.
 
+    :type on_unused_input: str
+    :param on_unused_input: What to do if a variable in the 'inputs' list
+        is not used in the graph. Possible values are 'raise', 'warn', and
+        'ignore'.
+
     :rtype: theano.compile.Function
     :returns: a callable object that will compute the outputs (given the inputs)
@@ -460,7 +465,8 @@ def pfunc(params, outputs=None, mode=None, updates=[], givens=[],
         inputs.append(si)
 
     return orig_function(inputs, cloned_outputs, mode,
-                         accept_inplace=accept_inplace, name=name, profile=profile)
+                         accept_inplace=accept_inplace, name=name,
+                         profile=profile, on_unused_input=on_unused_input)
 
 def _pfunc_param_to_in(param, strict=False, allow_downcast=None):
theano/compile/tests/test_function_module.py

@@ -54,26 +54,45 @@ class T_function(unittest.TestCase):
     def test_missing_inputs(self):
 
         MissingInputException = TypeError
+        UnusedInputException = ValueError
 
         def fn():
             x, s = T.scalars('xs')
             fn = function([], [x])
         checkfor(self, fn, MissingInputException)
 
         def fn():
             x, s = T.scalars('xs')
+            # Ignore unused input s, as it hides the other error
+            fn = function([s], [x], on_unused_input='ignore')
+        checkfor(self, fn, MissingInputException)
+
+        def fn():
+            x, s = T.scalars('xs')
             fn = function([s], [x])
-        checkfor(self, fn, MissingInputException)
+        checkfor(self, fn, UnusedInputException)
 
         def fn():
             x, s = T.scalars('xs')
+            # Ignore unused input s, as it hides the other error
+            fn = function([s], x, on_unused_input='ignore')
+        checkfor(self, fn, MissingInputException)
+
+        def fn():
+            x, s = T.scalars('xs')
             fn = function([s], x)
-        checkfor(self, fn, MissingInputException)
+        checkfor(self, fn, UnusedInputException)
 
         def fn():
             x, s = T.scalars('xs')
+            # Ignore unused input s, as it hides the other error
+            fn = function([s], Out(x), on_unused_input='ignore')
+        checkfor(self, fn, MissingInputException)
+
+        def fn():
+            x, s = T.scalars('xs')
             fn = function([s], Out(x))
-        checkfor(self, fn, MissingInputException)
+        checkfor(self, fn, UnusedInputException)
 
         def fn():
             x, s = T.scalars('xs')
@@ -124,7 +143,8 @@ class T_function(unittest.TestCase):
         x, s = T.scalars('xs')
 
         #x's name is ignored because it is followed by anonymous parameter a.
-        f = function([x, a, s], a/s)
+        # Ignore unused input x, as it hides the other error
+        f = function([x, a, s], a/s, on_unused_input='ignore')
         self.assertTrue(f(9, 1, 2) == 0.5)
         self.assertTrue(f(9, 2, 1) == 2.0)
         self.assertTrue(f(9, 2, s=1) == 2.0)
@@ -355,6 +375,20 @@ class T_function(unittest.TestCase):
         f(o+.1) #should clobber the memory used to store four
         assert not numpy.all(four == 4)
 
+    def test_disconnected_input(self):
+        a = T.scalar('a')
+        v = T.vector('v')
+        self.assertRaises(ValueError, function, [a, v], v*2)
+        f = function([a, v], v*2, on_unused_input='ignore')
+
+    def test_masked_input(self):
+        m = T.matrix('m')
+        mt = m.T
+        mt.name = 'm.T'
+        self.assertRaises(ValueError, function, [m, mt], mt*2)
+        f = function([m, mt], mt*2, on_unused_input='ignore')
+
 class T_picklefunction(unittest.TestCase):
 
     def test_deepcopy(self):
@@ -631,7 +665,6 @@ class T_picklefunction(unittest.TestCase):
         assert blah.f1[blah.s] != blah2.f1[blah2.s]
 
-
 class SomethingToPickle(object):
     def __init__(self):
         a = T.scalar() # the a is for 'anonymous' (un-named).
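The new ``test_masked_input`` case above covers a subtle consequence of the blocker-based check: when one provided input is itself computed from another (here ``m.T`` from ``m``), the backward walk from the output stops at the derived input, so the root variable is reported as unused. A small illustrative sketch mirroring the test (not part of the commit):

```python
import theano
import theano.tensor as T

m = T.matrix('m')
mt = m.T           # mt is derived from m, not a root of the graph
mt.name = 'm.T'

# mt * 2 depends directly on the provided input mt; the traversal stops
# there and never reaches m, so m is flagged as unused:
#     theano.function([m, mt], mt * 2)   # raises ValueError
f = theano.function([m, mt], mt * 2, on_unused_input='ignore')
```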
theano/compile/tests/test_pfunc.py

@@ -872,15 +872,12 @@ class Test_aliasing_rules(unittest.TestCase):
         orig_b = numpy.zeros((2,2))-.5
         A = self.shared(orig_a)
         B = self.shared(orig_b)
-        C = tensor.dmatrix()
-        z = numpy.zeros((2,2))
         data_of_a = data_of(A)
         data_of_b = data_of(B)
 
-        f = pfunc([C], [], updates=[(A,B),(B,A)])
-        f(z)
+        f = pfunc([], [], updates=[(A,B),(B,A)])
+        f()
         # correctness
         assert numpy.all(data_of(A) == -.5)
         assert numpy.all(data_of(B) == +.5)
@@ -902,16 +899,13 @@ class Test_aliasing_rules(unittest.TestCase):
         orig_b = numpy.zeros((2,2))-.5
         A = self.shared(orig_a)
         B = self.shared(orig_b)
-        C = tensor.dmatrix()
-        z = numpy.zeros((2,2))
         data_of_a = data_of(A)
         data_of_b = data_of(B)
 
-        f = pfunc([C], [], updates=[(A,B[:,::-1]),(B,A.T)])
+        f = pfunc([], [], updates=[(A,B[:,::-1]),(B,A.T)])
         theano.printing.debugprint(f)
-        f(z)
+        f()
         # correctness (doesn't actually test the view...)
         assert numpy.all(data_of(A) == -.5)
         assert numpy.all(data_of(B) == +.5)
theano/gradient.py

@@ -765,10 +765,11 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None, abs_tol=None,
     def function(inputs, output):
         if mode is None:
             f = compile.function(inputs, output, accept_inplace=True,
-                                 allow_input_downcast=True)
+                                 allow_input_downcast=True,
+                                 on_unused_input='ignore')
         else:
             f = compile.function(inputs, output, accept_inplace=True,
-                                 allow_input_downcast=True, mode=mode)
+                                 allow_input_downcast=True, mode=mode,
+                                 on_unused_input='ignore')
         return f
 
     tensor_pt = [TensorType(
theano/sandbox/cuda/tests/test_nnet.py

@@ -57,9 +57,9 @@ def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
     y_pred = T.argmax(p_y_given_x, axis=-1)
     loss = -T.mean(T.log(p_y_given_x)[T.arange(y.shape[0]), y])
     dW = T.grad(loss, dot_result)
-    classify = theano.function(inputs=[x, y, b, dot_result], outputs=[loss, y_pred, dW],
+    classify = theano.function(inputs=[y, b, dot_result], outputs=[loss, y_pred, dW],
                                mode=mode_without_gpu)
-    classify_gpu = theano.function(inputs=[x, y, b, dot_result], outputs=[loss, y_pred, dW],
+    classify_gpu = theano.function(inputs=[y, b, dot_result], outputs=[loss, y_pred, dW],
                                    mode=mode_with_gpu)
     #theano.printing.debugprint(classify)
     #theano.printing.debugprint(classify_gpu)
@@ -67,8 +67,8 @@ def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
     assert any([isinstance(node.op, T.nnet.CrossentropySoftmaxArgmax1HotWithBias)
                 for node in classify.maker.env.toposort()])
     assert any([isinstance(node.op, cuda.nnet.GpuCrossentropySoftmaxArgmax1HotWithBias)
                 for node in classify_gpu.maker.env.toposort()])
 
-    out = classify(xx, yy, b_values, dot_value)
-    gout = classify_gpu(xx, yy, b_values, dot_value)
+    out = classify(yy, b_values, dot_value)
+    gout = classify_gpu(yy, b_values, dot_value)
 
     assert len(out) == len(gout) == 3
     assert numpy.allclose(out[0], gout[0])
theano/sandbox/cuda/tests/test_opt.py

@@ -165,7 +165,7 @@ def test_huge_elemwise_fusion():
     """
     shape = (2, 3, 4, 5, 6)
    ttype = tensor.tensor(dtype='float32', broadcastable=(False,)*len(shape))
-    vars = [tensor.tanh(ttype) for x in range(10)]
+    vars = [tensor.tanh(ttype) for x in range(7)]
     f = pfunc(vars, [vars[0] - vars[1] - vars[2] - vars[3] - vars[4] - vars[5] - vars[6]], mode=mode_with_gpu)
     topo = f.maker.env.toposort()
     #theano.printing.debugprint(f)
@@ -177,14 +177,14 @@ def test_huge_elemwise_fusion():
     assert isinstance(topo[8].op.scalar_op, theano.scalar.basic.Composite)
     #let debugmode catch errors
     gen = lambda: theano._asarray(numpy.random.rand(*shape), dtype='float32')
-    f(gen(), gen(), gen(), gen(), gen(), gen(), gen(), gen(), gen(), gen())
+    f(gen(), gen(), gen(), gen(), gen(), gen(), gen())
 
     # Test the case where we can't put the computation on the gpu! their is too many
     # dimensions to the input to have 2 inputs to the op!
     shape = (1, 2, 3, 4, 5, 6, 7, 2, 2, 3, 2, 1, 2, 2, 2,)
     ttype = tensor.tensor(dtype='float32', broadcastable=(False,)*len(shape))
-    vars = [tensor.tanh(ttype) for x in range(10)]
+    vars = [tensor.tanh(ttype) for x in range(7)]
     f = pfunc(vars, [vars[0] - vars[1] - vars[2] - vars[3] - vars[4] - vars[5] - vars[6]], mode=mode_with_gpu)
     topo = f.maker.env.toposort()
     #theano.printing.debugprint(f)
@@ -193,7 +193,7 @@ def test_huge_elemwise_fusion():
     assert sum([isinstance(node.op, tensor.Elemwise) for node in topo]) == 1
     #let debugmode catch errors
     gen = lambda: theano._asarray(numpy.random.rand(*shape), dtype='float32')
-    f(gen(), gen(), gen(), gen(), gen(), gen(), gen(), gen(), gen(), gen())
+    f(gen(), gen(), gen(), gen(), gen(), gen(), gen())
 
 def gen(shape):
     return theano._asarray(numpy.random.rand(*shape), dtype='float32')
theano/sandbox/cuda/tests/test_var.py

@@ -50,7 +50,7 @@ def test_givens():
     data = numpy.float32([1, 2, 3, 4])
     x = f32sc(data)
     y = x**2
-    f = theano.function([x], y, givens={x: x + 1})
+    f = theano.function([], y, givens={x: x + 1})
 
 class T_updates(unittest.TestCase):
     # Test that you can use a TensorType expression to update a
theano/scan_module/scan.py

@@ -793,7 +793,8 @@ def scan(fn,
                                dummy_outs,
                                updates=updates,
                                mode=compile.mode.Mode(linker='py',
-                                                      optimizer=None))
+                                                      optimizer=None),
+                               on_unused_input='ignore')
     ##
     ### Step 5. Re-arange inputs of scan into a more strict order
theano/scan_module/scan_op.py

@@ -480,7 +480,8 @@ class Scan(PureOp):
                                     wrapped_outputs,
                                     mode=self.mode_instance,
                                     name=self.name,
-                                    profile=profile)
+                                    profile=profile,
+                                    on_unused_input='ignore')
         try:
             cython_mintaps = numpy.asarray(self.mintaps, dtype='int32')
theano/scan_module/tests/test_scan.py

@@ -2222,7 +2222,7 @@ class T_Scan(unittest.TestCase):
         sx, upx = theano.scan(sum, sequences=[x])
         sy, upy = theano.scan(sum, sequences=[x])
 
-        f = theano.function([x, y], [sx, sy], mode=mode_with_opt)
+        f = theano.function([x], [sx, sy], mode=mode_with_opt)
         topo = f.maker.env.toposort()
         scans = filter(lambda n:
                        isinstance(n.op, theano.scan_module.scan_op.Scan), topo)
@@ -2231,7 +2231,7 @@ class T_Scan(unittest.TestCase):
         sx, upx = theano.scan(sum, sequences=[x])
         sy, upy = theano.scan(sum, sequences=[x], mode='FAST_COMPILE')
 
-        f = theano.function([x, y], [sx, sy],
+        f = theano.function([x], [sx, sy],
                             mode=mode_with_opt)
         topo = f.maker.env.toposort()
         scans = filter(lambda n:
@@ -2241,7 +2241,7 @@ class T_Scan(unittest.TestCase):
         sx, upx = theano.scan(sum, sequences=[x])
         sy, upy = theano.scan(sum, sequences=[x], truncate_gradient=1)
 
-        f = theano.function([x, y], [sx, sy], mode=mode_with_opt)
+        f = theano.function([x], [sx, sy], mode=mode_with_opt)
         topo = f.maker.env.toposort()
         scans = filter(lambda n:
                        isinstance(n.op, theano.scan_module.scan_op.Scan), topo)
theano/tensor/tests/test_basic.py

@@ -57,13 +57,15 @@ else:
 utt.seed_rng()
 
-def inplace_func(inputs, outputs, mode=None, allow_input_downcast=False):
+def inplace_func(inputs, outputs, mode=None, allow_input_downcast=False,
+                 on_unused_input='raise'):
     if mode is None:
         mode = get_default_mode()
     return function(inputs, outputs,
                     mode=mode,
                     allow_input_downcast=allow_input_downcast,
-                    accept_inplace=True)
+                    accept_inplace=True,
+                    on_unused_input=on_unused_input)
 
 def eval_outputs(outputs):
@@ -3551,22 +3553,20 @@ class T_divimpl(unittest.TestCase):
         f = fscalar()
         c = cscalar()
-        assert numpy.allclose(function([i, ii, d, f, c], i/d)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
+        assert numpy.allclose(function([i, d], i/d)(5, 7.0),
                               (5.0/7.0))
-        assert numpy.allclose(function([i, ii, d, f, c], d/i)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
+        assert numpy.allclose(function([i, d], d/i)(5, 7.0),
                               (7.0/5.0))
-        assert numpy.allclose(function([i, ii, d, f, c], i/f)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
+        assert numpy.allclose(function([i, f], i/f)(5, 11.0),
                               (5.0/11.0))
-        assert numpy.allclose(function([i, ii, d, f, c], f/i)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
+        assert numpy.allclose(function([i, f], f/i)(5, 11.0),
                               (11.0/5.0))
-        assert numpy.allclose(function([i, ii, d, f, c], i//ii)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
+        assert numpy.allclose(function([i, ii], i//ii)(5, 3),
                               (5/3))
-        assert numpy.allclose(function([i, ii, d, f, c], ii//i)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
+        assert numpy.allclose(function([i, ii], ii//i)(5, 3),
                               (3/5))
-        assert numpy.allclose(function([i, ii, d, f, c], true_div(i,ii))(5, 3, 7.0, 11.0, numpy.complex(5,3)),
+        assert numpy.allclose(function([i, ii], true_div(i,ii))(5, 3),
                               (5./3.))
-        assert numpy.allclose(function([i, ii, d, f, c], true_div(ii,i))(5, 3, 7.0, 11.0, numpy.complex(5,3)),
+        assert numpy.allclose(function([i, ii], true_div(ii,i))(5, 3),
                               (3./5.))
+        assert numpy.allclose(function([i, c], i/c)(5, numpy.complex(5,3)),
+                              (5./(5+3j)))
+        assert numpy.allclose(function([i, c], c/i)(5, numpy.complex(5,3)),
+                              ((5+3j)/5.))
 
 class T_mean(unittest.TestCase):
theano/tensor/tests/test_blas.py

@@ -399,7 +399,8 @@ def just_gemm(i, o, ishapes = [(4,3), (3,5), (4,5), (), ()], max_graphlen=0):
     f = inplace_func(
             [Param(ii, mutable=True, allow_downcast=True) for ii in i],
             o,
-            mode='FAST_RUN')
+            mode='FAST_RUN',
+            on_unused_input='ignore')
     at_least_one_gemm = False
     for node in f.maker.env.nodes:
         if node.op == T.dot:
@@ -410,7 +411,7 @@ def just_gemm(i, o, ishapes = [(4,3), (3,5), (4,5), (), ()], max_graphlen=0):
             at_least_one_gemm = True
     assert at_least_one_gemm
     g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),
-                     allow_input_downcast=True)
+                     allow_input_downcast=True, on_unused_input='ignore')
     for node in g.maker.env.nodes:
         if node.op == gemm_inplace:
             raise Exception('gemm_inplace in original graph')
@@ -475,11 +476,12 @@ def test_gemm_opt_double_gemm():
              + gemm_inplace(Z, b, S.T, R.T, T.constant(1.0).astype('float64')))]
     try:
         f = inplace_func([Param(ii, mutable=True) for ii in i], o,
-                         mode='FAST_RUN')
+                         mode='FAST_RUN', on_unused_input='ignore')
         for node in f.maker.env.nodes:
             if node.op == T.dot: raise Failure('dot in graph')
             if node.op == _dot22: raise Failure('_dot22 in graph')
-        g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None))
+        g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),
+                         on_unused_input='ignore')
         #for node in g.maker.env.nodes:
         #    if node.op == gemm_inplace: raise Failure('gemm_inplace in graph')
@@ -658,7 +660,7 @@ def test_inplace0():
     X, Y, Z, a, b = T.dmatrix('X'), T.dmatrix('Y'), T.dmatrix('Z'), T.dscalar('a'), T.dscalar('b')
     R, S, c = T.dmatrix('R'), T.dmatrix('S'), T.dscalar('c')
 
-    f = inplace_func([X, Y, Z, a, b, R, S, c],
+    f = inplace_func([Z, b, R, S],
                      [Z * (Z + b * T.dot(R, S).T)], mode='FAST_RUN')
     if (gemm_inplace in [n.op for n in f.maker.env.nodes]):
         print pp(f.maker.env.outputs[0])
@@ -676,7 +678,7 @@ def test_inplace0():
 def test_inplace1():
     X, Y, Z, a, b = XYZab()
     # with > 2 terms in the overall addition
-    f = inplace_func([X, Y, Z, a, b],
+    f = inplace_func([X, Y, Z],
                      [Z + Z + T.dot(X, Y)], mode='FAST_RUN')
     theano.printing.debugprint(f)
     # it doesn't work inplace because we didn't mark Z as mutable input
theano/tensor/tests/test_opt.py

@@ -713,14 +713,14 @@ def test_local_merge_abs():
     mode = theano.compile.mode.get_mode(mode).excluding(
         "local_elemwise_fusion")
-    f = theano.function([x, y, z], (abs(y*z*-2)), mode=mode)
-    f(x_val, y_val, z_val)
+    f = theano.function([y, z], (abs(y*z*-2)), mode=mode)
+    f(y_val, z_val)
     theano.printing.debugprint(f)
     assert isinstance(f.maker.env.toposort()[1].op.scalar_op, scal.Abs)
     assert len(f.maker.env.toposort()) == 2
 
-    f = theano.function([x, y, z], abs(x/y), mode=mode)
-    f(x_val, y_val, z_val)
+    f = theano.function([x, y], abs(x/y), mode=mode)
+    f(x_val, y_val)
     theano.printing.debugprint(f)
     assert isinstance(f.maker.env.toposort()[1].op.scalar_op, scal.Abs)
     assert len(f.maker.env.toposort()) == 2
@@ -2214,8 +2214,7 @@ class test_shapeoptimizer(unittest.TestCase):
         mode = 'FAST_RUN'
         v = T.vector()
-        m = T.matrix()
-        f = function([v, m], v.dimshuffle('x', 'x', 0).shape[1], mode=mode)
+        f = function([v], v.dimshuffle('x', 'x', 0).shape[1], mode=mode)
         topo = f.maker.env.toposort()
         assert len(topo) == 1
         assert topo[0].op == theano.compile.function_module.deep_copy_op
@@ -2371,34 +2370,34 @@ def test_local_mul_specialize():
     v = T.vector()
     m = T.vector()
 
-    f = function([v, m], v*1, mode=mode)
+    f = function([v], v*1, mode=mode)
     nodes = [node.op for node in f.maker.env.toposort()]
     print nodes
     nodes == [theano.compile.function_module.deep_copy_op]
 
-    f = function([v, m], v*0, mode=mode)
+    f = function([v], v*0, mode=mode)
     nodes = [node.op for node in f.maker.env.toposort()]
     print nodes
     assert nodes == [Shape_i(0), T.alloc]
 
-    f = function([v, m], v*(-1), mode=mode)
+    f = function([v], v*(-1), mode=mode)
     nodes = [node.op for node in f.maker.env.toposort()]
     print nodes
     assert nodes == [T.neg]
 
     f = function([v, m], v*1*(-m), mode=mode)
     nodes = [node.op for node in f.maker.env.toposort()]
     print nodes
     theano.printing.debugprint(f)
     assert nodes == [T.mul, inplace.neg_inplace]
 
     f = function([v, m], v*0*(-m), mode=mode)
     nodes = [node.op for node in f.maker.env.toposort()]
     print nodes
     theano.printing.debugprint(f)
     assert nodes == [Shape_i(0), T.alloc]
 
     f = function([v, m], v*(-1)*(-m), mode=mode)
     nodes = [node.op for node in f.maker.env.toposort()]
     print nodes
     theano.printing.debugprint(f)
@@ -3256,7 +3255,8 @@ class T_local_sum_dimshuffle(unittest.TestCase):
         try:
             for i, s in enumerate(sums):
                 print i
-                f = theano.function([a, b, c, d], s, mode=self.mode)
+                f = theano.function([a, b, c, d], s, mode=self.mode,
+                                    on_unused_input='ignore')
                 theano.printing.debugprint(f)
                 g = f.maker.env.toposort()
                 #print 'g =', g
@@ -3294,7 +3294,7 @@ def test_make_vector():
             ]:
         mv = opt.MakeVector(dtype=dtype)(*inputs)
         assert mv.dtype == dtype
-        f = theano.function([b, i, d], mv)
+        f = theano.function([b, i, d], mv, on_unused_input='ignore')
         f_val = f(val[b], val[i], val[d])
         #print 'f_val =', f_val
theano/tests/test_rop.py

@@ -106,12 +106,12 @@ class RopLop_checker(unittest.TestCase):
         vv = numpy.asarray(self.rng.uniform(size=self.mat_in_shape),
                            theano.config.floatX)
         yv = tensor.Rop(y, self.mx, self.mv)
-        rop_f = function([self.mx, self.mv], yv)
+        rop_f = function([self.mx, self.mv], yv, on_unused_input='ignore')
         sy, _ = theano.scan(lambda i, y, x, v: \
                                 (tensor.grad(y[i], x) * v).sum(),
                             sequences=tensor.arange(y.shape[0]),
                             non_sequences=[y, self.mx, self.mv])
-        scan_f = function([self.mx, self.mv], sy)
+        scan_f = function([self.mx, self.mv], sy, on_unused_input='ignore')
 
         v1 = rop_f(vx, vv)
         v2 = scan_f(vx, vv)
@@ -146,13 +146,13 @@ class RopLop_checker(unittest.TestCase):
                            theano.config.floatX)
         yv = tensor.Rop(y, self.x, self.v)
-        rop_f = function([self.x, self.v], yv)
+        rop_f = function([self.x, self.v], yv, on_unused_input='ignore')
         J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
                            sequences=tensor.arange(y.shape[0]),
                            non_sequences=[y, self.x])
         sy = tensor.dot(J, self.v)
-        scan_f = function([self.x, self.v], sy)
+        scan_f = function([self.x, self.v], sy, on_unused_input='ignore')
 
         v1 = rop_f(vx, vv)
         v2 = scan_f(vx, vv)
@@ -168,7 +168,7 @@ class RopLop_checker(unittest.TestCase):
                            theano.config.floatX)
         yv = tensor.Lop(y, self.x, self.v)
-        lop_f = function([self.x, self.v], yv)
+        lop_f = function([self.x, self.v], yv, on_unused_input='ignore')
         J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
                            sequences=tensor.arange(y.shape[0]),
                            non_sequences=[y, self.x])