testgroup / pytensor · Commits

Commit 244857d2
Authored Jan 20, 2010 by Olivier Delalleau
Deleting the 'examples' sub-directory, which is deprecated
Parent: 8779c7eb
Showing 3 changed files, with 0 additions and 586 deletions:

    examples/logistic_regression.py              +0  -163
    examples/tests/test_logistic_regression.py   +0  -102
    examples/tests/test_wiki.py                  +0  -321
examples/logistic_regression.py  (deleted, mode 100644 → 0)
import sys
sys.path.insert(0, '..')

import theano
from theano import tensor as T
from theano.tensor import nnet
from theano.compile import module
from theano import printing, pprint
from theano import compile

import numpy as N


class LogisticRegressionN(module.FancyModule):

    class InstanceType(module.FancyModuleInstance):

        def initialize(self, n_in, n_out, seed=None):
            #self.component is the LogisticRegressionTemplate instance that built this guy.
            rng = N.random.RandomState(seed)
            self.w = rng.randn(n_in, n_out)
            self.b = rng.randn(n_out)
            self.lr = 0.01
            self.__hide__ = ['params']

        def __eq__(self, other):
            if (not isinstance(other.component, LogisticRegressionN)
                    and not isinstance(other.component, LogisticRegression2)):
                raise NotImplementedError
            #we compare the member.
            if ((N.abs(self.w - other.w) < 1e-8).all()
                    and (N.abs(self.b - other.b) < 1e-8).all()
                    and self.lr == other.lr):
                return True
            return False

        def __hash__(self):
            raise NotImplementedError

    def __init__(self, x=None, targ=None):
        super(LogisticRegressionN, self).__init__() #boilerplate

        self.x = x if x is not None else T.matrix()
        self.targ = targ if targ is not None else T.lvector()

        self.w = module.Member(T.matrix())   #automatically names
        self.b = module.Member(T.vector())   #automatically names
        self.lr = module.Member(T.dscalar()) #provides an external interface to change it
        #and makes it an implicit input to any Method you build.

        self.params = [self.w, self.b]

        xent, y = nnet.crossentropy_softmax_1hot(T.dot(self.x, self.w) + self.b, self.targ)
        xent = T.sum(xent)

        self.y = y
        self.xent = xent

        gparams = T.grad(xent, self.params)

        self.update = module.Method([self.x, self.targ], xent,
                updates=dict((p, p - self.lr * g) for p, g in zip(self.params, gparams)))
        self.apply = module.Method([self.x],
                T.argmax(T.dot(self.x, self.w) + self.b, axis=1))


class LogisticRegression2(module.FancyModule):

    class InstanceType(module.FancyModuleInstance):

        def initialize(self, n_in, seed=1827):
            #self.component is the LogisticRegressionTemplate instance that built this guy.
            rng = N.random.RandomState(seed)
            self.w = rng.randn(n_in, 1)
            self.b = rng.randn(1)
            self.lr = 0.01
            self.__hide__ = ['params']

        def __eq__(self, other):
            if (not isinstance(other.component, LogisticRegressionN)
                    and not isinstance(other.component, LogisticRegression2)):
                raise NotImplementedError
            #we compare the member.
            if ((N.abs(self.w - other.w) < 1e-8).all()
                    and (N.abs(self.b - other.b) < 1e-8).all()
                    and self.lr == other.lr):
                return True
            return False

        def __hash__(self):
            raise NotImplementedError

    def __init__(self, x=None, targ=None):
        super(LogisticRegression2, self).__init__() #boilerplate

        self.x = x if x is not None else T.matrix()
        self.targ = targ if targ is not None else T.lcol()

        self.w = module.Member(T.dmatrix())  #automatically names
        self.b = module.Member(T.dvector())  #automatically names
        self.lr = module.Member(T.dscalar()) #provides an external interface to change it
        #and makes it an implicit input to any Method you build.

        self.params = [self.w, self.b]

        y = nnet.sigmoid(T.dot(self.x, self.w))
        xent_elem = -self.targ * T.log(y) - (1.0 - self.targ) * T.log(1.0 - y)
        xent = T.sum(xent_elem)

        self.y = y
        self.xent_elem = xent_elem
        self.xent = xent

        gparams = T.grad(xent, self.params)

        self.update = module.Method([self.x, self.targ], xent,
                updates=dict((p, p - self.lr * g) for p, g in zip(self.params, gparams)))
        self.apply = module.Method([self.x],
                T.argmax(T.dot(self.x, self.w) + self.b, axis=1))


def main():
    pprint.assign(nnet.crossentropy_softmax_1hot_with_bias_dx,
                  printing.FunctionPrinter('xsoftmaxdx'))
    pprint.assign(nnet.crossentropy_softmax_argmax_1hot_with_bias,
                  printing.FunctionPrinter('nll', 'softmax', 'argmax'))

    if 1:
        lrc = LogisticRegressionN()

        print '================'
        print lrc.update.pretty()
        print '================'
        print lrc.update.pretty(mode=theano.Mode('py', 'fast_run'))
        print '================'
        # print lrc.update.pretty(mode = compile.FAST_RUN.excluding('inplace'))
        # print '================'
        # sys.exit(0)

        lr = lrc.make(10, 2, mode=theano.Mode('c|py', 'fast_run'))
        #lr = lrc.make(10, 2, mode=compile.FAST_RUN.excluding('fast_run'))
        #lr = lrc.make(10, 2, mode=theano.Mode('py', 'merge')) #'FAST_RUN')

        data_x = N.random.randn(5, 10)
        data_y = (N.random.randn(5) > 0)

        for i in xrange(10000):
            lr.lr = 0.02
            xe = lr.update(data_x, data_y)
            if i % 100 == 0:
                print i, xe

        print
        print 'TRAINED MODEL:'
        print lr

    if 0:
        lrc = LogisticRegression2()

        lr = lrc.make(10, mode=theano.Mode('c|py', 'merge')) #'FAST_RUN')

        data_x = N.random.randn(5, 10)
        data_y = (N.random.randn(5, 1) > 0)

        for i in xrange(10000):
            xe = lr.update(data_x, data_y)
            if i % 100 == 0:
                print i, xe

        print
        print 'TRAINED MODEL:'
        print lr


if __name__ == '__main__':
    main()
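For readers tracking what replaced these examples: the same model can be written directly against theano.function with shared variables. Below is a minimal sketch, assuming a Theano version that provides theano.shared; the variable names and the hand-written negative log-likelihood (standing in for nnet.crossentropy_softmax_1hot) are illustrative, not part of this commit.

    # Sketch only: LogisticRegressionN rewritten without the Module system.
    import numpy as N
    import theano
    import theano.tensor as T

    x = T.matrix('x')
    targ = T.lvector('targ')

    rng = N.random.RandomState(1827)
    w = theano.shared(rng.randn(10, 2), name='w')  # same init as InstanceType.initialize
    b = theano.shared(rng.randn(2), name='b')
    lr = theano.shared(0.02, name='lr')            # the stepsize the training loop sets

    p_y = T.nnet.softmax(T.dot(x, w) + b)
    xent = -T.sum(T.log(p_y)[T.arange(targ.shape[0]), targ])  # summed per-example NLL
    gw, gb = T.grad(xent, [w, b])

    # plays the role of module.Method([x, targ], xent, updates=...)
    update = theano.function([x, targ], xent,
                             updates=[(w, w - lr * gw), (b, b - lr * gb)])
    # plays the role of module.Method([x], T.argmax(...)); named to avoid the builtin
    apply_fn = theano.function([x], T.argmax(T.dot(x, w) + b, axis=1))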
examples/tests/test_logistic_regression.py  (deleted, mode 100644 → 0)
#!/usr/bin/env python
#
# UNIT TEST
#
import unittest
import numpy

from theano import gof
from theano.gradient import *
from theano import gradient
import theano
import sys
from theano import tensor as T
from theano.tensor import nnet
from theano.compile import module
from theano import printing, pprint
from theano import compile

import numpy as N


class test_logistic_regression_example(unittest.TestCase):

    def test_example_main(self):
        """Test that the file execute without trouble"""
        import os
        sys.path.append(os.path.realpath(".."))
        import logistic_regression
        logistic_regression.main()

    def test_example_moduleN(self):
        """Test that the LogisticRegressionN module execute the same with different mode"""
        import os
        sys.path.append(os.path.realpath(".."))
        import logistic_regression

        pprint.assign(nnet.crossentropy_softmax_1hot_with_bias_dx,
                      printing.FunctionPrinter('xsoftmaxdx'))
        pprint.assign(nnet.crossentropy_softmax_argmax_1hot_with_bias,
                      printing.FunctionPrinter('nll', 'softmax', 'argmax'))

        lrc = logistic_regression.LogisticRegressionN()
        lr0 = lrc.make(10, 2, seed=1827)
        lr1 = lrc.make(10, 2, mode=theano.Mode('c|py', 'fast_run'), seed=1827)
        lr2 = lrc.make(10, 2, mode=theano.Mode('py', 'fast_run'), seed=1827)
        lr3 = lrc.make(10, 2, mode=theano.Mode('py', 'merge'), seed=1827) #'FAST_RUN')
        lr4 = lrc.make(10, 2, mode=compile.FAST_RUN.excluding('fast_run'), seed=1827) #FAST_RUN, FAST_COMPILE,

        data_x = N.random.randn(5, 10)
        data_y = (N.random.randn(5) > 0)

        def train(lr):
            for i in xrange(1000):
                lr.lr = 0.02
                xe = lr.update(data_x, data_y)

        train(lr0)
        train(lr1)
        train(lr2)
        train(lr3)
        train(lr4)

        assert lr0 == lr1
        assert lr0 == lr2
        assert lr0 == lr3
        assert lr0 == lr4

    def test_example_module2(self):
        """Test that the LogisticRegression2 module execute the same with different mode"""
        import os
        sys.path.append(os.path.realpath(".."))
        import logistic_regression

        lrc = logistic_regression.LogisticRegression2()
        #TODO: test 2==N
        lr0 = lrc.make(10, 1827)
        lr1 = lrc.make(10, mode=theano.Mode('c|py', 'fast_run'), seed=1827)
        lr2 = lrc.make(10, mode=theano.Mode('py', 'fast_run'), seed=1827)
        lr3 = lrc.make(10, mode=theano.Mode('py', 'merge'), seed=1827) #'FAST_RU
        lr4 = lrc.make(10, mode=compile.FAST_RUN.excluding('fast_run'), seed=1827) #FAST_RUN, FAST_COMPILE,

        data_x = N.random.randn(5, 10)
        data_y = (N.random.randn(5) > 0)
        data_y = data_y.reshape((data_y.shape[0], 1)) #need to be a column

        def train(lr):
            for i in xrange(1000):
                lr.lr = 0.02
                xe = lr.update(data_x, data_y)

        train(lr0)
        train(lr1)
        train(lr2)
        train(lr3)
        train(lr4)

        assert lr0 == lr1
        assert lr0 == lr2
        assert lr0 == lr3
        assert lr0 == lr4
        # self.fail("NotImplementedError")


if __name__ == '__main__':
    from theano.tests import main
    main(__file__)
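The tests above all follow one pattern: build a graph once, compile it under several modes ('c|py' vs 'py' linkers, different optimizers), train each compiled instance, and assert the results agree. A minimal sketch of that mode-equivalence check with plain theano.function, using a stand-in graph rather than the deleted module (illustrative only; the mode strings are the ones used in the test):

    # Sketch only: checking that two compilation modes agree on one graph.
    import numpy as N
    import theano
    import theano.tensor as T

    v = T.dvector('v')
    expr = T.exp(v) / T.sum(T.exp(v))  # a hand-written softmax as a stand-in graph

    f_c  = theano.function([v], expr, mode=theano.Mode('c|py', 'fast_run'))
    f_py = theano.function([v], expr, mode=theano.Mode('py', 'fast_run'))

    data = N.random.randn(5)
    assert N.allclose(f_c(data), f_py(data))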
examples/tests/test_wiki.py  (deleted, mode 100644 → 0)
import unittest

import theano
import numpy as N
from theano import tensor as T
from theano.tensor import nnet as NN
from theano.compile import module as M


class Blah(M.ModuleInstance):
    # self.component #refer the Module
    # def __init__(self, input = None, target = None, regularize = True):
    #     super(Blah, self)

    def initialize(self, input_size=None, target_size=None, seed=1827, **init):
        if input_size and target_size:
            # initialize w and b in a special way using input_size and target_size
            sz = (input_size, target_size)
            rng = N.random.RandomState(seed)
            self.w = rng.uniform(size=sz, low=-0.5, high=0.5)
            self.b = N.zeros(target_size)
            self.stepsize = 0.01
        #we call default_initialize after as we want the parameter to superseed the default value.
        M.default_initialize(self, **init) #equivalent to previous line.

    def __eq__(self, other):
        if (not isinstance(other.component, SoftmaxXERegression1)
                and not isinstance(other.component, SoftmaxXERegression2)):
            raise NotImplementedError
        #we compare the member.
        if ((self.w == other.w).all() and (self.b == other.b).all()
                and self.stepsize == other.stepsize):
            return True
        return False

    def __hash__(self):
        raise NotImplementedError

    def fit(self, train, test):
        pass


class RegressionLayer1(M.Module):
    InstanceType = Blah

    def __init__(self, input=None, target=None, regularize=True):
        super(RegressionLayer1, self).__init__() #boilerplate
        # MODEL CONFIGURATION
        self.regularize = regularize
        # ACQUIRE/MAKE INPUT AND TARGET
        if not input:
            input = T.matrix('input')
        if not target:
            target = T.matrix('target')
        # HYPER-PARAMETERS
        self.stepsize = M.Member(T.scalar()) # a stepsize for gradient descent
        # PARAMETERS
        self.w = M.Member(T.matrix()) #the linear transform to apply to our input points
        self.b = M.Member(T.vector()) #a vector of biases, which make our transform affine instead of linear
        # REGRESSION MODEL
        self.activation = T.dot(input, self.w) + self.b
        self.prediction = self.build_prediction()
        # CLASSIFICATION COST
        self.classification_cost = self.build_classification_cost(target)
        # REGULARIZATION COST
        self.regularization = self.build_regularization()
        # TOTAL COST
        self.cost = self.classification_cost
        if self.regularize:
            self.cost = self.cost + self.regularization
        # GET THE GRADIENTS NECESSARY TO FIT OUR PARAMETERS
        self.grad_w, self.grad_b = T.grad(self.cost, [self.w, self.b])
        # INTERFACE METHODS
        self.update = M.Method([input, target], self.cost,
                               w=self.w - self.stepsize * self.grad_w,
                               b=self.b - self.stepsize * self.grad_b)
        self.apply = M.Method(input, self.prediction)

    def params(self):
        return self.w, self.b

    def build_regularization(self):
        return T.zero() # no regularization!


class RegressionLayer2(M.Module):

    def __init__(self, input=None, target=None, regularize=True):
        super(RegressionLayer2, self).__init__() #boilerplate
        # MODEL CONFIGURATION
        self.regularize = regularize
        # ACQUIRE/MAKE INPUT AND TARGET
        if not input:
            input = T.matrix('input')
        if not target:
            target = T.matrix('target')
        # HYPER-PARAMETERS
        self.stepsize = M.Member(T.scalar()) # a stepsize for gradient descent
        # PARAMETERS
        self.w = M.Member(T.matrix()) #the linear transform to apply to our input points
        self.b = M.Member(T.vector()) #a vector of biases, which make our transform affine instead of linear
        # REGRESSION MODEL
        self.activation = T.dot(input, self.w) + self.b
        self.prediction = self.build_prediction()
        # CLASSIFICATION COST
        self.classification_cost = self.build_classification_cost(target)
        # REGULARIZATION COST
        self.regularization = self.build_regularization()
        # TOTAL COST
        self.cost = self.classification_cost
        if self.regularize:
            self.cost = self.cost + self.regularization
        # GET THE GRADIENTS NECESSARY TO FIT OUR PARAMETERS
        self.grad_w, self.grad_b = T.grad(self.cost, [self.w, self.b])
        # INTERFACE METHODS
        self.update = M.Method([input, target], self.cost,
                               w=self.w - self.stepsize * self.grad_w,
                               b=self.b - self.stepsize * self.grad_b)
        self.apply = M.Method(input, self.prediction)

    def params(self):
        return self.w, self.b

    def _instance_initialize(self, obj, input_size=None, target_size=None,
                             seed=1827, **init):
        # obj is an "instance" of this module holding values for each member and
        # functions for each method
        if input_size and target_size:
            # initialize w and b in a special way using input_size and target_size
            sz = (input_size, target_size)
            rng = N.random.RandomState(seed)
            obj.w = rng.uniform(size=sz, low=-0.5, high=0.5)
            obj.b = N.zeros(target_size)
            obj.stepsize = 0.01
        # here we call the default_initialize method, which takes all the name: value
        # pairs in init and sets the property with that name to the provided value
        # this covers setting stepsize, l2_coef; w and b can be set that way too
        # we call it after as we want the parameter to superseed the default value.
        M.default_initialize(obj, **init)

    def build_regularization(self):
        return T.zero() # no regularization!


class SoftmaxXERegression1(RegressionLayer1):
    """ XE mean cross entropy"""

    def build_prediction(self):
        return NN.softmax(self.activation)

    def build_classification_cost(self, target):
        #self.classification_cost_matrix = target * T.log(self.prediction) + (1 - target) * T.log(1 - self.prediction)
        self.classification_cost_matrix = (target - self.prediction) ** 2
        self.classification_costs = -T.sum(self.classification_cost_matrix, axis=1)
        return T.sum(self.classification_costs)

    def build_regularization(self):
        self.l2_coef = M.Member(T.scalar()) # we can add a hyper parameter if we need to
        return self.l2_coef * T.sum(self.w * self.w)


class SoftmaxXERegression2(RegressionLayer2):
    """ XE mean cross entropy"""

    def build_prediction(self):
        return NN.softmax(self.activation)

    def build_classification_cost(self, target):
        #self.classification_cost_matrix = target * T.log(self.prediction) + (1 - target) * T.log(1 - self.prediction)
        self.classification_cost_matrix = (target - self.prediction) ** 2
        self.classification_costs = -T.sum(self.classification_cost_matrix, axis=1)
        return T.sum(self.classification_costs)

    def build_regularization(self):
        self.l2_coef = M.Member(T.scalar()) # we can add a hyper parameter if we need to
        return self.l2_coef * T.sum(self.w * self.w)


class T_test_wiki_module(unittest.TestCase):

    def test_Module_basic_example1(self):
        n, c = T.scalars('nc')
        inc = theano.function([n, ((c, c + n), 0)], [])
        dec = theano.function([n, ((c, c - n), inc.container[c])], [])
        # we need to pass inc's container in order to share
        plus10 = theano.function([(c, inc.container[c])], c + 10)

        assert inc[c] == 0
        inc(2)
        assert inc[c] == 2 and dec[c] == inc[c]
        dec(3)
        assert inc[c] == -1 and dec[c] == inc[c]
        assert plus10() == 9

    def test_Module_basic_example2(self):
        m = M.Module()
        n = T.scalar('n')
        m.c = M.Member(T.scalar()) # state variables must be wrapped with ModuleMember
        m.inc = M.Method(n, [], c=m.c + n) # m.c <= m.c + n
        m.dec = M.Method(n, [], c=m.c - n) # k.c <= k.c - n
        m.dec = M.Method(n, [], updates={m.c: m.c - n})
        #m.dec = M.Method(n, [], updates = {c: m.c - n})#global c don't exist
        #m.dec = M.Method(n, [], m.c = m.c - n) #python don't suppor this syntax
        m.plus10 = M.Method([], m.c + 10) # m.c is always accessible since it is a member of this mlass

        inst = m.make(c=0) # here, we make an "instance" of the module with c initialized to 0
        assert inst.c == 0
        inst.inc(2)
        assert inst.c == 2
        inst.dec(3)
        assert inst.c == -1
        assert inst.plus10() == 9

        inst = m.make(c=5) # here, we make an "instance" of the module with c initialized to 0
        assert inst.c == 5
        inst.inc(2)
        assert inst.c == 7
        inst.dec(3)
        assert inst.c == 4
        assert inst.plus10() == 14

    def test_Module_nesting_example1(self):
        def make_incdec_function():
            n, c = T.scalars('nc')
            inc = theano.function([n, ((c, c + n), 0)], [])
            dec = theano.function([n, ((c, c - n), inc.container[c])], [])
            return inc, dec

        inc1, dec1 = make_incdec_function()
        inc2, dec2 = make_incdec_function()

        a, b = T.scalars('ab')
        sum = theano.function([(a, inc1.container['c']),
                               (b, inc2.container['c'])], a + b)
        inc1(2)
        dec1(4)
        inc2(6)
        assert inc1['c'] == -2 and inc2['c'] == 6
        assert sum() == 4 # -2 + 6

    def test_Module_nesting_example2(self):
        def make_incdec_module():
            m = M.Module()
            n = T.scalar('n')
            m.c = M.Member(T.scalar()) # state variables must be wrapped with ModuleMember
            m.inc = M.Method(n, [], c=m.c + n) # m.c <= m.c + n
            m.dec = M.Method(n, [], c=m.c - n) # k.c <= k.c - n
            return m

        m = M.Module()
        m.incdec1 = make_incdec_module()
        m.incdec2 = make_incdec_module()
        m.sum = M.Method([], m.incdec1.c + m.incdec2.c)

        inst = m.make(incdec1=dict(c=0), incdec2=dict(c=0))
        assert inst.incdec1.c == 0 and inst.incdec2.c == 0
        inst.incdec1.inc(2)
        inst.incdec1.dec(4)
        inst.incdec2.inc(6)
        assert inst.incdec1.c == -2 and inst.incdec2.c == 6
        assert inst.sum() == 4 # -2 + 6

    def test_Module_Advanced_example(self):
        data_x = N.random.randn(4, 10)
        data_y = [[int(x)] for x in N.random.randn(4) > 0]

        def test(model):
            model = model.make(input_size=10, target_size=1, stepsize=0.1)
            print model.stepsize
            self.failUnless(model.w.shape == (10, 1) and model.b.shape == (1,))
            assert model.stepsize == 0.1
            for i in xrange(1000):
                xe = model.update(data_x, data_y)
                if i % 100 == 0:
                    print i, xe
                    pass
            #for inputs, targets in my_training_set():
            #print "cost:", model.update(inputs, targets)
            print "final weights:", model.w
            print "final biases:", model.b
            #Print "some prediction:", model.prediction(some_inputs)
            return model

        m1 = test(SoftmaxXERegression1(regularize=False))
        m2 = test(SoftmaxXERegression2(regularize=False))
        print "m1", m1
        print "m2", m2
        print m2 == m1
        print m1 == m2
        assert m2 == m1 and m1 == m2

    def test_Module_extending_module_methods(self):
        model_module = SoftmaxXERegression1(regularize=False)
        model_module.sum = M.Member(T.scalar()) # we add a module member to hold the sum
        model_module.update.updates.update(sum=model_module.sum + model_module.cost)
        # now update will also update sum!
        model = model_module.make(input_size=4, target_size=2, stepsize=0.1, sum=0)
        # we mustn't forget to initialize the sum
        print model.stepsize
        self.failUnless(model.w.shape == (4, 2) and model.b.shape == (2,))
        assert model.stepsize == 0.1
        test = model.update([[0, 0, 1, 0]], [[0, 1]])
        test += model.update([[0, 1, 0, 0]], [[1, 0]])
        assert model.sum == test

    def test_Module_basic_example2_more(self):
        m = M.Module()
        m2 = M.Module()
        m2.name = "m2" # for better error
        #top level don't have name, but other have auto name.
        n = T.scalar('n')
        m.c = M.Member(T.scalar())  # state variables must be wrapped with ModuleMember
        m2.c = M.Member(T.scalar()) # state variables must be wrapped with ModuleMember
        m.dec = M.Method(n, [], c=m.c - n)
        m.inc = M.Method(n, [], c=m.c + n) # m.c <= m.c + n
        # m.inc = M.Method(n, [], c = c + n)#fail c not defined
        #syntax error
        # m.inc = M.Method(n, [], m.c = m.c + n)#fail
        m.inc = M.Method(n, [], updates={m.c: m.c + n})
        # m.inc = M.Method(n, [], updates={c: m.c + n})#fail with NameError
        # m.inc = M.Method(n, [], updates={m.c: c + n})#fail with NameError
        # m.inc = M.Method(n, [], updates={c: c + n})#fail with NameError
        m.inc = M.Method(n, [], updates={m.c: m2.c + n}) #work! should be allowed?
        a = M.Module()
        a.m1 = m
        a.m2 = m2
        a.make() #should work.
        # self.assertRaises(m.make(c = 0), Error)
        m.inc = M.Method(n, [], updates={m2.c: m.c + n}) #work! should be allowed?
        # self.assertRaises(m.make(c = 0), Error)
        # m.inc = M.Method(n, [], updates={m.c: m2.c + m.c+ n})#work! should be allowed?
        m2.inc = M.Method(n, [], updates={m2.c: m2.c + 2 * m.c + n}) #work! should be allowed?
        # self.assertRaises(m.make(c = 0), Error)


if __name__ == '__main__':
    from theano.tests import main
    main("test_wiki")
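The counter examples in T_test_wiki_module keep state in value containers shared across compiled functions. For comparison, a minimal sketch of the same inc/dec/plus10 counter written with theano.shared, assuming a Theano version that provides it; this is illustrative, not part of this commit:

    # Sketch only: shared state without containers or the Module system.
    import theano
    import theano.tensor as T

    n = T.dscalar('n')
    c = theano.shared(0.0, name='c')  # shared state, like m.c / inc.container[c] above

    inc = theano.function([n], [], updates=[(c, c + n)])
    dec = theano.function([n], [], updates=[(c, c - n)])
    plus10 = theano.function([], c + 10)

    inc(2)
    dec(3)
    assert c.get_value() == -1.0  # same arithmetic as the original test
    assert plus10() == 9.0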