testgroup / pytensor · Commits

Commit 034a5cbd
Authored Nov 18, 2008 by Frederic Bastien

    Automated merge with ssh://projects@lgcm.iro.umontreal.ca/hg/theano

Parents: 5a1fd228, b75f20ff

Showing 1 changed file with 122 additions and 38 deletions:

examples/tests/test_wiki.py  (+122, -38)
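The substantive change below: the wiki example's RegressionLayer / SoftmaxXERegression classes are split into two variants (RegressionLayer1 with an explicit ModuleInstance subclass, and RegressionLayer2), and both now draw their initial weights from a freshly constructed N.random.RandomState with a shared default seed of 1827 instead of the unseeded global N.random.uniform. That seeding is what makes the test's assert m1 == m2 plausible: two independently built modules start from identical weights and then see the same training data. A quick illustration of the seeding behaviour (illustrative snippet, not part of the commit):

    import numpy as N

    # Two independently constructed generators with the same seed
    # produce identical draws.
    a = N.random.RandomState(1827).uniform(size=(3, 2), low=-0.5, high=0.5)
    b = N.random.RandomState(1827).uniform(size=(3, 2), low=-0.5, high=0.5)
    assert (a == b).all()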
import unittest
from theano import gof
from theano import compile
from theano.compile.function_module import *
from theano.scalar import *
import theano
from theano import tensor
import numpy as N
from theano import tensor as T
from theano.tensor import nnet as NN
import random
import numpy as N
from theano.compile import module as M

-class RegressionLayer(M.Module):
+class Blah(M.ModuleInstance):
    # self.component #refer the Module
    # def __init__(self, input = None, target = None, regularize = True):
    #     super(Blah, self)
    def initialize(self, input_size=None, target_size=None, seed=1827, **init):
        if input_size and target_size:
            # initialize w and b in a special way using input_size and target_size
            sz = (input_size, target_size)
            rng = N.random.RandomState(seed)
            self.w = rng.uniform(size=sz, low=-0.5, high=0.5)
            self.b = N.zeros(target_size)
        self.stepsize = 0.01

    def __eq__(self, other):
        if not isinstance(other.component, SoftmaxXERegression1) and not isinstance(other.component, SoftmaxXERegression2):
            raise NotImplemented
        # we compare the member.
        if (self.w == other.w).all() and (self.b == other.b).all() and self.stepsize == other.stepsize:
            return True
        return False

    def __hash__(self):
        raise NotImplemented

    def fit(self, train, test):
        pass

class RegressionLayer1(M.Module):
    InstanceType = Blah

    def __init__(self, input=None, target=None, regularize=True):
        super(RegressionLayer1, self).__init__() #boilerplate
        # MODEL CONFIGURATION
        self.regularize = regularize
        # ACQUIRE/MAKE INPUT AND TARGET
        if not input:
            input = T.matrix('input')
        if not target:
            target = T.matrix('target')
        # HYPER-PARAMETERS
        self.stepsize = M.Member(T.scalar())  # a stepsize for gradient descent
        # PARAMETERS
        self.w = M.Member(T.matrix())  # the linear transform to apply to our input points
        self.b = M.Member(T.vector())  # a vector of biases, which make our transform affine instead of linear
        # REGRESSION MODEL
        self.activation = T.dot(input, self.w) + self.b
        self.prediction = self.build_prediction()
        # CLASSIFICATION COST
        self.classification_cost = self.build_classification_cost(target)
        # REGULARIZATION COST
        self.regularization = self.build_regularization()
        # TOTAL COST
        self.cost = self.classification_cost
        if self.regularize:
            self.cost = self.cost + self.regularization
        # GET THE GRADIENTS NECESSARY TO FIT OUR PARAMETERS
        self.grad_w, self.grad_b = T.grad(self.cost, [self.w, self.b])
        # INTERFACE METHODS
        self.update = M.Method([input, target], self.cost, w=self.w - self.stepsize * self.grad_w, b=self.b - self.stepsize * self.grad_b)
        self.apply = M.Method(input, self.prediction)

    def params(self):
        return self.w, self.b

    def build_regularization(self):
        return T.zero()  # no regularization!

class RegressionLayer2(M.Module):
    def __init__(self, input=None, target=None, regularize=True):
-       super(RegressionLayer, self).__init__() #boilerplate
+       super(RegressionLayer2, self).__init__() #boilerplate
        # MODEL CONFIGURATION
        self.regularize = regularize
        # ACQUIRE/MAKE INPUT AND TARGET
...
@@ -48,7 +109,8 @@ class RegressionLayer(M.Module):
        self.apply = M.Method(input, self.prediction)

    def params(self):
        return self.w, self.b

-   def _instance_initialize(self, obj, input_size=None, target_size=None, **init):
+   def _instance_initialize(self, obj, input_size=None, target_size=None, seed=1827, **init):
        # obj is an "instance" of this module holding values for each member and
        # functions for each method
        #super(RegressionLayer, self).initialize(obj, **init)
...
@@ -59,14 +121,28 @@ class RegressionLayer(M.Module):
        if input_size and target_size:
            # initialize w and b in a special way using input_size and target_size
            sz = (input_size, target_size)
-           obj.w = N.random.uniform(size=sz, low=-0.5, high=0.5)
+           rng = N.random.RandomState(seed)
+           obj.w = rng.uniform(size=sz, low=-0.5, high=0.5)
            obj.b = N.zeros(target_size)
        obj.stepsize = 0.01

    def build_regularization(self):
        return T.zero()  # no regularization!

class SoftmaxXERegression1(RegressionLayer1):
    """ XE mean cross entropy"""
    def build_prediction(self):
        return NN.softmax(self.activation)

    def build_classification_cost(self, target):
        #self.classification_cost_matrix = target * T.log(self.prediction) + (1 - target) * T.log(1 - self.prediction)
        self.classification_cost_matrix = (target - self.prediction)**2
        self.classification_costs = -T.sum(self.classification_cost_matrix, axis=1)
        return T.sum(self.classification_costs)

    def build_regularization(self):
        self.l2_coef = M.Member(T.scalar())  # we can add a hyper parameter if we need to
        return self.l2_coef * T.sum(self.w * self.w)

-class SoftmaxXERegression(RegressionLayer):
+class SoftmaxXERegression2(RegressionLayer2):
    """ XE mean cross entropy"""
    def build_prediction(self):
        return NN.softmax(self.activation)
...
@@ -147,42 +223,49 @@ class T_function_module(unittest.TestCase):
        assert inst.sum() == 4  # -2 + 6

    def test_Klass_Advanced_example(self):
-       model_module = SoftmaxXERegression(regularize=False)
-       model = model_module.make(input_size=10, target_size=1, stepsize=0.1)
        data_x = N.random.randn(4, 10)
        data_y = [[int(x)] for x in N.random.randn(4) > 0]
-       print data_x
-       print
-       print data_y
-       for i in xrange(1000):
-           xe = model.update(data_x, data_y)
-           if i % 100 == 0:
-               print i, xe
-       #for inputs, targets in my_training_set():
-       #print "cost:", model.update(inputs, targets)
-       print "final weights:", model.w
-       print "final biases:", model.b
-       #print "some prediction:", model.prediction(some_inputs)
+       # print data_x
+       # print
+       # print data_y
+       def test(model):
+           model = model.make(input_size=10, target_size=1, stepsize=0.1)
+           for i in xrange(1000):
+               xe = model.update(data_x, data_y)
+               if i % 100 == 0:
+                   print i, xe
+                   pass
+           #for inputs, targets in my_training_set():
+           #print "cost:", model.update(inputs, targets)
+           print "final weights:", model.w
+           print "final biases:", model.b
+           #Print "some prediction:", model.prediction(some_inputs)
+           return model
+       m1 = test(SoftmaxXERegression1(regularize=False))
+       m2 = test(SoftmaxXERegression2(regularize=False))
+       print "m1", m1
+       print "m2", m2
+       print m2 == m1
+       print m1 == m2
+       assert m2 == m1 and m1 == m2

    def test_Klass_extending_klass_methods(self):
-       model_module = SoftmaxXERegression(regularize=False)
+       model_module = SoftmaxXERegression1(regularize=False)
        model_module.sum = M.Member(T.scalar())  # we add a module member to hold the sum
-       model_module.update.extend(sum=model_module.sum + model_module.cost)  # now update will also update sum!
+       model_module.update.updates.update(sum=model_module.sum + model_module.cost)  # now update will also update sum!
        model = model_module.make(input_size=4, target_size=2, stepsize=0.1, sum=0)  # we mustn't forget to initialize the sum
-       test = model.update([[0, 0, 1, 0]], [[0, 1]]) + model.update([[0, 1, 0, 0]], [[1, 0]])
+       test = model.update([[0, 0, 1, 0]], [[0, 1]])
+       test += model.update([[0, 1, 0, 0]], [[1, 0]])
        assert model.sum == test
...
@@ -231,7 +314,8 @@ class T_function_module(unittest.TestCase):
        # self.assertRaises(m.make(c = 0), Error)
        m.inc = M.Method(n, [], updates={m2.c: m.c + n})  #work! should be allowed?
        # self.assertRaises(m.make(c = 0), Error)
        # m.inc = M.Method(n, [], updates={m2.c: m2.c + n})#work! should be allowed?
        # m.inc = M.Method(n, [], updates={m.c: m2.c + m.c+ n})#work! should be allowed?
+       m2.inc = M.Method(n, [], updates={m2.c: m2.c + 2*m.c + n})  #work! should be allowed?
        # self.assertRaises(m.make(c = 0), Error)
...
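For readers without the old theano.compile.module API, the following is a minimal standalone NumPy sketch (modern Python) of the model the test exercises: a softmax over an affine transform, a squared-error cost, and the w <- w - stepsize * grad_w update that RegressionLayer1 wires into its update Method. Note that the committed test negates the squared error before summing it; this sketch uses the plain squared error so that descent actually decreases the cost. All names below are illustrative and nothing here is part of the repository.

    import numpy as np

    def softmax(z):
        # row-wise softmax with the usual max-subtraction for stability
        e = np.exp(z - z.max(axis=1, keepdims=True))
        return e / e.sum(axis=1, keepdims=True)

    def update(x, t, w, b, stepsize):
        # forward pass: affine transform, then softmax (cf. build_prediction)
        p = softmax(x.dot(w) + b)
        cost = ((t - p) ** 2).sum()
        # backward pass: gradient of the squared-error cost through the softmax
        g = -2.0 * (t - p)                                  # d cost / d p
        dz = p * (g - (g * p).sum(axis=1, keepdims=True))   # d cost / d z
        w = w - stepsize * x.T.dot(dz)                      # w <- w - stepsize * grad_w
        b = b - stepsize * dz.sum(axis=0)                   # b <- b - stepsize * grad_b
        return cost, w, b

    rng = np.random.RandomState(1827)                       # same seed as the test
    w = rng.uniform(size=(10, 2), low=-0.5, high=0.5)
    b = np.zeros(2)
    data_x = rng.randn(4, 10)
    data_y = np.eye(2)[(rng.randn(4) > 0).astype(int)]      # one-hot targets
    for i in range(1000):
        xe, w, b = update(data_x, data_y, w, b, 0.1)
        if i % 100 == 0:
            print(i, xe)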