Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
30d14420
提交
30d14420
authored
3月 12, 2008
作者:
bergstrj@iro.umontreal.ca
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
moving away from Grad
上级
403d94df
显示空白字符变更
内嵌
并排
正在显示
3 个修改的文件
包含
80 行增加
和
139 行删除
+80
-139
_test_gradient.py
_test_gradient.py
+7
-2
gradient.py
gradient.py
+72
-135
tensor_ops.py
tensor_ops.py
+1
-2
没有找到文件。
_test_gradient.py
浏览文件 @
30d14420
...
@@ -17,7 +17,12 @@ def matrices(n):
...
@@ -17,7 +17,12 @@ def matrices(n):
return
[
matrix
()
for
i
in
xrange
(
n
)]
return
[
matrix
()
for
i
in
xrange
(
n
)]
class
_testCase
(
unittest
.
TestCase
):
class
_testNone
(
unitTest
.
TestCase
):
def
test0
(
self
):
class
_testCase_matinv
:
# (unittest.TestCase):
def
setUp
(
self
):
def
setUp
(
self
):
numpy
.
random
.
seed
(
1
)
numpy
.
random
.
seed
(
1
)
def
matinv
(
self
,
dim
):
def
matinv
(
self
,
dim
):
...
@@ -48,7 +53,7 @@ class _testCase (unittest.TestCase):
...
@@ -48,7 +53,7 @@ class _testCase (unittest.TestCase):
self
.
assertEqual
((
'2.67327580893'
,
'0.000438649434819'
),
self
.
matinv
(
3
))
self
.
assertEqual
((
'2.67327580893'
,
'0.000438649434819'
),
self
.
matinv
(
3
))
class
_testCase_old
:
class
_testCase_old
:
#(unittest.TestCase):
class
posneg
(
T
.
_TensorOp
):
class
posneg
(
T
.
_TensorOp
):
nout
=
2
nout
=
2
...
...
gradient.py
浏览文件 @
30d14420
import
gof
import
gof
class
OrderError
(
Exception
):
def
_unpack_result
(
lst
):
"""Grad has been manipulated in the wrong order"""
if
len
(
lst
)
>
1
:
return
lst
class
Grad
(
object
):
else
"""A dictionary-like class, into which derivative expressions may be added.
return
lst
[
0
]
Attributes:
def
_pack_result
(
arg
):
map - dict: result -> grad(result)
if
gof
.
result
.
is_result
(
arg
):
return
[
arg
]
outputs - list: results from which to backpropagate gradient
return
arg
did_bprop - bool: has bprop been called?
items_got - set: results for which we have returned the gradient
def
grad_sources_inputs
(
sources
,
inputs
):
"""Return a dictionary mapping each result necessary for a source to its gradient
Methods:
sources - a list of gradient sources (explained below)
inputs - a list of results considered to be constant
add() - accumulate a gradient expression
A gradient source is a pair (r, g_r), in which r is a result, and g_r is a
bprop() - recursively construct gradient expressions
result that is a gradient wrt r.
__call__() - retrieve the gradient wrt a given Op or result
__getitem__() - retrieve the gradient wrt a given Op or result
This class operates on graphs of nodes which implement the UpdateGradient interface.
This function traverses the graph backward from the 'r' sources,
calling op.grad(...) when it is provided by an op, and at least one of the
outputs of the op has an associated gradient.
"""
The op.grad(...) functions may be called in several ways (for the
convenience of the op implementer) depending on the number of inputs and
outputs.
def
__init__
(
self
,
dct
=
{}):
If there is one input and one output:
self
.
map
=
{}
op.grad( op.inputs[0], grad(op.outputs[0]))
self
.
outputs
=
[]
self
.
did_bprop
=
False
self
.
items_got
=
set
([])
for
key
,
val
in
dct
.
items
():
self
.
add_output
(
key
,
val
)
def
__contains__
(
self
,
item
)
:
If there are several inputs and one output
:
return
item
in
self
.
map
op.grad( op.inputs, grad(op.outputs[0]))
def
__getitem__
(
self
,
r
)
:
If there is one input and several outputs
:
"""Return the gradient wrt result r
op.grad( op.inputs[0], [grad(o) for o in op.outputs[0]])
r is also added to the set of things for which the gradient has been
If there are multiple inputs and outputs:
given. Subsequent attempts to modify the gradient wrt r will fail
op.grad( op.inputs, [grad(o) for o in op.outputs[0]])
with exception FixedGradientError.
"""
self
.
items_got
.
add
(
r
)
try
:
return
self
.
map
[
r
]
except
KeyError
:
return
None
def
__call__
(
self
,
r
):
"""Return the gradient wrt result r"""
return
self
.
__getitem__
(
r
)
def
add_output
(
self
,
r
,
dr
):
self
.
add
(
r
,
dr
)
self
.
outputs
.
append
(
r
)
def
add
(
self
,
r
,
dr
):
"""Add dr to the sum of gradients associated with r."""
if
r
in
self
.
items_got
:
raise
OrderError
(
'gradient has already been retrieved'
,
r
)
if
r
in
self
.
map
:
self
.
map
[
r
]
=
self
.
map
[
r
]
+
dr
else
:
self
.
map
[
r
]
=
dr
def
bprop
(
self
):
This function expects the op.grad(...) function to return the gradient
"""Build a backpropagation graph.
expression [results] associated with the inputs of the op. If the op has a
single input, it should return a single result; if the op has multiple
inputs, it should return a list of results corresponding to the gradients in
the same order as the inputs.
This function traverses the graph backward from self.outputs, calling
For each input wrt to which an op is not differentiable, it should return
update_gradient on the ops as it goes. Ops without an update_gradient
None instead of a result instance.
function are considered not differentiable. The update_gradient
function is defined in the UpdateGradient class.
maybe_redo
"""
"""
if
self
.
did_bprop
:
raise
OrderError
(
'bprop has already been done'
)
try
:
outputs
=
self
.
outputs
inputs
=
gof
.
graph
.
inputs
(
outputs
)
for
op
in
gof
.
graph
.
io_toposort
(
inputs
,
outputs
)
.
__reversed__
():
op
.
update_gradient
(
self
)
finally
:
self
.
did_bprop
=
True
def
grad
(
cost
,
param
=
None
,
cost_grad
=
1.0
):
gmap
=
{}
"""Return symbolic expression of gradient of <cost> wrt <param>.
for
(
r
,
g_r
)
in
self
.
sources
:
if
r
in
gmap
:
If <param> is None, then return a Grad instance, from which the gradients of
gmap
[
r
]
=
gmap
[
r
]
+
dr
multiple objects can be retrieved using the __getitem__ or __call__ methods
(as in function currying in languages such as scheme and OCaML).
If <param> is not None, then return the gradient expression for
d cost / d param.
"""
rval
=
Grad
({
cost
:
cost_grad
})
rval
.
bprop
()
if
param
is
None
:
return
rval
else
:
else
:
return
rval
(
param
)
gmap
[
r
]
=
dr
class
UpdateGradient
:
"""This class defines the interface that Grad.bprop expects of each
differentiable Op"""
def
update_gradient
(
self
,
grad_d
):
"""Override this function to call grad_d.add(r,grad_r) for each
differentiable input result, r.
You can assume that the gradient with respect to all output results
outputs
=
gmap
.
keys
()
has been accumulated in grad_d. These expressions are available by
calling grad_d[o] for o in self.outputs. If grad_d[o] returns None,
then this function should assume that grad_d[o] is an appropriate sort
of zero.
"""
if
inputs
is
None
:
raise
AbstractFunctionError
()
inputs
=
gof
.
graph
.
inputs
(
outputs
)
class
SelfGrad
(
UpdateGradient
):
"""This class implements update_gradient in terms of the popular self.grad
This class defines update_gradient (necessary for Grad.bprop) to call a
self.grad function like this:
if len(self.outputs) > 1:
self.grad(self.inputs, [grad_d[o] for o in self.outputs])
else
self.grad(self.inputs, grad_d[output[0]])
self.grad() is an Abstract function, see its documentation for the
expected behaviour.
"""
def
update_gradient
(
self
,
grad_d
):
#Call self.grad(inputs, output_gradients) and add the result to grad_d
if
len
(
self
.
outputs
)
>
1
:
inputgs
=
self
.
grad
(
self
.
inputs
,
[
grad_d
[
o
]
for
o
in
self
.
outputs
])
else
:
inputgs
=
self
.
grad
(
self
.
inputs
,
grad_d
[
self
.
outputs
[
0
]])
if
len
(
self
.
inputs
)
==
1
and
is_result
(
inputgs
):
for
op
in
gof
.
graph
.
io_toposort
(
inputs
,
outputs
)
.
__reversed__
():
inputgs
=
[
inputgs
]
g_outputs
=
[
gmap
[
o
]
for
o
in
self
.
outputs
]
if
all
(
map
(
lambda
x
:
x
is
None
,
g_outputs
)):
continue
output_arg
=
unpack_singleton
(
g_outputs
)
input_arg
=
unpack_singleton
(
op
.
inputs
)
op_grad
=
op
.
grad
(
input_arg
,
output_arg
)
if
op_grad
is
None
:
raise
Exception
(
'If you really mean for grad(...) to return None,
please return [None]'
,
op
.
__class__
)
g_inputs
=
pack_singleton
(
op_grad
)
assert
len
(
g_inputs
)
==
len
(
op
.
inputs
)
for
r
,
g_r
in
zip
(
self
.
inputs
,
g_inputs
):
if
g_r
is
not
None
:
if
r
in
gmap
:
gmap
[
r
]
=
gmap
[
r
]
+
g_r
else
:
else
:
assert
len
(
inputgs
)
==
len
(
self
.
inputs
)
gmap
[
r
]
=
g_r
for
input
,
inputgrad
in
zip
(
self
.
inputs
,
inputgs
):
return
gmap
grad_d
.
add
(
input
,
inputgrad
)
def
grad
(
self
,
*
args
):
def
diff
(
cost
,
param
):
"""Return gradient expressions wrt input arguments
"""Return symbolic expression of gradient of <cost> wrt <param>.
If len(self.inputs)==1 : return the input gradient expression
If <param> is a list, then return a list containing the gradient of cost wrt
If len(self.inputs)>=2 : return a list of input gradient expressions
each element of the list.
"""
"""
raise
AbstractFunctionError
()
inputs
=
gof
.
graph
.
inputs
([
cost
])
gmap
=
grad_sources_inputs
([(
cost
,
1.0
)],
inputs
)
if
isinstance
(
param
,
lst
):
return
[
gmap
[
p
]
for
p
in
param
]
else
:
return
gmap
[
param
]
tensor_ops.py
浏览文件 @
30d14420
...
@@ -2,7 +2,6 @@
...
@@ -2,7 +2,6 @@
from
gof
import
Op
,
utils
,
Destroyer
,
Viewer
from
gof
import
Op
,
utils
,
Destroyer
,
Viewer
import
gof.op
import
gof.op
import
gradient
from
tensor
import
*
from
tensor
import
*
...
@@ -24,7 +23,7 @@ def _wrap_as_tensor(x):
...
@@ -24,7 +23,7 @@ def _wrap_as_tensor(x):
# Ops in this file.
# Ops in this file.
# It is not necessary to inherit from TensorOp to make an Op that manipulates
# It is not necessary to inherit from TensorOp to make an Op that manipulates
# Tensors.
# Tensors.
class
TensorOp
(
Op
,
gradient
.
SelfGrad
):
class
TensorOp
(
Op
):
nin
=
-
1
nin
=
-
1
nout
=
1
nout
=
1
...
...
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论