Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
1f244aa4
提交
1f244aa4
authored
5月 24, 2012
作者:
Nicolas Bouchard
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Two extra op from numpy.
上级
ac42ce74
隐藏空白字符变更
内嵌
并排
正在显示
2 个修改的文件
包含
232 行增加
和
0 行删除
+232
-0
BinCountOp.py
theano/extra_ops/BinCountOp.py
+117
-0
DiffOp.py
theano/extra_ops/DiffOp.py
+115
-0
没有找到文件。
theano/extra_ops/BinCountOp.py
0 → 100644
浏览文件 @
1f244aa4
import
theano
import
numpy
as
np
from
theano
import
tensor
as
T
from
theano.tests
import
unittest_tools
as
utt
class BinCountOp(theano.Op):
    """Count number of occurrences of each value in array of non-negative ints.

    The number of bins (of size 1) is one larger than the largest
    value in x. If minlength is specified, there will be at least
    this number of bins in the output array (though it will be longer
    if necessary, depending on the contents of x). Each bin gives the
    number of occurrences of its index value in x. If weights is
    specified the input array is weighted by it, i.e. if a value n
    is found at position i, out[n] += weight[i] instead of out[n] += 1.

    Wrapping of numpy.bincount.

    Parameter:
    x -- 1 dimension, nonnegative ints

    Keyword arguments:
    weights -- Weights, array of the same shape as x.
    minlength -- A minimum number of bins for the output array.
    """

    # Integer dtypes accepted as input (numpy.bincount requires ints).
    compatible_type = ('int8', 'int16', 'int32', 'int64',
                       'uint8', 'uint16', 'uint32', 'uint64')

    def __init__(self, weights=None, minlength=None):
        self.weights = weights
        self.minlength = minlength

    def __eq__(self, other):
        return (type(self) == type(other) and
                self.weights == other.weights and
                self.minlength == other.minlength)

    def __hash__(self):
        # weights may be an (unhashable) array, so fold its elements
        # into the hash one by one instead of hashing it directly.
        h = 0
        if self.weights is not None:
            for w in self.weights:
                h = h ^ hash(w)
        return hash(type(self)) ^ h ^ hash(self.minlength)

    def make_node(self, x):
        """Build the Apply node; x must be a 1-d integer vector.

        NOTE(review): the output is given x's own (integer) type, but
        numpy.bincount returns a float array when weights are floats —
        confirm this dtype combination is actually supported by callers.
        """
        x = T.as_tensor_variable(x)
        if x.dtype not in BinCountOp.compatible_type:
            raise TypeError("Inputs must be integers.")
        if x.ndim != 1:
            raise TypeError("Inputs must be of dimension 1.")
        return theano.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        # Defensive re-check at run time, mirroring make_node.
        x = inputs[0]
        if x.dtype not in BinCountOp.compatible_type:
            raise TypeError("Inputs must be integers.")
        if x.ndim != 1:
            raise TypeError("Input must be of dimension 1.")
        z = output_storage[0]
        z[0] = np.bincount(x, self.weights, self.minlength)

    def grad(self, inputs, outputs_gradients):
        # bincount is not differentiable with respect to its input.
        return [None for i in inputs]  # Non differentiable

    def infer_shape(self, node, ins_shapes):
        # Output length is max(x) + 1, raised to minlength if given.
        inputs = node.inputs[0]
        m = T.max(inputs) + 1
        if self.minlength is not None:
            m = T.max(T.stack(m, self.minlength))
        return [[m]]

    def __str__(self):
        return self.__class__.__name__
def bincount(x, weights=None, minlength=None):
    """Apply a BinCountOp to *x*; see BinCountOp for the full contract."""
    op = BinCountOp(weights=weights, minlength=minlength)
    return op(x)
class TestBinCountOp(utt.InferShapeTester):
    """Unit tests for BinCountOp: values against numpy and shape inference."""

    def setUp(self):
        super(TestBinCountOp, self).setUp()
        self.op_class = BinCountOp
        self.op = BinCountOp()

    def test_bincountOp(self):
        x = T.lvector('x')
        a = np.random.random_integers(50, size=(25))
        w = np.random.random((25,))

        f1 = theano.function([x], bincount(x))
        f2 = theano.function([x], bincount(x, weights=w))
        f3 = theano.function([x], bincount(x, minlength=23))

        # BUG FIX: the original asserted on the bound method `.all`
        # (always truthy) instead of calling `.all()`, so these checks
        # could never fail.
        assert (np.bincount(a) == f1(a)).all()
        # Weighted bincount returns floats, so compare with allclose.
        assert np.allclose(np.bincount(a, weights=w), f2(a))
        assert (np.bincount(a, minlength=23) == f3(a)).all()

    def test_infer_shape(self):
        x = T.lvector('x')
        self._compile_and_check([x],
                                [self.op(x)],
                                [np.random.random_integers(50, size=(25,))],
                                self.op_class)

        w = np.random.random((25,))
        self._compile_and_check([x],
                                [bincount(x, weights=w)],
                                [np.random.random_integers(50, size=(25,))],
                                self.op_class)

        # minlength larger than max(x) + 1 exercises the T.stack branch.
        self._compile_and_check([x],
                                [bincount(x, minlength=60)],
                                [np.random.random_integers(50, size=(25,))],
                                self.op_class)
theano/extra_ops/DiffOp.py
0 → 100644
浏览文件 @
1f244aa4
# TODO implement grad for higher dimension
import
theano
import
numpy
as
np
from
theano
import
tensor
as
T
from
theano.tests
import
unittest_tools
as
utt
class DiffOp(theano.Op):
    """Calculate the n-th order discrete difference along the given axis.

    The first order difference is given by out[i] = a[i + 1] - a[i]
    along the given axis; higher order differences are obtained by
    applying diff recursively. Wrapping of numpy.diff for vectors.

    Parameter:
    x -- Input vector.

    Keyword arguments:
    n -- The number of times values are differenced, default is 1.
    """

    def __init__(self, n=1):
        self.n = n
        # self.axis = axis

    def __eq__(self, other):
        # Two DiffOps are interchangeable iff they difference the
        # same number of times.  (self.axis == other.axis)
        return type(self) == type(other) and self.n == other.n

    def __hash__(self):
        # ^ hash(self.axis)
        return hash(type(self)) ^ hash(self.n)

    def make_node(self, x):
        x = T.as_tensor_variable(x)
        return theano.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        out = output_storage[0]
        out[0] = np.diff(x, self.n)  # axis

    def grad(self, inputs, outputs_gradients):
        g = outputs_gradients[0]

        def backward_step(g):
            # Adjoint of one first-order diff: pad the incoming
            # gradient with a zero on each side and subtract.
            left = T.concatenate([[0.], g])   # Prepend 0
            right = T.concatenate([g, [0.]])  # Append 0
            return left - right

        # Apply the adjoint once per differencing pass.
        for _ in range(self.n):  # Apply grad recursively
            g = backward_step(g)
        return [g]

    def infer_shape(self, node, ins_shapes):
        # Each application of diff shortens the vector by one element.
        shape = list(ins_shapes[0])
        shape[0] = shape[0] - self.n  # Axis
        return [shape]

    def __str__(self):
        return self.__class__.__name__
def diff(x, n=1):
    """Apply an n-th order DiffOp to the vector *x*."""
    # Axis
    op = DiffOp(n=n)
    return op(x)
class TestDiffOp(utt.InferShapeTester):
    """Unit tests for DiffOp: values, shape inference, and gradients."""

    # Number of orders of n exercised by each test below.
    nb = 10

    def setUp(self):
        super(TestDiffOp, self).setUp()
        self.op_class = DiffOp
        self.op = DiffOp()

    def test_diffOp(self):
        x = T.dvector('x')
        data = np.random.random(500)

        fn = theano.function([x], diff(x))
        assert np.allclose(np.diff(data), fn(data))

        # Test n
        for order in range(TestDiffOp.nb):
            fn_n = theano.function([x], diff(x, n=order))
            assert np.allclose(np.diff(data, n=order), fn_n(data))

    def test_infer_shape(self):
        x = T.dvector('x')
        self._compile_and_check([x],
                                [self.op(x)],
                                [np.random.random(500)],
                                self.op_class)

        for order in range(TestDiffOp.nb):
            self._compile_and_check([x],
                                    [DiffOp(n=order)(x)],
                                    [np.random.random(500)],
                                    self.op_class)

    def test_grad(self):
        x = T.vector('x')
        data = np.random.random(500)

        # Compiling the gradient graph is itself part of the check.
        gf = theano.function([x], T.grad(T.sum(diff(x)), x))
        utt.verify_grad(self.op, [data])

        # Test n
        for order in range(TestDiffOp.nb):
            dg = theano.function([x], T.grad(T.sum(diff(x, n=order)), x))
            utt.verify_grad(DiffOp(n=order), [data])
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论