testgroup / pytensor · Commits · 8e3aaba6

Commit 8e3aaba6
Authored April 21, 2022 by Larry Dong
Committed by Brandon T. Willard, April 22, 2022
Removed DiffOp class and tests
Parent: 9e547465
Showing 4 changed files with 0 additions and 132 deletions (+0 −132):

    aesara/link/jax/dispatch.py               +0  −12
    aesara/link/numba/dispatch/extra_ops.py   +0  −32
    aesara/tensor/extra_ops.py                +0  −60
    tests/tensor/test_extra_ops.py            +0  −28
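For reference, the removed `DiffOp` computed the n-th order discrete difference that `numpy.diff` implements (its `perform` method calls `np.diff` directly). A minimal, NumPy-only sketch of that operation, independent of Aesara:

```python
import numpy as np

x = np.array([1.0, 4.0, 9.0, 16.0, 25.0])

# First-order difference: out[i] = x[i + 1] - x[i]
assert np.allclose(np.diff(x, n=1), x[1:] - x[:-1])

# Higher orders repeat the first-order difference.
res = x
for _ in range(2):
    res = res[1:] - res[:-1]
assert np.allclose(np.diff(x, n=2), res)
```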
aesara/link/jax/dispatch.py @ 8e3aaba6

@@ -39,7 +39,6 @@ from aesara.tensor.extra_ops import (
     Bartlett,
     BroadcastTo,
     CumOp,
-    DiffOp,
     FillDiagonal,
     FillDiagonalOffset,
     RavelMultiIndex,
@@ -938,17 +937,6 @@ def jax_funcify_CumOp(op, **kwargs):
     return cumop


-@jax_funcify.register(DiffOp)
-def jax_funcify_DiffOp(op, **kwargs):
-    n = op.n
-    axis = op.axis
-
-    def diffop(x, n=n, axis=axis):
-        return jnp.diff(x, n=n, axis=axis)
-
-    return diffop
-
-
 @jax_funcify.register(Repeat)
 def jax_funcify_Repeat(op, **kwargs):
     axis = op.axis
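The removed JAX dispatcher only closed over `op.n` and `op.axis` and forwarded to `jnp.diff`, so its output matched `numpy.diff` for the same arguments. A hedged, standalone check of that equivalence (assumes JAX is installed; the array values are illustrative):

```python
import numpy as np
import jax.numpy as jnp

x = np.arange(12.0).reshape(3, 4) ** 2

for n, axis in [(1, -1), (2, 0), (1, 1)]:
    jax_out = jnp.diff(jnp.asarray(x), n=n, axis=axis)
    np_out = np.diff(x, n=n, axis=axis)
    assert np.allclose(np.asarray(jax_out), np_out)
```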
aesara/link/numba/dispatch/extra_ops.py @ 8e3aaba6

@@ -3,7 +3,6 @@ import warnings
 import numba
 import numpy as np
 from numba.misc.special import literal_unroll
-from numpy.core.multiarray import normalize_axis_index

 from aesara import config
 from aesara.link.numba.dispatch import basic as numba_basic
@@ -12,7 +11,6 @@ from aesara.tensor.extra_ops import (
     Bartlett,
     BroadcastTo,
     CumOp,
-    DiffOp,
     FillDiagonal,
     FillDiagonalOffset,
     RavelMultiIndex,
@@ -67,36 +65,6 @@ def numba_funcify_CumOp(op, node, **kwargs):
     return cumop


-@numba_funcify.register(DiffOp)
-def numba_funcify_DiffOp(op, node, **kwargs):
-    n = op.n
-    axis = op.axis
-    ndim = node.inputs[0].ndim
-    dtype = node.outputs[0].dtype
-
-    axis = normalize_axis_index(axis, ndim)
-
-    slice1 = [slice(None)] * ndim
-    slice2 = [slice(None)] * ndim
-    slice1[axis] = slice(1, None)
-    slice2[axis] = slice(None, -1)
-    slice1 = tuple(slice1)
-    slice2 = tuple(slice2)
-
-    op = np.not_equal if dtype == "bool" else np.subtract
-
-    @numba_basic.numba_njit(boundscheck=False, fastmath=config.numba__fastmath)
-    def diffop(x):
-        res = x.copy()
-
-        for _ in range(n):
-            res = op(res[slice1], res[slice2])
-
-        return res
-
-    return diffop
-
-
 @numba_funcify.register(FillDiagonal)
 def numba_funcify_FillDiagonal(op, **kwargs):
     @numba_basic.numba_njit
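The removed Numba implementation builds two slice tuples that differ only along `axis` and applies `np.subtract` (or `np.not_equal` for boolean inputs) `n` times. A NumPy-only sketch of that slicing trick, using a hypothetical helper name `diff_via_slices`, checked against `np.diff`:

```python
import numpy as np

def diff_via_slices(x, n=1, axis=-1):
    # The removed code used normalize_axis_index; plain modulo is enough here.
    axis = axis % x.ndim

    slice1 = [slice(None)] * x.ndim
    slice2 = [slice(None)] * x.ndim
    slice1[axis] = slice(1, None)   # drop the first element along `axis`
    slice2[axis] = slice(None, -1)  # drop the last element along `axis`
    slice1, slice2 = tuple(slice1), tuple(slice2)

    # Booleans are differenced via inequality, everything else by subtraction.
    op = np.not_equal if x.dtype == np.bool_ else np.subtract

    res = x.copy()
    for _ in range(n):
        res = op(res[slice1], res[slice2])
    return res

x = np.random.default_rng(0).normal(size=(4, 5))
assert np.allclose(diff_via_slices(x, n=2, axis=1), np.diff(x, n=2, axis=1))
```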
aesara/tensor/extra_ops.py @ 8e3aaba6

@@ -469,66 +469,6 @@ class CumprodOp(Op):
     return obj


-class DiffOp(Op):
-    # See function diff for docstring
-
-    __props__ = ("n", "axis")
-
-    def __init__(self, n=1, axis=-1):
-        self.n = n
-        self.axis = axis
-        # numpy return a view in that case.
-        # TODO, make an optimization that remove this op in this case.
-        if n == 0:
-            self.view_map = {0: [0]}
-
-    def make_node(self, x):
-        x = at.as_tensor_variable(x)
-
-        axis = normalize_axis_index(self.axis, x.ndim)
-
-        shape = [None] * x.type.ndim
-        for i, shape_i in enumerate(x.type.shape):
-            if shape_i is None:
-                continue
-
-            if i == axis:
-                shape[i] = max(0, shape_i - self.n)
-            else:
-                shape[i] = shape_i
-
-        out_type = TensorType(dtype=x.type.dtype, shape=shape)
-
-        return Apply(self, [x], [out_type()])
-
-    def perform(self, node, inputs, output_storage):
-        x = inputs[0]
-        z = output_storage[0]
-        z[0] = np.diff(x, n=self.n, axis=self.axis)
-
-    def grad(self, inputs, outputs_gradients):
-        inputs = inputs[0]
-
-        if inputs.ndim != 1:
-            raise NotImplementedError(
-                "Grad is not implemented for inputs with"
-                "number of dimension other than 1."
-            )
-
-        z = outputs_gradients[0]
-
-        def _grad_helper(z):
-            pre = at.concatenate([[0.0], z])
-            app = at.concatenate([z, [0.0]])
-            return pre - app
-
-        # FIXME: This fails when n is larger than the input size
-        for k in range(self.n):
-            z = _grad_helper(z)
-
-        return [z]
-
-    def infer_shape(self, fgraph, node, ins_shapes):
-        i0_shapes = ins_shapes[0]
-        out_shape = list(i0_shapes)
-        out_shape[self.axis] = at_max((0, out_shape[self.axis] - self.n))
-        return [out_shape]
-
-
 def diff(x, n=1, axis=-1):
     """Calculate the `n`-th order discrete difference along the given `axis`.
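The removed `DiffOp.grad` relies on the identity that, for a 1-D input, the vector-Jacobian product of a first difference is `concatenate([[0], z]) - concatenate([z, [0]])` (the `_grad_helper` above), applied once per difference order. A NumPy-only numerical check of that identity for a single first difference; the variable names are illustrative:

```python
import numpy as np

m = 6
z = np.random.default_rng(1).normal(size=m - 1)  # output gradient

# Jacobian of y = diff(x) for x of length m: y[i] = x[i + 1] - x[i]
J = np.zeros((m - 1, m))
for i in range(m - 1):
    J[i, i] = -1.0
    J[i, i + 1] = 1.0

vjp_explicit = J.T @ z
vjp_concat = np.concatenate([[0.0], z]) - np.concatenate([z, [0.0]])
assert np.allclose(vjp_explicit, vjp_concat)
```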
tests/tensor/test_extra_ops.py @ 8e3aaba6

@@ -18,7 +18,6 @@ from aesara.tensor.extra_ops import (
     BroadcastTo,
     CpuContiguous,
     CumOp,
-    DiffOp,
     FillDiagonal,
     FillDiagonalOffset,
     RavelMultiIndex,
@@ -334,33 +333,6 @@ class TestDiffOp(utt.InferShapeTester):
             else:
                 assert out.type.shape[i] == out_test.shape[i]

-    def test_infer_shape(self):
-        x = matrix("x")
-        a = np.random.random((30, 50)).astype(config.floatX)
-
-        # Test default n and axis
-        self._compile_and_check([x], [DiffOp()(x)], [a], DiffOp)
-
-        for axis in (-2, -1, 0, 1):
-            for n in (0, 1, 2, a.shape[0], a.shape[0] + 1):
-                self._compile_and_check([x], [diff(x, n=n, axis=axis)], [a], DiffOp)
-
-    def test_grad(self):
-        a = np.random.random(50).astype(config.floatX)
-
-        # Test default n and axis
-        utt.verify_grad(DiffOp(), [a])
-
-        for n in (0, 1, 2, a.shape[0]):
-            utt.verify_grad(DiffOp(n=n), [a], eps=7e-3)
-
-    @pytest.mark.xfail(reason="gradient is wrong when n is larger than input size")
-    def test_grad_n_larger_than_input(self):
-        # Gradient is wrong when n is larger than the input size. Until it is fixed,
-        # this test ensures the behavior is documented
-        a = np.random.random(10).astype(config.floatX)
-        utt.verify_grad(DiffOp(n=11), [a], eps=7e-3)
-
     def test_grad_not_implemented(self):
         x = at.matrix("x")

         with pytest.raises(NotImplementedError):
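The removed `test_infer_shape` looped `n` up to `a.shape[0] + 1` because the output length along `axis` is `max(0, length - n)`, which is the static shape the removed `make_node` and `infer_shape` computed. A NumPy-only illustration of that shape rule (a restatement of `np.diff` behavior, not Aesara code):

```python
import numpy as np

a = np.random.random((30, 50))

for axis in (-2, -1, 0, 1):
    for n in (0, 1, 2, a.shape[0], a.shape[0] + 1):
        out = np.diff(a, n=n, axis=axis)
        ax = axis % a.ndim                      # normalize negative axes
        expected = list(a.shape)
        expected[ax] = max(0, a.shape[ax] - n)  # length shrinks by n, floored at 0
        assert out.shape == tuple(expected)
```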