Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
8a4505c0
提交
8a4505c0
authored
3月 02, 2022
作者:
Brandon T. Willard
提交者:
Brandon T. Willard
3月 02, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Move C-specific content from aesara.graph.op to aesara.link.c.op
上级
4eea9f79
隐藏空白字符变更
内嵌
并排
正在显示
45 个修改的文件
包含
885 行增加
和
843 行删除
+885
-843
debugmode.py
aesara/compile/debugmode.py
+2
-1
ops.py
aesara/compile/ops.py
+2
-1
basic_ops.py
aesara/gpuarray/basic_ops.py
+2
-1
blas.py
aesara/gpuarray/blas.py
+1
-1
blocksparse.py
aesara/gpuarray/blocksparse.py
+1
-1
ctc.py
aesara/gpuarray/ctc.py
+1
-1
dnn.py
aesara/gpuarray/dnn.py
+1
-1
linalg.py
aesara/gpuarray/linalg.py
+2
-1
reduction.py
aesara/gpuarray/reduction.py
+1
-1
subtensor.py
aesara/gpuarray/subtensor.py
+2
-1
op.py
aesara/graph/op.py
+11
-669
op.py
aesara/link/c/op.py
+667
-0
raise_op.py
aesara/raise_op.py
+1
-1
multinomial.py
aesara/sandbox/multinomial.py
+1
-1
rng_mrg.py
aesara/sandbox/rng_mrg.py
+1
-1
basic.py
aesara/scalar/basic.py
+1
-1
basic.py
aesara/sparse/basic.py
+2
-1
opt.py
aesara/sparse/opt.py
+1
-1
basic.py
aesara/tensor/basic.py
+2
-1
blas.py
aesara/tensor/blas.py
+2
-1
blas_c.py
aesara/tensor/blas_c.py
+1
-1
elemwise.py
aesara/tensor/elemwise.py
+1
-1
extra_ops.py
aesara/tensor/extra_ops.py
+2
-1
math.py
aesara/tensor/math.py
+2
-1
basic.py
aesara/tensor/nnet/basic.py
+2
-1
conv.py
aesara/tensor/nnet/conv.py
+1
-1
corr.py
aesara/tensor/nnet/corr.py
+2
-1
corr3d.py
aesara/tensor/nnet/corr3d.py
+2
-1
ctc.py
aesara/tensor/nnet/ctc.py
+1
-1
neighbours.py
aesara/tensor/nnet/neighbours.py
+1
-1
shape.py
aesara/tensor/shape.py
+1
-1
pool.py
aesara/tensor/signal/pool.py
+1
-1
subtensor.py
aesara/tensor/subtensor.py
+2
-1
basic.py
aesara/typed_list/basic.py
+2
-1
creating_a_c_op.rst
doc/extending/creating_a_c_op.rst
+2
-2
other_ops.rst
doc/extending/other_ops.rst
+1
-1
using_params.rst
doc/extending/using_params.rst
+1
-1
setup.cfg
setup.cfg
+4
-0
test_debugmode.py
tests/compile/test_debugmode.py
+2
-1
test_compute_test_value.py
tests/graph/test_compute_test_value.py
+2
-1
test_op.py
tests/graph/test_op.py
+2
-133
test_params_type.py
tests/graph/test_params_type.py
+1
-1
test_types.py
tests/graph/test_types.py
+1
-1
test_basic.py
tests/link/c/test_basic.py
+1
-1
test_op.py
tests/link/c/test_op.py
+143
-0
没有找到文件。
aesara/compile/debugmode.py
浏览文件 @
8a4505c0
...
...
@@ -32,9 +32,10 @@ from aesara.configdefaults import config
from
aesara.graph.basic
import
Variable
,
io_toposort
from
aesara.graph.destroyhandler
import
DestroyHandler
from
aesara.graph.features
import
BadOptimization
from
aesara.graph.op
import
COp
,
HasInnerGraph
,
Op
from
aesara.graph.op
import
HasInnerGraph
,
Op
from
aesara.graph.utils
import
InconsistencyError
,
MethodNotDefined
from
aesara.link.basic
import
Container
,
LocalLinker
from
aesara.link.c.op
import
COp
from
aesara.link.utils
import
map_storage
,
raise_with_op
from
aesara.printing
import
_debugprint
from
aesara.utils
import
NoDuplicateOptWarningFilter
,
difference
,
get_unbound_function
...
...
aesara/compile/ops.py
浏览文件 @
8a4505c0
...
...
@@ -11,8 +11,9 @@ import warnings
from
typing
import
Dict
,
Tuple
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.type
import
CType
from
aesara.link.c.op
import
COp
def
register_view_op_c_code
(
type
,
code
,
version
=
()):
...
...
aesara/gpuarray/basic_ops.py
浏览文件 @
8a4505c0
...
...
@@ -11,12 +11,13 @@ import aesara.tensor as at
from
aesara.configdefaults
import
config
from
aesara.gradient
import
grad_undefined
from
aesara.graph.basic
import
Apply
,
Variable
from
aesara.graph.op
import
COp
,
ExternalCOp
,
Op
,
_NoPythonOp
from
aesara.graph.op
import
Op
,
_NoPythonOp
from
aesara.graph.opt
import
copy_stack_trace
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
CType
from
aesara.graph.utils
import
MethodNotDefined
from
aesara.link.c.interface
import
HideC
from
aesara.link.c.op
import
COp
,
ExternalCOp
from
aesara.scalar
import
bool
as
bool_t
from
aesara.scalar
import
int32
as
int32_t
from
aesara.tensor.basic
import
Alloc
,
AllocEmpty
,
Join
,
Split
,
infer_broadcastable
...
...
aesara/gpuarray/blas.py
浏览文件 @
8a4505c0
...
...
@@ -10,9 +10,9 @@ from aesara.gpuarray.basic_ops import (
)
from
aesara.gpuarray.opt_util
import
inplace_allocempty
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
_NoPythonCOp
from
aesara.graph.opt
import
LocalOptGroup
,
in2out
from
aesara.graph.params_type
import
ParamsType
from
aesara.link.c.op
import
_NoPythonCOp
from
aesara.scalar
import
bool
as
bool_t
from
aesara.tensor.basic
import
as_tensor_variable
...
...
aesara/gpuarray/blocksparse.py
浏览文件 @
8a4505c0
...
...
@@ -11,8 +11,8 @@ from aesara.gpuarray.basic_ops import (
from
aesara.gpuarray.type
import
gpu_context_type
from
aesara.gradient
import
grad_undefined
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
_NoPythonExternalCOp
from
aesara.graph.params_type
import
ParamsType
from
aesara.link.c.op
import
_NoPythonExternalCOp
from
aesara.scalar
import
bool
as
bool_t
from
aesara.tensor
import
as_tensor_variable
from
aesara.tensor.type
import
discrete_dtypes
...
...
aesara/gpuarray/ctc.py
浏览文件 @
8a4505c0
...
...
@@ -13,8 +13,8 @@ from aesara.gpuarray.elemwise import GpuDimShuffle
from
aesara.gpuarray.type
import
GpuArrayType
,
gpu_context_type
from
aesara.gradient
import
grad_undefined
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
_NoPythonExternalCOp
from
aesara.graph.opt
import
local_optimizer
from
aesara.link.c.op
import
_NoPythonExternalCOp
from
aesara.tensor.basic
import
as_tensor_variable
from
aesara.tensor.basic_opt
import
register_canonicalize
from
aesara.tensor.blas
import
batched_dot
...
...
aesara/gpuarray/dnn.py
浏览文件 @
8a4505c0
...
...
@@ -28,10 +28,10 @@ from aesara.gpuarray.basic_ops import (
from
aesara.gpuarray.type
import
GpuArraySharedVariable
,
get_context
,
gpu_context_type
from
aesara.gradient
import
DisconnectedType
,
grad_not_implemented
from
aesara.graph.basic
import
Apply
,
Variable
from
aesara.graph.op
import
ExternalCOp
,
_NoPythonCOp
,
_NoPythonExternalCOp
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
CDataType
,
EnumList
,
Generic
from
aesara.link.c.cmodule
import
GCC_compiler
from
aesara.link.c.op
import
ExternalCOp
,
_NoPythonCOp
,
_NoPythonExternalCOp
from
aesara.raise_op
import
Assert
from
aesara.scalar
import
as_scalar
from
aesara.scalar
import
bool
as
bool_t
...
...
aesara/gpuarray/linalg.py
浏览文件 @
8a4505c0
...
...
@@ -14,8 +14,9 @@ from aesara.gpuarray.basic_ops import (
)
from
aesara.gpuarray.type
import
GpuArrayType
,
gpu_context_type
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
ExternalCOp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.params_type
import
ParamsType
from
aesara.link.c.op
import
ExternalCOp
from
aesara.scalar
import
bool
as
bool_t
from
aesara.tensor
import
basic
as
at
from
aesara.tensor
import
math
as
tm
...
...
aesara/gpuarray/reduction.py
浏览文件 @
8a4505c0
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
COp
from
aesara.graph.type
import
Generic
from
aesara.link.c.op
import
COp
from
.basic_ops
import
as_gpuarray_variable
,
gpuarray_helper_inc_dir
,
infer_context_name
from
.type
import
GpuArrayType
...
...
aesara/gpuarray/subtensor.py
浏览文件 @
8a4505c0
...
...
@@ -5,10 +5,11 @@ import numpy as np
import
aesara.tensor
as
at
from
aesara.gradient
import
grad_not_implemented
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
CType
from
aesara.link.c.interface
import
HideC
from
aesara.link.c.op
import
COp
from
aesara.scalar
import
bool
as
bool_t
from
aesara.scalar
import
int32
as
int_t
from
aesara.scalar
import
uint32
as
size_t
...
...
aesara/graph/op.py
浏览文件 @
8a4505c0
"""
Defines base classes `Op` and `CLinkerOp`.
The `Op` class is the base interface for all operations
compatible with `graph`'s :doc:`graph` routines.
"""
import
copy
import
inspect
import
os
import
re
import
sys
import
warnings
from
abc
import
abstractmethod
...
...
@@ -16,20 +6,14 @@ from typing import (
TYPE_CHECKING
,
Any
,
Callable
,
ClassVar
,
Collection
,
Dict
,
List
,
Optional
,
Pattern
,
Set
,
Text
,
Tuple
,
Union
,
)
import
numpy
as
np
import
aesara
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
,
NoParams
,
Variable
...
...
@@ -41,7 +25,6 @@ from aesara.graph.utils import (
add_tag_trace
,
get_variable_trace_string
,
)
from
aesara.link.c.interface
import
CLinkerOp
if
TYPE_CHECKING
:
...
...
@@ -591,6 +574,17 @@ class Op(MetaObject):
return
getattr
(
type
(
self
),
"__name__"
,
super
()
.
__str__
())
class
_NoPythonOp
(
Op
):
"""A class used to indicate that an `Op` does not provide a Python implementation.
XXX: Do not use this class; it's only for tracking bad implementations internally.
"""
def
perform
(
self
,
node
,
inputs
,
output_storage
,
params
=
None
):
raise
NotImplementedError
(
"No Python implementation is provided by this Op."
)
class
HasInnerGraph
:
r"""A mixin for an `Op` that contain an inner graph."""
...
...
@@ -610,100 +604,6 @@ class HasInnerGraph:
"""The inner function's outputs."""
class
COp
(
Op
,
CLinkerOp
):
"""An `Op` with a C implementation."""
def
make_c_thunk
(
self
,
node
:
Apply
,
storage_map
:
StorageMapType
,
compute_map
:
ComputeMapType
,
no_recycling
:
Collection
[
Apply
],
)
->
ThunkType
:
"""Create a thunk for a C implementation.
Like :meth:`Op.make_thunk`, but will only try to make a C thunk.
"""
# FIXME: Putting the following import on the module level causes an import cycle.
# The conclusion should be that the antire "make_c_thunk" method should be defined
# in aesara.link.c and dispatched onto the Op!
import
aesara.link.c.basic
from
aesara.graph.fg
import
FunctionGraph
node_input_storage
=
[
storage_map
[
r
]
for
r
in
node
.
inputs
]
node_output_storage
=
[
storage_map
[
r
]
for
r
in
node
.
outputs
]
e
=
FunctionGraph
(
node
.
inputs
,
node
.
outputs
)
e_no_recycling
=
[
new_o
for
(
new_o
,
old_o
)
in
zip
(
e
.
outputs
,
node
.
outputs
)
if
old_o
in
no_recycling
]
cl
=
aesara
.
link
.
c
.
basic
.
CLinker
()
.
accept
(
e
,
no_recycling
=
e_no_recycling
)
# float16 gets special treatment since running
# unprepared C code will get bad results.
if
not
getattr
(
self
,
"_f16_ok"
,
False
):
def
is_f16
(
t
):
return
getattr
(
t
,
"dtype"
,
""
)
==
"float16"
if
any
(
is_f16
(
i
.
type
)
for
i
in
node
.
inputs
)
or
any
(
is_f16
(
o
.
type
)
for
o
in
node
.
outputs
):
# get_dynamic_module is a subset of make_thunk that is reused.
# This just try to build the c code
# It will raise an error for ops
# that don't implement c code. In those cases, we
# don't want to print a warning.
cl
.
get_dynamic_module
()
print
(
f
"Disabling C code for {self} due to unsupported float16"
)
raise
NotImplementedError
(
"float16"
)
outputs
=
cl
.
make_thunk
(
input_storage
=
node_input_storage
,
output_storage
=
node_output_storage
)
thunk
,
node_input_filters
,
node_output_filters
=
outputs
def
rval
():
thunk
()
for
o
in
node
.
outputs
:
compute_map
[
o
][
0
]
=
True
rval
.
thunk
=
thunk
rval
.
cthunk
=
thunk
.
cthunk
rval
.
inputs
=
node_input_storage
rval
.
outputs
=
node_output_storage
rval
.
lazy
=
False
return
rval
def
make_thunk
(
self
,
node
,
storage_map
,
compute_map
,
no_recycling
,
impl
=
None
):
"""Create a thunk.
See :meth:`Op.make_thunk`.
Parameters
----------
impl :
Currently, ``None``, ``'c'`` or ``'py'``. If ``'c'`` or ``'py'`` we
will only try that version of the code.
"""
if
(
impl
is
None
and
config
.
cxx
)
or
impl
==
"c"
:
self
.
prepare_node
(
node
,
storage_map
=
storage_map
,
compute_map
=
compute_map
,
impl
=
"c"
)
try
:
return
self
.
make_c_thunk
(
node
,
storage_map
,
compute_map
,
no_recycling
)
except
(
NotImplementedError
,
MethodNotDefined
):
# We requested the c code, so don't catch the error.
if
impl
==
"c"
:
raise
return
super
()
.
make_thunk
(
node
,
storage_map
,
compute_map
,
no_recycling
,
impl
=
impl
)
def
get_test_value
(
v
:
Variable
)
->
Any
:
"""Get the test value for `v`.
...
...
@@ -802,561 +702,3 @@ def get_test_values(*args: Variable) -> Union[Any, List[Any]]:
return
rval
return
[
tuple
(
rval
)]
class
OpenMPOp
(
COp
):
r"""Base class for `Op`\s using OpenMP.
This `Op` will check that the compiler support correctly OpenMP code.
If not, it will print a warning and disable OpenMP for this `Op`, then it
will generate the not OpenMP code.
This is needed, as EPD on the Windows version of ``g++`` says it supports
OpenMP, but does not include the OpenMP files.
We also add the correct compiler flags in ``c_compile_args``.
"""
gxx_support_openmp
:
Optional
[
bool
]
=
None
"""
``True``/``False`` after we tested this.
"""
def
__init__
(
self
,
openmp
:
Optional
[
bool
]
=
None
):
if
openmp
is
None
:
openmp
=
config
.
openmp
self
.
openmp
=
openmp
def
__setstate__
(
self
,
d
:
Dict
):
self
.
__dict__
.
update
(
d
)
# If we unpickle old op
if
not
hasattr
(
self
,
"openmp"
):
self
.
openmp
=
False
def
c_compile_args
(
self
,
**
kwargs
):
"""Return the compilation argument ``"-fopenmp"`` if OpenMP is supported."""
self
.
update_self_openmp
()
if
self
.
openmp
:
return
[
"-fopenmp"
]
return
[]
def
c_headers
(
self
,
**
kwargs
):
"""Return the header file name ``"omp.h"`` if OpenMP is supported."""
self
.
update_self_openmp
()
if
self
.
openmp
:
return
[
"omp.h"
]
return
[]
@staticmethod
def
test_gxx_support
():
"""Check if OpenMP is supported."""
from
aesara.link.c.cmodule
import
GCC_compiler
code
=
"""
#include <omp.h>
int main( int argc, const char* argv[] )
{
int res[10];
for(int i=0; i < 10; i++){
res[i] = i;
}
}
"""
default_openmp
=
GCC_compiler
.
try_compile_tmp
(
src_code
=
code
,
tmp_prefix
=
"test_omp_"
,
flags
=
[
"-fopenmp"
],
try_run
=
False
)
return
default_openmp
def
update_self_openmp
(
self
)
->
None
:
"""Make sure ``self.openmp`` is not ``True`` if there is no OpenMP support in ``gxx``."""
if
self
.
openmp
:
if
OpenMPOp
.
gxx_support_openmp
is
None
:
OpenMPOp
.
gxx_support_openmp
=
OpenMPOp
.
test_gxx_support
()
if
not
OpenMPOp
.
gxx_support_openmp
:
# We want to warn only once.
warnings
.
warn
(
"Your g++ compiler fails to compile OpenMP code. We"
" know this happen with some version of the EPD mingw"
" compiler and LLVM compiler on Mac OS X."
" We disable openmp everywhere in Aesara."
" To remove this warning set the aesara flags `openmp`"
" to False."
,
stacklevel
=
3
,
)
if
OpenMPOp
.
gxx_support_openmp
is
False
:
self
.
openmp
=
False
config
.
openmp
=
False
def
prepare_node
(
self
,
node
,
storage_map
,
compute_map
,
impl
):
if
impl
==
"c"
:
self
.
update_self_openmp
()
def
lquote_macro
(
txt
:
Text
)
->
Text
:
"""Turn the last line of text into a ``
\\
``-commented line."""
res
=
[]
spl
=
txt
.
split
(
"
\n
"
)
for
l
in
spl
[:
-
1
]:
res
.
append
(
l
+
"
\\
"
)
res
.
append
(
spl
[
-
1
])
return
"
\n
"
.
join
(
res
)
def
get_sub_macros
(
sub
:
Dict
[
Text
,
Text
])
->
Union
[
Tuple
[
Text
],
Tuple
[
Text
,
Text
]]:
define_macros
=
[]
undef_macros
=
[]
define_macros
.
append
(
f
"#define FAIL {lquote_macro(sub['fail'])}"
)
undef_macros
.
append
(
"#undef FAIL"
)
if
"params"
in
sub
:
define_macros
.
append
(
f
"#define PARAMS {sub['params']}"
)
undef_macros
.
append
(
"#undef PARAMS"
)
return
"
\n
"
.
join
(
define_macros
),
"
\n
"
.
join
(
undef_macros
)
def
get_io_macros
(
inputs
:
List
[
Text
],
outputs
:
List
[
Text
]
)
->
Union
[
Tuple
[
List
[
Text
]],
Tuple
[
str
,
str
]]:
define_macros
=
[]
undef_macros
=
[]
for
i
,
inp
in
enumerate
(
inputs
):
define_macros
.
append
(
f
"#define INPUT_{int(i)} {inp}"
)
undef_macros
.
append
(
f
"#undef INPUT_{int(i)}"
)
for
i
,
out
in
enumerate
(
outputs
):
define_macros
.
append
(
f
"#define OUTPUT_{int(i)} {out}"
)
undef_macros
.
append
(
f
"#undef OUTPUT_{int(i)}"
)
return
"
\n
"
.
join
(
define_macros
),
"
\n
"
.
join
(
undef_macros
)
class
ExternalCOp
(
COp
):
"""Class for an `Op` with an external C implementation.
One can inherit from this class, provide its constructor with a path to
an external C source file and the name of a function within it, and define
an `Op` for said function.
"""
section_re
:
ClassVar
[
Pattern
]
=
re
.
compile
(
r"^#section ([a-zA-Z0-9_]+)$"
,
re
.
MULTILINE
)
backward_re
:
ClassVar
[
Pattern
]
=
re
.
compile
(
r"^AESARA_(APPLY|SUPPORT)_CODE_SECTION$"
,
re
.
MULTILINE
)
# This is the set of allowed markers
SECTIONS
:
ClassVar
[
Set
[
Text
]]
=
{
"init_code"
,
"init_code_apply"
,
"init_code_struct"
,
"support_code"
,
"support_code_apply"
,
"support_code_struct"
,
"cleanup_code_struct"
,
"code"
,
"code_cleanup"
,
}
@classmethod
def
get_path
(
cls
,
f
:
Text
)
->
Text
:
"""Convert a path relative to the location of the class file into an absolute path.
Paths that are already absolute are passed through unchanged.
"""
if
not
os
.
path
.
isabs
(
f
):
class_file
=
inspect
.
getfile
(
cls
)
class_dir
=
os
.
path
.
dirname
(
class_file
)
f
=
os
.
path
.
realpath
(
os
.
path
.
join
(
class_dir
,
f
))
return
f
def
__init__
(
self
,
func_files
:
Union
[
Text
,
List
[
Text
]],
func_name
:
Optional
[
Text
]
=
None
):
"""
Sections are loaded from files in order with sections in later
files overriding sections in previous files.
"""
if
not
isinstance
(
func_files
,
list
):
func_files
=
[
func_files
]
self
.
func_name
=
func_name
# Keep the original name. If we reload old pickle, we want to
# find the new path and new version of the file in Aesara.
self
.
func_files
=
func_files
self
.
load_c_code
(
func_files
)
if
len
(
self
.
code_sections
)
==
0
:
raise
ValueError
(
"No sections where defined in C files"
)
if
self
.
func_name
is
not
None
:
if
"op_code"
in
self
.
code_sections
:
# maybe a warning instead (and clearing the key)
raise
ValueError
(
'Cannot have an "op_code" section and '
"specify the func_name"
)
if
"op_code_cleanup"
in
self
.
code_sections
:
# maybe a warning instead (and clearing the key)
raise
ValueError
(
'Cannot have an "op_code_cleanup" section '
"and specify the func_name"
)
def
load_c_code
(
self
,
func_files
:
List
[
Text
])
->
None
:
"""Loads the C code to perform the `Op`."""
func_files
=
[
self
.
get_path
(
f
)
for
f
in
func_files
]
self
.
func_codes
=
[]
for
func_file
in
func_files
:
# U (universal) will convert all new lines format to \n.
with
open
(
func_file
)
as
f
:
self
.
func_codes
.
append
(
f
.
read
())
# If both the old section markers and the new section markers are
# present, raise an error because we don't know which ones to follow.
old_markers_present
=
False
new_markers_present
=
False
for
code
in
self
.
func_codes
:
if
self
.
backward_re
.
search
(
code
):
old_markers_present
=
True
if
self
.
section_re
.
search
(
code
):
new_markers_present
=
True
if
old_markers_present
and
new_markers_present
:
raise
ValueError
(
"Both the new and the old syntax for "
"identifying code sections are present in the "
"provided C code. These two syntaxes should not "
"be used at the same time."
)
self
.
code_sections
=
dict
()
for
i
,
code
in
enumerate
(
self
.
func_codes
):
if
self
.
backward_re
.
search
(
code
):
# This is backward compat code that will go away in a while
# Separate the code into the proper sections
split
=
self
.
backward_re
.
split
(
code
)
n
=
1
while
n
<
len
(
split
):
if
split
[
n
]
==
"APPLY"
:
self
.
code_sections
[
"support_code_apply"
]
=
split
[
n
+
1
]
elif
split
[
n
]
==
"SUPPORT"
:
self
.
code_sections
[
"support_code"
]
=
split
[
n
+
1
]
n
+=
2
continue
elif
self
.
section_re
.
search
(
code
):
# Check for code outside of the supported sections
split
=
self
.
section_re
.
split
(
code
)
if
split
[
0
]
.
strip
()
!=
""
:
raise
ValueError
(
"Stray code before first #section "
f
"statement (in file {func_files[i]}): {split[0]}"
)
# Separate the code into the proper sections
n
=
1
while
n
<
len
(
split
):
if
split
[
n
]
not
in
self
.
SECTIONS
:
raise
ValueError
(
f
"Unknown section type (in file {func_files[i]}): {split[n]}"
)
if
split
[
n
]
not
in
self
.
code_sections
:
self
.
code_sections
[
split
[
n
]]
=
""
self
.
code_sections
[
split
[
n
]]
+=
split
[
n
+
1
]
n
+=
2
else
:
raise
ValueError
(
f
"No valid section marker was found in file {func_files[i]}"
)
def
__get_op_params
(
self
)
->
List
[
Tuple
[
str
,
Any
]]:
"""Construct name, value pairs that will be turned into macros for use within the `Op`'s code.
The names must be strings that are not a C keyword and the
values must be strings of literal C representations.
If op uses a :class:`aesara.graph.params_type.ParamsType` as ``params_type``,
it returns:
- a default macro ``PARAMS_TYPE`` which defines the class name of the
corresponding C struct.
- a macro ``DTYPE_PARAM_key`` for every ``key`` in the :class:`ParamsType` for which associated
type implements the method :func:`aesara.graph.type.CLinkerType.c_element_type`.
``DTYPE_PARAM_key`` defines the primitive C type name of an item in a variable
associated to ``key``.
"""
params
:
List
[
Tuple
[
str
,
Any
]]
=
[]
if
isinstance
(
self
.
params_type
,
ParamsType
):
wrapper
=
self
.
params_type
params
.
append
((
"PARAMS_TYPE"
,
wrapper
.
name
))
for
i
in
range
(
wrapper
.
length
):
c_type
=
wrapper
.
types
[
i
]
.
c_element_type
()
if
c_type
:
# NB (reminder): These macros are currently used only in ParamsType example test
# (`aesara/graph/tests/test_quadratic_function.c`), to demonstrate how we can
# access params dtypes when dtypes may change (e.g. if based on config.floatX).
# But in practice, params types generally have fixed types per op.
params
.
append
(
(
"DTYPE_PARAM_"
+
wrapper
.
fields
[
i
],
c_type
,
)
)
return
params
def
c_code_cache_version
(
self
):
version
=
(
hash
(
tuple
(
self
.
func_codes
)),)
if
self
.
params_type
is
not
None
:
version
+=
(
self
.
params_type
.
c_code_cache_version
(),)
return
version
def
c_init_code
(
self
,
**
kwargs
):
if
"init_code"
in
self
.
code_sections
:
return
[
self
.
code_sections
[
"init_code"
]]
else
:
return
super
()
.
c_init_code
(
**
kwargs
)
def
c_support_code
(
self
,
**
kwargs
):
if
"support_code"
in
self
.
code_sections
:
return
self
.
code_sections
[
"support_code"
]
else
:
return
super
()
.
c_support_code
(
**
kwargs
)
def
c_init_code_apply
(
self
,
node
,
name
):
if
"init_code_apply"
in
self
.
code_sections
:
code
=
self
.
code_sections
[
"init_code_apply"
]
define_macros
,
undef_macros
=
self
.
get_c_macros
(
node
,
name
)
return
"
\n
"
.
join
([
""
,
define_macros
,
code
,
undef_macros
])
else
:
return
super
()
.
c_init_code_apply
(
node
,
name
)
def
c_support_code_apply
(
self
,
node
,
name
):
if
"support_code_apply"
in
self
.
code_sections
:
code
=
self
.
code_sections
[
"support_code_apply"
]
define_macros
,
undef_macros
=
self
.
get_c_macros
(
node
,
name
)
return
"
\n
"
.
join
([
""
,
define_macros
,
code
,
undef_macros
])
else
:
return
super
()
.
c_support_code_apply
(
node
,
name
)
def
c_support_code_struct
(
self
,
node
,
name
):
if
"support_code_struct"
in
self
.
code_sections
:
code
=
self
.
code_sections
[
"support_code_struct"
]
define_macros
,
undef_macros
=
self
.
get_c_macros
(
node
,
name
)
return
"
\n
"
.
join
([
""
,
define_macros
,
code
,
undef_macros
])
else
:
return
super
()
.
c_support_code_struct
(
node
,
name
)
def
c_cleanup_code_struct
(
self
,
node
,
name
):
if
"cleanup_code_struct"
in
self
.
code_sections
:
code
=
self
.
code_sections
[
"cleanup_code_struct"
]
define_macros
,
undef_macros
=
self
.
get_c_macros
(
node
,
name
)
return
"
\n
"
.
join
([
""
,
define_macros
,
code
,
undef_macros
])
else
:
return
super
()
.
c_cleanup_code_struct
(
node
,
name
)
def
format_c_function_args
(
self
,
inp
:
List
[
Text
],
out
:
List
[
Text
])
->
Text
:
"""Generate a string containing the arguments sent to the external C function.
The result will have the format: ``"input0, input1, input2, &output0, &output1"``.
"""
inp
=
list
(
inp
)
numi
=
getattr
(
self
,
"_cop_num_inputs"
,
len
(
inp
))
while
len
(
inp
)
<
numi
:
inp
.
append
(
"NULL"
)
out
=
[
f
"&{o}"
for
o
in
out
]
numo
=
getattr
(
self
,
"_cop_num_outputs"
,
len
(
out
))
while
len
(
out
)
<
numo
:
out
.
append
(
"NULL"
)
return
", "
.
join
(
inp
+
out
)
def
get_c_macros
(
self
,
node
:
Apply
,
name
:
Text
,
check_input
:
Optional
[
bool
]
=
None
)
->
Union
[
Tuple
[
str
],
Tuple
[
str
,
str
]]:
"Construct a pair of C ``#define`` and ``#undef`` code strings."
define_template
=
"#define
%
s
%
s"
undef_template
=
"#undef
%
s"
define_macros
=
[]
undef_macros
=
[]
if
check_input
is
None
:
check_input
=
getattr
(
self
,
"check_input"
,
True
)
if
check_input
:
# Extract the various properties of the input and output variables
variables
=
node
.
inputs
+
node
.
outputs
variable_names
=
[
f
"INPUT_{i}"
for
i
in
range
(
len
(
node
.
inputs
))]
+
[
f
"OUTPUT_{i}"
for
i
in
range
(
len
(
node
.
outputs
))
]
# Generate dtype macros
for
i
,
v
in
enumerate
(
variables
):
if
not
hasattr
(
v
,
"dtype"
):
continue
vname
=
variable_names
[
i
]
macro_name
=
"DTYPE_"
+
vname
macro_value
=
"npy_"
+
v
.
dtype
define_macros
.
append
(
define_template
%
(
macro_name
,
macro_value
))
undef_macros
.
append
(
undef_template
%
macro_name
)
d
=
np
.
dtype
(
v
.
dtype
)
macro_name
=
"TYPENUM_"
+
vname
macro_value
=
d
.
num
define_macros
.
append
(
define_template
%
(
macro_name
,
macro_value
))
undef_macros
.
append
(
undef_template
%
macro_name
)
macro_name
=
"ITEMSIZE_"
+
vname
macro_value
=
d
.
itemsize
define_macros
.
append
(
define_template
%
(
macro_name
,
macro_value
))
undef_macros
.
append
(
undef_template
%
macro_name
)
# Generate a macro to mark code as being apply-specific
define_macros
.
append
(
define_template
%
(
"APPLY_SPECIFIC(str)"
,
f
"str##_{name}"
))
undef_macros
.
append
(
undef_template
%
"APPLY_SPECIFIC"
)
for
n
,
v
in
self
.
__get_op_params
():
define_macros
.
append
(
define_template
%
(
n
,
v
))
undef_macros
.
append
(
undef_template
%
(
n
,))
return
"
\n
"
.
join
(
define_macros
),
"
\n
"
.
join
(
undef_macros
)
def
c_init_code_struct
(
self
,
node
,
name
,
sub
):
r""" Stitches all the macros and ``init_code_*``\s together."""
if
"init_code_struct"
in
self
.
code_sections
:
op_code
=
self
.
code_sections
[
"init_code_struct"
]
def_macros
,
undef_macros
=
self
.
get_c_macros
(
node
,
name
)
def_sub
,
undef_sub
=
get_sub_macros
(
sub
)
return
"
\n
"
.
join
(
[
""
,
def_macros
,
def_sub
,
op_code
,
undef_sub
,
undef_macros
]
)
else
:
return
super
()
.
c_init_code_struct
(
node
,
name
,
sub
)
def
c_code
(
self
,
node
,
name
,
inp
,
out
,
sub
):
if
self
.
func_name
is
not
None
:
assert
"code"
not
in
self
.
code_sections
define_macros
,
undef_macros
=
self
.
get_c_macros
(
node
,
name
,
check_input
=
False
)
params
=
""
if
"params"
in
sub
:
params
=
f
", {sub['params']}"
# Generate the C code
return
"""
%(define_macros)
s
{
if (
%(func_name)
s(
%(func_args)
s
%(params)
s) != 0) {
%(fail)
s
}
}
%(undef_macros)
s
"""
%
dict
(
func_name
=
self
.
func_name
,
fail
=
sub
[
"fail"
],
params
=
params
,
func_args
=
self
.
format_c_function_args
(
inp
,
out
),
define_macros
=
define_macros
,
undef_macros
=
undef_macros
,
)
else
:
if
"code"
in
self
.
code_sections
:
op_code
=
self
.
code_sections
[
"code"
]
def_macros
,
undef_macros
=
self
.
get_c_macros
(
node
,
name
)
def_sub
,
undef_sub
=
get_sub_macros
(
sub
)
def_io
,
undef_io
=
get_io_macros
(
inp
,
out
)
return
"
\n
"
.
join
(
[
def_macros
,
def_sub
,
def_io
,
op_code
,
undef_io
,
undef_sub
,
undef_macros
,
]
)
else
:
raise
NotImplementedError
()
def
c_code_cleanup
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
r"""Stitches all the macros and ``code_cleanup``\s together."""
if
"code_cleanup"
in
self
.
code_sections
:
op_code
=
self
.
code_sections
[
"code_cleanup"
]
def_macros
,
undef_macros
=
self
.
get_c_macros
(
node
,
name
)
def_sub
,
undef_sub
=
get_sub_macros
(
sub
)
def_io
,
undef_io
=
get_io_macros
(
inputs
,
outputs
)
return
"
\n
"
.
join
(
[
def_macros
,
def_sub
,
def_io
,
op_code
,
undef_io
,
undef_sub
,
undef_macros
,
]
)
else
:
return
super
()
.
c_code_cleanup
(
node
,
name
,
inputs
,
outputs
,
sub
)
class
_NoPythonOp
(
Op
):
"""A class used to indicate that an `Op` does not provide a Python implementation.
XXX: Do not use this class; it's only for tracking bad implementations internally.
"""
def
perform
(
self
,
node
,
inputs
,
output_storage
,
params
=
None
):
raise
NotImplementedError
(
"No Python implementation is provided by this Op."
)
class
_NoPythonCOp
(
COp
):
"""A class used to indicate that a `COp` does not provide a Python implementation.
XXX: Do not use this class; it's only for tracking bad implementations internally.
"""
def
perform
(
self
,
node
,
inputs
,
output_storage
,
params
=
None
):
raise
NotImplementedError
(
"No Python implementation is provided by this COp."
)
class
_NoPythonExternalCOp
(
ExternalCOp
):
"""A class used to indicate that an `ExternalCOp` does not provide a Python implementation.
XXX: Do not use this class; it's only for tracking bad implementations internally.
"""
def
perform
(
self
,
node
,
inputs
,
output_storage
,
params
=
None
):
raise
NotImplementedError
(
"No Python implementation is provided by this ExternalCOp."
)
aesara/link/c/op.py
0 → 100644
浏览文件 @
8a4505c0
import
inspect
import
os
import
re
import
warnings
from
typing
import
(
Any
,
ClassVar
,
Collection
,
Dict
,
List
,
Optional
,
Pattern
,
Set
,
Text
,
Tuple
,
Union
,
)
import
numpy
as
np
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
ComputeMapType
,
Op
,
StorageMapType
,
ThunkType
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.utils
import
MethodNotDefined
from
aesara.link.c.interface
import
CLinkerOp
class
COp
(
Op
,
CLinkerOp
):
"""An `Op` with a C implementation."""
def
make_c_thunk
(
self
,
node
:
Apply
,
storage_map
:
StorageMapType
,
compute_map
:
ComputeMapType
,
no_recycling
:
Collection
[
Apply
],
)
->
ThunkType
:
"""Create a thunk for a C implementation.
Like :meth:`Op.make_thunk`, but will only try to make a C thunk.
"""
# FIXME: Putting the following import on the module level causes an import cycle.
# The conclusion should be that the antire "make_c_thunk" method should be defined
# in aesara.link.c and dispatched onto the Op!
import
aesara.link.c.basic
from
aesara.graph.fg
import
FunctionGraph
node_input_storage
=
[
storage_map
[
r
]
for
r
in
node
.
inputs
]
node_output_storage
=
[
storage_map
[
r
]
for
r
in
node
.
outputs
]
e
=
FunctionGraph
(
node
.
inputs
,
node
.
outputs
)
e_no_recycling
=
[
new_o
for
(
new_o
,
old_o
)
in
zip
(
e
.
outputs
,
node
.
outputs
)
if
old_o
in
no_recycling
]
cl
=
aesara
.
link
.
c
.
basic
.
CLinker
()
.
accept
(
e
,
no_recycling
=
e_no_recycling
)
# float16 gets special treatment since running
# unprepared C code will get bad results.
if
not
getattr
(
self
,
"_f16_ok"
,
False
):
def
is_f16
(
t
):
return
getattr
(
t
,
"dtype"
,
""
)
==
"float16"
if
any
(
is_f16
(
i
.
type
)
for
i
in
node
.
inputs
)
or
any
(
is_f16
(
o
.
type
)
for
o
in
node
.
outputs
):
# get_dynamic_module is a subset of make_thunk that is reused.
# This just try to build the c code
# It will raise an error for ops
# that don't implement c code. In those cases, we
# don't want to print a warning.
cl
.
get_dynamic_module
()
print
(
f
"Disabling C code for {self} due to unsupported float16"
)
raise
NotImplementedError
(
"float16"
)
outputs
=
cl
.
make_thunk
(
input_storage
=
node_input_storage
,
output_storage
=
node_output_storage
)
thunk
,
node_input_filters
,
node_output_filters
=
outputs
def
rval
():
thunk
()
for
o
in
node
.
outputs
:
compute_map
[
o
][
0
]
=
True
rval
.
thunk
=
thunk
rval
.
cthunk
=
thunk
.
cthunk
rval
.
inputs
=
node_input_storage
rval
.
outputs
=
node_output_storage
rval
.
lazy
=
False
return
rval
def make_thunk(self, node, storage_map, compute_map, no_recycling, impl=None):
    """Create a thunk.

    See :meth:`Op.make_thunk`.

    Parameters
    ----------
    impl :
        Currently, ``None``, ``'c'`` or ``'py'``. If ``'c'`` or ``'py'`` we
        will only try that version of the code.

    """
    # Attempt the C implementation when it was requested explicitly, or
    # when no preference was given and a C++ compiler is configured.
    want_c = impl == "c" or (impl is None and bool(config.cxx))

    if want_c:
        self.prepare_node(
            node, storage_map=storage_map, compute_map=compute_map, impl="c"
        )
        try:
            return self.make_c_thunk(node, storage_map, compute_map, no_recycling)
        except (NotImplementedError, MethodNotDefined):
            # The C version was requested explicitly, so the failure is
            # propagated instead of silently falling back to Python.
            if impl == "c":
                raise

    # Fall back to the generic (Python) thunk construction.
    return super().make_thunk(
        node, storage_map, compute_map, no_recycling, impl=impl
    )
class OpenMPOp(COp):
    r"""Base class for `Op`\s using OpenMP.

    This `Op` will check that the compiler support correctly OpenMP code.
    If not, it will print a warning and disable OpenMP for this `Op`, then it
    will generate the not OpenMP code.

    This is needed, as EPD on the Windows version of ``g++`` says it supports
    OpenMP, but does not include the OpenMP files.

    We also add the correct compiler flags in ``c_compile_args``.

    """

    # Class-level cache shared by all instances: ``None`` means "not yet
    # tested"; set once by `update_self_openmp`.
    gxx_support_openmp: Optional[bool] = None
    """
    ``True``/``False`` after we tested this.
    """

    def __init__(self, openmp: Optional[bool] = None):
        # Fall back to the global configuration flag when no explicit
        # preference is given.
        if openmp is None:
            openmp = config.openmp
        self.openmp = openmp

    def __setstate__(self, d: Dict):
        """Restore pickled state, defaulting ``openmp`` for old pickles."""
        self.__dict__.update(d)
        # If we unpickle old op
        if not hasattr(self, "openmp"):
            self.openmp = False

    def c_compile_args(self, **kwargs):
        """Return the compilation argument ``"-fopenmp"`` if OpenMP is supported."""
        self.update_self_openmp()
        if self.openmp:
            return ["-fopenmp"]
        return []

    def c_headers(self, **kwargs):
        """Return the header file name ``"omp.h"`` if OpenMP is supported."""
        self.update_self_openmp()
        if self.openmp:
            return ["omp.h"]
        return []

    @staticmethod
    def test_gxx_support():
        """Check if OpenMP is supported."""
        from aesara.link.c.cmodule import GCC_compiler

        # Minimal program using an OpenMP header; compiling it (without
        # running) is enough to detect broken OpenMP toolchains.
        # NOTE(review): internal whitespace of this C snippet was lost in
        # extraction; it is irrelevant to the compile test.
        code = """
        #include <omp.h>
int main( int argc, const char* argv[] )
{
        int res[10];

        for(int i=0; i < 10; i++){
            res[i] = i;
        }
}
        """
        default_openmp = GCC_compiler.try_compile_tmp(
            src_code=code, tmp_prefix="test_omp_", flags=["-fopenmp"], try_run=False
        )
        return default_openmp

    def update_self_openmp(self) -> None:
        """Make sure ``self.openmp`` is not ``True`` if there is no OpenMP support in ``gxx``."""
        if self.openmp:
            # Run the compiler probe only once per process and cache the
            # result on the class.
            if OpenMPOp.gxx_support_openmp is None:
                OpenMPOp.gxx_support_openmp = OpenMPOp.test_gxx_support()
                if not OpenMPOp.gxx_support_openmp:
                    # We want to warn only once.
                    warnings.warn(
                        "Your g++ compiler fails to compile OpenMP code. We"
                        " know this happen with some version of the EPD mingw"
                        " compiler and LLVM compiler on Mac OS X."
                        " We disable openmp everywhere in Aesara."
                        " To remove this warning set the aesara flags `openmp`"
                        " to False.",
                        stacklevel=3,
                    )
            if OpenMPOp.gxx_support_openmp is False:
                # Disable OpenMP both for this instance and globally so the
                # probe/warning is not repeated for other ops.
                self.openmp = False
                config.openmp = False

    def prepare_node(self, node, storage_map, compute_map, impl):
        """Re-check OpenMP support before the C implementation is built."""
        if impl == "c":
            self.update_self_openmp()
def lquote_macro(txt: Text) -> Text:
    r"""Escape *txt* for use inside a C macro definition.

    Every line except the last one receives a trailing ``\`` line
    continuation, so the whole text remains a single macro body.
    """
    lines = txt.split("\n")
    # All but the final line get a continuation marker.
    continued = [line + " \\" for line in lines[:-1]]
    continued.append(lines[-1])
    return "\n".join(continued)
def get_sub_macros(sub: Dict[Text, Text]) -> Tuple[Text, Text]:
    """Build C ``#define``/``#undef`` blocks for the ``sub`` dictionary.

    Parameters
    ----------
    sub
        The substitution dictionary handed to ``c_code``; must contain a
        ``"fail"`` entry and may contain a ``"params"`` entry.

    Returns
    -------
    A pair ``(defines, undefs)`` of newline-joined macro strings.

    """
    define_macros = []
    undef_macros = []
    # The failure snippet can span several lines, so it needs ``\\`` line
    # continuations to stay a valid macro body.
    define_macros.append(f"#define FAIL {lquote_macro(sub['fail'])}")
    undef_macros.append("#undef FAIL")
    if "params" in sub:
        define_macros.append(f"#define PARAMS {sub['params']}")
        undef_macros.append("#undef PARAMS")

    return "\n".join(define_macros), "\n".join(undef_macros)
def get_io_macros(inputs: List[Text], outputs: List[Text]) -> Tuple[Text, Text]:
    """Build ``#define``/``#undef`` blocks naming each input and output.

    Each input variable name is exposed as ``INPUT_<i>`` and each output as
    ``OUTPUT_<i>``.

    Parameters
    ----------
    inputs
        C variable names of the node's inputs.
    outputs
        C variable names of the node's outputs.

    Returns
    -------
    A pair ``(defines, undefs)`` of newline-joined macro strings.

    """
    define_macros = []
    undef_macros = []

    # ``enumerate`` already yields ``int`` indices, so no conversion is
    # needed when formatting.
    for i, inp in enumerate(inputs):
        define_macros.append(f"#define INPUT_{i} {inp}")
        undef_macros.append(f"#undef INPUT_{i}")

    for i, out in enumerate(outputs):
        define_macros.append(f"#define OUTPUT_{i} {out}")
        undef_macros.append(f"#undef OUTPUT_{i}")

    return "\n".join(define_macros), "\n".join(undef_macros)
class ExternalCOp(COp):
    """Class for an `Op` with an external C implementation.

    One can inherit from this class, provide its constructor with a path to
    an external C source file and the name of a function within it, and define
    an `Op` for said function.

    """

    # New-style marker: a line of the form ``#section <name>``.
    section_re: ClassVar[Pattern] = re.compile(
        r"^#section ([a-zA-Z0-9_]+)$", re.MULTILINE
    )
    # Deprecated marker kept for backward compatibility with old C files.
    backward_re: ClassVar[Pattern] = re.compile(
        r"^AESARA_(APPLY|SUPPORT)_CODE_SECTION$", re.MULTILINE
    )
    # This is the set of allowed markers
    SECTIONS: ClassVar[Set[Text]] = {
        "init_code",
        "init_code_apply",
        "init_code_struct",
        "support_code",
        "support_code_apply",
        "support_code_struct",
        "cleanup_code_struct",
        "code",
        "code_cleanup",
    }

    @classmethod
    def get_path(cls, f: Text) -> Text:
        """Convert a path relative to the location of the class file into an absolute path.

        Paths that are already absolute are passed through unchanged.

        """
        if not os.path.isabs(f):
            class_file = inspect.getfile(cls)
            class_dir = os.path.dirname(class_file)
            f = os.path.realpath(os.path.join(class_dir, f))
        return f

    def __init__(
        self,
        func_files: Union[Text, List[Text]],
        func_name: Optional[Text] = None,
    ):
        """
        Sections are loaded from files in order with sections in later
        files overriding sections in previous files.

        """
        if not isinstance(func_files, list):
            func_files = [func_files]

        self.func_name = func_name
        # Keep the original name. If we reload old pickle, we want to
        # find the new path and new version of the file in Aesara.
        self.func_files = func_files
        self.load_c_code(func_files)

        if len(self.code_sections) == 0:
            raise ValueError("No sections where defined in C files")

        if self.func_name is not None:
            # A named entry-point function and an inline "code" section are
            # mutually exclusive ways of providing the Op's C code.
            if "op_code" in self.code_sections:
                # maybe a warning instead (and clearing the key)
                raise ValueError(
                    'Cannot have an "op_code" section and ' "specify the func_name"
                )
            if "op_code_cleanup" in self.code_sections:
                # maybe a warning instead (and clearing the key)
                raise ValueError(
                    'Cannot have an "op_code_cleanup" section '
                    "and specify the func_name"
                )

    def load_c_code(self, func_files: List[Text]) -> None:
        """Loads the C code to perform the `Op`."""
        func_files = [self.get_path(f) for f in func_files]
        self.func_codes = []
        for func_file in func_files:
            # U (universal) will convert all new lines format to \n.
            with open(func_file) as f:
                self.func_codes.append(f.read())

        # If both the old section markers and the new section markers are
        # present, raise an error because we don't know which ones to follow.
        old_markers_present = False
        new_markers_present = False
        for code in self.func_codes:
            if self.backward_re.search(code):
                old_markers_present = True
            if self.section_re.search(code):
                new_markers_present = True

        if old_markers_present and new_markers_present:
            raise ValueError(
                "Both the new and the old syntax for "
                "identifying code sections are present in the "
                "provided C code. These two syntaxes should not "
                "be used at the same time."
            )

        self.code_sections = dict()
        for i, code in enumerate(self.func_codes):
            if self.backward_re.search(code):
                # This is backward compat code that will go away in a while

                # Separate the code into the proper sections
                # ``re.split`` with a capturing group alternates
                # text / marker-kind / text, hence the stride-2 walk below.
                split = self.backward_re.split(code)
                n = 1
                while n < len(split):
                    if split[n] == "APPLY":
                        self.code_sections["support_code_apply"] = split[n + 1]
                    elif split[n] == "SUPPORT":
                        self.code_sections["support_code"] = split[n + 1]
                    n += 2
                continue

            elif self.section_re.search(code):

                # Check for code outside of the supported sections
                split = self.section_re.split(code)
                if split[0].strip() != "":
                    raise ValueError(
                        "Stray code before first #section "
                        f"statement (in file {func_files[i]}): {split[0]}"
                    )

                # Separate the code into the proper sections
                n = 1
                while n < len(split):
                    if split[n] not in self.SECTIONS:
                        raise ValueError(
                            f"Unknown section type (in file {func_files[i]}): {split[n]}"
                        )
                    if split[n] not in self.code_sections:
                        self.code_sections[split[n]] = ""
                    # Later files *append* to sections already seen, so
                    # sections accumulate across files.
                    self.code_sections[split[n]] += split[n + 1]
                    n += 2
            else:
                raise ValueError(
                    f"No valid section marker was found in file {func_files[i]}"
                )

    def __get_op_params(self) -> List[Tuple[str, Any]]:
        """Construct name, value pairs that will be turned into macros for use within the `Op`'s code.

        The names must be strings that are not a C keyword and the
        values must be strings of literal C representations.

        If op uses a :class:`aesara.graph.params_type.ParamsType` as ``params_type``,
        it returns:
         - a default macro ``PARAMS_TYPE`` which defines the class name of the
           corresponding C struct.
         - a macro ``DTYPE_PARAM_key`` for every ``key`` in the :class:`ParamsType` for which associated
           type implements the method :func:`aesara.graph.type.CLinkerType.c_element_type`.
           ``DTYPE_PARAM_key`` defines the primitive C type name of an item in a variable
           associated to ``key``.

        """
        params: List[Tuple[str, Any]] = []
        if isinstance(self.params_type, ParamsType):
            wrapper = self.params_type
            params.append(("PARAMS_TYPE", wrapper.name))
            for i in range(wrapper.length):
                c_type = wrapper.types[i].c_element_type()
                if c_type:
                    # NB (reminder): These macros are currently used only in ParamsType example test
                    # (`aesara/graph/tests/test_quadratic_function.c`), to demonstrate how we can
                    # access params dtypes when dtypes may change (e.g. if based on config.floatX).
                    # But in practice, params types generally have fixed types per op.
                    params.append(
                        (
                            "DTYPE_PARAM_" + wrapper.fields[i],
                            c_type,
                        )
                    )
        return params

    def c_code_cache_version(self):
        """Cache key derived from the loaded C sources (and params type)."""
        version = (hash(tuple(self.func_codes)),)
        if self.params_type is not None:
            version += (self.params_type.c_code_cache_version(),)
        return version

    def c_init_code(self, **kwargs):
        """Return the ``init_code`` section, if one was provided."""
        if "init_code" in self.code_sections:
            return [self.code_sections["init_code"]]
        else:
            return super().c_init_code(**kwargs)

    def c_support_code(self, **kwargs):
        """Return the ``support_code`` section, if one was provided."""
        if "support_code" in self.code_sections:
            return self.code_sections["support_code"]
        else:
            return super().c_support_code(**kwargs)

    def c_init_code_apply(self, node, name):
        """Return the ``init_code_apply`` section wrapped in its macros."""
        if "init_code_apply" in self.code_sections:
            code = self.code_sections["init_code_apply"]

            define_macros, undef_macros = self.get_c_macros(node, name)
            return "\n".join(["", define_macros, code, undef_macros])
        else:
            return super().c_init_code_apply(node, name)

    def c_support_code_apply(self, node, name):
        """Return the ``support_code_apply`` section wrapped in its macros."""
        if "support_code_apply" in self.code_sections:
            code = self.code_sections["support_code_apply"]

            define_macros, undef_macros = self.get_c_macros(node, name)
            return "\n".join(["", define_macros, code, undef_macros])
        else:
            return super().c_support_code_apply(node, name)

    def c_support_code_struct(self, node, name):
        """Return the ``support_code_struct`` section wrapped in its macros."""
        if "support_code_struct" in self.code_sections:
            code = self.code_sections["support_code_struct"]

            define_macros, undef_macros = self.get_c_macros(node, name)
            return "\n".join(["", define_macros, code, undef_macros])
        else:
            return super().c_support_code_struct(node, name)

    def c_cleanup_code_struct(self, node, name):
        """Return the ``cleanup_code_struct`` section wrapped in its macros."""
        if "cleanup_code_struct" in self.code_sections:
            code = self.code_sections["cleanup_code_struct"]

            define_macros, undef_macros = self.get_c_macros(node, name)
            return "\n".join(["", define_macros, code, undef_macros])
        else:
            return super().c_cleanup_code_struct(node, name)

    def format_c_function_args(self, inp: List[Text], out: List[Text]) -> Text:
        """Generate a string containing the arguments sent to the external C function.

        The result will have the format: ``"input0, input1, input2, &output0, &output1"``.

        """
        inp = list(inp)
        # ``_cop_num_inputs``/``_cop_num_outputs`` (when set by a subclass)
        # fix the arity of the external function; missing slots are padded
        # with ``NULL``.
        numi = getattr(self, "_cop_num_inputs", len(inp))
        while len(inp) < numi:
            inp.append("NULL")
        out = [f"&{o}" for o in out]
        numo = getattr(self, "_cop_num_outputs", len(out))
        while len(out) < numo:
            out.append("NULL")
        return ", ".join(inp + out)

    def get_c_macros(
        self, node: Apply, name: Text, check_input: Optional[bool] = None
    ) -> Tuple[str, str]:
        "Construct a pair of C ``#define`` and ``#undef`` code strings."
        define_template = "#define %s %s"
        undef_template = "#undef %s"
        define_macros = []
        undef_macros = []

        if check_input is None:
            check_input = getattr(self, "check_input", True)

        if check_input:
            # Extract the various properties of the input and output variables
            variables = node.inputs + node.outputs
            variable_names = [f"INPUT_{i}" for i in range(len(node.inputs))] + [
                f"OUTPUT_{i}" for i in range(len(node.outputs))
            ]

            # Generate dtype macros
            for i, v in enumerate(variables):
                if not hasattr(v, "dtype"):
                    continue
                vname = variable_names[i]

                macro_name = "DTYPE_" + vname
                macro_value = "npy_" + v.dtype

                define_macros.append(define_template % (macro_name, macro_value))
                undef_macros.append(undef_template % macro_name)

                d = np.dtype(v.dtype)

                macro_name = "TYPENUM_" + vname
                macro_value = d.num

                define_macros.append(define_template % (macro_name, macro_value))
                undef_macros.append(undef_template % macro_name)

                macro_name = "ITEMSIZE_" + vname
                macro_value = d.itemsize

                define_macros.append(define_template % (macro_name, macro_value))
                undef_macros.append(undef_template % macro_name)

        # Generate a macro to mark code as being apply-specific
        define_macros.append(define_template % ("APPLY_SPECIFIC(str)", f"str##_{name}"))
        undef_macros.append(undef_template % "APPLY_SPECIFIC")

        for n, v in self.__get_op_params():
            define_macros.append(define_template % (n, v))
            undef_macros.append(undef_template % (n,))

        return "\n".join(define_macros), "\n".join(undef_macros)

    def c_init_code_struct(self, node, name, sub):
        r"""Stitches all the macros and ``init_code_*``\s together."""
        if "init_code_struct" in self.code_sections:
            op_code = self.code_sections["init_code_struct"]

            def_macros, undef_macros = self.get_c_macros(node, name)
            def_sub, undef_sub = get_sub_macros(sub)

            return "\n".join(
                ["", def_macros, def_sub, op_code, undef_sub, undef_macros]
            )
        else:
            return super().c_init_code_struct(node, name, sub)

    def c_code(self, node, name, inp, out, sub):
        """Return the C implementation, either calling ``func_name`` or
        emitting the inline ``code`` section."""
        if self.func_name is not None:
            assert "code" not in self.code_sections

            define_macros, undef_macros = self.get_c_macros(
                node, name, check_input=False
            )

            params = ""
            if "params" in sub:
                params = f", {sub['params']}"

            # Generate the C code
            # NOTE(review): internal whitespace of this C template was lost
            # in extraction; it does not affect the generated C semantics.
            return """
%(define_macros)s
{
  if (%(func_name)s(%(func_args)s%(params)s) != 0) {
    %(fail)s
  }
}
%(undef_macros)s
""" % dict(
                func_name=self.func_name,
                fail=sub["fail"],
                params=params,
                func_args=self.format_c_function_args(inp, out),
                define_macros=define_macros,
                undef_macros=undef_macros,
            )
        else:
            if "code" in self.code_sections:
                op_code = self.code_sections["code"]

                def_macros, undef_macros = self.get_c_macros(node, name)
                def_sub, undef_sub = get_sub_macros(sub)
                def_io, undef_io = get_io_macros(inp, out)

                return "\n".join(
                    [
                        def_macros,
                        def_sub,
                        def_io,
                        op_code,
                        undef_io,
                        undef_sub,
                        undef_macros,
                    ]
                )
            else:
                raise NotImplementedError()

    def c_code_cleanup(self, node, name, inputs, outputs, sub):
        r"""Stitches all the macros and ``code_cleanup``\s together."""
        if "code_cleanup" in self.code_sections:
            op_code = self.code_sections["code_cleanup"]

            def_macros, undef_macros = self.get_c_macros(node, name)
            def_sub, undef_sub = get_sub_macros(sub)
            def_io, undef_io = get_io_macros(inputs, outputs)

            return "\n".join(
                [
                    def_macros,
                    def_sub,
                    def_io,
                    op_code,
                    undef_io,
                    undef_sub,
                    undef_macros,
                ]
            )
        else:
            return super().c_code_cleanup(node, name, inputs, outputs, sub)
class _NoPythonCOp(COp):
    """A class used to indicate that a `COp` does not provide a Python implementation.

    XXX: Do not use this class; it's only for tracking bad implementations internally.

    """

    def perform(self, node, inputs, output_storage, params=None):
        # This Op is C-only by design; reaching here means the C thunk was
        # unavailable or bypassed.
        message = "No Python implementation is provided by this COp."
        raise NotImplementedError(message)
class _NoPythonExternalCOp(ExternalCOp):
    """A class used to indicate that an `ExternalCOp` does not provide a Python implementation.

    XXX: Do not use this class; it's only for tracking bad implementations internally.

    """

    def perform(self, node, inputs, output_storage, params=None):
        # This Op is C-only by design; reaching here means the C thunk was
        # unavailable or bypassed.
        message = "No Python implementation is provided by this ExternalCOp."
        raise NotImplementedError(message)
aesara/raise_op.py
浏览文件 @
8a4505c0
...
...
@@ -7,9 +7,9 @@ import numpy as np
from
aesara.gradient
import
DisconnectedType
from
aesara.graph.basic
import
Apply
,
Variable
from
aesara.graph.op
import
COp
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
Generic
from
aesara.link.c.op
import
COp
class
ExceptionType
(
Generic
):
...
...
aesara/sandbox/multinomial.py
浏览文件 @
8a4505c0
...
...
@@ -7,7 +7,7 @@ import numpy as np
import
aesara.tensor
as
at
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
from
aesara.
graph
.op
import
COp
from
aesara.
link.c
.op
import
COp
from
aesara.scalar
import
Scalar
,
as_scalar
from
aesara.tensor.type
import
discrete_dtypes
...
...
aesara/sandbox/rng_mrg.py
浏览文件 @
8a4505c0
...
...
@@ -25,9 +25,9 @@ from aesara.compile import optdb
from
aesara.configdefaults
import
config
from
aesara.gradient
import
undefined_grad
from
aesara.graph.basic
import
Apply
,
Constant
,
Variable
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.opt
import
in2out
,
local_optimizer
from
aesara.graph.params_type
import
ParamsType
from
aesara.link.c.op
import
COp
,
Op
from
aesara.sandbox
import
multinomial
from
aesara.scalar
import
bool
as
bool_t
from
aesara.scalar
import
int32
as
int_t
...
...
aesara/scalar/basic.py
浏览文件 @
8a4505c0
...
...
@@ -27,10 +27,10 @@ from aesara.configdefaults import config
from
aesara.gradient
import
DisconnectedType
,
grad_undefined
from
aesara.graph.basic
import
Apply
,
Constant
,
Variable
,
clone
,
list_of_nodes
from
aesara.graph.fg
import
FunctionGraph
from
aesara.graph.op
import
COp
from
aesara.graph.opt
import
MergeOptimizer
from
aesara.graph.type
import
CType
from
aesara.graph.utils
import
MetaObject
,
MethodNotDefined
from
aesara.link.c.op
import
COp
from
aesara.misc.safe_asarray
import
_asarray
from
aesara.printing
import
pprint
from
aesara.utils
import
(
...
...
aesara/sparse/basic.py
浏览文件 @
8a4505c0
...
...
@@ -18,7 +18,8 @@ from aesara import scalar as aes
from
aesara.configdefaults
import
config
from
aesara.gradient
import
DisconnectedType
,
grad_not_implemented
,
grad_undefined
from
aesara.graph.basic
import
Apply
,
Constant
,
Variable
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.link.c.op
import
COp
from
aesara.misc.safe_asarray
import
_asarray
from
aesara.sparse.type
import
SparseType
,
_is_sparse
from
aesara.sparse.utils
import
hash_from_sparse
...
...
aesara/sparse/opt.py
浏览文件 @
8a4505c0
...
...
@@ -5,8 +5,8 @@ import aesara
import
aesara.scalar
as
aes
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
COp
,
_NoPythonCOp
from
aesara.graph.opt
import
PatternSub
,
TopoOptimizer
,
local_optimizer
from
aesara.link.c.op
import
COp
,
_NoPythonCOp
from
aesara.misc.safe_asarray
import
_asarray
from
aesara.sparse
import
basic
as
sparse
from
aesara.sparse.basic
import
(
...
...
aesara/tensor/basic.py
浏览文件 @
8a4505c0
...
...
@@ -23,10 +23,11 @@ from aesara import scalar as aes
from
aesara.gradient
import
DisconnectedType
,
grad_not_implemented
,
grad_undefined
from
aesara.graph.basic
import
Apply
,
Constant
,
Variable
from
aesara.graph.fg
import
FunctionGraph
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.opt_utils
import
optimize_graph
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
Type
from
aesara.link.c.op
import
COp
from
aesara.misc.safe_asarray
import
_asarray
from
aesara.printing
import
min_informative_str
,
pprint
from
aesara.raise_op
import
CheckAndRaise
,
assert_op
...
...
aesara/tensor/blas.py
浏览文件 @
8a4505c0
...
...
@@ -147,7 +147,7 @@ from aesara.compile.mode import optdb
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
,
view_roots
from
aesara.graph.features
import
ReplacementDidNotRemoveError
,
ReplaceValidate
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.opt
import
(
EquilibriumOptimizer
,
GlobalOptimizer
,
...
...
@@ -158,6 +158,7 @@ from aesara.graph.opt import (
from
aesara.graph.optdb
import
SequenceDB
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.utils
import
InconsistencyError
,
MethodNotDefined
,
TestValueError
from
aesara.link.c.op
import
COp
from
aesara.printing
import
FunctionPrinter
,
debugprint
,
pprint
from
aesara.scalar
import
bool
as
bool_t
from
aesara.tensor
import
basic
as
at
...
...
aesara/tensor/blas_c.py
浏览文件 @
8a4505c0
from
aesara.configdefaults
import
config
from
aesara.graph.op
import
COp
from
aesara.graph.opt
import
in2out
from
aesara.graph.params_type
import
ParamsType
from
aesara.link.c.op
import
COp
from
aesara.scalar
import
bool
as
bool_t
from
aesara.tensor
import
basic
as
at
from
aesara.tensor.blas
import
(
...
...
aesara/tensor/elemwise.py
浏览文件 @
8a4505c0
...
...
@@ -8,10 +8,10 @@ from aesara.configdefaults import config
from
aesara.gradient
import
DisconnectedType
from
aesara.graph.basic
import
Apply
from
aesara.graph.null_type
import
NullType
from
aesara.graph.op
import
COp
,
ExternalCOp
,
OpenMPOp
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.utils
import
MethodNotDefined
from
aesara.link.c.basic
import
failure_code
from
aesara.link.c.op
import
COp
,
ExternalCOp
,
OpenMPOp
from
aesara.misc.frozendict
import
frozendict
from
aesara.misc.safe_asarray
import
_asarray
from
aesara.printing
import
FunctionPrinter
,
Printer
,
pprint
...
...
aesara/tensor/extra_ops.py
浏览文件 @
8a4505c0
...
...
@@ -11,9 +11,10 @@ from aesara.gradient import (
grad_undefined
,
)
from
aesara.graph.basic
import
Apply
,
Variable
,
equal_computations
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
EnumList
,
Generic
from
aesara.link.c.op
import
COp
from
aesara.misc.safe_asarray
import
_asarray
from
aesara.raise_op
import
Assert
from
aesara.scalar
import
int32
as
int_t
...
...
aesara/tensor/math.py
浏览文件 @
8a4505c0
...
...
@@ -7,9 +7,10 @@ from aesara import config, printing
from
aesara
import
scalar
as
aes
from
aesara.gradient
import
DisconnectedType
from
aesara.graph.basic
import
Apply
,
Variable
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
Generic
from
aesara.link.c.op
import
COp
from
aesara.misc.safe_asarray
import
_asarray
from
aesara.printing
import
pprint
from
aesara.scalar.basic
import
BinaryScalarOp
...
...
aesara/tensor/nnet/basic.py
浏览文件 @
8a4505c0
...
...
@@ -24,8 +24,9 @@ from aesara import scalar as aes
from
aesara.compile
import
optdb
from
aesara.gradient
import
DisconnectedType
,
grad_not_implemented
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.opt
import
copy_stack_trace
,
local_optimizer
,
optimizer
from
aesara.link.c.op
import
COp
from
aesara.raise_op
import
Assert
from
aesara.scalar
import
UnaryScalarOp
from
aesara.tensor
import
basic
as
at
...
...
aesara/tensor/nnet/conv.py
浏览文件 @
8a4505c0
...
...
@@ -24,7 +24,7 @@ except ImportError:
import
aesara
from
aesara.graph.basic
import
Apply
from
aesara.
graph
.op
import
OpenMPOp
from
aesara.
link.c
.op
import
OpenMPOp
from
aesara.tensor
import
blas
from
aesara.tensor.basic
import
(
as_tensor_variable
,
...
...
aesara/tensor/nnet/corr.py
浏览文件 @
8a4505c0
...
...
@@ -5,9 +5,10 @@ from typing import Optional
import
aesara
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
OpenMPOp
,
_NoPythonOp
from
aesara.graph.op
import
_NoPythonOp
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
EnumList
from
aesara.link.c.op
import
OpenMPOp
from
aesara.scalar
import
int8
,
int64
from
aesara.tensor
import
blas_headers
from
aesara.tensor.basic
import
as_tensor_variable
...
...
aesara/tensor/nnet/corr3d.py
浏览文件 @
8a4505c0
...
...
@@ -5,9 +5,10 @@ from typing import Optional
import
aesara
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
OpenMPOp
,
_NoPythonOp
from
aesara.graph.op
import
_NoPythonOp
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
EnumList
from
aesara.link.c.op
import
OpenMPOp
from
aesara.scalar
import
int64
from
aesara.tensor
import
blas_headers
from
aesara.tensor.basic
import
as_tensor_variable
...
...
aesara/tensor/nnet/ctc.py
浏览文件 @
8a4505c0
...
...
@@ -5,9 +5,9 @@ import aesara.tensor as at
from
aesara.configdefaults
import
config
from
aesara.gradient
import
grad_undefined
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
ExternalCOp
,
OpenMPOp
from
aesara.graph.opt
import
local_optimizer
from
aesara.link.c.cmodule
import
GCC_compiler
from
aesara.link.c.op
import
ExternalCOp
,
OpenMPOp
from
aesara.tensor.basic_opt
import
register_canonicalize
from
aesara.tensor.blas
import
batched_dot
from
aesara.tensor.extra_ops
import
cpu_contiguous
...
...
aesara/tensor/nnet/neighbours.py
浏览文件 @
8a4505c0
...
...
@@ -7,8 +7,8 @@ import numpy as np
import
aesara
from
aesara.gradient
import
grad_not_implemented
,
grad_undefined
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
COp
from
aesara.graph.type
import
EnumList
from
aesara.link.c.op
import
COp
from
aesara.tensor.basic
import
arange
,
as_tensor_variable
,
concatenate
,
stack
,
zeros
from
aesara.tensor.math
import
ceil_intdiv
from
aesara.tensor.subtensor
import
inc_subtensor
,
set_subtensor
...
...
aesara/tensor/shape.py
浏览文件 @
8a4505c0
...
...
@@ -7,8 +7,8 @@ import numpy as np
import
aesara
from
aesara.gradient
import
DisconnectedType
from
aesara.graph.basic
import
Apply
,
Constant
,
Variable
from
aesara.graph.op
import
COp
from
aesara.graph.params_type
import
ParamsType
from
aesara.link.c.op
import
COp
from
aesara.misc.safe_asarray
import
_asarray
from
aesara.scalar
import
int32
from
aesara.tensor
import
_get_vector_length
...
...
aesara/tensor/signal/pool.py
浏览文件 @
8a4505c0
...
...
@@ -12,10 +12,10 @@ import aesara.tensor.basic as at
import
aesara.tensor.math
as
tm
from
aesara.gradient
import
DisconnectedType
from
aesara.graph.basic
import
Apply
,
Constant
,
Variable
from
aesara.graph.op
import
OpenMPOp
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
EnumList
from
aesara.graph.utils
import
MethodNotDefined
from
aesara.link.c.op
import
OpenMPOp
from
aesara.scalar
import
bool
as
bool_t
from
aesara.tensor.type
import
TensorType
,
int_dtypes
...
...
aesara/tensor/subtensor.py
浏览文件 @
8a4505c0
...
...
@@ -11,10 +11,11 @@ from aesara import scalar as aes
from
aesara.configdefaults
import
config
from
aesara.gradient
import
DisconnectedType
from
aesara.graph.basic
import
Apply
,
Constant
,
Variable
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.params_type
import
ParamsType
from
aesara.graph.type
import
Type
from
aesara.graph.utils
import
MethodNotDefined
from
aesara.link.c.op
import
COp
from
aesara.misc.safe_asarray
import
_asarray
from
aesara.printing
import
Printer
,
pprint
,
set_precedence
from
aesara.scalar.basic
import
ScalarConstant
...
...
aesara/typed_list/basic.py
浏览文件 @
8a4505c0
...
...
@@ -4,7 +4,8 @@ import aesara.tensor as at
from
aesara.compile.debugmode
import
_lessbroken_deepcopy
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
,
Constant
,
Variable
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.link.c.op
import
COp
from
aesara.tensor.type
import
scalar
from
aesara.tensor.type_other
import
SliceType
from
aesara.tensor.var
import
TensorVariable
...
...
doc/extending/creating_a_c_op.rst
浏览文件 @
8a4505c0
...
...
@@ -475,7 +475,7 @@ storage with the right shape and number of dimensions.
import numpy
import aesara
from aesara.
graph
.op import COp
from aesara.
link.c
.op import COp
from aesara.graph.basic import Apply
...
...
@@ -745,7 +745,7 @@ The new :class:`Op` is defined inside a Python file with the following code :
.. testcode::
import aesara
from aesara.
graph
.op import ExternalCOp
from aesara.
link.c
.op import ExternalCOp
class VectorTimesVector(ExternalCOp):
__props__ = ()
...
...
doc/extending/other_ops.rst
浏览文件 @
8a4505c0
...
...
@@ -168,7 +168,7 @@ To allow consistent interface of Ops that support OpenMP, we have some
helper code. Doing this also allows to enable/disable OpenMP globally
or per op for fine-grained control.
Your Op needs to inherit from ``aesara.
graph
.op.OpenMPOp``. If it overrides
Your Op needs to inherit from ``aesara.
link.c
.op.OpenMPOp``. If it overrides
the ``__init__()`` method, it must have an ``openmp=None`` parameter
and must call ``super(MyOpClass, self).__init__(openmp=openmp)``.
...
...
doc/extending/using_params.rst
浏览文件 @
8a4505c0
...
...
@@ -139,8 +139,8 @@ the params type.
.. testcode::
from aesara.graph.op import COp
from aesara.graph.type import Generic
from aesara.link.c.op import COp
from aesara.scalar import as_scalar
class MulOp(COp):
...
...
setup.cfg
浏览文件 @
8a4505c0
...
...
@@ -151,6 +151,10 @@ check_untyped_defs = False
ignore_errors = True
check_untyped_defs = False
[mypy-aesara.link.c.op]
ignore_errors = True
check_untyped_defs = False
[mypy-aesara.link.utils]
ignore_errors = True
check_untyped_defs = False
...
...
tests/compile/test_debugmode.py
浏览文件 @
8a4505c0
...
...
@@ -17,9 +17,10 @@ from aesara.compile.mode import predefined_modes
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
,
Variable
from
aesara.graph.features
import
BadOptimization
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.opt
import
local_optimizer
from
aesara.graph.optdb
import
EquilibriumDB
from
aesara.link.c.op
import
COp
from
aesara.tensor.math
import
add
,
dot
,
log
from
aesara.tensor.type
import
TensorType
,
dvector
,
fmatrix
,
fvector
,
vector
from
tests
import
unittest_tools
as
utt
...
...
tests/graph/test_compute_test_value.py
浏览文件 @
8a4505c0
...
...
@@ -9,8 +9,9 @@ from aesara import scalar as aes
from
aesara.configdefaults
import
config
from
aesara.graph
import
utils
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.type
import
Type
from
aesara.link.c.op
import
COp
from
aesara.tensor.math
import
_allclose
,
dot
from
aesara.tensor.type
import
fmatrix
,
iscalar
,
matrix
,
vector
...
...
tests/graph/test_op.py
浏览文件 @
8a4505c0
...
...
@@ -4,13 +4,12 @@ import pytest
import
aesara
import
aesara.graph.op
as
op
import
aesara.tensor
as
at
from
aesara
import
scalar
as
aes
from
aesara
import
shared
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
,
Variable
from
aesara.graph.op
import
COp
,
Op
from
aesara.graph.op
import
Op
from
aesara.graph.type
import
Generic
,
Type
from
aesara.graph.utils
import
MethodNotDefined
,
TestValueError
from
aesara.graph.utils
import
TestValueError
from
aesara.tensor.math
import
log
from
aesara.tensor.type
import
dmatrix
,
dscalar
,
dvector
,
vector
...
...
@@ -82,38 +81,6 @@ class NoInputOp(Op):
output_storage
[
0
][
0
]
=
"test Op no input"
class
StructOp
(
COp
):
__props__
=
()
def
do_constant_folding
(
self
,
fgraph
,
node
):
# we are not constant
return
False
# The input only serves to distinguish thunks
def
make_node
(
self
,
i
):
return
Apply
(
self
,
[
i
],
[
aes
.
uint64
()])
def
c_support_code_struct
(
self
,
node
,
name
):
return
f
"npy_uint64 counter{name};"
def
c_init_code_struct
(
self
,
node
,
name
,
sub
):
return
f
"counter{name} = 0;"
def
c_code
(
self
,
node
,
name
,
input_names
,
outputs_names
,
sub
):
return
"""
%(out)
s = counter
%(name)
s;
counter
%(name)
s++;
"""
%
dict
(
out
=
outputs_names
[
0
],
name
=
name
)
def
c_code_cache_version
(
self
):
return
(
1
,)
def
perform
(
self
,
*
args
,
**
kwargs
):
raise
NotImplementedError
(
"No Python implementation available."
)
class
TestOp
:
# Sanity tests
...
...
@@ -141,106 +108,8 @@ class TestOp:
rval
=
f
()
assert
rval
==
"test Op no input"
@pytest.mark.skipif
(
not
config
.
cxx
,
reason
=
"G++ not available, so we need to skip this test."
)
def
test_op_struct
(
self
):
sop
=
StructOp
()
c
=
sop
(
aesara
.
tensor
.
constant
(
0
))
mode
=
None
if
config
.
mode
==
"FAST_COMPILE"
:
mode
=
"FAST_RUN"
f
=
aesara
.
function
([],
c
,
mode
=
mode
)
rval
=
f
()
assert
rval
==
0
rval
=
f
()
assert
rval
==
1
c2
=
sop
(
aesara
.
tensor
.
constant
(
1
))
f2
=
aesara
.
function
([],
[
c
,
c2
],
mode
=
mode
)
rval
=
f2
()
assert
rval
==
[
0
,
0
]
class
TestMakeThunk
:
def
test_no_c_code
(
self
):
class
IncOnePython
(
COp
):
"""An Op with only a Python (perform) implementation"""
__props__
=
()
def
make_node
(
self
,
input
):
input
=
aes
.
as_scalar
(
input
)
output
=
input
.
type
()
return
Apply
(
self
,
[
input
],
[
output
])
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
input
,)
=
inputs
(
output
,)
=
outputs
output
[
0
]
=
input
+
1
i
=
aes
.
int32
(
"i"
)
o
=
IncOnePython
()(
i
)
# Check that the c_code function is not implemented
with
pytest
.
raises
(
NotImplementedError
):
o
.
owner
.
op
.
c_code
(
o
.
owner
,
"o"
,
[
"x"
],
"z"
,
{
"fail"
:
""
})
storage_map
=
{
i
:
[
np
.
int32
(
3
)],
o
:
[
None
]}
compute_map
=
{
i
:
[
True
],
o
:
[
False
]}
thunk
=
o
.
owner
.
op
.
make_thunk
(
o
.
owner
,
storage_map
,
compute_map
,
no_recycling
=
[]
)
required
=
thunk
()
# Check everything went OK
assert
not
required
# We provided all inputs
assert
compute_map
[
o
][
0
]
assert
storage_map
[
o
][
0
]
==
4
def
test_no_perform
(
self
):
class
IncOneC
(
COp
):
"""An Op with only a C (c_code) implementation"""
__props__
=
()
def
make_node
(
self
,
input
):
input
=
aes
.
as_scalar
(
input
)
output
=
input
.
type
()
return
Apply
(
self
,
[
input
],
[
output
])
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
(
x
,)
=
inputs
(
z
,)
=
outputs
return
f
"{z} = {x} + 1;"
def
perform
(
self
,
*
args
,
**
kwargs
):
raise
NotImplementedError
(
"No Python implementation available."
)
i
=
aes
.
int32
(
"i"
)
o
=
IncOneC
()(
i
)
# Check that the perform function is not implemented
with
pytest
.
raises
((
NotImplementedError
,
MethodNotDefined
)):
o
.
owner
.
op
.
perform
(
o
.
owner
,
0
,
[
None
])
storage_map
=
{
i
:
[
np
.
int32
(
3
)],
o
:
[
None
]}
compute_map
=
{
i
:
[
True
],
o
:
[
False
]}
thunk
=
o
.
owner
.
op
.
make_thunk
(
o
.
owner
,
storage_map
,
compute_map
,
no_recycling
=
[]
)
if
config
.
cxx
:
required
=
thunk
()
# Check everything went OK
assert
not
required
# We provided all inputs
assert
compute_map
[
o
][
0
]
assert
storage_map
[
o
][
0
]
==
4
else
:
with
pytest
.
raises
((
NotImplementedError
,
MethodNotDefined
)):
thunk
()
def
test_no_make_node
(
self
):
class
DoubleOp
(
Op
):
"""An Op without make_node"""
...
...
tests/graph/test_params_type.py
浏览文件 @
8a4505c0
...
...
@@ -4,9 +4,9 @@ import pytest
import
aesara
from
aesara
import
tensor
as
at
from
aesara.graph.basic
import
Apply
from
aesara.graph.op
import
COp
,
ExternalCOp
from
aesara.graph.params_type
import
Params
,
ParamsType
from
aesara.graph.type
import
EnumList
,
Generic
from
aesara.link.c.op
import
COp
,
ExternalCOp
from
aesara.scalar
import
Scalar
from
aesara.tensor.type
import
TensorType
,
matrix
from
tests
import
unittest_tools
as
utt
...
...
tests/graph/test_types.py
浏览文件 @
8a4505c0
...
...
@@ -6,8 +6,8 @@ import pytest
import
aesara
from
aesara
import
scalar
as
aes
from
aesara.graph.basic
import
Apply
,
Variable
from
aesara.graph.op
import
COp
from
aesara.graph.type
import
CDataType
,
CEnumType
,
EnumList
,
EnumType
,
Type
from
aesara.link.c.op
import
COp
from
aesara.tensor.type
import
TensorType
,
continuous_dtypes
...
...
tests/link/c/test_basic.py
浏览文件 @
8a4505c0
...
...
@@ -7,10 +7,10 @@ from aesara.compile.mode import Mode
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
,
Constant
,
Variable
from
aesara.graph.fg
import
FunctionGraph
from
aesara.graph.op
import
COp
from
aesara.graph.type
import
CType
from
aesara.link.basic
import
PerformLinker
from
aesara.link.c.basic
import
CLinker
,
DualLinker
,
OpWiseCLinker
from
aesara.link.c.op
import
COp
from
aesara.tensor.type
import
iscalar
,
matrix
,
vector
from
tests.link.test_link
import
make_function
...
...
tests/link/c/test_op.py
0 → 100644
浏览文件 @
8a4505c0
import
numpy
as
np
import
pytest
import
aesara
from
aesara
import
scalar
as
aes
from
aesara.configdefaults
import
config
from
aesara.graph.basic
import
Apply
from
aesara.graph.utils
import
MethodNotDefined
from
aesara.link.c.op
import
COp
class StructOp(COp):
    """A `COp` whose C struct holds a counter incremented on every call.

    The Op has no Python implementation; it exists to test that per-thunk
    C struct state is initialized exactly once (``c_init_code_struct``)
    and then persists across calls of the compiled function.
    """

    __props__ = ()

    def do_constant_folding(self, fgraph, node):
        # The output changes on every call (it reads mutable struct
        # state), so it must never be folded into a constant.
        return False

    # The input only serves to distinguish thunks
    def make_node(self, i):
        return Apply(self, [i], [aes.uint64()])

    def c_support_code_struct(self, node, name):
        # Declare the per-thunk counter inside the generated C struct.
        return f"npy_uint64 counter{name};"

    def c_init_code_struct(self, node, name, sub):
        # Runs once when the struct is created, not on every call.
        return f"counter{name} = 0;"

    def c_code(self, node, name, input_names, outputs_names, sub):
        # f-string for consistency with the other c_* methods above
        # (this previously used %-dict formatting); the emitted C code
        # is byte-identical to the original.
        out = outputs_names[0]
        return f"""
{out} = counter{name};
counter{name}++;
"""

    def c_code_cache_version(self):
        return (1,)

    def perform(self, *args, **kwargs):
        raise NotImplementedError("No Python implementation available.")
class TestCOp:
    @pytest.mark.skipif(
        not config.cxx, reason="G++ not available, so we need to skip this test."
    )
    def test_op_struct(self):
        """Struct-level counter state must persist across calls of one thunk."""
        struct_op = StructOp()
        out = struct_op(aesara.tensor.constant(0))

        # FAST_COMPILE would bypass the C implementation under test.
        mode = "FAST_RUN" if config.mode == "FAST_COMPILE" else None

        fn = aesara.function([], out, mode=mode)
        # Each call bumps the counter stored in the thunk's C struct.
        assert fn() == 0
        assert fn() == 1

        # A different input yields a distinct thunk with its own
        # freshly-initialized counter, so both outputs start at zero.
        out2 = struct_op(aesara.tensor.constant(1))
        fn2 = aesara.function([], [out, out2], mode=mode)
        assert fn2() == [0, 0]
class TestMakeThunk:
    def test_no_c_code(self):
        """`make_thunk` falls back to `perform` when no C code exists."""

        class IncOnePython(COp):
            """An Op with only a Python (perform) implementation"""

            __props__ = ()

            def make_node(self, input):
                scalar_in = aes.as_scalar(input)
                return Apply(self, [scalar_in], [scalar_in.type()])

            def perform(self, node, inputs, outputs):
                outputs[0][0] = inputs[0] + 1

        inp = aes.int32("i")
        out = IncOnePython()(inp)

        # The C implementation must be absent for this Op.
        with pytest.raises(NotImplementedError):
            out.owner.op.c_code(out.owner, "o", ["x"], "z", {"fail": ""})

        storage_map = {inp: [np.int32(3)], out: [None]}
        compute_map = {inp: [True], out: [False]}

        thunk = out.owner.op.make_thunk(
            out.owner, storage_map, compute_map, no_recycling=[]
        )

        # All inputs were provided, so nothing further is required,
        # and the Python fallback computes 3 + 1.
        assert not thunk()
        assert compute_map[out][0]
        assert storage_map[out][0] == 4

    def test_no_perform(self):
        """`make_thunk` uses the C implementation when `perform` is missing."""

        class IncOneC(COp):
            """An Op with only a C (c_code) implementation"""

            __props__ = ()

            def make_node(self, input):
                scalar_in = aes.as_scalar(input)
                return Apply(self, [scalar_in], [scalar_in.type()])

            def c_code(self, node, name, inputs, outputs, sub):
                (x_name,) = inputs
                (z_name,) = outputs
                return f"{z_name} = {x_name} + 1;"

            def perform(self, *args, **kwargs):
                raise NotImplementedError("No Python implementation available.")

        inp = aes.int32("i")
        out = IncOneC()(inp)

        # The Python implementation must be absent for this Op.
        with pytest.raises((NotImplementedError, MethodNotDefined)):
            out.owner.op.perform(out.owner, 0, [None])

        storage_map = {inp: [np.int32(3)], out: [None]}
        compute_map = {inp: [True], out: [False]}

        thunk = out.owner.op.make_thunk(
            out.owner, storage_map, compute_map, no_recycling=[]
        )

        if config.cxx:
            # With a C compiler available, the thunk computes 3 + 1.
            assert not thunk()
            assert compute_map[out][0]
            assert storage_map[out][0] == 4
        else:
            # Without a compiler there is no usable implementation at all.
            with pytest.raises((NotImplementedError, MethodNotDefined)):
                thunk()
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论