提交 320f66a6 authored 作者: Virgile Andreani's avatar Virgile Andreani 提交者: Virgile Andreani

Remove unused config options

上级 eaf05be6
...@@ -510,116 +510,12 @@ import ``pytensor`` and print the config variable, as in: ...@@ -510,116 +510,12 @@ import ``pytensor`` and print the config variable, as in:
Removing these asserts can speed up execution. Removing these asserts can speed up execution.
.. attribute:: config.dnn__enabled
String value: ``'auto'``, ``'True'``, ``'False'``, ``'no_check'``
Default: ``'auto'``
If ``'auto'``, automatically detect and use
`cuDNN <https://developer.nvidia.com/cudnn>`_ when it is available.
If cuDNN is unavailable, do not raise an error.
If ``'True'``, require the use of cuDNN. If cuDNN is unavailable, raise an error.
If ``'False'``, neither use cuDNN nor check if it is available.
If ``'no_check'``, assume cuDNN is present and that the versions between the
header and library match.
.. attribute:: config.dnn__include_path
Default: ``include`` sub-folder in CUDA root directory, or headers paths defined for the compiler.
Location of the cuDNN header.
.. attribute:: config.dnn__library_path
Default: Library sub-folder (``lib64`` on Linux) in CUDA root directory, or
libraries paths defined for the compiler.
Location of the cuDNN library.
.. attribute:: config.conv__assert_shape .. attribute:: config.conv__assert_shape
If ``True``, ``AbstractConv*`` :class:`Op`\s will verify that user-provided shapes If ``True``, ``AbstractConv*`` :class:`Op`\s will verify that user-provided shapes
match the run-time shapes. This is a debugging option, and may slow down match the run-time shapes. This is a debugging option, and may slow down
compilation. compilation.
.. attribute:: config.dnn.conv.workmem
Deprecated, use :attr:`config.dnn__conv__algo_fwd`.
.. attribute:: config.dnn.conv.workmem_bwd
Deprecated, use :attr:`config.dnn__conv__algo_bwd_filter` and
:attr:`config.dnn__conv__algo_bwd_data` instead.
.. attribute:: config.dnn__conv__algo_fwd
String value:
``'small'``, ``'none'``, ``'large'``, ``'fft'``, ``'fft_tiling'``,
``'winograd'``, ``'winograd_non_fused'``, ``'guess_once'``, ``'guess_on_shape_change'``,
``'time_once'``, ``'time_on_shape_change'``.
Default: ``'small'``
3d convolution only supports ``'none'``, ``'small'``, ``'fft_tiling'``, ``'guess_once'``,
``'guess_on_shape_change'``, ``'time_once'``, ``'time_on_shape_change'``.
.. attribute:: config.dnn.conv.algo_bwd
Deprecated, use :attr:`config.dnn__conv__algo_bwd_filter` and
:attr:`config.dnn__conv__algo_bwd_data` instead.
.. attribute:: config.dnn__conv__algo_bwd_filter
String value:
``'none'``, ``'deterministic'``, ``'fft'``, ``'small'``, ``'winograd_non_fused'``, ``'fft_tiling'``, ``'guess_once'``,
``'guess_on_shape_change'``, ``'time_once'``, ``'time_on_shape_change'``.
Default: ``'none'``
3d convolution only supports ``'none'``, ``'small'``, ``'guess_once'``,
``'guess_on_shape_change'``, ``'time_once'``, ``'time_on_shape_change'``.
.. attribute:: config.dnn__conv__algo_bwd_data
String value:
``'none'``, ``'deterministic'``, ``'fft'``, ``'fft_tiling'``, ``'winograd'``,
``'winograd_non_fused'``, ``'guess_once'``, ``'guess_on_shape_change'``, ``'time_once'``,
``'time_on_shape_change'``.
Default: ``'none'``
3d convolution only supports ``'none'``, ``'deterministic'``, ``'fft_tiling'``,
``'guess_once'``, ``'guess_on_shape_change'``, ``'time_once'``,
``'time_on_shape_change'``.
.. attribute:: config.magma__enabled
String value: ``'True'``, ``'False'``
Default: ``'False'``
If ``'True'``, use `magma <http://icl.cs.utk.edu/magma/>`_ for matrix
computations.
If ``'False'``, disable magma.
.. attribute:: config.magma__include_path
Default: ``''``
Location of the magma headers.
.. attribute:: config.magma__library_path
Default: ``''``
Location of the magma library.
.. attribute:: config.ctc__root .. attribute:: config.ctc__root
Default: ``''`` Default: ``''``
......
...@@ -34,62 +34,6 @@ from pytensor.utils import ( ...@@ -34,62 +34,6 @@ from pytensor.utils import (
_logger = logging.getLogger("pytensor.configdefaults") _logger = logging.getLogger("pytensor.configdefaults")
def get_cuda_root() -> Path | None:
# We look for the cuda path since we need headers from there
if (v := os.getenv("CUDA_ROOT")) is not None:
return Path(v)
if (v := os.getenv("CUDA_PATH")) is not None:
return Path(v)
if (s := os.getenv("PATH")) is None:
return Path()
for dir in s.split(os.pathsep):
if (Path(dir) / "nvcc").exists():
return Path(dir).absolute().parent
return None
def default_cuda_include() -> Path | None:
    """Return the CUDA ``include`` directory, or ``None`` if no CUDA root is configured."""
    root = config.cuda__root
    return root / "include" if root else None
def default_dnn_base_path() -> Path | None:
    """Default cuDNN base directory.

    Defaults to the CUDA root when ``cudnn.h`` is installed there;
    ``None`` otherwise.
    """
    root = config.cuda__root
    # The header location does not vary between operating systems.
    if root and (root / "include/cudnn.h").exists():
        return root
    return None
def default_dnn_inc_path() -> Path | None:
    """Default cuDNN header directory: ``<dnn base>/include``, or ``None`` when unset."""
    base = config.dnn__base_path
    if not base:
        return None
    return base / "include"
def default_dnn_lib_path() -> Path | None:
    """Default cuDNN library directory under the configured base path.

    ``lib/x64`` on Windows, ``lib`` on macOS, ``lib64`` elsewhere (Linux).
    Returns ``None`` when no base path is configured.
    """
    base = config.dnn__base_path
    if not base:
        return None
    subdir = {"win32": "lib/x64", "darwin": "lib"}.get(sys.platform, "lib64")
    return base / subdir
def default_dnn_bin_path() -> Path | None:
    """Default directory of the cuDNN runtime binaries.

    On Windows the DLLs live in ``<base>/bin``; on other platforms the
    shared libraries sit in the library directory, so fall back to
    ``config.dnn__library_path``. Returns ``None`` when no base path is set.
    """
    base = config.dnn__base_path
    if not base:
        return None
    if sys.platform == "win32":
        return base / "bin"
    return config.dnn__library_path
def _filter_mode(val): def _filter_mode(val):
# Do not add FAST_RUN_NOGC to this list (nor any other ALL CAPS shortcut). # Do not add FAST_RUN_NOGC to this list (nor any other ALL CAPS shortcut).
# The way to get FAST_RUN_NOGC is with the flag 'linker=c|py_nogc'. # The way to get FAST_RUN_NOGC is with the flag 'linker=c|py_nogc'.
...@@ -607,15 +551,6 @@ def add_compile_configvars(): ...@@ -607,15 +551,6 @@ def add_compile_configvars():
in_c_key=False, in_c_key=False,
) )
config.add(
"ctc__root",
"Directory which contains the root of Baidu CTC library. It is assumed \
that the compiled library is either inside the build, lib or lib64 \
subdirectory, and the header inside the include directory.",
StrParam("", mutable=False),
in_c_key=False,
)
def _is_valid_cmp_sloppy(v): def _is_valid_cmp_sloppy(v):
return v in (0, 1, 2) return v in (0, 1, 2)
......
...@@ -74,12 +74,8 @@ class PyTensorConfigParser: ...@@ -74,12 +74,8 @@ class PyTensorConfigParser:
warn_float64: str warn_float64: str
pickle_test_value: bool pickle_test_value: bool
cast_policy: str cast_policy: str
deterministic: str
device: str device: str
force_device: bool
conv__assert_shape: bool
print_global_stats: bool print_global_stats: bool
assert_no_cpu_op: str
unpickle_function: bool unpickle_function: bool
# add_compile_configvars # add_compile_configvars
mode: str mode: str
...@@ -90,7 +86,6 @@ class PyTensorConfigParser: ...@@ -90,7 +86,6 @@ class PyTensorConfigParser:
optimizer_verbose: bool optimizer_verbose: bool
on_opt_error: str on_opt_error: str
nocleanup: bool nocleanup: bool
on_unused_import: str
gcc__cxxflags: str gcc__cxxflags: str
cmodule__warn_no_version: bool cmodule__warn_no_version: bool
cmodule__remove_gxx_opt: bool cmodule__remove_gxx_opt: bool
...@@ -98,9 +93,7 @@ class PyTensorConfigParser: ...@@ -98,9 +93,7 @@ class PyTensorConfigParser:
cmodule__preload_cache: bool cmodule__preload_cache: bool
cmodule__age_thresh_use: int cmodule__age_thresh_use: int
cmodule__debug: bool cmodule__debug: bool
compile__wait: int
compile__timeout: int compile__timeout: int
ctc__root: str
# add_tensor_configvars # add_tensor_configvars
tensor__cmp_sloppy: int tensor__cmp_sloppy: int
lib__amblibm: bool lib__amblibm: bool
...@@ -151,8 +144,6 @@ class PyTensorConfigParser: ...@@ -151,8 +144,6 @@ class PyTensorConfigParser:
cycle_detection: str cycle_detection: str
check_stack_trace: str check_stack_trace: str
metaopt__verbose: int metaopt__verbose: int
metaopt__optimizer_excluding: str
metaopt__optimizer_including: str
# add_vm_configvars # add_vm_configvars
profile: bool profile: bool
profile_optimizer: bool profile_optimizer: bool
...@@ -175,10 +166,6 @@ class PyTensorConfigParser: ...@@ -175,10 +166,6 @@ class PyTensorConfigParser:
# add_blas_configvars # add_blas_configvars
blas__ldflags: str blas__ldflags: str
blas__check_openmp: bool blas__check_openmp: bool
# add CUDA (?)
cuda__root: Path | None
dnn__base_path: Path | None
dnn__library_path: Path | None
def __init__( def __init__(
self, self,
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论