提交 739d97dd authored 作者: Virgile Andreani's avatar Virgile Andreani 提交者: Virgile Andreani

Removed unused config options

上级 aab9faee
......@@ -103,14 +103,6 @@ import ``pytensor`` and print the config variable, as in:
String value: either ``'cpu'``
.. attribute:: force_device
Bool value: either ``True`` or ``False``
Default: ``False``
This flag's value cannot be modified during the program execution.
.. attribute:: print_active_device
Bool value: either ``True`` or ``False``
......@@ -139,16 +131,6 @@ import ``pytensor`` and print the config variable, as in:
equal to ``float64`` is created.
This can be used to help find upcasts to ``float64`` in user code.
.. attribute:: deterministic
String value: either ``'default'``, ``'more'``
Default: ``'default'``
If ``more``, sometimes PyTensor will select :class:`Op` implementations that
are more "deterministic", but slower. See the ``dnn.conv.algo*``
flags for more cases.
.. attribute:: allow_gc
Bool value: either ``True`` or ``False``
......@@ -412,16 +394,6 @@ import ``pytensor`` and print the config variable, as in:
ignore it (i.e. ``'ignore'``).
We suggest never using ``'ignore'`` except during testing.
.. attribute:: assert_no_cpu_op
String value: ``'ignore'`` or ``'warn'`` or ``'raise'`` or ``'pdb'``
Default: ``'ignore'``
If there is a CPU :class:`Op` in the computational graph, depending on its value,
this flag can either raise a warning, an exception or drop into the frame
with ``pdb``.
.. attribute:: on_shape_error
String value: ``'warn'`` or ``'raise'``
......@@ -797,18 +769,3 @@ import ``pytensor`` and print the config variable, as in:
The verbosity level of the meta-rewriter: ``0`` for silent, ``1`` to only
warn when PyTensor cannot meta-rewrite an :class:`Op`, ``2`` for full output (e.g.
timings and the rewrites selected).
.. attribute:: config.metaopt__optimizer_excluding
Default: ``""``
A list of rewrite tags that we don't want included in the meta-rewriter.
Multiple tags are separated by ``':'``.
.. attribute:: config.metaopt__optimizer_including
Default: ``""``
A list of rewriter tags to be included during meta-rewriting.
Multiple tags are separated by ``':'``.
......@@ -260,15 +260,6 @@ def add_basic_configvars():
),
)
config.add(
"deterministic",
"If `more`, sometimes we will select some implementation that "
"are more deterministic, but slower. Also see "
"the dnn.conv.algo* flags to cover more cases.",
EnumStr("default", ["more"]),
in_c_key=False,
)
config.add(
"device",
("Default device for computations. only cpu is supported for now"),
......@@ -276,13 +267,6 @@ def add_basic_configvars():
in_c_key=False,
)
config.add(
"force_device",
"Raise an error if we can't use the specified device",
BoolParam(False, mutable=False),
in_c_key=False,
)
config.add(
"conv__assert_shape",
"If True, AbstractConv* ops will verify that user-provided"
......@@ -299,14 +283,6 @@ def add_basic_configvars():
in_c_key=False,
)
# This flag determines whether or not to raise error/warning message if
# there is a CPU Op in the computational graph.
config.add(
"assert_no_cpu_op",
"Raise an error/warning if there is a CPU op in the computational graph.",
EnumStr("ignore", ["warn", "raise", "pdb"], mutable=True),
in_c_key=False,
)
config.add(
"unpickle_function",
(
......@@ -1043,20 +1019,6 @@ def add_metaopt_configvars():
in_c_key=False,
)
config.add(
"metaopt__optimizer_excluding",
("exclude optimizers with these tags. Separate tags with ':'."),
StrParam(""),
in_c_key=False,
)
config.add(
"metaopt__optimizer_including",
("include optimizers with these tags. Separate tags with ':'."),
StrParam(""),
in_c_key=False,
)
def add_vm_configvars():
config.add(
......@@ -1295,55 +1257,6 @@ def add_caching_dir_configvars():
)
# Those are the options provided by PyTensor to choose algorithms at runtime.
SUPPORTED_DNN_CONV_ALGO_RUNTIME = (
"guess_once",
"guess_on_shape_change",
"time_once",
"time_on_shape_change",
)
# Those are the supported algorithm by PyTensor,
# The tests will reference those lists.
SUPPORTED_DNN_CONV_ALGO_FWD = (
"small",
"none",
"large",
"fft",
"fft_tiling",
"winograd",
"winograd_non_fused",
*SUPPORTED_DNN_CONV_ALGO_RUNTIME,
)
SUPPORTED_DNN_CONV_ALGO_BWD_DATA = (
"none",
"deterministic",
"fft",
"fft_tiling",
"winograd",
"winograd_non_fused",
*SUPPORTED_DNN_CONV_ALGO_RUNTIME,
)
SUPPORTED_DNN_CONV_ALGO_BWD_FILTER = (
"none",
"deterministic",
"fft",
"small",
"winograd_non_fused",
"fft_tiling",
*SUPPORTED_DNN_CONV_ALGO_RUNTIME,
)
SUPPORTED_DNN_CONV_PRECISION = (
"as_input_f32",
"as_input",
"float16",
"float32",
"float64",
)
# Eventually, the instance of `PyTensorConfigParser` should be created right here,
# where it is also populated with settings.
config = _create_default_config()
......
......@@ -75,6 +75,7 @@ class PyTensorConfigParser:
pickle_test_value: bool
cast_policy: str
device: str
conv__assert_shape: bool
print_global_stats: bool
unpickle_function: bool
# add_compile_configvars
......@@ -86,6 +87,7 @@ class PyTensorConfigParser:
optimizer_verbose: bool
on_opt_error: str
nocleanup: bool
on_unused_input: str
gcc__cxxflags: str
cmodule__warn_no_version: bool
cmodule__remove_gxx_opt: bool
......@@ -93,6 +95,7 @@ class PyTensorConfigParser:
cmodule__preload_cache: bool
cmodule__age_thresh_use: int
cmodule__debug: bool
compile__wait: int
compile__timeout: int
# add_tensor_configvars
tensor__cmp_sloppy: int
......@@ -143,6 +146,7 @@ class PyTensorConfigParser:
optdb__max_use_ratio: float
cycle_detection: str
check_stack_trace: str
# add_metaopt_configvars
metaopt__verbose: int
# add_vm_configvars
profile: bool
......@@ -177,7 +181,6 @@ class PyTensorConfigParser:
self._pytensor_cfg = pytensor_cfg
self._pytensor_raw_cfg = pytensor_raw_cfg
self._config_var_dict: dict = {}
super().__init__()
def __str__(self, print_doc=True):
sio = StringIO()
......@@ -375,7 +378,6 @@ class ConfigParam:
# more appropriate user-provided default value.
# Calling `filter` here may actually be harmful if the default value is
# invalid and causes a crash or has unwanted side effects.
super().__init__()
@property
def default(self):
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论