Commit 020fc625 authored by Mohammad Pezeshki

configdefaults.py in pep8

Parent f7a5312c
@@ -24,7 +24,7 @@ def floatX_convert(s):
 AddConfigVar('floatX',
              "Default floating-point precision for python casts",
              EnumStr('float64', 'float32', convert=floatX_convert,),
-    )
+             )

 AddConfigVar('warn_float64',
              "Do an action when a tensor variable with float64 dtype is"
@@ -32,20 +32,20 @@ AddConfigVar('warn_float64',
              " gpu back-end and are slow with gamer GPUs.",
              EnumStr('ignore', 'warn', 'raise', 'pdb'),
              in_c_key=False,
-    )
+             )

 AddConfigVar('cast_policy',
-        "Rules for implicit type casting",
-        EnumStr('custom', 'numpy+floatX',
-            # The 'numpy' policy was originally planned to provide a smooth
-            # transition from numpy. It was meant to behave the same as
-            # numpy+floatX, but keeping float64 when numpy would. However
-            # the current implementation of some cast mechanisms makes it
-            # a bit more complex to add than what was expected, so it is
-            # currently not available.
-            # numpy,
-            ),
-        )
+             'Rules for implicit type casting',
+             EnumStr('custom', 'numpy+floatX',
+                     # The 'numpy' policy was originally planned to provide a
+                     # smooth transition from numpy. It was meant to behave the
+                     # same as numpy+floatX, but keeping float64 when numpy
+                     # would. However the current implementation of some cast
+                     # mechanisms makes it a bit more complex to add than what
+                     # was expected, so it is currently not available.
+                     # numpy,
+                     ),
+             )

 # python 2.* define int / int to return int and int // int to return int.
 # python 3* define int / int to return float and int // int to return int.
@@ -53,10 +53,10 @@ AddConfigVar('cast_policy',
 # than numpy. When we will do the transition, we should create an int_warn
 # and floatX_warn option.
 AddConfigVar('int_division',
-        "What to do when one computes x / y, where both x and y are of "
-        "integer types",
-        EnumStr('int', 'raise', 'floatX'),
-        in_c_key=False)
+             "What to do when one computes x / y, where both x and y are of "
+             "integer types",
+             EnumStr('int', 'raise', 'floatX'),
+             in_c_key=False)

 # gpu means let the driver select the gpu. Needed in case of gpu in
 # exclusive mode.
@@ -82,14 +82,14 @@ class DeviceParam(ConfigParam):
     def __str__(self):
         return '%s (cpu, gpu*, opencl*, cuda*) ' % (self.fullname,)

-AddConfigVar('device',
-        ("Default device for computations. If gpu*, change the default to try "
-         "to move computation to it and to put shared variable of float32 "
-         "on it. Do not use upper case letters, only lower case even if "
-         "NVIDIA use capital letters."),
-        DeviceParam('cpu', allow_override=False),
-        in_c_key=False,
-        )
+AddConfigVar(
+    'device',
+    ("Default device for computations. If gpu*, change the default to try "
+     "to move computation to it and to put shared variable of float32 "
+     "on it. Do not use upper case letters, only lower case even if "
+     "NVIDIA use capital letters."),
+    DeviceParam('cpu', allow_override=False),
+    in_c_key=False,)

 AddConfigVar('gpuarray.init_device',
              """
@@ -99,28 +99,31 @@ AddConfigVar('gpuarray.init_device',
              StrParam(''),
              in_c_key=False)

-AddConfigVar('init_gpu_device',
-        ("Initialize the gpu device to use, works only if device=cpu. "
-         "Unlike 'device', setting this option will NOT move computations, "
-         "nor shared variables, to the specified GPU. "
-         "It can be used to run GPU-specific tests on a particular GPU."),
-        EnumStr('', 'gpu',
+AddConfigVar(
+    'init_gpu_device',
+    ("Initialize the gpu device to use, works only if device=cpu. "
+     "Unlike 'device', setting this option will NOT move computations, "
+     "nor shared variables, to the specified GPU. "
+     "It can be used to run GPU-specific tests on a particular GPU."),
+    EnumStr('', 'gpu',
             'gpu0', 'gpu1', 'gpu2', 'gpu3',
             'gpu4', 'gpu5', 'gpu6', 'gpu7',
             'gpu8', 'gpu9', 'gpu10', 'gpu11',
             'gpu12', 'gpu13', 'gpu14', 'gpu15',
-                allow_override=False),
-        in_c_key=False)
+            allow_override=False),
+    in_c_key=False)

-AddConfigVar('force_device',
-        "Raise an error if we can't use the specified device",
-        BoolParam(False, allow_override=False),
-        in_c_key=False)
+AddConfigVar(
+    'force_device',
+    "Raise an error if we can't use the specified device",
+    BoolParam(False, allow_override=False),
+    in_c_key=False)

-AddConfigVar('print_active_device',
-        "Print active device at when the GPU device is initialized.",
-        BoolParam(True, allow_override=False),
-        in_c_key=False)
+AddConfigVar(
+    'print_active_device',
+    "Print active device at when the GPU device is initialized.",
+    BoolParam(True, allow_override=False),
+    in_c_key=False)


 # Do not add FAST_RUN_NOGC to this list (nor any other ALL CAPS shortcut).
@@ -129,11 +132,12 @@ AddConfigVar('print_active_device',
 # scalable.
 # Also, please be careful not to modify the first item in the enum when adding
 # new modes, since it is the default mode.
-AddConfigVar('mode',
-        "Default compilation mode",
-        EnumStr('Mode', 'ProfileMode', 'DebugMode', 'FAST_RUN',
-                'FAST_COMPILE', 'PROFILE_MODE', 'DEBUG_MODE'),
-        in_c_key=False)
+AddConfigVar(
+    'mode',
+    "Default compilation mode",
+    EnumStr('Mode', 'ProfileMode', 'DebugMode', 'FAST_RUN',
+            'FAST_COMPILE', 'PROFILE_MODE', 'DEBUG_MODE'),
+    in_c_key=False)


 param = "g++"
@@ -209,22 +213,24 @@ AddConfigVar('allow_gc',
              in_c_key=False)

 # Keep the default optimizer the same as the one for the mode FAST_RUN
-AddConfigVar('optimizer',
-        ("Default optimizer. If not None, will use this linker with the Mode "
-         "object (not ProfileMode(deprecated) or DebugMode)"),
-        EnumStr('fast_run', 'merge', 'fast_compile', 'None'),
-        in_c_key=False)
+AddConfigVar(
+    'optimizer',
+    ("Default optimizer. If not None, will use this linker with the Mode "
+     "object (not ProfileMode(deprecated) or DebugMode)"),
+    EnumStr('fast_run', 'merge', 'fast_compile', 'None'),
+    in_c_key=False)

 AddConfigVar('optimizer_verbose',
              "If True, we print all optimization being applied",
              BoolParam(False),
              in_c_key=False)

-AddConfigVar('on_opt_error',
-        ("What to do when an optimization crashes: warn and skip it, raise "
-         "the exception, or fall into the pdb debugger."),
-        EnumStr('warn', 'raise', 'pdb'),
-        in_c_key=False)
+AddConfigVar(
+    'on_opt_error',
+    ("What to do when an optimization crashes: warn and skip it, raise "
+     "the exception, or fall into the pdb debugger."),
+    EnumStr('warn', 'raise', 'pdb'),
+    in_c_key=False)


 def safe_no_home(home):
@@ -239,23 +245,25 @@ def safe_no_home(home):
     """
     if home:
         raise RuntimeError(
-                'The `config.home` option has been removed and should not be '
-                'used anymore. Please set the `config.base_compiledir` option '
-                'instead (for instance to: %s)' %
-                os.path.join(home, '.theano'))
+            'The `config.home` option has been removed and should not be '
+            'used anymore. Please set the `config.base_compiledir` option '
+            'instead (for instance to: %s)' %
+            os.path.join(home, '.theano'))
     return True


-AddConfigVar('home',
-        "This config option was removed in 0.5: do not use it!",
-        ConfigParam('', allow_override=False, filter=safe_no_home),
-        in_c_key=False)
+AddConfigVar(
+    'home',
+    "This config option was removed in 0.5: do not use it!",
+    ConfigParam('', allow_override=False, filter=safe_no_home),
+    in_c_key=False)

-AddConfigVar('nocleanup',
-        "Suppress the deletion of code files that did not compile cleanly",
-        BoolParam(False),
-        in_c_key=False)
+AddConfigVar(
+    'nocleanup',
+    "Suppress the deletion of code files that did not compile cleanly",
+    BoolParam(False),
+    in_c_key=False)

 AddConfigVar('on_unused_input',
              "What to do if a variable in the 'inputs' list of "
@@ -267,40 +275,46 @@ AddConfigVar('on_unused_input',
 # So changing it after import will not modify these global variables.
 # This could be done differently... but for now we simply prevent it from being
 # changed at runtime.
-AddConfigVar('tensor.cmp_sloppy',
-        "Relax tensor._allclose (0) not at all, (1) a bit, (2) more",
-        IntParam(0, lambda i: i in (0, 1, 2), allow_override=False),
-        in_c_key=False)
-
-AddConfigVar('tensor.local_elemwise_fusion',
-        ("Enable or not in fast_run mode(fast_run optimization) the elemwise "
-         "fusion optimization"),
-        BoolParam(True),
-        in_c_key=False)
-
-AddConfigVar('gpu.local_elemwise_fusion',
-        ("Enable or not in fast_run mode(fast_run optimization) the gpu "
-         "elemwise fusion optimization"),
-        BoolParam(True),
-        in_c_key=False)
+AddConfigVar(
+    'tensor.cmp_sloppy',
+    "Relax tensor._allclose (0) not at all, (1) a bit, (2) more",
+    IntParam(0, lambda i: i in (0, 1, 2), allow_override=False),
+    in_c_key=False)
+
+AddConfigVar(
+    'tensor.local_elemwise_fusion',
+    ("Enable or not in fast_run mode(fast_run optimization) the elemwise "
+     "fusion optimization"),
+    BoolParam(True),
+    in_c_key=False)
+
+AddConfigVar(
+    'gpu.local_elemwise_fusion',
+    ("Enable or not in fast_run mode(fast_run optimization) the gpu "
+     "elemwise fusion optimization"),
+    BoolParam(True),
+    in_c_key=False)

 # http://developer.amd.com/CPU/LIBRARIES/LIBM/Pages/default.aspx
-AddConfigVar('lib.amdlibm',
-        "Use amd's amdlibm numerical library",
-        BoolParam(False))
-
-AddConfigVar('gpuelemwise.sync',
-        "when true, wait that the gpu fct finished and check it error code.",
-        BoolParam(True),
-        in_c_key=False)
-
-AddConfigVar('traceback.limit',
-        "The number of stack to trace. -1 mean all.",
-        # We default to 6 to be able to know where v1 + v2 is created in the
-        # user script. The bigger this number is, the more run time it takes.
-        # We need to default to 7 to support theano.tensor.tensor(...).
-        IntParam(7),
-        in_c_key=False)
+AddConfigVar(
+    'lib.amdlibm',
+    "Use amd's amdlibm numerical library",
+    BoolParam(False))
+
+AddConfigVar(
+    'gpuelemwise.sync',
+    "when true, wait that the gpu fct finished and check it error code.",
+    BoolParam(True),
+    in_c_key=False)
+
+AddConfigVar(
+    'traceback.limit',
+    "The number of stack to trace. -1 mean all.",
+    # We default to 6 to be able to know where v1 + v2 is created in the
+    # user script. The bigger this number is, the more run time it takes.
+    # We need to default to 7 to support theano.tensor.tensor(...).
+    IntParam(7),
+    in_c_key=False)

 AddConfigVar('experimental.mrg',
              "Another random number generator that work on the gpu",
@@ -329,14 +343,14 @@ AddConfigVar('numpy.seterr_all',
              "by the following flags: seterr_divide, seterr_over, "
              "seterr_under and seterr_invalid."),
              EnumStr('ignore', 'warn', 'raise', 'call', 'print', 'log', 'None',
-                allow_override=False),
+                     allow_override=False),
              in_c_key=False)

 AddConfigVar('numpy.seterr_divide',
              ("Sets numpy's behavior for division by zero, see numpy.seterr. "
               "'None' means using the default, defined by numpy.seterr_all."),
              EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
-                allow_override=False),
+                     allow_override=False),
              in_c_key=False)

 AddConfigVar('numpy.seterr_over',
@@ -344,7 +358,7 @@ AddConfigVar('numpy.seterr_over',
               "see numpy.seterr. "
               "'None' means using the default, defined by numpy.seterr_all."),
              EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
-                allow_override=False),
+                     allow_override=False),
              in_c_key=False)

 AddConfigVar('numpy.seterr_under',
@@ -352,7 +366,7 @@ AddConfigVar('numpy.seterr_under',
               "see numpy.seterr. "
               "'None' means using the default, defined by numpy.seterr_all."),
              EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
-                allow_override=False),
+                     allow_override=False),
              in_c_key=False)

 AddConfigVar('numpy.seterr_invalid',
@@ -360,7 +374,7 @@ AddConfigVar('numpy.seterr_invalid',
               "see numpy.seterr. "
               "'None' means using the default, defined by numpy.seterr_all."),
              EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
-                allow_override=False),
+                     allow_override=False),
              in_c_key=False)

 ###
@@ -422,30 +436,33 @@ AddConfigVar('warn.sum_div_dimshuffle_bug',
              BoolParam(warn_default('0.3')),
              in_c_key=False)

-AddConfigVar('warn.subtensor_merge_bug',
-        "Warn if previous versions of Theano (before 0.5rc2) could have given "
-        "incorrect results when indexing into a subtensor with negative "
-        "stride (for instance, for instance, x[a:b:-1][c]).",
-        BoolParam(warn_default('0.5')),
-        in_c_key=False)
-
-AddConfigVar('warn.gpu_set_subtensor1',
-        "Warn if previous versions of Theano (before 0.6) could have given "
-        "incorrect results when moving to the gpu "
-        "set_subtensor(x[int vector], new_value)",
-        BoolParam(warn_default('0.6')),
-        in_c_key=False)
-
-AddConfigVar('warn.vm_gc_bug',
-        "There was a bug that existed in the default Theano configuration,"
-        " only in the development version between July 5th 2012"
-        " and July 30th 2012. This was not in a released version."
-        " If your code was affected by this bug, a warning"
-        " will be printed during the code execution if you use the"
-        " `linker=vm,vm.lazy=True,warn.vm_gc_bug=True` Theano flags."
-        " This warning is disabled by default as the bug was not released.",
-        BoolParam(False),
-        in_c_key=False)
+AddConfigVar(
+    'warn.subtensor_merge_bug',
+    "Warn if previous versions of Theano (before 0.5rc2) could have given "
+    "incorrect results when indexing into a subtensor with negative "
+    "stride (for instance, for instance, x[a:b:-1][c]).",
+    BoolParam(warn_default('0.5')),
+    in_c_key=False)
+
+AddConfigVar(
+    'warn.gpu_set_subtensor1',
+    "Warn if previous versions of Theano (before 0.6) could have given "
+    "incorrect results when moving to the gpu "
+    "set_subtensor(x[int vector], new_value)",
+    BoolParam(warn_default('0.6')),
+    in_c_key=False)
+
+AddConfigVar(
+    'warn.vm_gc_bug',
+    "There was a bug that existed in the default Theano configuration,"
+    " only in the development version between July 5th 2012"
+    " and July 30th 2012. This was not in a released version."
+    " If your code was affected by this bug, a warning"
+    " will be printed during the code execution if you use the"
+    " `linker=vm,vm.lazy=True,warn.vm_gc_bug=True` Theano flags."
+    " This warning is disabled by default as the bug was not released.",
+    BoolParam(False),
+    in_c_key=False)

 AddConfigVar('warn.signal_conv2d_interface',
              ("Warn we use the new signal.conv2d() when its interface"
@@ -474,13 +491,14 @@ AddConfigVar('warn.inc_set_subtensor1',
              BoolParam(warn_default('0.7')),
              in_c_key=False)

-AddConfigVar('compute_test_value',
-        ("If 'True', Theano will run each op at graph build time, using "
-         "Constants, SharedVariables and the tag 'test_value' as inputs "
-         "to the function. This helps the user track down problems in the "
-         "graph before it gets optimized."),
-        EnumStr('off', 'ignore', 'warn', 'raise', 'pdb'),
-        in_c_key=False)
+AddConfigVar(
+    'compute_test_value',
+    ("If 'True', Theano will run each op at graph build time, using "
+     "Constants, SharedVariables and the tag 'test_value' as inputs "
+     "to the function. This helps the user track down problems in the "
+     "graph before it gets optimized."),
+    EnumStr('off', 'ignore', 'warn', 'raise', 'pdb'),
+    in_c_key=False)

 AddConfigVar('compute_test_value_opt',
@@ -497,10 +515,11 @@ AddConfigVar('unpickle_function',
              BoolParam(True),
              in_c_key=False)

-AddConfigVar('reoptimize_unpickled_function',
-        "Re-optimize the graph when a theano function is unpickled from the disk.",
-        BoolParam(True, allow_override=True),
-        in_c_key=False)
+AddConfigVar(
+    'reoptimize_unpickled_function',
+    "Re-optimize the graph when a theano function is unpickled from the disk.",
+    BoolParam(True, allow_override=True),
+    in_c_key=False)


 """Note to developers:
@@ -509,17 +528,18 @@ AddConfigVar('reoptimize_unpickled_function',
 == 'high', you should include a call to printing.min_informative_str
 on all important apply nodes.
 """
-AddConfigVar('exception_verbosity',
-        "If 'low', the text of exceptions will generally refer " \
-        + "to apply nodes with short names such as " \
-        + "Elemwise{add_no_inplace}. If 'high', some exceptions " \
-        + "will also refer to apply nodes with long descriptions " \
-        + """ like:
-    A. Elemwise{add_no_inplace}
-    B. log_likelihood_v_given_h
-    C. log_likelihood_h""",
-        EnumStr('low', 'high'),
-        in_c_key=False)
+AddConfigVar(
+    'exception_verbosity',
+    "If 'low', the text of exceptions will generally refer "
+    + "to apply nodes with short names such as "
+    + "Elemwise{add_no_inplace}. If 'high', some exceptions "
+    + "will also refer to apply nodes with long descriptions "
+    + """ like:
+    A. Elemwise{add_no_inplace}
+    B. log_likelihood_v_given_h
+    C. log_likelihood_h""",
+    EnumStr('low', 'high'),
+    in_c_key=False)

 # Test if the env variable is set
 var = os.getenv('OMP_NUM_THREADS', None)
@@ -560,7 +580,7 @@ AddConfigVar('openmp',
              " If it is set to 1, we disable openmp in Theano by default.",
              BoolParam(default_openmp),
              in_c_key=False,
-    )
+             )

 AddConfigVar('openmp_elemwise_minsize',
              "If OpenMP is enabled, this is the minimum size of vectors "
@@ -568,19 +588,21 @@ AddConfigVar('openmp_elemwise_minsize',
              "in element wise ops.",
              IntParam(200000),
              in_c_key=False,
-    )
-
-AddConfigVar('check_input',
-        "Specify if types should check their input in their C code. "
-        "It can be used to speed up compilation, reduce overhead "
-        "(particularly for scalars) and reduce the number of generated C "
-        "files.",
-        BoolParam(True))
-
-AddConfigVar('cache_optimizations',
-        "WARNING: work in progress, does not work yet. "
-        "Specify if the optimization cache should be used. This cache will "
-        "any optimized graph and its optimization. Actually slow downs a lot "
-        "the first optimization, and could possibly still contains some bugs. "
-        "Use at your own risks.",
-        BoolParam(False))
+             )
+
+AddConfigVar(
+    'check_input',
+    "Specify if types should check their input in their C code. "
+    "It can be used to speed up compilation, reduce overhead "
+    "(particularly for scalars) and reduce the number of generated C "
+    "files.",
+    BoolParam(True))
+
+AddConfigVar(
+    'cache_optimizations',
+    "WARNING: work in progress, does not work yet. "
+    "Specify if the optimization cache should be used. This cache will "
+    "any optimized graph and its optimization. Actually slow downs a lot "
+    "the first optimization, and could possibly still contains some bugs. "
+    "Use at your own risks.",
+    BoolParam(False))
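
For context (an editor's sketch, not part of the commit): each AddConfigVar(name, doc, param, ...) call in this file registers one Theano configuration option. Assuming a standard Theano installation, the options defined above are typically set through the THEANO_FLAGS environment variable or a ~/.theanorc file before import, and read back as attributes of theano.config. The flag names below come straight from the diff:

# Hedged sketch: exercising options registered by configdefaults.py.
# Assumes Theano is installed; nothing here is part of this commit.
import os

# Set options before importing theano, as comma-separated name=value pairs.
os.environ['THEANO_FLAGS'] = 'device=cpu,floatX=float32,optimizer=fast_run'

import theano

# Every AddConfigVar call surfaces as an attribute on theano.config.
print(theano.config.floatX)        # 'float32' (overridden above)
print(theano.config.device)        # 'cpu'
print(theano.config.int_division)  # 'int' (an EnumStr defaults to its first entry)

# Options registered with allow_override=False (e.g. 'device') are fixed
# after import; the others can still be reassigned at runtime:
theano.config.optimizer_verbose = True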