提交 020fc625 authored 作者: Mohammad Pezeshki's avatar Mohammad Pezeshki

configdefaults.py in pep8

上级 f7a5312c
......@@ -24,7 +24,7 @@ def floatX_convert(s):
AddConfigVar('floatX',
"Default floating-point precision for python casts",
EnumStr('float64', 'float32', convert=floatX_convert,),
)
)
AddConfigVar('warn_float64',
"Do an action when a tensor variable with float64 dtype is"
......@@ -32,17 +32,17 @@ AddConfigVar('warn_float64',
" gpu back-end and are slow with gamer GPUs.",
EnumStr('ignore', 'warn', 'raise', 'pdb'),
in_c_key=False,
)
)
AddConfigVar('cast_policy',
"Rules for implicit type casting",
'Rules for implicit type casting',
EnumStr('custom', 'numpy+floatX',
# The 'numpy' policy was originally planned to provide a smooth
# transition from numpy. It was meant to behave the same as
# numpy+floatX, but keeping float64 when numpy would. However
# the current implementation of some cast mechanisms makes it
# a bit more complex to add than what was expected, so it is
# currently not available.
# The 'numpy' policy was originally planned to provide a
# smooth transition from numpy. It was meant to behave the
# same as numpy+floatX, but keeping float64 when numpy
# would. However the current implementation of some cast
# mechanisms makes it a bit more complex to add than what
# was expected, so it is currently not available.
# numpy,
),
)
......@@ -82,14 +82,14 @@ class DeviceParam(ConfigParam):
def __str__(self):
return '%s (cpu, gpu*, opencl*, cuda*) ' % (self.fullname,)
AddConfigVar('device',
AddConfigVar(
'device',
("Default device for computations. If gpu*, change the default to try "
"to move computation to it and to put shared variable of float32 "
"on it. Do not use upper case letters, only lower case even if "
"NVIDIA use capital letters."),
DeviceParam('cpu', allow_override=False),
in_c_key=False,
)
in_c_key=False,)
AddConfigVar('gpuarray.init_device',
"""
......@@ -99,7 +99,8 @@ AddConfigVar('gpuarray.init_device',
StrParam(''),
in_c_key=False)
AddConfigVar('init_gpu_device',
AddConfigVar(
'init_gpu_device',
("Initialize the gpu device to use, works only if device=cpu. "
"Unlike 'device', setting this option will NOT move computations, "
"nor shared variables, to the specified GPU. "
......@@ -112,12 +113,14 @@ AddConfigVar('init_gpu_device',
allow_override=False),
in_c_key=False)
AddConfigVar('force_device',
AddConfigVar(
'force_device',
"Raise an error if we can't use the specified device",
BoolParam(False, allow_override=False),
in_c_key=False)
AddConfigVar('print_active_device',
AddConfigVar(
'print_active_device',
"Print active device at when the GPU device is initialized.",
BoolParam(True, allow_override=False),
in_c_key=False)
......@@ -129,7 +132,8 @@ AddConfigVar('print_active_device',
# scalable.
# Also, please be careful not to modify the first item in the enum when adding
# new modes, since it is the default mode.
AddConfigVar('mode',
AddConfigVar(
'mode',
"Default compilation mode",
EnumStr('Mode', 'ProfileMode', 'DebugMode', 'FAST_RUN',
'FAST_COMPILE', 'PROFILE_MODE', 'DEBUG_MODE'),
......@@ -209,7 +213,8 @@ AddConfigVar('allow_gc',
in_c_key=False)
# Keep the default optimizer the same as the one for the mode FAST_RUN
AddConfigVar('optimizer',
AddConfigVar(
'optimizer',
("Default optimizer. If not None, will use this linker with the Mode "
"object (not ProfileMode(deprecated) or DebugMode)"),
EnumStr('fast_run', 'merge', 'fast_compile', 'None'),
......@@ -220,7 +225,8 @@ AddConfigVar('optimizer_verbose',
BoolParam(False),
in_c_key=False)
AddConfigVar('on_opt_error',
AddConfigVar(
'on_opt_error',
("What to do when an optimization crashes: warn and skip it, raise "
"the exception, or fall into the pdb debugger."),
EnumStr('warn', 'raise', 'pdb'),
......@@ -246,13 +252,15 @@ def safe_no_home(home):
return True
AddConfigVar('home',
AddConfigVar(
'home',
"This config option was removed in 0.5: do not use it!",
ConfigParam('', allow_override=False, filter=safe_no_home),
in_c_key=False)
AddConfigVar('nocleanup',
AddConfigVar(
'nocleanup',
"Suppress the deletion of code files that did not compile cleanly",
BoolParam(False),
in_c_key=False)
......@@ -267,38 +275,44 @@ AddConfigVar('on_unused_input',
# So changing it after import will not modify these global variables.
# This could be done differently... but for now we simply prevent it from being
# changed at runtime.
AddConfigVar('tensor.cmp_sloppy',
AddConfigVar(
'tensor.cmp_sloppy',
"Relax tensor._allclose (0) not at all, (1) a bit, (2) more",
IntParam(0, lambda i: i in (0, 1, 2), allow_override=False),
in_c_key=False)
AddConfigVar('tensor.local_elemwise_fusion',
AddConfigVar(
'tensor.local_elemwise_fusion',
("Enable or not in fast_run mode(fast_run optimization) the elemwise "
"fusion optimization"),
BoolParam(True),
in_c_key=False)
AddConfigVar('gpu.local_elemwise_fusion',
AddConfigVar(
'gpu.local_elemwise_fusion',
("Enable or not in fast_run mode(fast_run optimization) the gpu "
"elemwise fusion optimization"),
BoolParam(True),
in_c_key=False)
# http://developer.amd.com/CPU/LIBRARIES/LIBM/Pages/default.aspx
AddConfigVar('lib.amdlibm',
AddConfigVar(
'lib.amdlibm',
"Use amd's amdlibm numerical library",
BoolParam(False))
AddConfigVar('gpuelemwise.sync',
AddConfigVar(
'gpuelemwise.sync',
"when true, wait that the gpu fct finished and check it error code.",
BoolParam(True),
in_c_key=False)
AddConfigVar('traceback.limit',
AddConfigVar(
'traceback.limit',
"The number of stack to trace. -1 mean all.",
# We default to 6 to be able to know where v1 + v2 is created in the
# user script. The bigger this number is, the more run time it takes.
# We need to default to 7 to support theano.tensor.tensor(...).
# We default to 6 to be able to know where v1 + v2 is created in the
# user script. The bigger this number is, the more run time it takes.
# We need to default to 7 to support theano.tensor.tensor(...).
IntParam(7),
in_c_key=False)
......@@ -422,21 +436,24 @@ AddConfigVar('warn.sum_div_dimshuffle_bug',
BoolParam(warn_default('0.3')),
in_c_key=False)
AddConfigVar('warn.subtensor_merge_bug',
AddConfigVar(
'warn.subtensor_merge_bug',
"Warn if previous versions of Theano (before 0.5rc2) could have given "
"incorrect results when indexing into a subtensor with negative "
"stride (for instance, for instance, x[a:b:-1][c]).",
BoolParam(warn_default('0.5')),
in_c_key=False)
AddConfigVar('warn.gpu_set_subtensor1',
AddConfigVar(
'warn.gpu_set_subtensor1',
"Warn if previous versions of Theano (before 0.6) could have given "
"incorrect results when moving to the gpu "
"set_subtensor(x[int vector], new_value)",
BoolParam(warn_default('0.6')),
in_c_key=False)
AddConfigVar('warn.vm_gc_bug',
AddConfigVar(
'warn.vm_gc_bug',
"There was a bug that existed in the default Theano configuration,"
" only in the development version between July 5th 2012"
" and July 30th 2012. This was not in a released version."
......@@ -474,7 +491,8 @@ AddConfigVar('warn.inc_set_subtensor1',
BoolParam(warn_default('0.7')),
in_c_key=False)
AddConfigVar('compute_test_value',
AddConfigVar(
'compute_test_value',
("If 'True', Theano will run each op at graph build time, using "
"Constants, SharedVariables and the tag 'test_value' as inputs "
"to the function. This helps the user track down problems in the "
......@@ -497,7 +515,8 @@ AddConfigVar('unpickle_function',
BoolParam(True),
in_c_key=False)
AddConfigVar('reoptimize_unpickled_function',
AddConfigVar(
'reoptimize_unpickled_function',
"Re-optimize the graph when a theano function is unpickled from the disk.",
BoolParam(True, allow_override=True),
in_c_key=False)
......@@ -509,11 +528,12 @@ AddConfigVar('reoptimize_unpickled_function',
== 'high', you should include a call to printing.min_informative_str
on all important apply nodes.
"""
AddConfigVar('exception_verbosity',
"If 'low', the text of exceptions will generally refer " \
+ "to apply nodes with short names such as " \
+ "Elemwise{add_no_inplace}. If 'high', some exceptions " \
+ "will also refer to apply nodes with long descriptions " \
AddConfigVar(
'exception_verbosity',
"If 'low', the text of exceptions will generally refer "
+ "to apply nodes with short names such as "
+ "Elemwise{add_no_inplace}. If 'high', some exceptions "
+ "will also refer to apply nodes with long descriptions "
+ """ like:
A. Elemwise{add_no_inplace}
B. log_likelihood_v_given_h
......@@ -570,14 +590,16 @@ AddConfigVar('openmp_elemwise_minsize',
in_c_key=False,
)
AddConfigVar('check_input',
AddConfigVar(
'check_input',
"Specify if types should check their input in their C code. "
"It can be used to speed up compilation, reduce overhead "
"(particularly for scalars) and reduce the number of generated C "
"files.",
BoolParam(True))
AddConfigVar('cache_optimizations',
AddConfigVar(
'cache_optimizations',
"WARNING: work in progress, does not work yet. "
"Specify if the optimization cache should be used. This cache will "
"any optimized graph and its optimization. Actually slow downs a lot "
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论