提交 2352fcec authored 作者: Brandon T. Willard's avatar Brandon T. Willard

Update and refactor logger warnings

上级 53486aae
...@@ -255,7 +255,7 @@ class InferShapeTester: ...@@ -255,7 +255,7 @@ class InferShapeTester:
else: else:
shp = inp.shape shp = inp.shape
if len(set(shp)) != len(shp): if len(set(shp)) != len(shp):
_logger.warn( _logger.warning(
"While testing shape inference for %r, we received an" "While testing shape inference for %r, we received an"
" input with a shape that has some repeated values: %r" " input with a shape that has some repeated values: %r"
", like a square matrix. This makes it impossible to" ", like a square matrix. This makes it impossible to"
......
...@@ -1437,7 +1437,7 @@ def _check_preallocated_output( ...@@ -1437,7 +1437,7 @@ def _check_preallocated_output(
fn_attr_name = ops_with_inner_function[type(node.op)] fn_attr_name = ops_with_inner_function[type(node.op)]
fn = getattr(node.op, fn_attr_name, None) fn = getattr(node.op, fn_attr_name, None)
if not fn or not hasattr(fn, "maker") or not hasattr(fn.maker, "mode"): if not fn or not hasattr(fn, "maker") or not hasattr(fn.maker, "mode"):
_logger.warn( _logger.warning(
"Expected theano function not found in %s.%s", node.op, fn_attr_name "Expected theano function not found in %s.%s", node.op, fn_attr_name
) )
else: else:
...@@ -1482,7 +1482,7 @@ def _check_preallocated_output( ...@@ -1482,7 +1482,7 @@ def _check_preallocated_output(
if not out_map: if not out_map:
# Map is empty, there is no need to execute thunk() again # Map is empty, there is no need to execute thunk() again
_logger.warn("%s: out_map is empty", name) _logger.warning("%s: out_map is empty", name)
continue continue
# Copy the inputs over, if they were marked as destroyed or viewed # Copy the inputs over, if they were marked as destroyed or viewed
...@@ -1904,7 +1904,7 @@ class _Linker(gof.link.LocalLinker): ...@@ -1904,7 +1904,7 @@ class _Linker(gof.link.LocalLinker):
thunks_py.append(None) thunks_py.append(None)
if not self.maker.mode.check_c_code and thunks_py[-1] is None: if not self.maker.mode.check_c_code and thunks_py[-1] is None:
_logger.warn( _logger.warning(
"Op %s doesn't have a perform, " "Op %s doesn't have a perform, "
"forcing check of the C code" % node.op "forcing check of the C code" % node.op
) )
...@@ -1921,7 +1921,7 @@ class _Linker(gof.link.LocalLinker): ...@@ -1921,7 +1921,7 @@ class _Linker(gof.link.LocalLinker):
elif thunks_c[-1] is None: elif thunks_c[-1] is None:
thunks_c[-1] = thunk_other thunks_c[-1] = thunk_other
else: else:
_logger.warn( _logger.warning(
"We won't check the perform function " "We won't check the perform function "
"of node '%s' but we will check its " "of node '%s' but we will check its "
"make_thunk function" % node "make_thunk function" % node
......
...@@ -2055,7 +2055,7 @@ class GCC_compiler(Compiler): ...@@ -2055,7 +2055,7 @@ class GCC_compiler(Compiler):
and "clang-omp++" not in theano.config.cxx and "clang-omp++" not in theano.config.cxx
and "icpc" not in theano.config.cxx and "icpc" not in theano.config.cxx
): ):
_logger.warn( _logger.warning(
"OPTIMIZATION WARNING: your Theano flag `cxx` seems not to be" "OPTIMIZATION WARNING: your Theano flag `cxx` seems not to be"
" the g++ compiler. So we disable the compiler optimization" " the g++ compiler. So we disable the compiler optimization"
" specific to g++ that tell to compile for a specific CPU." " specific to g++ that tell to compile for a specific CPU."
...@@ -2124,7 +2124,7 @@ class GCC_compiler(Compiler): ...@@ -2124,7 +2124,7 @@ class GCC_compiler(Compiler):
) )
else: else:
reported_lines = native_lines reported_lines = native_lines
_logger.warn( _logger.warning(
"OPTIMIZATION WARNING: Theano was not able to find the" "OPTIMIZATION WARNING: Theano was not able to find the"
" g++ parameters that tune the compilation to your " " g++ parameters that tune the compilation to your "
" specific CPU. This can slow down the execution of Theano" " specific CPU. This can slow down the execution of Theano"
...@@ -2137,7 +2137,7 @@ class GCC_compiler(Compiler): ...@@ -2137,7 +2137,7 @@ class GCC_compiler(Compiler):
default_lines = get_lines("%s -E -v -" % theano.config.cxx) default_lines = get_lines("%s -E -v -" % theano.config.cxx)
_logger.info("g++ default lines: %s", default_lines) _logger.info("g++ default lines: %s", default_lines)
if len(default_lines) < 1: if len(default_lines) < 1:
_logger.warn( _logger.warning(
"OPTIMIZATION WARNING: Theano was not able to find the" "OPTIMIZATION WARNING: Theano was not able to find the"
" default g++ parameters. This is needed to tune" " default g++ parameters. This is needed to tune"
" the compilation to your specific" " the compilation to your specific"
......
...@@ -349,7 +349,7 @@ def refresh_lock(lock_file): ...@@ -349,7 +349,7 @@ def refresh_lock(lock_file):
# This way, only 1 test would fail. # This way, only 1 test would fail.
while get_lock.n_lock > 0: while get_lock.n_lock > 0:
release_lock() release_lock()
_logger.warn( _logger.warning(
"Refreshing lock failed, we release the" "Refreshing lock failed, we release the"
" lock before raising again the exception" " lock before raising again the exception"
) )
......
...@@ -924,7 +924,7 @@ class VM_Linker(link.LocalLinker): ...@@ -924,7 +924,7 @@ class VM_Linker(link.LocalLinker):
if self.use_cloop and ( if self.use_cloop and (
self.callback is not None or self.callback_input is not None self.callback is not None or self.callback_input is not None
): ):
logger.warn("CVM does not support callback, using Stack VM.") logger.warning("CVM does not support callback, using Stack VM.")
if self.use_cloop and config.profile_memory: if self.use_cloop and config.profile_memory:
warnings.warn("CVM does not support memory profile, using Stack VM.") warnings.warn("CVM does not support memory profile, using Stack VM.")
if not self.use_cloop and self.allow_partial_eval: if not self.use_cloop and self.allow_partial_eval:
......
...@@ -862,7 +862,7 @@ def pydotprint( ...@@ -862,7 +862,7 @@ def pydotprint(
): ):
cond = node cond = node
if cond is None: if cond is None:
_logger.warn( _logger.warning(
"pydotprint: cond_highlight is set but there is no" "pydotprint: cond_highlight is set but there is no"
" IfElse node in the graph" " IfElse node in the graph"
) )
......
...@@ -559,7 +559,7 @@ class ConvOp(OpenMPOp): ...@@ -559,7 +559,7 @@ class ConvOp(OpenMPOp):
" bsize(%i). We revert it to %i. This" " bsize(%i). We revert it to %i. This"
" won't change the result, but may make it slower." " won't change the result, but may make it slower."
) )
_logger.warn(warnstr, self.unroll_batch, self.bsize, new) _logger.warning(warnstr, self.unroll_batch, self.bsize, new)
self.unroll_batch = new self.unroll_batch = new
...@@ -585,7 +585,7 @@ class ConvOp(OpenMPOp): ...@@ -585,7 +585,7 @@ class ConvOp(OpenMPOp):
" nkern(%i). We revert it to %i. This" " nkern(%i). We revert it to %i. This"
" won't change the result, but may make it slower." " won't change the result, but may make it slower."
) )
_logger.warn(warnstr, self.unroll_kern, self.nkern, new) _logger.warning(warnstr, self.unroll_kern, self.nkern, new)
self.unroll_kern = new self.unroll_kern = new
self.outshp = get_conv_output_shape( self.outshp = get_conv_output_shape(
......
...@@ -3251,7 +3251,7 @@ def merge_two_slices(slice1, len1, slice2, len2): ...@@ -3251,7 +3251,7 @@ def merge_two_slices(slice1, len1, slice2, len2):
# sl.stop backwards # sl.stop backwards
n_val = sl1.stop - 1 - sl2 * sl1.step n_val = sl1.stop - 1 - sl2 * sl1.step
if config.warn.subtensor_merge_bug: if config.warn.subtensor_merge_bug:
warnings.warn( warnings.warn(
( (
"Your current code is fine, but Theano versions " "Your current code is fine, but Theano versions "
"prior to 0.5rc2 might have given an incorrect result. " "prior to 0.5rc2 might have given an incorrect result. "
...@@ -3843,7 +3843,7 @@ def local_adv_sub1_adv_inc_sub1(node): ...@@ -3843,7 +3843,7 @@ def local_adv_sub1_adv_inc_sub1(node):
if not inp.owner.op.set_instead_of_inc: if not inp.owner.op.set_instead_of_inc:
if config.warn.inc_subtensor1_opt: if config.warn.inc_subtensor1_opt:
warnings.warn( warnings.warn(
"Your current code is fine, but Theano versions " "Your current code is fine, but Theano versions "
"between 0.7rc1 and 0.10 (or development versions " "between 0.7rc1 and 0.10 (or development versions "
"between Nov. 2014 and May 2017) " "between Nov. 2014 and May 2017) "
...@@ -5851,7 +5851,7 @@ def local_sum_prod_div_dimshuffle(node): ...@@ -5851,7 +5851,7 @@ def local_sum_prod_div_dimshuffle(node):
break break
if compatible_dims: if compatible_dims:
_logger.warn( _logger.warning(
"WARNING: Your current code is fine, but" "WARNING: Your current code is fine, but"
" Theano versions between " " Theano versions between "
"rev. 3bd9b789f5e8 (2010-06-16) and" "rev. 3bd9b789f5e8 (2010-06-16) and"
...@@ -5906,7 +5906,7 @@ def local_sum_prod_div_dimshuffle(node): ...@@ -5906,7 +5906,7 @@ def local_sum_prod_div_dimshuffle(node):
if config.warn.sum_div_dimshuffle_bug and isinstance( if config.warn.sum_div_dimshuffle_bug and isinstance(
node.op, T.Sum node.op, T.Sum
): ):
_logger.warn( _logger.warning(
"WARNING: Your current code is fine," "WARNING: Your current code is fine,"
" but Theano versions between " " but Theano versions between "
"rev. 3bd9b789f5e8 (2010-06-16) and" "rev. 3bd9b789f5e8 (2010-06-16) and"
...@@ -6016,7 +6016,7 @@ def local_op_of_op(node): ...@@ -6016,7 +6016,7 @@ def local_op_of_op(node):
and newaxis != newaxis_old and newaxis != newaxis_old
and len(newaxis) == len(newaxis_old) and len(newaxis) == len(newaxis_old)
): ):
_logger.warn( _logger.warning(
"WARNING (YOUR CURRENT CODE IS FINE): Theano " "WARNING (YOUR CURRENT CODE IS FINE): Theano "
"versions between version 9923a40c7b7a and August " "versions between version 9923a40c7b7a and August "
"2nd, 2010 generated bugged code in this case. " "2nd, 2010 generated bugged code in this case. "
...@@ -6102,7 +6102,7 @@ def local_reduce_join(node): ...@@ -6102,7 +6102,7 @@ def local_reduce_join(node):
# I put this warning late to don't add extra warning. # I put this warning late to don't add extra warning.
if len(reduce_axis) != 1 or 0 not in reduce_axis: if len(reduce_axis) != 1 or 0 not in reduce_axis:
if theano.config.warn.reduce_join: if theano.config.warn.reduce_join:
warnings.warn( warnings.warn(
( (
"Your current code is fine, but Theano versions " "Your current code is fine, but Theano versions "
"prior to 0.7 (or this development version Sept 2014) " "prior to 0.7 (or this development version Sept 2014) "
...@@ -7691,7 +7691,6 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 32, maker=None): ...@@ -7691,7 +7691,6 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 32, maker=None):
for i in node.inputs: for i in node.inputs:
do_fusion = False do_fusion = False
catch = False
# Will store inputs of the fused node that are not currently inputs # Will store inputs of the fused node that are not currently inputs
# of the node we want to create (to avoid duplicating inputs). # of the node we want to create (to avoid duplicating inputs).
tmp_input = [] tmp_input = []
...@@ -7712,7 +7711,6 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 32, maker=None): ...@@ -7712,7 +7711,6 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 32, maker=None):
# computation due to broadcast. # computation due to broadcast.
i.owner.outputs[0].broadcastable == node.outputs[0].broadcastable i.owner.outputs[0].broadcastable == node.outputs[0].broadcastable
): ):
do_fusion = True
try: try:
tmp_s_input = [] tmp_s_input = []
# we should not put duplicate input into s_inputs and inputs # we should not put duplicate input into s_inputs and inputs
...@@ -7746,12 +7744,11 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 32, maker=None): ...@@ -7746,12 +7744,11 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 32, maker=None):
["z" for z in i.owner.outputs], ["z" for z in i.owner.outputs],
{"fail": "%(fail)s"}, {"fail": "%(fail)s"},
) )
except MethodNotDefined:
catch = True do_fusion = True
except NotImplementedError:
catch = True except (NotImplementedError, MethodNotDefined):
if catch: _logger.warning(
_logger.info(
( (
"%s does not implement the c_code function." "%s does not implement the c_code function."
" As well as being potentially slow, this" " As well as being potentially slow, this"
...@@ -7819,8 +7816,8 @@ your code will run correctly, but may be slower.""" ...@@ -7819,8 +7816,8 @@ your code will run correctly, but may be slower."""
["z" for x in s_new_out], ["z" for x in s_new_out],
{"fail": "%(fail)s"}, {"fail": "%(fail)s"},
) )
except MethodNotDefined: except (NotImplementedError, MethodNotDefined):
_logger.info( _logger.warning(
( (
"%s does not implement the c_code function." "%s does not implement the c_code function."
" As well as being potentially slow, this disables " " As well as being potentially slow, this disables "
...@@ -7828,17 +7825,6 @@ your code will run correctly, but may be slower.""" ...@@ -7828,17 +7825,6 @@ your code will run correctly, but may be slower."""
) )
% str(s_new_out[0].owner.op) % str(s_new_out[0].owner.op)
) )
return False
except NotImplementedError:
_logger.info(
(
"%s does not implement the c_code function. As well"
" as being potentially slow, this disables loop"
" fusion of this op."
)
% str(s_new_out[0].owner.op)
)
return False
# create the composite op. # create the composite op.
C = scalar.Composite(s_inputs, s_new_out) C = scalar.Composite(s_inputs, s_new_out)
...@@ -7850,7 +7836,7 @@ your code will run correctly, but may be slower.""" ...@@ -7850,7 +7836,7 @@ your code will run correctly, but may be slower."""
assert node.outputs[0].dtype == n.outputs[0].dtype assert node.outputs[0].dtype == n.outputs[0].dtype
if len(n.inputs) > max_nb_input: if len(n.inputs) > max_nb_input:
_logger.info( _logger.warning(
"loop fusion failed because Op would exceed" " kernel argument limit." "loop fusion failed because Op would exceed" " kernel argument limit."
) )
return False return False
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论