提交 2352fcec authored 作者: Brandon T. Willard's avatar Brandon T. Willard

Update and refactor logger warnings

上级 53486aae
......@@ -255,7 +255,7 @@ class InferShapeTester:
else:
shp = inp.shape
if len(set(shp)) != len(shp):
_logger.warn(
_logger.warning(
"While testing shape inference for %r, we received an"
" input with a shape that has some repeated values: %r"
", like a square matrix. This makes it impossible to"
......
......@@ -1437,7 +1437,7 @@ def _check_preallocated_output(
fn_attr_name = ops_with_inner_function[type(node.op)]
fn = getattr(node.op, fn_attr_name, None)
if not fn or not hasattr(fn, "maker") or not hasattr(fn.maker, "mode"):
_logger.warn(
_logger.warning(
"Expected theano function not found in %s.%s", node.op, fn_attr_name
)
else:
......@@ -1482,7 +1482,7 @@ def _check_preallocated_output(
if not out_map:
# Map is empty, there is no need to execute thunk() again
_logger.warn("%s: out_map is empty", name)
_logger.warning("%s: out_map is empty", name)
continue
# Copy the inputs over, if they were marked as destroyed or viewed
......@@ -1904,7 +1904,7 @@ class _Linker(gof.link.LocalLinker):
thunks_py.append(None)
if not self.maker.mode.check_c_code and thunks_py[-1] is None:
_logger.warn(
_logger.warning(
"Op %s doesn't have a perform, "
"forcing check of the C code" % node.op
)
......@@ -1921,7 +1921,7 @@ class _Linker(gof.link.LocalLinker):
elif thunks_c[-1] is None:
thunks_c[-1] = thunk_other
else:
_logger.warn(
_logger.warning(
"We won't check the perform function "
"of node '%s' but we will check its "
"make_thunk function" % node
......
......@@ -2055,7 +2055,7 @@ class GCC_compiler(Compiler):
and "clang-omp++" not in theano.config.cxx
and "icpc" not in theano.config.cxx
):
_logger.warn(
_logger.warning(
"OPTIMIZATION WARNING: your Theano flag `cxx` seems not to be"
" the g++ compiler. So we disable the compiler optimization"
" specific to g++ that tell to compile for a specific CPU."
......@@ -2124,7 +2124,7 @@ class GCC_compiler(Compiler):
)
else:
reported_lines = native_lines
_logger.warn(
_logger.warning(
"OPTIMIZATION WARNING: Theano was not able to find the"
" g++ parameters that tune the compilation to your "
" specific CPU. This can slow down the execution of Theano"
......@@ -2137,7 +2137,7 @@ class GCC_compiler(Compiler):
default_lines = get_lines("%s -E -v -" % theano.config.cxx)
_logger.info("g++ default lines: %s", default_lines)
if len(default_lines) < 1:
_logger.warn(
_logger.warning(
"OPTIMIZATION WARNING: Theano was not able to find the"
" default g++ parameters. This is needed to tune"
" the compilation to your specific"
......
......@@ -349,7 +349,7 @@ def refresh_lock(lock_file):
# This way, only 1 test would fail.
while get_lock.n_lock > 0:
release_lock()
_logger.warn(
_logger.warning(
"Refreshing lock failed, we release the"
" lock before raising again the exception"
)
......
......@@ -924,7 +924,7 @@ class VM_Linker(link.LocalLinker):
if self.use_cloop and (
self.callback is not None or self.callback_input is not None
):
logger.warn("CVM does not support callback, using Stack VM.")
logger.warning("CVM does not support callback, using Stack VM.")
if self.use_cloop and config.profile_memory:
warnings.warn("CVM does not support memory profile, using Stack VM.")
if not self.use_cloop and self.allow_partial_eval:
......
......@@ -862,7 +862,7 @@ def pydotprint(
):
cond = node
if cond is None:
_logger.warn(
_logger.warning(
"pydotprint: cond_highlight is set but there is no"
" IfElse node in the graph"
)
......
......@@ -559,7 +559,7 @@ class ConvOp(OpenMPOp):
" bsize(%i). We revert it to %i. This"
" won't change the result, but may make it slower."
)
_logger.warn(warnstr, self.unroll_batch, self.bsize, new)
_logger.warning(warnstr, self.unroll_batch, self.bsize, new)
self.unroll_batch = new
......@@ -585,7 +585,7 @@ class ConvOp(OpenMPOp):
" nkern(%i). We revert it to %i. This"
" won't change the result, but may make it slower."
)
_logger.warn(warnstr, self.unroll_kern, self.nkern, new)
_logger.warning(warnstr, self.unroll_kern, self.nkern, new)
self.unroll_kern = new
self.outshp = get_conv_output_shape(
......
......@@ -3251,7 +3251,7 @@ def merge_two_slices(slice1, len1, slice2, len2):
# sl.stop backwards
n_val = sl1.stop - 1 - sl2 * sl1.step
if config.warn.subtensor_merge_bug:
warnings.warn(
warnings.warn(
(
"Your current code is fine, but Theano versions "
"prior to 0.5rc2 might have given an incorrect result. "
......@@ -3843,7 +3843,7 @@ def local_adv_sub1_adv_inc_sub1(node):
if not inp.owner.op.set_instead_of_inc:
if config.warn.inc_subtensor1_opt:
warnings.warn(
warnings.warn(
"Your current code is fine, but Theano versions "
"between 0.7rc1 and 0.10 (or development versions "
"between Nov. 2014 and May 2017) "
......@@ -5851,7 +5851,7 @@ def local_sum_prod_div_dimshuffle(node):
break
if compatible_dims:
_logger.warn(
_logger.warning(
"WARNING: Your current code is fine, but"
" Theano versions between "
"rev. 3bd9b789f5e8 (2010-06-16) and"
......@@ -5906,7 +5906,7 @@ def local_sum_prod_div_dimshuffle(node):
if config.warn.sum_div_dimshuffle_bug and isinstance(
node.op, T.Sum
):
_logger.warn(
_logger.warning(
"WARNING: Your current code is fine,"
" but Theano versions between "
"rev. 3bd9b789f5e8 (2010-06-16) and"
......@@ -6016,7 +6016,7 @@ def local_op_of_op(node):
and newaxis != newaxis_old
and len(newaxis) == len(newaxis_old)
):
_logger.warn(
_logger.warning(
"WARNING (YOUR CURRENT CODE IS FINE): Theano "
"versions between version 9923a40c7b7a and August "
"2nd, 2010 generated bugged code in this case. "
......@@ -6102,7 +6102,7 @@ def local_reduce_join(node):
# I put this warning late to don't add extra warning.
if len(reduce_axis) != 1 or 0 not in reduce_axis:
if theano.config.warn.reduce_join:
warnings.warn(
warnings.warn(
(
"Your current code is fine, but Theano versions "
"prior to 0.7 (or this development version Sept 2014) "
......@@ -7691,7 +7691,6 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 32, maker=None):
for i in node.inputs:
do_fusion = False
catch = False
# Will store inputs of the fused node that are not currently inputs
# of the node we want to create (to avoid duplicating inputs).
tmp_input = []
......@@ -7712,7 +7711,6 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 32, maker=None):
# computation due to broadcast.
i.owner.outputs[0].broadcastable == node.outputs[0].broadcastable
):
do_fusion = True
try:
tmp_s_input = []
# we should not put duplicate input into s_inputs and inputs
......@@ -7746,12 +7744,11 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 32, maker=None):
["z" for z in i.owner.outputs],
{"fail": "%(fail)s"},
)
except MethodNotDefined:
catch = True
except NotImplementedError:
catch = True
if catch:
_logger.info(
do_fusion = True
except (NotImplementedError, MethodNotDefined):
_logger.warning(
(
"%s does not implement the c_code function."
" As well as being potentially slow, this"
......@@ -7819,8 +7816,8 @@ your code will run correctly, but may be slower."""
["z" for x in s_new_out],
{"fail": "%(fail)s"},
)
except MethodNotDefined:
_logger.info(
except (NotImplementedError, MethodNotDefined):
_logger.warning(
(
"%s does not implement the c_code function."
" As well as being potentially slow, this disables "
......@@ -7828,17 +7825,6 @@ your code will run correctly, but may be slower."""
)
% str(s_new_out[0].owner.op)
)
return False
except NotImplementedError:
_logger.info(
(
"%s does not implement the c_code function. As well"
" as being potentially slow, this disables loop"
" fusion of this op."
)
% str(s_new_out[0].owner.op)
)
return False
# create the composite op.
C = scalar.Composite(s_inputs, s_new_out)
......@@ -7850,7 +7836,7 @@ your code will run correctly, but may be slower."""
assert node.outputs[0].dtype == n.outputs[0].dtype
if len(n.inputs) > max_nb_input:
_logger.info(
_logger.warning(
"loop fusion failed because Op would exceed" " kernel argument limit."
)
return False
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论