提交 b2e9ea5d authored 作者: Pascal Lamblin's avatar Pascal Lamblin

More clean-up of logging code.

上级 ce0002f8
......@@ -16,10 +16,6 @@ from theano.compile import Function, debugmode
from theano.compile.profilemode import ProfileMode
# Module-level logger for theano.printing.
_logger = logging.getLogger("theano.printing")


def _info(*msg):
    """Log the given string arguments, space-joined, at INFO level."""
    _logger.info(' '.join(msg))


def _warn(*msg):
    """Log the given string arguments, space-joined, at WARNING level."""
    # Logger.warn is a deprecated alias of Logger.warning; use the
    # canonical name.
    _logger.warning(' '.join(msg))
def debugprint(obj, depth=-1, print_type=False, file=None):
"""Print a computation graph to file
......@@ -476,7 +472,7 @@ def pydotprint(fct, outfile=None,
if node.op.__class__.__name__=='IfElse' and node.op.name == cond_highlight:
cond = node
if cond is None:
_warn("pydotprint: cond_highlight is set but there is no IfElse node in the graph")
_logger.warn("pydotprint: cond_highlight is set but there is no IfElse node in the graph")
cond_highlight = None
if cond_highlight is not None:
......
......@@ -933,7 +933,7 @@ def get_device_type_sizes():
del gpu_int_size
del t
except Exception, e:
_logger.warning(("OPTIMIZATION WARNING: "
_logger.warning(("Optimization Warning: "
"Got the following error, but we can ignore it. "
"This could cause less GpuElemwise fused together.\n"
"%s") % e)
......
......@@ -31,10 +31,6 @@ from elemwise import Elemwise, DimShuffle, CAReduce, Sum
import logging
# Module-level logger for theano.tensor.basic.
_logger = logging.getLogger("theano.tensor.basic")


def _info(*msg):
    """Log the given string arguments, space-joined, at INFO level."""
    _logger.info(' '.join(msg))


def _warn(*msg):
    """Log the given string arguments, space-joined, at WARNING level."""
    # Logger.warn is a deprecated alias of Logger.warning; use the
    # canonical name.
    _logger.warning(' '.join(msg))
#This is needed as we will hide it later
python_complex=complex
......@@ -728,9 +724,9 @@ class TensorType(Type):
if not (a_missing.any() or (allow_remove_inf and a_inf.any())):
# There are no missing values in a, thus this is not the
# reason why numpy.allclose(a, b) returned False.
_info('numpy allclose failed for abs_err %f and rel_err %f' %(
numpy.max( abs(a-b)),
numpy.max( abs(a-b)/(abs(a)+abs(b)))))
_logger.info('numpy allclose failed for abs_err %f and rel_err %f',
numpy.max(abs(a-b)),
numpy.max(abs(a-b) / (abs(a) + abs(b))))
return False
# The following line is what numpy.allclose bases its decision
# upon, according to its documentation.
......
......@@ -31,10 +31,6 @@ except ImportError:
pass
# Module-level logger for theano.tensor.nnet.conv.
_logger = logging.getLogger("theano.tensor.nnet.conv")


def _debug(*msg):
    """Stringify the arguments, space-join them, and log at DEBUG level."""
    # Generator expression avoids building an intermediate list.
    _logger.debug(' '.join(str(x) for x in msg))


def _warn(*msg):
    """Stringify the arguments, space-join them, and log at WARNING level."""
    # Logger.warn is a deprecated alias of Logger.warning; use the
    # canonical name.
    _logger.warning(' '.join(str(x) for x in msg))
def conv2d(input, filters, image_shape=None, filter_shape=None,
......@@ -395,7 +391,7 @@ class ConvOp(Op):
warnstr = "OPTIMISATION WARNING: in ConvOp.__init__() unroll_batch(%i)"\
"must be 0 or a divisor of bsize(%i). We revert it to %i. This"\
" won't change the result, but may make it slower."
_warn(warnstr % (self.unroll_batch, self.bsize, new))
_logger.warn(warnstr, self.unroll_batch, self.bsize, new)
self.unroll_batch=new
......@@ -414,7 +410,7 @@ class ConvOp(Op):
warnstr = "OPTIMISATION WARNING: in ConvOp.__init__() unroll_kern(%i)"\
"should be 0 or a divisor of nkern(%i). We revert it to %i."\
"This won't change the result, but may make it slower."
_warn(warnstr % (self.unroll_kern, self.nkern, new))
_logger.warn(warnstr, self.unroll_kern, self.nkern, new)
self.unroll_kern=new
if all_shape:
......@@ -466,7 +462,11 @@ class ConvOp(Op):
self.unroll_kern=self.speed_unroll_batch_kern[time_unroll_batch_kern_idx][1]
self.unroll_patch = False
_debug("AUTO FIND VERSION OF C_CODE OF CONV OP",self.unroll_batch, self.unroll_kern, self.unroll_patch, self.bsize, self.nkern, time_unroll_patch, time_unroll_batch_kern)
_logger.debug("AUTO FIND VERSION OF C_CODE OF CONV OP "
"%s %s %s %s %s %s %s",
self.unroll_batch, self.unroll_kern, self.unroll_patch,
self.bsize, self.nkern, time_unroll_patch,
time_unroll_batch_kern)
self._rehash()
......@@ -764,7 +764,7 @@ class ConvOp(Op):
un_b = bsize
else:
un_b = 1
_warn("OPTIMISATION WARNING: in ConvOp.grad() we can't determine "\
_logger.warn("Optimization Warning: in ConvOp.grad() we can't determine "\
"a good unroll value for the batch. Maybe you can optimize this!")
if all_shape and un_k!=0 and nkern%un_k!=0:
......@@ -772,7 +772,7 @@ class ConvOp(Op):
un_k = nkern
else:
un_k = 1
_warn("OPTIMISATION WARNING: in ConvOp.grad() we can't determine "\
_logger.warn("Optimization Warning: in ConvOp.grad() we can't determine "\
"a good unroll value for the kernel. Maybe you can optimize this!")
dw = ConvOp(imshp, kshp, nkern, bsize, 1,1, output_mode='valid',
......@@ -983,18 +983,18 @@ using namespace std;
if self.imshp != self.imshp_logical or self.kshp != self.kshp_logical:
if self.verbose:
_debug("return imshp!=imshp_logical or self.kshp != self.kshp_logical shape version")
_logger.debug("return imshp!=imshp_logical or self.kshp != self.kshp_logical shape version")
return _conv_op_code_a % d
if self.unroll_patch:
if self.verbose:
_debug("return unroll patch version. all_shape=", all_shape)
_logger.debug("return unroll patch version. all_shape=%s", all_shape)
return _conv_op_code_unroll_patch%d
if self.unroll_batch>0 or self.unroll_kern>0:
assert self.unroll_batch>0
assert self.unroll_kern>0
if self.verbose:
_debug("return unrolled batch (%s) and kern code (%s)",
_logger.debug("return unrolled batch (%s) and kern code (%s)",
str(self.unroll_batch), str(self.unroll_kern))
return gen_conv_code_unroll_batch_kern(d, self.unroll_batch,
self.unroll_kern)
......@@ -1002,11 +1002,11 @@ using namespace std;
#TODO: should we choose the unroll size automatically with the bigger divisor under 5?
if self.out_mode == 'valid' and self.dx==0 and self.dy==0:
if self.verbose:
_debug("return gemm version")
_logger.debug("return gemm version")
return _conv_op_code_valid_gemm % d
else:
if self.verbose:
_debug("return no gemm version")
_logger.debug("return no gemm version")
return _conv_op_code_a % d
......
......@@ -14,10 +14,6 @@ from theano.tensor.nnet import conv
import logging
# Module-level logger for theano.tensor.signal.conv.
_logger = logging.getLogger("theano.tensor.signal.conv")


def _debug(*msg):
    """Log the given string arguments, space-joined, at DEBUG level."""
    _logger.debug(' '.join(msg))


def _warn(*msg):
    """Log the given string arguments, space-joined, at WARNING level."""
    # Logger.warn is a deprecated alias of Logger.warning; use the
    # canonical name.
    _logger.warning(' '.join(msg))
def conv2d(input, filters, image_shape=None, filter_shape=None,
......
Markdown 格式
0%
您在此讨论中添加了 0 人。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论