Commit 115c38aa, authored by Pascal Lamblin

Automatically format logging messages with loglevel and module.

Also get rid of all those debug(), info(), warning() and error() functions defined everywhere.
Parent commit: f82346f7
...@@ -28,9 +28,14 @@ __docformat__ = "restructuredtext en" ...@@ -28,9 +28,14 @@ __docformat__ = "restructuredtext en"
# Set a default logger. It is important to do this before importing some other # Set a default logger. It is important to do this before importing some other
# theano code, since this code may want to log some messages. # theano code, since this code may want to log some messages.
import logging import logging
theano_logger = logging.getLogger("theano")
logging_default_handler = logging.StreamHandler() logging_default_handler = logging.StreamHandler()
logging.getLogger("theano").addHandler(logging_default_handler) logging_default_formatter = logging.Formatter(
logging.getLogger("theano").setLevel(logging.WARNING) fmt='%(levelname)s (%(name)s): %(message)s')
logging_default_handler.setFormatter(logging_default_formatter)
theano_logger.addHandler(logging_default_handler)
theano_logger.setLevel(logging.WARNING)
import configparser, configdefaults import configparser, configdefaults
......
...@@ -80,28 +80,12 @@ import logging ...@@ -80,28 +80,12 @@ import logging
_logger=logging.getLogger("theano.compile.debugmode") _logger=logging.getLogger("theano.compile.debugmode")
_logger.setLevel(logging.WARNING) _logger.setLevel(logging.WARNING)
def error(*args):
#sys.stderr.write('ERROR:'+ ' '.join(str(a) for a in args)+'\n')
_logger.error("ERROR: "+' '.join(str(a) for a in args))
def warning(*args):
#sys.stderr.write('WARNING:'+ ' '.join(str(a) for a in args)+'\n')
_logger.warning("WARNING: "+' '.join(str(a) for a in args))
def opt_warning(*args):
#sys.stderr.write('WARNING:'+ ' '.join(str(a) for a in args)+'\n')
_logger.warning("OPTIMIZATION WARNING: "+' '.join(str(a) for a in args))
def info(*args):
#sys.stderr.write('INFO:'+ ' '.join(str(a) for a in args)+'\n')
_logger.info("INFO: "+' '.join(str(a) for a in args))
def debug(*args):
#sys.stderr.write('DEBUG:'+ ' '.join(str(a) for a in args)+'\n')
_logger.debug("DEBUG: "+' '.join(str(a) for a in args))
# Filter to avoid duplicating optimization warnings # Filter to avoid duplicating optimization warnings
class NoDuplicateOptWarningFilter(logging.Filter): class NoDuplicateOptWarningFilter(logging.Filter):
prev_msgs = set([]) prev_msgs = set([])
def filter(self, record): def filter(self, record):
msg = record.getMessage() msg = record.getMessage()
if msg.startswith('OPTIMIZATION WARNING: '): if msg.startswith('Optimization Warning: '):
if msg in self.prev_msgs: if msg in self.prev_msgs:
return False return False
else: else:
...@@ -581,7 +565,9 @@ def _check_inputs(node, storage_map, r_vals, dr_vals, active_nodes, clobber_dr_v ...@@ -581,7 +565,9 @@ def _check_inputs(node, storage_map, r_vals, dr_vals, active_nodes, clobber_dr_v
# while not destroying anything # while not destroying anything
continue continue
if out_var is not in_var: if out_var is not in_var:
opt_warning("input idx %d marked as destroyed was not changed for node '%s'"%(ii[0],str(node))) _logger.warning("Optimization Warning: input idx %d marked "
"as destroyed was not changed for node '%s'",
ii[0], str(node))
if warn_input_not_reused: if warn_input_not_reused:
vmap=getattr(node.op,'view_map',{}) vmap=getattr(node.op,'view_map',{})
...@@ -598,7 +584,9 @@ def _check_inputs(node, storage_map, r_vals, dr_vals, active_nodes, clobber_dr_v ...@@ -598,7 +584,9 @@ def _check_inputs(node, storage_map, r_vals, dr_vals, active_nodes, clobber_dr_v
# This class is not in the final graph. # This class is not in the final graph.
continue continue
if not _may_share_memory(out_var, in_var): if not _may_share_memory(out_var, in_var):
opt_warning("input idx %d marked as viewed but new memory allocated by node '%s'"%(ii[0],str(node))) _logger.warning("Optimization Warning: input idx %d marked "
"as viewed but new memory allocated by node '%s'",
ii[0], str(node))
for r_idx, r in enumerate(node.inputs): for r_idx, r in enumerate(node.inputs):
if not r.type.values_eq(r_vals[r], storage_map[r][0]): if not r.type.values_eq(r_vals[r], storage_map[r][0]):
...@@ -928,7 +916,7 @@ def _check_preallocated_output(node, thunk, prealloc_modes, def_val, ...@@ -928,7 +916,7 @@ def _check_preallocated_output(node, thunk, prealloc_modes, def_val,
raise NotImplementedError('Negative strides in check_preallocated_output') raise NotImplementedError('Negative strides in check_preallocated_output')
for (name, out_map) in prealloc_maps: for (name, out_map) in prealloc_maps:
#debug('name =', name, ', perform =', perform) # _logger.debug('name = %s, perform = %s', name, perform)
# Copy the inputs over again # Copy the inputs over again
for r in node.inputs: for r in node.inputs:
storage_map[r][0] = _lessbroken_deepcopy(r_vals[r]) storage_map[r][0] = _lessbroken_deepcopy(r_vals[r])
...@@ -1245,7 +1233,7 @@ class _Linker(gof.link.LocalLinker): ...@@ -1245,7 +1233,7 @@ class _Linker(gof.link.LocalLinker):
# This is the function that runs when you evaluate the graph # This is the function that runs when you evaluate the graph
##### #####
def f(): def f():
debug("starting a DebugMode call") _logger.debug("starting a DebugMode call")
for x in no_recycling: for x in no_recycling:
x[0] = None x[0] = None
...@@ -1268,7 +1256,7 @@ class _Linker(gof.link.LocalLinker): ...@@ -1268,7 +1256,7 @@ class _Linker(gof.link.LocalLinker):
assert len(thunks_py) == len(order) assert len(thunks_py) == len(order)
# transfer the initial values from the storage_map to the r_vals # transfer the initial values from the storage_map to the r_vals
debug("DEBUGMODE: transfer initial values") _logger.debug("DEBUGMODE: transfer initial values")
# r_vals_initialized keeps track of the values that have # r_vals_initialized keeps track of the values that have
# actually been transferred from storage_map to r_vals # actually been transferred from storage_map to r_vals
r_vals_initialized = [] r_vals_initialized = []
...@@ -1301,7 +1289,7 @@ class _Linker(gof.link.LocalLinker): ...@@ -1301,7 +1289,7 @@ class _Linker(gof.link.LocalLinker):
for i, (thunk_py, thunk_c, node) in enumerate(zip(thunks_py, thunks_c, order)): for i, (thunk_py, thunk_c, node) in enumerate(zip(thunks_py, thunks_c, order)):
this_node_destroyed_variables = set() this_node_destroyed_variables = set()
debug(i, "DEBUGMODE: starting node", i, node) _logger.debug("%i - starting node %i %s", i, i, node)
# put a copy of each input into the storage_map # put a copy of each input into the storage_map
# also, check that inputs have valid values # also, check that inputs have valid values
...@@ -1315,7 +1303,8 @@ class _Linker(gof.link.LocalLinker): ...@@ -1315,7 +1303,8 @@ class _Linker(gof.link.LocalLinker):
## On the first call to thunk_py(), its output storage will be None ## On the first call to thunk_py(), its output storage will be None
if thunk_py: if thunk_py:
debug(i, "DEBUGMODE running thunk_py with None as output storage") _logger.debug("%i - running thunk_py with None as "
"output storage", i)
try: try:
thunk_py() thunk_py()
except utils.MethodNotDefined: except utils.MethodNotDefined:
...@@ -1342,7 +1331,9 @@ class _Linker(gof.link.LocalLinker): ...@@ -1342,7 +1331,9 @@ class _Linker(gof.link.LocalLinker):
storage_map[r][0] = None #clear the storage_map of outputs for the thunk_c storage_map[r][0] = None #clear the storage_map of outputs for the thunk_c
if config.DebugMode.check_preallocated_output: if config.DebugMode.check_preallocated_output:
debug('calling _check_preallocated_output with thunk_py') _logger.debug(
'%i - calling _check_preallocated_output '
'with thunk_py', i)
_check_preallocated_output( _check_preallocated_output(
node=node, node=node,
thunk=thunk_py, thunk=thunk_py,
...@@ -1383,7 +1374,7 @@ class _Linker(gof.link.LocalLinker): ...@@ -1383,7 +1374,7 @@ class _Linker(gof.link.LocalLinker):
clobber = False clobber = False
debug(i, "DEBUGMODE running thunk_c") _logger.debug("%i - running thunk_c", i)
## First time, with None in output_storage ## First time, with None in output_storage
try: try:
thunk_c() thunk_c()
...@@ -1429,7 +1420,9 @@ class _Linker(gof.link.LocalLinker): ...@@ -1429,7 +1420,9 @@ class _Linker(gof.link.LocalLinker):
thunk_c() thunk_c()
except: except:
raise_with_op(node) raise_with_op(node)
debug('calling _check_preallocated_output with thunk_c') _logger.debug(
'%i - calling _check_preallocated_output '
'with thunk_c', i)
_check_preallocated_output( _check_preallocated_output(
node=node, node=node,
thunk=thunk, thunk=thunk,
...@@ -1452,7 +1445,7 @@ class _Linker(gof.link.LocalLinker): ...@@ -1452,7 +1445,7 @@ class _Linker(gof.link.LocalLinker):
for r in node.inputs: for r in node.inputs:
#print >> sys.stderr, i, "DEBUGMODE clearing input", r #print >> sys.stderr, i, "DEBUGMODE clearing input", r
storage_map[r][0] = None storage_map[r][0] = None
debug("done with node") _logger.debug("%i - done with node", i)
if False: if False:
#This could be useful to help finding refcount problem. #This could be useful to help finding refcount problem.
......
...@@ -8,9 +8,6 @@ import logging ...@@ -8,9 +8,6 @@ import logging
_logger=logging.getLogger("theano.compile.io") _logger=logging.getLogger("theano.compile.io")
_logger.setLevel(logging.WARNING) _logger.setLevel(logging.WARNING)
def warning(*args):
_logger.warning("WARNING: "+' '.join(str(a) for a in args))
class SymbolicInput(object): class SymbolicInput(object):
""" """
Represents a symbolic input for use with function or FunctionMaker. Represents a symbolic input for use with function or FunctionMaker.
...@@ -206,11 +203,12 @@ class In(SymbolicInput): ...@@ -206,11 +203,12 @@ class In(SymbolicInput):
# to False with mutable=True. # to False with mutable=True.
if mutable: if mutable:
if borrow==False: if borrow==False:
warning("Symbolic input for variable %s (name=%s) has flags "\ _logger.warning("Symbolic input for variable %s (name=%s) has "
"mutable=True, borrow=False. This combination is "\ "flags mutable=True, borrow=False. This combination is "
"incompatible since mutable=True implies that the input "\ "incompatible since mutable=True implies that the "
"variable may be both aliased (borrow=True) and over-"\ "input variable may be both aliased (borrow=True) and "
"written. We set borrow=True and continue." % (variable, name)) "over-written. We set borrow=True and continue.",
variable, name)
borrow = True borrow = True
# borrow=None basically means False. We can't set default value to False because of the # borrow=None basically means False. We can't set default value to False because of the
......
...@@ -15,11 +15,6 @@ from theano.gof import Container, Variable, generic ...@@ -15,11 +15,6 @@ from theano.gof import Container, Variable, generic
_logger = logging.getLogger('theano.compile.sharedvalue') _logger = logging.getLogger('theano.compile.sharedvalue')
_logger.setLevel(logging.DEBUG) _logger.setLevel(logging.DEBUG)
def debug(*msg): _logger.debug(' '.join(str(m) for m in msg))
def info(*msg): _logger.info(' '.join(str(m) for m in msg))
def warn(*msg): _logger.warn(' '.join(str(m) for m in msg))
def warning(*msg): _logger.warning(' '.join(str(m) for m in msg))
def error(*msg): _logger.error(' '.join(str(m) for m in msg))
AddConfigVar('shared.value_borrows', AddConfigVar('shared.value_borrows',
("DEPRECATED. You should not use the 'value' property of shared" ("DEPRECATED. You should not use the 'value' property of shared"
......
...@@ -5,8 +5,6 @@ import logging ...@@ -5,8 +5,6 @@ import logging
from theano.configparser import TheanoConfigParser, AddConfigVar, EnumStr, StrParam, IntParam, FloatParam, BoolParam from theano.configparser import TheanoConfigParser, AddConfigVar, EnumStr, StrParam, IntParam, FloatParam, BoolParam
_logger = logging.getLogger('theano.configdefaults') _logger = logging.getLogger('theano.configdefaults')
def warning(*msg):
_logger.warning('WARNING theano.configdefaults: '+' '.join(msg))
config = TheanoConfigParser() config = TheanoConfigParser()
...@@ -85,9 +83,10 @@ except OSError: ...@@ -85,9 +83,10 @@ except OSError:
EnumStr('c|py', 'py', 'c', 'c|py_nogc', 'c&py', EnumStr('c|py', 'py', 'c', 'c|py_nogc', 'c&py',
'vm', 'cvm', 'vm_nogc', 'cvm_nogc'), 'vm', 'cvm', 'vm_nogc', 'cvm_nogc'),
in_c_key=False) in_c_key=False)
warning('GCC not detected ! Theano will be unable to execute optimized '+ _logger.warning('GCC not detected ! Theano will be unable to execute '
'C-implementations (for both CPU and GPU) and will default to '+ 'optimized C-implementations (for both CPU and GPU) and will '
'Python implementations. Performance will be severely degraded.') 'default to Python implementations. Performance will be severely '
'degraded.')
del dummy_stdin del dummy_stdin
......
...@@ -2,20 +2,6 @@ import cPickle, logging, sys ...@@ -2,20 +2,6 @@ import cPickle, logging, sys
_logger=logging.getLogger("theano.gof.callcache") _logger=logging.getLogger("theano.gof.callcache")
def warning(*args):
sys.stderr.write('WARNING:'+ ' '.join(str(a) for a in args)+'\n')
_logger.warning(' '.join(str(a) for a in args))
def error(*args):
sys.stderr.write('ERROR:'+ ' '.join(str(a) for a in args)+'\n')
_logger.error(' '.join(str(a) for a in args))
def info(*args):
sys.stderr.write('INFO:'+ ' '.join(str(a) for a in args)+'\n')
_logger.info(' '.join(str(a) for a in args))
def debug(*args):
sys.stderr.write('DEBUG:'+ ' '.join(str(a) for a in args)+'\n')
_logger.debug(' '.join(str(a) for a in args))
class CallCache(object): class CallCache(object):
def __init__(self, filename=None): def __init__(self, filename=None):
self.filename = filename self.filename = filename
...@@ -45,10 +31,10 @@ class CallCache(object): ...@@ -45,10 +31,10 @@ class CallCache(object):
#backport #backport
#key = (fn, tuple(args)) if key is None else key #key = (fn, tuple(args)) if key is None else key
if key not in self.cache: if key not in self.cache:
debug('cache miss', len(self.cache)) _logger.debug('cache miss %i', len(self.cache))
self.cache[key] = fn(*args) self.cache[key] = fn(*args)
else: else:
debug('cache hit', len(self.cache)) _logger.debug('cache hit %i', len(self.cache))
return self.cache[key] return self.cache[key]
def __del__(self): def __del__(self):
...@@ -56,5 +42,5 @@ class CallCache(object): ...@@ -56,5 +42,5 @@ class CallCache(object):
if self.filename: if self.filename:
self.persist() self.persist()
except Exception, e: except Exception, e:
_logging.error('persist failed', self.filename, e) _logger.error('persist failed %s %s', self.filename, e)
...@@ -51,14 +51,6 @@ import cmodule ...@@ -51,14 +51,6 @@ import cmodule
import logging import logging
_logger=logging.getLogger("theano.gof.cc") _logger=logging.getLogger("theano.gof.cc")
_logger.setLevel(logging.WARN) _logger.setLevel(logging.WARN)
def info(*args):
_logger.info(' '.join(str(a) for a in args))
def debug(*args):
_logger.debug(' '.join(str(a) for a in args))
def warning(*args):
_logger.warning(' '.join(str(a) for a in args))
def error(*args):
_logger.error(' '.join(str(a) for a in args))
from theano.gof.callcache import CallCache from theano.gof.callcache import CallCache
...@@ -546,7 +538,7 @@ class CLinker(link.Linker): ...@@ -546,7 +538,7 @@ class CLinker(link.Linker):
except utils.MethodNotDefined: except utils.MethodNotDefined:
cleanup = "" cleanup = ""
info('compiling un-versioned Apply', node) _logger.info('compiling un-versioned Apply %s', str(node))
blocks.append(CodeBlock("", behavior, cleanup, sub)) blocks.append(CodeBlock("", behavior, cleanup, sub))
tasks.append((node, 'code', id)) tasks.append((node, 'code', id))
...@@ -993,7 +985,7 @@ class CLinker(link.Linker): ...@@ -993,7 +985,7 @@ class CLinker(link.Linker):
yield src_code yield src_code
get_lock() get_lock()
try: try:
debug("LOCATION", location) _logger.debug("LOCATION %s", str(location))
c_compiler = self.c_compiler() c_compiler = self.c_compiler()
libs = self.libraries() libs = self.libraries()
preargs = self.compile_args() preargs = self.compile_args()
......
(This diff is collapsed.)
...@@ -8,14 +8,6 @@ import socket # only used for gethostname() ...@@ -8,14 +8,6 @@ import socket # only used for gethostname()
import logging import logging
_logger=logging.getLogger("theano.gof.compilelock") _logger=logging.getLogger("theano.gof.compilelock")
_logger.setLevel(logging.INFO) # INFO will show the the messages "Refreshing lock" message _logger.setLevel(logging.INFO) # INFO will show the the messages "Refreshing lock" message
def info(*args):
_logger.info(' '.join(str(a) for a in args))
def debug(*args):
_logger.debug(' '.join(str(a) for a in args))
def warning(*args):
_logger.warning(' '.join(str(a) for a in args))
def error(*args):
_logger.error(' '.join(str(a) for a in args))
# In seconds, time that a process will wait before deciding to override an # In seconds, time that a process will wait before deciding to override an
# existing lock. An override only happens when the existing lock is held by # existing lock. An override only happens when the existing lock is held by
...@@ -66,7 +58,7 @@ def get_lock(): ...@@ -66,7 +58,7 @@ def get_lock():
now = time.time() now = time.time()
if now - get_lock.start_time > refresh_every: if now - get_lock.start_time > refresh_every:
lockpath = os.path.join(get_lock.lock_dir, 'lock') lockpath = os.path.join(get_lock.lock_dir, 'lock')
info('Refreshing lock', lockpath) _logger.info('Refreshing lock %s', str(lockpath))
refresh_lock(lockpath) refresh_lock(lockpath)
get_lock.start_time = now get_lock.start_time = now
get_lock.n_lock += 1 get_lock.n_lock += 1
...@@ -172,8 +164,8 @@ def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1): ...@@ -172,8 +164,8 @@ def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1):
if other_dead: if other_dead:
if not no_display: if not no_display:
msg = "process '%s'" % read_owner.split('_')[0] msg = "process '%s'" % read_owner.split('_')[0]
warning("Overriding existing lock by dead %s (I am " _logger.warning("Overriding existing lock by dead %s "
"process '%s')"% (msg, my_pid)) "(I am process '%s')", msg, my_pid)
get_lock.unlocker.unlock() get_lock.unlocker.unlock()
continue continue
if last_owner == read_owner: if last_owner == read_owner:
...@@ -185,8 +177,8 @@ def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1): ...@@ -185,8 +177,8 @@ def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1):
msg = 'unknown process' msg = 'unknown process'
else: else:
msg = "process '%s'" % read_owner.split('_')[0] msg = "process '%s'" % read_owner.split('_')[0]
warning("Overriding existing lock by %s (I am " _logger.warning("Overriding existing lock by %s "
"process '%s')"% (msg, my_pid)) "(I am process '%s')", msg, my_pid)
get_lock.unlocker.unlock() get_lock.unlocker.unlock()
continue continue
else: else:
...@@ -198,9 +190,9 @@ def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1): ...@@ -198,9 +190,9 @@ def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1):
msg = 'unknown process' msg = 'unknown process'
else: else:
msg = "process '%s'" % read_owner.split('_')[0] msg = "process '%s'" % read_owner.split('_')[0]
info("Waiting for existing lock by %s (I am " _logger.info("Waiting for existing lock by %s (I am "
"process '%s')" % (msg, my_pid)) "process '%s')", msg, my_pid)
info("To manually release the lock, delete", tmp_dir) _logger.info("To manually release the lock, delete %s", tmp_dir)
if verbosity <= 1: if verbosity <= 1:
no_display = True no_display = True
time.sleep(random.uniform(min_wait, max_wait)) time.sleep(random.uniform(min_wait, max_wait))
...@@ -229,7 +221,7 @@ def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1): ...@@ -229,7 +221,7 @@ def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1):
except Exception, e: except Exception, e:
# If something wrong happened, we try again. # If something wrong happened, we try again.
warning("Something wrong happened:", type(e), e) _logger.warning("Something wrong happened: %s %s", type(e), e)
time.sleep(random.uniform(min_wait, max_wait)) time.sleep(random.uniform(min_wait, max_wait))
continue continue
......
...@@ -5,16 +5,7 @@ from theano import config ...@@ -5,16 +5,7 @@ from theano import config
from theano.gof.compilelock import get_lock, release_lock from theano.gof.compilelock import get_lock, release_lock
from theano.gof import cmodule from theano.gof import cmodule
_logger_name = 'theano.gof.lazylinker_c' _logger = logging.getLogger('theano.gof.lazylinker_c')
_logger = logging.getLogger(_logger_name)
def warning(*msg):
_logger.warning(_logger_name+'WARNING: '+' '.join(str(m) for m in msg))
def info(*msg):
_logger.info(_logger_name+'INFO: '+' '.join(str(m) for m in msg))
def debug(*msg):
_logger.debug(_logger_name+'DEBUG: '+' '.join(str(m) for m in msg))
# Ensure the compiledir is in `sys.path` to be able to reload an existing # Ensure the compiledir is in `sys.path` to be able to reload an existing
# precompiled library. # precompiled library.
...@@ -45,7 +36,7 @@ except ImportError: ...@@ -45,7 +36,7 @@ except ImportError:
if version != getattr(lazylinker_ext, '_version', None): if version != getattr(lazylinker_ext, '_version', None):
raise ImportError() raise ImportError()
except ImportError: except ImportError:
info("COMPILING NEW CVM") _logger.info("Compiling new CVM")
dirname = 'lazylinker_ext' dirname = 'lazylinker_ext'
cfile = os.path.join(theano.__path__[0], 'gof', 'lazylinker_c.c') cfile = os.path.join(theano.__path__[0], 'gof', 'lazylinker_c.c')
code = open(cfile).read() code = open(cfile).read()
...@@ -67,7 +58,7 @@ except ImportError: ...@@ -67,7 +58,7 @@ except ImportError:
from lazylinker_ext import lazylinker_ext as lazy_c from lazylinker_ext import lazylinker_ext as lazy_c
assert (lazylinker_ext._version == assert (lazylinker_ext._version ==
lazy_c.get_version()) lazy_c.get_version())
info("NEW VERSION", lazylinker_ext._version) _logger.info("New version %s", lazylinker_ext._version)
finally: finally:
# Release lock on compilation directory. # Release lock on compilation directory.
release_lock() release_lock()
......
...@@ -123,7 +123,7 @@ class SeqOptimizer(Optimizer, list): ...@@ -123,7 +123,7 @@ class SeqOptimizer(Optimizer, list):
def warn(exc, self, optimizer): def warn(exc, self, optimizer):
"""Default failure_callback for SeqOptimizer """Default failure_callback for SeqOptimizer
""" """
_logger.error("ERROR: SeqOptimizer apply %s"% str(optimizer)) _logger.error("SeqOptimizer apply %s"% str(optimizer))
_logger.error("Traceback:") _logger.error("Traceback:")
_logger.error(traceback.format_exc()) _logger.error(traceback.format_exc())
if config.on_opt_error == 'raise': if config.on_opt_error == 'raise':
...@@ -774,7 +774,7 @@ class NavigatorOptimizer(Optimizer): ...@@ -774,7 +774,7 @@ class NavigatorOptimizer(Optimizer):
def warn(exc, nav, repl_pairs, local_opt): def warn(exc, nav, repl_pairs, local_opt):
"""failure_callback for NavigatorOptimizer: print traceback """failure_callback for NavigatorOptimizer: print traceback
""" """
_logger.error("ERROR: Optimization failure due to: %s" % str(local_opt)) _logger.error("Optimization failure due to: %s" % str(local_opt))
_logger.error("TRACEBACK:") _logger.error("TRACEBACK:")
_logger.error(traceback.format_exc()) _logger.error(traceback.format_exc())
if isinstance(exc, AssertionError) or config.on_opt_error == 'raise': if isinstance(exc, AssertionError) or config.on_opt_error == 'raise':
...@@ -1110,7 +1110,7 @@ class EquilibriumOptimizer(NavigatorOptimizer): ...@@ -1110,7 +1110,7 @@ class EquilibriumOptimizer(NavigatorOptimizer):
self.detach_updater(env, u) self.detach_updater(env, u)
self.detach_updater(env, u) #TODO: erase this line, it's redundant at best self.detach_updater(env, u) #TODO: erase this line, it's redundant at best
if max_use_abort: if max_use_abort:
_logger.error("ERROR: EquilibriumOptimizer max'ed out") _logger.error("EquilibriumOptimizer max'ed out")
def print_summary(self, stream=sys.stdout, level=0): def print_summary(self, stream=sys.stdout, level=0):
print >> stream, "%s%s id=%i" %(' '*level, self.__class__.__name__, id(self)) print >> stream, "%s%s id=%i" %(' '*level, self.__class__.__name__, id(self))
......
...@@ -19,11 +19,6 @@ import gof.utils ...@@ -19,11 +19,6 @@ import gof.utils
from raise_op import Raise from raise_op import Raise
def warning(*msg):
_logger.warning('WARNING theano.gradient: '+' '.join(msg))
def info(*msg):
_logger.info('INFO theano.gradient: '+' '.join(msg))
_msg_retType = 'op.grad(...) returned a non-list' _msg_retType = 'op.grad(...) returned a non-list'
_msg_badlen = 'op.grad(...) returned wrong number of gradients' _msg_badlen = 'op.grad(...) returned wrong number of gradients'
...@@ -109,8 +104,9 @@ def grad_sources_inputs(sources, graph_inputs, warn_type=True): ...@@ -109,8 +104,9 @@ def grad_sources_inputs(sources, graph_inputs, warn_type=True):
if g_r and (getattr(r,'type',0) != getattr(g_r,'type', 1)): if g_r and (getattr(r,'type',0) != getattr(g_r,'type', 1)):
r_type = getattr(r,'type', None) r_type = getattr(r,'type', None)
g_r_type = getattr(g_r,'type', None) g_r_type = getattr(g_r,'type', None)
warning('%s.grad returned a different type (%s) for input %i of type (%s)'%( _logger.warning('%s.grad returned a different type (%s) '
node.op, g_r_type, ii, r_type)) 'for input %i of type (%s)',
node.op, g_r_type, ii, r_type)
if g_r and len(sources) == 1 and sources[0][0].name and r.name: if g_r and len(sources) == 1 and sources[0][0].name and r.name:
g_r.name = "(d%s/d%s)" % (sources[0][0].name, r.name) g_r.name = "(d%s/d%s)" % (sources[0][0].name, r.name)
if g_r is not None: if g_r is not None:
......
...@@ -16,19 +16,11 @@ from tensor import opt ...@@ -16,19 +16,11 @@ from tensor import opt
_logger = logging.getLogger('theano.lazycond') _logger = logging.getLogger('theano.lazycond')
def warning(*msg):
_logger.warning(_logger_name+'WARNING: '+' '.join(str(m) for m in msg))
def info(*msg):
_logger.info(_logger_name+'INFO: '+' '.join(str(m) for m in msg))
def debug(*msg):
_logger.debug(_logger_name+'DEBUG: '+' '.join(str(m) for m in msg))
@gof.local_optimizer([None]) @gof.local_optimizer([None])
def ifelse_make_inplace(node): def ifelse_make_inplace(node):
op = node.op op = node.op
if isinstance(op, IfElse) and not op.as_view : if isinstance(op, IfElse) and not op.as_view :
debug('ifelse_make_inplace applied') _logger.debug('ifelse_make_inplace applied')
return IfElse(as_view = True, return IfElse(as_view = True,
gpu = op.gpu, name=op.name).make_node(*node.inputs).outputs gpu = op.gpu, name=op.name).make_node(*node.inputs).outputs
return False return False
......
...@@ -7,18 +7,6 @@ import nvcc_compiler ...@@ -7,18 +7,6 @@ import nvcc_compiler
_logger_name = 'theano.sandbox.cuda' _logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name) _logger = logging.getLogger(_logger_name)
_logger.setLevel(logging.WARNING) _logger.setLevel(logging.WARNING)
def error(*msg):
_logger.error('ERROR (%s): %s'% (
_logger_name, ' '.join(str(m) for m in msg)))
def warning(*msg):
_logger.warning('WARNING (%s): %s'% ( _logger_name,
' '.join(str(m) for m in msg)))
def info(*msg):
_logger.info('INFO (%s): %s'% ( _logger_name,
' '.join(str(m) for m in msg)))
def debug(*msg):
_logger.debug('DEBUG (%s): %s'% ( _logger_name,
' '.join(str(m) for m in msg)))
AddConfigVar('cuda.root', AddConfigVar('cuda.root',
"""directory with bin/, lib/, include/ for cuda utilities. """directory with bin/, lib/, include/ for cuda utilities.
...@@ -122,7 +110,7 @@ try: ...@@ -122,7 +110,7 @@ try:
include_dirs=[cuda_path], libs=['cublas']) include_dirs=[cuda_path], libs=['cublas'])
from cuda_ndarray.cuda_ndarray import * from cuda_ndarray.cuda_ndarray import *
except Exception, e: except Exception, e:
error( "Failed to compile cuda_ndarray.cu: %s" % str(e)) _logger.error( "Failed to compile cuda_ndarray.cu: %s", str(e))
set_cuda_disabled() set_cuda_disabled()
if cuda_available: if cuda_available:
...@@ -158,14 +146,13 @@ if cuda_available: ...@@ -158,14 +146,13 @@ if cuda_available:
# we compiled! # we compiled!
import cuda_ndarray.cuda_ndarray import cuda_ndarray.cuda_ndarray
if cuda_ndarray_so != cuda_ndarray.cuda_ndarray.__file__: if cuda_ndarray_so != cuda_ndarray.cuda_ndarray.__file__:
warning("WARNING: cuda_ndarray was loaded from", _logger.warning("cuda_ndarray was loaded from %s, but Theano expected "
"to load it from %s. This is not expected as theano should "
"compile it automatically for you. Do you have a directory "
"called cuda_ndarray in your LD_LIBRARY_PATH environment "
"variable? If so, please remove it as it is outdated.",
cuda_ndarray.cuda_ndarray.__file__, cuda_ndarray.cuda_ndarray.__file__,
"but Theano expected to load it from", cuda_ndarray_so)
cuda_ndarray_so,
"""This is not expected as theano should compile it
automatically for you. Do you have a directory called cuda_ndarray in your
LD_LIBRARY_PATH environment variable? If so, please remove it as it is
outdated!""")
shared_constructor = float32_shared_constructor shared_constructor = float32_shared_constructor
...@@ -205,7 +192,7 @@ def use(device, ...@@ -205,7 +192,7 @@ def use(device,
"with error:\n%s" % ( "with error:\n%s" % (
device, cuda_initialization_error_message)) device, cuda_initialization_error_message))
elif not nvcc_compiler.is_nvcc_available(): elif not nvcc_compiler.is_nvcc_available():
error('nvcc compiler not found on $PATH.' _logger.error('nvcc compiler not found on $PATH.'
' Check your nvcc installation and try again.') ' Check your nvcc installation and try again.')
return return
elif not cuda_available: elif not cuda_available:
...@@ -215,7 +202,8 @@ def use(device, ...@@ -215,7 +202,8 @@ def use(device,
error_addendum = " (error: %s)" % cuda_initialization_error_message error_addendum = " (error: %s)" % cuda_initialization_error_message
except NameError: # cuda_initialization_error_message is not available b/c compilation failed except NameError: # cuda_initialization_error_message is not available b/c compilation failed
pass pass
warning('CUDA is installed, but device %s is not available%s' % (device, error_addendum)) _logger.warning('CUDA is installed, but device %s is not available %s',
device, error_addendum)
return return
if device == 'gpu': if device == 'gpu':
...@@ -245,7 +233,8 @@ def use(device, ...@@ -245,7 +233,8 @@ def use(device,
print >> sys.stderr, "Using gpu device %d: %s" % (active_device_number(), active_device_name()) print >> sys.stderr, "Using gpu device %d: %s" % (active_device_number(), active_device_name())
except (EnvironmentError, ValueError), e: except (EnvironmentError, ValueError), e:
_logger.error(("ERROR: Not using GPU." _logger.error(("ERROR: Not using GPU."
" Initialisation of device %i failed:\n%s") % (device, e)) " Initialisation of device %i failed:\n%s"),
device, e)
cuda_enabled = False cuda_enabled = False
if force: if force:
e.args+=(("You asked to force this device and it failed." e.args+=(("You asked to force this device and it failed."
...@@ -253,8 +242,9 @@ def use(device, ...@@ -253,8 +242,9 @@ def use(device,
raise raise
elif use.device_number != device: elif use.device_number != device:
_logger.warning(("WARNING: ignoring call to use(%s), GPU number %i " _logger.warning(("Ignoring call to use(%s), GPU number %i "
"is already in use.") %(str(device), use.device_number)) "is already in use."),
str(device), use.device_number)
if default_to_move_computation_to_gpu: if default_to_move_computation_to_gpu:
optdb.add_tags('gpu_opt', optdb.add_tags('gpu_opt',
...@@ -294,10 +284,12 @@ if config.device.startswith('gpu'): ...@@ -294,10 +284,12 @@ if config.device.startswith('gpu'):
elif config.init_gpu_device: elif config.init_gpu_device:
assert config.device=="cpu", ("We can use the Theano flag init_gpu_device" assert config.device=="cpu", ("We can use the Theano flag init_gpu_device"
" only when the Theano flag device=='cpu'") " only when the Theano flag device=='cpu'")
warning(("GPU device %s will be initialized, and used if a GPU is needed. " _logger.warning(("GPU device %s will be initialized, and used if a GPU is "
"needed. "
"However, no computation, nor shared variables, will be implicitly " "However, no computation, nor shared variables, will be implicitly "
"moved to that device. If you want that behavior, use the 'device' " "moved to that device. If you want that behavior, use the 'device' "
"flag instead.") % config.init_gpu_device) "flag instead."),
config.init_gpu_device)
use(device=config.init_gpu_device, use(device=config.init_gpu_device,
force=config.force_device, force=config.force_device,
default_to_move_computation_to_gpu=False, default_to_move_computation_to_gpu=False,
......
...@@ -18,12 +18,6 @@ _logger_name = 'theano.sandbox.cuda.basic_ops' ...@@ -18,12 +18,6 @@ _logger_name = 'theano.sandbox.cuda.basic_ops'
_logger = logging.getLogger(_logger_name) _logger = logging.getLogger(_logger_name)
_logger.setLevel(logging.INFO) _logger.setLevel(logging.INFO)
_logger.addHandler(logging.StreamHandler()) #TO REMOVE _logger.addHandler(logging.StreamHandler()) #TO REMOVE
def warning(*msg):
    """Stringify and space-join *msg*, then log it at WARNING level."""
    body = ' '.join(str(m) for m in msg)
    _logger.warning(_logger_name + 'WARNING: ' + body)
def info(*msg):
    """Stringify and space-join *msg*, then log it at INFO level."""
    body = ' '.join(str(m) for m in msg)
    _logger.info(_logger_name + 'INFO: ' + body)
def debug(*msg):
    """Stringify and space-join *msg*, then log it at DEBUG level."""
    body = ' '.join(str(m) for m in msg)
    _logger.debug(_logger_name + 'DEBUG: ' + body)
def as_cuda_ndarray_variable(x): def as_cuda_ndarray_variable(x):
if hasattr(x, '_as_CudaNdarrayVariable'): if hasattr(x, '_as_CudaNdarrayVariable'):
......
...@@ -15,12 +15,6 @@ _logger_name = 'theano.sandbox.cuda.elemwise' ...@@ -15,12 +15,6 @@ _logger_name = 'theano.sandbox.cuda.elemwise'
_logger = logging.getLogger(_logger_name) _logger = logging.getLogger(_logger_name)
_logger.setLevel(logging.INFO) _logger.setLevel(logging.INFO)
_logger.addHandler(logging.StreamHandler()) #TO REMOVE _logger.addHandler(logging.StreamHandler()) #TO REMOVE
def warning(*msg):
    """Stringify and space-join *msg*, then log it at WARNING level."""
    body = ' '.join(str(m) for m in msg)
    _logger.warning(_logger_name + 'WARNING: ' + body)
def info(*msg):
    """Stringify and space-join *msg*, then log it at INFO level."""
    body = ' '.join(str(m) for m in msg)
    _logger.info(_logger_name + 'INFO: ' + body)
def debug(*msg):
    """Stringify and space-join *msg*, then log it at DEBUG level."""
    body = ' '.join(str(m) for m in msg)
    _logger.debug(_logger_name + 'DEBUG: ' + body)
def _logical_scalar(x): def _logical_scalar(x):
......
...@@ -9,19 +9,6 @@ _logger.setLevel(logging.WARN) ...@@ -9,19 +9,6 @@ _logger.setLevel(logging.WARN)
from theano.configparser import config, AddConfigVar, StrParam, BoolParam from theano.configparser import config, AddConfigVar, StrParam, BoolParam
def error(*args):
    """Stringify and space-join *args*, then log them at ERROR level."""
    message = ' '.join(str(a) for a in args)
    _logger.error("ERROR: " + message)
def warning(*args):
    """Stringify and space-join *args*, then log them at WARNING level."""
    message = ' '.join(str(a) for a in args)
    _logger.warning("WARNING: " + message)
def info(*args):
    """Stringify and space-join *args*, then log them at INFO level."""
    message = ' '.join(str(a) for a in args)
    _logger.info("INFO: " + message)
def debug(*args):
    """Stringify and space-join *args*, then log them at DEBUG level."""
    message = ' '.join(str(a) for a in args)
    _logger.debug("DEBUG: " + message)
AddConfigVar('nvcc.compiler_bindir', AddConfigVar('nvcc.compiler_bindir',
"If defined, nvcc compiler driver will seek g++ and gcc in this directory", "If defined, nvcc compiler driver will seek g++ and gcc in this directory",
StrParam("")) StrParam(""))
...@@ -143,7 +130,7 @@ def nvcc_module_compile_str( ...@@ -143,7 +130,7 @@ def nvcc_module_compile_str(
cppfilename = os.path.join(location, 'mod.cu') cppfilename = os.path.join(location, 'mod.cu')
cppfile = file(cppfilename, 'w') cppfile = file(cppfilename, 'w')
debug('Writing module C++ code to', cppfilename) _logger.debug('Writing module C++ code to %s', cppfilename)
ofiles = [] ofiles = []
rval = None rval = None
...@@ -152,7 +139,7 @@ def nvcc_module_compile_str( ...@@ -152,7 +139,7 @@ def nvcc_module_compile_str(
lib_filename = os.path.join(location, '%s.%s' % lib_filename = os.path.join(location, '%s.%s' %
(module_name, get_lib_extension())) (module_name, get_lib_extension()))
debug('Generating shared lib', lib_filename) _logger.debug('Generating shared lib %s', lib_filename)
# TODO: Why do these args cause failure on gtx285 that has 1.3 compute capability? '--gpu-architecture=compute_13', '--gpu-code=compute_13', # TODO: Why do these args cause failure on gtx285 that has 1.3 compute capability? '--gpu-architecture=compute_13', '--gpu-code=compute_13',
preargs1=[pa for pa in preargs if pa.startswith('-O') or pa.startswith('--maxrregcount=')]#nvcc argument preargs1=[pa for pa in preargs if pa.startswith('-O') or pa.startswith('--maxrregcount=')]#nvcc argument
preargs2=[pa for pa in preargs if pa not in preargs1]#other arguments preargs2=[pa for pa in preargs if pa not in preargs1]#other arguments
...@@ -225,7 +212,7 @@ def nvcc_module_compile_str( ...@@ -225,7 +212,7 @@ def nvcc_module_compile_str(
cmd.pop(cmd.index(fwk_str)) cmd.pop(cmd.index(fwk_str))
#cmd.append("--ptxas-options=-v") #uncomment this to see register and shared-mem requirements #cmd.append("--ptxas-options=-v") #uncomment this to see register and shared-mem requirements
debug('Running cmd', ' '.join(cmd)) _logger.debug('Running cmd %s', ' '.join(cmd))
orig_dir = os.getcwd() orig_dir = os.getcwd()
try: try:
os.chdir(location) os.chdir(location)
...@@ -248,7 +235,7 @@ def nvcc_module_compile_str( ...@@ -248,7 +235,7 @@ def nvcc_module_compile_str(
continue continue
if 'statement is unreachable' in eline: if 'statement is unreachable' in eline:
continue continue
_logger.info("NVCC: "+eline) _logger.info("NVCC: %s", eline)
if p.returncode: if p.returncode:
# filter the output from the compiler # filter the output from the compiler
......
...@@ -56,13 +56,6 @@ from scan_utils import clone ...@@ -56,13 +56,6 @@ from scan_utils import clone
# Logging function for sending warning or info # Logging function for sending warning or info
_logger = logging.getLogger('theano.scan_module') _logger = logging.getLogger('theano.scan_module')
def warning(*msg):
    """Space-join the given string fragments and log them at WARNING level."""
    text = ' '.join(msg)
    _logger.warning('WARNING theano.scan: ' + text)
def info(*msg):
    """Space-join the given string fragments and log them at INFO level."""
    text = ' '.join(msg)
    _logger.info('INFO theano.scan: ' + text)
@gof.local_optimizer([None]) @gof.local_optimizer([None])
def scan_make_inplace(node): def scan_make_inplace(node):
......
...@@ -58,12 +58,6 @@ from theano.updates import Updates ...@@ -58,12 +58,6 @@ from theano.updates import Updates
# Logging function for sending warning or info # Logging function for sending warning or info
_logger = logging.getLogger('theano.scan_module.scan') _logger = logging.getLogger('theano.scan_module.scan')
def warning(*msg):
    """Space-join the given string fragments and log them at WARNING level."""
    text = ' '.join(msg)
    _logger.warning('WARNING theano.scan: ' + text)
def info(*msg):
    """Space-join the given string fragments and log them at INFO level."""
    text = ' '.join(msg)
    _logger.info('INFO theano.scan: ' + text)
def scan( fn def scan( fn
, sequences = None , sequences = None
...@@ -371,10 +365,10 @@ def scan( fn ...@@ -371,10 +365,10 @@ def scan( fn
# ^ initial state but taps not provided # ^ initial state but taps not provided
if outs_info[i].has_key('taps'): if outs_info[i].has_key('taps'):
# ^ explicitly provided a None for taps # ^ explicitly provided a None for taps
warning (' Output %s ( index %d) has a initial state ' _logger.warning('Output %s ( index %d) has a initial state '
' but taps is explicitly set to None ' % ( 'but taps is explicitly set to None ',
getattr(outs_info[i]['initial'],'name','None') getattr(outs_info[i]['initial'],'name','None'),
, i) ) i)
outs_info[i]['taps'] = [-1] outs_info[i]['taps'] = [-1]
else: else:
# if a None is provided as the output info we replace it # if a None is provided as the output info we replace it
...@@ -428,8 +422,8 @@ def scan( fn ...@@ -428,8 +422,8 @@ def scan( fn
if config.compute_test_value != 'ignore': if config.compute_test_value != 'ignore':
# No need to print a warning or raise an error now, # No need to print a warning or raise an error now,
# it will be done when fn will be called. # it will be done when fn will be called.
info(('Cannot compute test value for the inner ' _logger.info(('Cannot compute test value for the inner '
'function of scan, input value missing'), e) 'function of scan, input value missing %s'), e)
# Add names to slices for debugging and pretty printing .. # Add names to slices for debugging and pretty printing ..
# that is if the input already has a name # that is if the input already has a name
...@@ -556,8 +550,8 @@ def scan( fn ...@@ -556,8 +550,8 @@ def scan( fn
if config.compute_test_value != 'ignore': if config.compute_test_value != 'ignore':
# No need to print a warning or raise an error now, # No need to print a warning or raise an error now,
# it will be done when fn will be called. # it will be done when fn will be called.
info(('Cannot compute test value for the inner ' _logger.info(('Cannot compute test value for the inner '
'function of scan, input value missing'), e) 'function of scan, input value missing %s'), e)
if getattr(init_out['initial'],'name', None) is not None: if getattr(init_out['initial'],'name', None) is not None:
arg.name = init_out['initial'].name+'[t-1]' arg.name = init_out['initial'].name+'[t-1]'
...@@ -614,8 +608,8 @@ def scan( fn ...@@ -614,8 +608,8 @@ def scan( fn
if config.compute_test_value != 'ignore': if config.compute_test_value != 'ignore':
# No need to print a warning or raise an error now, # No need to print a warning or raise an error now,
# it will be done when fn will be called. # it will be done when fn will be called.
info(('Cannot compute test value for the inner ' _logger.info(('Cannot compute test value for the inner '
'function of scan, input value missing.'), e) 'function of scan, input value missing. %s'), e)
# give it a name or debugging and pretty printing # give it a name or debugging and pretty printing
if getattr(init_out['initial'],'name', None) is not None: if getattr(init_out['initial'],'name', None) is not None:
......
...@@ -37,14 +37,6 @@ from scan_utils import safe_new, safe_to_cpu, traverse ...@@ -37,14 +37,6 @@ from scan_utils import safe_new, safe_to_cpu, traverse
# Logging function for sending warning or info # Logging function for sending warning or info
_logger = logging.getLogger('theano.scan_module.scan_op') _logger = logging.getLogger('theano.scan_module.scan_op')
def warning(*msg):
    """Space-join the given string fragments and log them at WARNING level."""
    text = ' '.join(msg)
    _logger.warning('WARNING theano.scan: ' + text)
def info(*msg):
    """Space-join the given string fragments and log them at INFO level."""
    text = ' '.join(msg)
    _logger.info('INFO theano.scan: ' + text)
from theano.sandbox import cuda from theano.sandbox import cuda
class Scan(Op): class Scan(Op):
......
...@@ -41,12 +41,6 @@ import theano ...@@ -41,12 +41,6 @@ import theano
# Logging function for sending warning or info # Logging function for sending warning or info
_logger = logging.getLogger('theano.scan_utils') _logger = logging.getLogger('theano.scan_utils')
def warning(*msg):
    """Space-join the given string fragments and log them at WARNING level."""
    text = ' '.join(msg)
    _logger.warning('WARNING theano.scan: ' + text)
def info(*msg):
    """Space-join the given string fragments and log them at INFO level."""
    text = ' '.join(msg)
    _logger.info('INFO theano.scan: ' + text)
def safe_new(x): def safe_new(x):
if isinstance(x, numpy.ndarray): if isinstance(x, numpy.ndarray):
......
...@@ -20,12 +20,6 @@ import scan ...@@ -20,12 +20,6 @@ import scan
# Logging function for sending warning or info # Logging function for sending warning or info
_logger = logging.getLogger('theano.scan_module.scan_views') _logger = logging.getLogger('theano.scan_module.scan_views')
def warning(*msg):
    """Space-join the given string fragments and log them at WARNING level."""
    text = ' '.join(msg)
    _logger.warning('WARNING theano.scan: ' + text)
def info(*msg):
    """Space-join the given string fragments and log them at INFO level."""
    text = ' '.join(msg)
    _logger.info('INFO theano.scan: ' + text)
################ Declaration of Views for Scan ####################### ################ Declaration of Views for Scan #######################
......
...@@ -18,11 +18,6 @@ from theano.tensor.blas_headers import blas_header_text #, cblas_header_text ...@@ -18,11 +18,6 @@ from theano.tensor.blas_headers import blas_header_text #, cblas_header_text
_logger = logging.getLogger('theano.tensor.blas') _logger = logging.getLogger('theano.tensor.blas')
_logger.setLevel(logging.WARN) _logger.setLevel(logging.WARN)
def debug(*msg):
    """Space-join the stringified *msg* parts and log them at DEBUG level."""
    _logger.debug(' '.join(map(str, msg)))
def info(*msg):
    """Space-join the stringified *msg* parts and log them at INFO level."""
    _logger.info(' '.join(map(str, msg)))
def warn(*msg):
    """Space-join the stringified *msg* parts and log them at WARNING level.

    Uses ``Logger.warning`` rather than the deprecated ``Logger.warn``
    alias; behavior (level, formatted record) is identical.
    """
    _logger.warning(' '.join(str(m) for m in msg))
def warning(*msg):
    """Space-join the stringified *msg* parts and log them at WARNING level."""
    _logger.warning(' '.join(map(str, msg)))
def error(*msg):
    """Space-join the stringified *msg* parts and log them at ERROR level."""
    _logger.error(' '.join(map(str, msg)))
try: try:
import scipy.linalg.blas import scipy.linalg.blas
...@@ -35,7 +30,8 @@ try: ...@@ -35,7 +30,8 @@ try:
} }
except ImportError, e: except ImportError, e:
_have_fblas = False _have_fblas = False
warning('Failed to import scipy.linalg.blas.fblas. Falling back on slower implementations (%s)' % str(e)) _logger.warning('Failed to import scipy.linalg.blas.fblas. '
'Falling back on slower implementations (%s)', str(e))
class Gemv(Op): class Gemv(Op):
""" """
...@@ -138,7 +134,9 @@ def ldflags(libs=True, flags=False, libs_dir=False, include_dir=False): ...@@ -138,7 +134,9 @@ def ldflags(libs=True, flags=False, libs_dir=False, include_dir=False):
if any([f.find(ll)>=0 for ll in l]): if any([f.find(ll)>=0 for ll in l]):
found_dyn=True found_dyn=True
if not found_dyn and dirs: if not found_dyn and dirs:
warning("We did not found a dynamic library into the library_dir of the library we use for blas. If you use ATLAS, make sure to compile it with dynamics library.") _logger.warning("We did not found a dynamic library into the "
"library_dir of the library we use for blas. If you use "
"ATLAS, make sure to compile it with dynamics library.")
for t in config.blas.ldflags.split(): for t in config.blas.ldflags.split():
try: try:
...@@ -1061,7 +1059,7 @@ def local_dot_to_dot22(node): ...@@ -1061,7 +1059,7 @@ def local_dot_to_dot22(node):
x,y = node.inputs x,y = node.inputs
if y.type.dtype != x.type.dtype: if y.type.dtype != x.type.dtype:
# TODO: upcast one so the types match # TODO: upcast one so the types match
info('Not optimizing dot with inputs', x, y, x.type, y.type) _logger.info('Not optimizing dot with inputs %s %s %s %s', x, y, x.type, y.type)
return return
if y.type.dtype.startswith('float'): if y.type.dtype.startswith('float'):
if _is_real_matrix(x) and _is_real_matrix(y): if _is_real_matrix(x) and _is_real_matrix(y):
...@@ -1074,7 +1072,7 @@ def local_dot_to_dot22(node): ...@@ -1074,7 +1072,7 @@ def local_dot_to_dot22(node):
if _is_real_vector(x) and _is_real_vector(x): if _is_real_vector(x) and _is_real_vector(x):
return [_dot22(x.dimshuffle('x',0), y.dimshuffle(0,'x')).dimshuffle()] return [_dot22(x.dimshuffle('x',0), y.dimshuffle(0,'x')).dimshuffle()]
info('Not optimizing dot with inputs', x, y, x.type, y.type) _logger.info('Not optimizing dot with inputs %s %s %s %s', x, y, x.type, y.type)
@local_optimizer([gemm_no_inplace]) @local_optimizer([gemm_no_inplace])
def local_inplace_gemm(node): def local_inplace_gemm(node):
...@@ -1225,7 +1223,10 @@ def local_dot22_to_dot22scalar(node): ...@@ -1225,7 +1223,10 @@ def local_dot22_to_dot22scalar(node):
return [T.mul(m.owner.inputs[1-i],dot)] return [T.mul(m.owner.inputs[1-i],dot)]
elif m.owner and m.owner.op == T.mul: elif m.owner and m.owner.op == T.mul:
info('Not optimizing dot22 with inputs', d, m, d.type, m.type, 'we need to check in a recursive way in the mul if we can reorder the graph. The canonizer should have done this.') _logger.info('Not optimizing dot22 with inputs %s %s %s %s. '
'we need to check in a recursive way in the mul if we can '
'reorder the graph. The canonizer should have done this.',
d, m, d.type, m.type)
else: else:
return False return False
...@@ -1235,7 +1236,9 @@ def local_dot22_to_dot22scalar(node): ...@@ -1235,7 +1236,9 @@ def local_dot22_to_dot22scalar(node):
scalar_idx = i scalar_idx = i
break break
if scalar_idx<0: if scalar_idx<0:
info('Not optimizing dot22 with inputs', node.inputs, [x.type for x in node.inputs], 'as the type of the scalar can\'t be upcasted to the matrix type') _logger.info('Not optimizing dot22 with inputs %s %s, as the type '
'of the scalar cannot be upcasted to the matrix type',
node.inputs, [x.type for x in node.inputs])
return False return False
assert scalar_idx<len(node.inputs) assert scalar_idx<len(node.inputs)
s = node.inputs[scalar_idx] s = node.inputs[scalar_idx]
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论