Commit b4244cfa authored by Frédéric Bastien

Merge pull request #2020 from zploskey/close_opened_files

Close opened files
@@ -476,8 +476,8 @@ class KeyData(object):
        """
        # Note that writing in binary mode is important under Windows.
        try:
            with open(self.key_pkl, 'wb') as f:
                cPickle.dump(self, f, protocol=cPickle.HIGHEST_PROTOCOL)
        except cPickle.PicklingError:
            _logger.warning("Cache leak due to unpickle-able key data %s",
                            self.keys)
@@ -681,7 +681,8 @@ class ModuleCache(object):
                        "unpickle cache file %s", key_pkl)
                try:
                    with open(key_pkl, 'rb') as f:
                        key_data = cPickle.load(f)
                except EOFError:
                    # Happened once... not sure why (would be worth
                    # investigating if it ever happens again).
@@ -1126,7 +1127,9 @@ class ModuleCache(object):
            # Verify that when we reload the KeyData from the pickled file, the
            # same key can be found in it, and is not equal to more than one
            # other key.
            with open(key_pkl, 'rb') as f:
                key_data = cPickle.load(f)
            found = sum(key == other_key for other_key in key_data.keys)
            msg = ''
            if found == 0:
...
@@ -178,13 +178,15 @@ def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1):
    other_dead = False
    while os.path.isdir(tmp_dir):
        try:
            with open(lock_file) as f:
                read_owner = f.readlines()[0].strip()
            # The try is transition code for old locks.
            # It may be removed when people have upgraded.
            try:
                other_host = read_owner.split('_')[2]
            except IndexError:
                other_host = ()  # make sure it isn't equal to any host
            if other_host == socket.gethostname():
                try:
                    os.kill(int(read_owner.split('_')[0]), 0)
@@ -250,7 +252,9 @@ def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1):
            # Verify we are really the lock owner (this should not be needed,
            # but better be safe than sorry).
            with open(lock_file) as f:
                owner = f.readlines()[0].strip()
            if owner != unique_id:
                # Too bad, try again.
                continue
...
@@ -46,15 +46,15 @@ def call_subprocess_Popen(command, **params):
    """
    if 'stdout' in params or 'stderr' in params:
        raise TypeError("don't use stderr or stdout with call_subprocess_Popen")
    with open(os.devnull, 'wb') as null:
        # stdin to devnull is a workaround for a crash in a weird Windows
        # environment where sys.stdin was None
        params.setdefault('stdin', null)
        params['stdout'] = null
        params['stderr'] = null
        p = subprocess_Popen(command, **params)
        returncode = p.wait()
    return returncode


def output_subprocess_Popen(command, **params):
    """
...
@@ -152,7 +152,10 @@ def run(stdout, stderr, argv, theano_nose, batch_size, time_profile,
        stderr.flush()
        assert rval == 0

        noseids_file = '.noseids'
        with open(noseids_file, 'rb') as f:
            data = cPickle.load(f)
        ids = data['ids']
        n_tests = len(ids)
        if n_tests == 0:
@@ -193,8 +196,9 @@ def run(stdout, stderr, argv, theano_nose, batch_size, time_profile,
            # otherwise this field may get erased. We use a set because it
            # seems like it is not systematically erased though, and we want
            # to avoid duplicates.
            with open(noseids_file, 'rb') as f:
                failed = failed.union(cPickle.load(f)['failed'])
            print '%s%% done in %.3fs (failed: %s)' % (
                (test_range[-1] * 100) // n_tests, t1 - t0, len(failed))
        # Sort for cosmetic purpose only.
...
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论