提交 a35b11c3 authored 作者: Pascal Lamblin's avatar Pascal Lamblin

merge

...@@ -194,20 +194,21 @@ class ModuleCache(object): ...@@ -194,20 +194,21 @@ class ModuleCache(object):
If the ``version`` is either 0 or (), then the corresponding module is unversioned, and If the ``version`` is either 0 or (), then the corresponding module is unversioned, and
will be deleted in an atexit() handler. will be deleted in an atexit() handler.
If the ``version`` is neither 0 nor (), then the module will be kept in the cache between If the ``version`` is neither 0 nor (), then the module will be kept in the cache between
processes, but it may be deleted if another key comes processes.
along that has the same ``rest``, and a ``version`` that is considered higher than the
first one.
:todo: Versioning functionality is planned for implementation later, it is not implemented
yet. An unversioned module is not deleted by the process that creates it. Deleting such modules
does not work on NFS filesystems because the tmpdir in which the library resides is in use
until the end of the process' lifetime. Instead, unversioned modules are left in their
tmpdirs without corresponding .pkl files. These modules and their directories are erased
by subsequent processes' refresh() functions.
""" """
dirname = "" dirname = ""
"""The working directory that is managed by this interface""" """The working directory that is managed by this interface"""
module_from_name = {} module_from_name = {}
"""maps module names to loaded module objects""" """maps a module filename to the loaded module object"""
entry_from_key = {} entry_from_key = {}
"""Maps keys to the filename of a .so/.pyd. """Maps keys to the filename of a .so/.pyd.
...@@ -271,7 +272,17 @@ class ModuleCache(object): ...@@ -271,7 +272,17 @@ class ModuleCache(object):
for root, dirs, files in os.walk(self.dirname): for root, dirs, files in os.walk(self.dirname):
if os.path.join(root, 'key.pkl') in self.loaded_key_pkl: if os.path.join(root, 'key.pkl') in self.loaded_key_pkl:
continue continue
if 'key.pkl' in files: elif 'delete.me' in files or len(files)==0:
# On NFS filesystems, it is impossible to delete a directory with open
# files in it. So instead, some commands in this file will respond to a
# failed rmtree() by touching a 'delete.me' file. This file is a message
# for a future process to try deleting the directory.
try:
shutil.rmtree(root)
except:
# the directory is still in use?? We just leave it for future removal.
pass
elif 'key.pkl' in files:
key_pkl = os.path.join(root, 'key.pkl') key_pkl = os.path.join(root, 'key.pkl')
debug('refresh adding', key_pkl) debug('refresh adding', key_pkl)
try: try:
...@@ -323,8 +334,13 @@ class ModuleCache(object): ...@@ -323,8 +334,13 @@ class ModuleCache(object):
info("deleting ModuleCache entry", entry) info("deleting ModuleCache entry", entry)
del self.entry_from_key[key] del self.entry_from_key[key]
if key[0]: if key[0]:
#this is a versioned entry, so should have been on disk # this is a versioned entry, so should have been on disk
self.loaded_key_pkl.remove(os.path.join(os.path.dirname(entry), 'key.pkl')) # Something weird happened to cause this, so we are responding by
# printing a warning, removing evidence that we ever saw this mystery
# key.
pkl_file_to_remove = os.path.join(os.path.dirname(entry), 'key.pkl')
warn('Removing key file %s because the corresponding module is gone from the file system.' % pkl_file_to_remove)
self.loaded_key_pkl.remove(pkl_file_to_remove)
finally: finally:
compilelock.release_lock() compilelock.release_lock()
...@@ -430,8 +446,8 @@ class ModuleCache(object): ...@@ -430,8 +446,8 @@ class ModuleCache(object):
del self.entry_from_key[key] del self.entry_from_key[key]
parent = os.path.dirname(entry) parent = os.path.dirname(entry)
assert parent.startswith(os.path.join(self.dirname, 'tmp')) assert parent.startswith(os.path.join(self.dirname, 'tmp'))
debug("Removing cache dir", parent) info("clear_old removing cache dir", parent)
shutil.rmtree(parent) _rmtree(parent)
finally: finally:
compilelock.release_lock() compilelock.release_lock()
...@@ -460,13 +476,24 @@ class ModuleCache(object): ...@@ -460,13 +476,24 @@ class ModuleCache(object):
parent = os.path.dirname(entry) parent = os.path.dirname(entry)
assert parent.startswith(os.path.join(self.dirname, 'tmp')) assert parent.startswith(os.path.join(self.dirname, 'tmp'))
debug("Removing unversioned dir", parent) info("clear_unversioned removing cache dir", parent)
shutil.rmtree(parent) _rmtree(parent)
def _on_atexit(self): def _on_atexit(self):
self.refresh() self.refresh()
self.clear_old() self.clear_old()
self.clear_unversioned() self.clear_unversioned()
def _rmtree(parent):
try:
shutil.rmtree(parent)
except Exception, e:
try:
# mark this directory for deletion by a future refresh()
open(os.path.join(parent,'delete.me'), 'w').close()
except Exception, ee:
warning('Failed to remove or mark cache directory %s for removal' % parent, ee)
_module_cache = None _module_cache = None
def get_module_cache(dirname, force_fresh=None): def get_module_cache(dirname, force_fresh=None):
global _module_cache global _module_cache
......
...@@ -422,9 +422,14 @@ class Generic(SingletonType): ...@@ -422,9 +422,14 @@ class Generic(SingletonType):
PyObject* %(name)s; PyObject* %(name)s;
""" % locals() """ % locals()
def c_init(self, name, sub):
    """Return C code that initializes the storage variable for this type.

    The variable ``<name>`` is set to NULL so that later cleanup code
    (Py_XDECREF etc.) is safe even if c_extract is never run.
    """
    return """
%(name)s = NULL;
""" % locals()
def c_extract(self, name, sub): def c_extract(self, name, sub):
return """ return """
Py_XINCREF(py_%(name)s); Py_INCREF(py_%(name)s);
%(name)s = py_%(name)s; %(name)s = py_%(name)s;
""" % locals() """ % locals()
...@@ -435,9 +440,10 @@ class Generic(SingletonType): ...@@ -435,9 +440,10 @@ class Generic(SingletonType):
def c_sync(self, name, sub): def c_sync(self, name, sub):
return """ return """
Py_XDECREF(py_%(name)s); assert(py_%(name)s->ob_refcnt > 1);
py_%(name)s = %(name)s; Py_DECREF(py_%(name)s);
Py_XINCREF(py_%(name)s); py_%(name)s = %(name)s ? %(name)s : Py_None;
Py_INCREF(py_%(name)s);
""" % locals() """ % locals()
......
...@@ -67,6 +67,9 @@ class DimShuffle(Op): ...@@ -67,6 +67,9 @@ class DimShuffle(Op):
DimShuffle((False, False, False), [2, 0, 1]) -> AxBxC to CxAxB DimShuffle((False, False, False), [2, 0, 1]) -> AxBxC to CxAxB
DimShuffle((False, False), [0, 'x', 1]) -> AxB to Ax1xB DimShuffle((False, False), [0, 'x', 1]) -> AxB to Ax1xB
DimShuffle((False, False), [1, 'x', 0]) -> AxB to Bx1xA DimShuffle((False, False), [1, 'x', 0]) -> AxB to Bx1xA
The reordering of the dimensions can be done in numpy with the transpose function.
Adding, subtracting dimensions can be done with reshape.
""" """
def __init__(self, input_broadcastable, new_order, inplace = False): def __init__(self, input_broadcastable, new_order, inplace = False):
......
...@@ -1944,6 +1944,20 @@ def test_convert_to_complex(): ...@@ -1944,6 +1944,20 @@ def test_convert_to_complex():
b = value(numpy.ones(3, dtype='complex128')) b = value(numpy.ones(3, dtype='complex128'))
f = function([a],basic.convert_to_complex128(a)) f = function([a],basic.convert_to_complex128(a))
assert a.type.values_eq_approx(b.data, f(a.data)) assert a.type.values_eq_approx(b.data, f(a.data))
for t in ['int8','int16','int32','int64','float32']:
a = value(numpy.ones(3, dtype=t))
b = value(numpy.ones(3, dtype='complex64'))
f = function([a],basic.convert_to_complex64(a))
assert a.type.values_eq_approx(b.data, f(a.data))
#This works, but should we allow it? How well is it implemented?
for t in ['float64']:
a = value(numpy.ones(3, dtype=t))
b = value(numpy.ones(3, dtype='complex64'))
f = function([a],basic.convert_to_complex64(a))
assert a.type.values_eq_approx(b.data, f(a.data))
def test_bug_complext_10_august_09(): def test_bug_complext_10_august_09():
v0 = dmatrix() v0 = dmatrix()
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论