提交 80a1e8e0 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Remove tentacles from gof.

上级 9ab1e81d
...@@ -751,8 +751,6 @@ class CLinker(link.Linker): ...@@ -751,8 +751,6 @@ class CLinker(link.Linker):
# This ensures that, when defining functions in support code, # This ensures that, when defining functions in support code,
# we cannot have two different functions, in different modules, # we cannot have two different functions, in different modules,
# that have the same name. # that have the same name.
# It was problematic, in particular, on Mac OS X (10.6 and 10.7)
# when defining CUDA kernels (with Cuda 4.2 and 5.0). See gh-1172.
name = "node_<<<<HASH_PLACEHOLDER>>>>_%i" % node_num name = "node_<<<<HASH_PLACEHOLDER>>>>_%i" % node_num
isyms = [symbol[r] for r in node.inputs] isyms = [symbol[r] for r in node.inputs]
osyms = [symbol[r] for r in node.outputs] osyms = [symbol[r] for r in node.outputs]
......
...@@ -796,12 +796,6 @@ class ModuleCache(object): ...@@ -796,12 +796,6 @@ class ModuleCache(object):
msg='broken cache directory [EOF]', msg='broken cache directory [EOF]',
level=logging.WARNING) level=logging.WARNING)
continue continue
except ValueError:
# This can happen when we have bad config value
# in the cuda.nvcc_compiler.py file.
# We should not hide it here, as this will cause
# an unrelated error to appear.
raise
except Exception: except Exception:
unpickle_failure() unpickle_failure()
if delete_if_problem: if delete_if_problem:
...@@ -1323,7 +1317,7 @@ class ModuleCache(object): ...@@ -1323,7 +1317,7 @@ class ModuleCache(object):
to -1 in order to delete all unversioned cached modules regardless to -1 in order to delete all unversioned cached modules regardless
of their age. of their age.
clear_base_files : bool clear_base_files : bool
If True, then delete base directories 'cuda_ndarray', 'cutils_ext', If True, then delete base directories 'cutils_ext',
'lazylinker_ext' and 'scan_perform' if they are present. 'lazylinker_ext' and 'scan_perform' if they are present.
If False, those directories are left intact. If False, those directories are left intact.
delete_if_problem delete_if_problem
...@@ -1340,8 +1334,8 @@ class ModuleCache(object): ...@@ -1340,8 +1334,8 @@ class ModuleCache(object):
def clear_base_files(self): def clear_base_files(self):
""" """
Remove base directories 'cuda_ndarray', 'cutils_ext', 'lazylinker_ext' Remove base directories 'cutils_ext', 'lazylinker_ext' and
and 'scan_perform' if present. 'scan_perform' if present.
Note that we do not delete them outright because it may not work on Note that we do not delete them outright because it may not work on
some systems due to these modules being currently in use. Instead we some systems due to these modules being currently in use. Instead we
...@@ -1350,8 +1344,7 @@ class ModuleCache(object): ...@@ -1350,8 +1344,7 @@ class ModuleCache(object):
""" """
with compilelock.lock_ctx(): with compilelock.lock_ctx():
for base_dir in ('cuda_ndarray', 'cutils_ext', 'lazylinker_ext', for base_dir in ('cutils_ext', 'lazylinker_ext', 'scan_perform'):
'scan_perform'):
to_delete = os.path.join(self.dirname, base_dir + '.delete.me') to_delete = os.path.join(self.dirname, base_dir + '.delete.me')
if os.path.isdir(to_delete): if os.path.isdir(to_delete):
try: try:
......
...@@ -215,7 +215,7 @@ class Apply(Node): ...@@ -215,7 +215,7 @@ class Apply(Node):
strict : bool strict : bool
If True, the type fields of all the inputs must be equal If True, the type fields of all the inputs must be equal
to the current ones (or compatible, for instance Tensor / to the current ones (or compatible, for instance Tensor /
CudaNdarray of the same dtype and broadcastable patterns, GpuArray of the same dtype and broadcastable patterns,
in which case they will be converted into current Type), and in which case they will be converted into current Type), and
returned outputs are guaranteed to have the same types as returned outputs are guaranteed to have the same types as
self.outputs. If False, then there's no guarantee that the self.outputs. If False, then there's no guarantee that the
...@@ -307,7 +307,7 @@ class Variable(Node): ...@@ -307,7 +307,7 @@ class Variable(Node):
- `SparseVariable` subclass of Variable that represents - `SparseVariable` subclass of Variable that represents
a scipy.sparse.{csc,csr}_matrix object. a scipy.sparse.{csc,csr}_matrix object.
- `CudaNdarrayVariable` subclass of Variable that represents our object on - `GpuArrayVariable` subclass of Variable that represents our object on
the GPU that is a subset of numpy.ndarray. the GPU that is a subset of numpy.ndarray.
- `RandomVariable`. - `RandomVariable`.
......
...@@ -15,8 +15,6 @@ from theano.gof.graph import ( ...@@ -15,8 +15,6 @@ from theano.gof.graph import (
is_same_graph, Variable) is_same_graph, Variable)
from theano.gof.op import Op from theano.gof.op import Op
from theano.gof.type import Type from theano.gof.type import Type
from theano.sandbox.cuda.var import (
CudaNdarrayVariable, CudaNdarrayConstant, CudaNdarraySharedVariable)
def as_variable(x): def as_variable(x):
...@@ -386,22 +384,6 @@ class TestAutoName: ...@@ -386,22 +384,6 @@ class TestAutoName:
assert r2.auto_name == "auto_" + str(autoname_id + 1) assert r2.auto_name == "auto_" + str(autoname_id + 1)
assert r3.auto_name == "auto_" + str(autoname_id + 2) assert r3.auto_name == "auto_" + str(autoname_id + 2)
def test_cudandarrayvariable(self):
# Get counter value
autoname_id = next(Variable.__count__)
Variable.__count__ = count(autoname_id)
mytype = tensor.TensorType(dtype='int32', broadcastable=())
r1 = CudaNdarrayVariable(type='int32')
r2 = CudaNdarrayVariable(type='int32')
r3 = CudaNdarrayConstant(type=mytype,
data=1)
r4 = CudaNdarraySharedVariable(name='x', type=mytype,
value=1, strict=False)
assert r1.auto_name == "auto_" + str(autoname_id)
assert r2.auto_name == "auto_" + str(autoname_id + 1)
assert r3.auto_name == "auto_" + str(autoname_id + 2)
assert r4.auto_name == "auto_" + str(autoname_id + 3)
def test_randomvariable(self): def test_randomvariable(self):
# Get counter value # Get counter value
autoname_id = next(Variable.__count__) autoname_id = next(Variable.__count__)
......
...@@ -279,21 +279,6 @@ if run_memory_usage_tests: ...@@ -279,21 +279,6 @@ if run_memory_usage_tests:
# these are not normal unit tests, do not run them as part of standard # these are not normal unit tests, do not run them as part of standard
# suite. I ran them while looking at top, and stopped when memory usage # suite. I ran them while looking at top, and stopped when memory usage
# was stable. # was stable.
def test_leak2():
import theano.sandbox.cuda as cuda
for i in xrange(1000000):
n = np.asarray([2.3, 4.5], dtype='f')
c = sys.getrefcount(n)
a = cuda.CudaNdarray(n)
a.sum()
assert c == sys.getrefcount(n)
del a
if not i % 1000:
print('.', end=' ')
print(gc.collect(), end=' ')
print(gc.collect())
sys.stdout.flush()
def test_no_leak_many_graphs(): def test_no_leak_many_graphs():
# Verify no memory leaks when creating and deleting a lot of functions # Verify no memory leaks when creating and deleting a lot of functions
......
...@@ -313,8 +313,9 @@ class PureType(object): ...@@ -313,8 +313,9 @@ class PureType(object):
Convert a symbolic variable into this Type, if compatible. Convert a symbolic variable into this Type, if compatible.
For the moment, the only Types compatible with one another are For the moment, the only Types compatible with one another are
TensorType and CudaNdarrayType, provided they have the same TensorType and GpuArrayType, provided they have the same
number of dimensions, same broadcasting pattern, and same dtype. number of dimensions, same broadcasting pattern, and same
dtype.
If Types are not compatible, a TypeError should be raised. If Types are not compatible, a TypeError should be raised.
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论