Commit b8665e9f authored by Frederic Bastien

better skip message and replace the theano_cuda_ndarray name to theano.sandbox.cuda in logger

Parent 0d20ed2e
@@ -4,7 +4,7 @@ from theano.compile import optdb
 import theano.config as config
 import logging, copy
-_logger_name = 'theano_cuda_ndarray'
+_logger_name = 'theano.sandbox.cuda'
 _logger = logging.getLogger(_logger_name)
 _logger.setLevel(logging.INFO)
 _logger.addHandler(logging.StreamHandler())
@@ -110,9 +110,9 @@ def use(device=config.THEANO_GPU):
             handle_shared_float32(True)
             use.device_number = device
         except RuntimeError, e:
-            logging.getLogger('theano_cuda_ndarray').warning("WARNING: Won't use the GPU as the initialisation of device %i failed. %s" %(device, e))
+            logging.getLogger('theano.sandbox.cuda').warning("WARNING: Won't use the GPU as the initialisation of device %i failed. %s" %(device, e))
     elif use.device_number != device:
-        logging.getLogger('theano_cuda_ndarray').warning("WARNING: ignoring call to use(%s), GPU number %i is already in use." %(str(device), use.device_number))
+        logging.getLogger('theano.sandbox.cuda').warning("WARNING: ignoring call to use(%s), GPU number %i is already in use." %(str(device), use.device_number))
     optdb.add_tags('gpu',
                    'fast_run',
                    'inplace')
......
@@ -10,7 +10,7 @@ from theano.sandbox.cuda import filter as type_support_filter
 from theano.sandbox.cuda.elemwise import NaiveAlgo
 import logging, copy
-_logger_name = 'theano_cuda_ndarray.basic_ops'
+_logger_name = 'theano.sandbox.cuda.basic_ops'
 _logger = logging.getLogger(_logger_name)
 _logger.setLevel(logging.INFO)
 _logger.addHandler(logging.StreamHandler()) #TO REMOVE
......
@@ -11,7 +11,7 @@ from theano import Op, Type, Apply, Variable, Constant
 from theano import tensor, scalar
 import logging, copy
-_logger_name = 'theano_cuda_ndarray.elemwise'
+_logger_name = 'theano.sandbox.cuda.elemwise'
 _logger = logging.getLogger(_logger_name)
 _logger.setLevel(logging.INFO)
 _logger.addHandler(logging.StreamHandler()) #TO REMOVE
......
@@ -3,7 +3,7 @@ from theano.gof.cmodule import (std_libs, std_lib_dirs, std_include_dirs, dlimpo
         get_lib_extension)
 import theano.config as config
-_logger=logging.getLogger("theano_cuda_ndarray.nvcc_compiler")
+_logger=logging.getLogger("theano.sandbox.cuda.nvcc_compiler")
 _logger.setLevel(logging.WARN)
 def error(*args):
......
@@ -12,7 +12,7 @@ import theano.tensor as T
 from nose.plugins.skip import SkipTest
 import theano.sandbox.cuda as cuda_ndarray
 if cuda_ndarray.enable_cuda == False:
-    raise SkipTest('Optional package cuda_ndarray not available')
+    raise SkipTest('Optional package cuda disabled')
 import theano.sandbox.cuda as tcn
 import theano.sandbox.cuda as cuda
......
@@ -271,7 +271,7 @@ def test_bench_elemwise(n_iter=1000, **kwargs):
 from nose.plugins.skip import SkipTest
 import theano.sandbox.cuda as cuda_ndarray
 if cuda_ndarray.enable_cuda == False:
-    raise SkipTest('Optional package cuda_ndarray not available')
+    raise SkipTest('Optional package cuda disabled')
 import theano.sandbox.cuda
 theano.sandbox.cuda.use()
......
@@ -9,7 +9,7 @@ import numpy
 from nose.plugins.skip import SkipTest
 import theano.sandbox.cuda as cuda_ndarray
 if cuda_ndarray.enable_cuda == False:
-    raise SkipTest('Optional package cuda_ndarray not available')
+    raise SkipTest('Optional package cuda disabled')
 import theano.sandbox.cuda as tcn
......
@@ -4,7 +4,7 @@ import numpy
 from nose.plugins.skip import SkipTest
 import theano.sandbox.cuda as cuda_ndarray
 if cuda_ndarray.enable_cuda == False:
-    raise SkipTest('Optional package cuda_ndarray not available')
+    raise SkipTest('Optional package cuda disabled')
 def py_conv_valid_numpy(img, kern):
     assert img.shape[1] == kern.shape[1]
......
@@ -3,7 +3,7 @@ import theano.sandbox.cuda as cuda_ndarray
 # Skip test if cuda_ndarray is not available.
 from nose.plugins.skip import SkipTest
 if cuda_ndarray.enable_cuda == False:
-    raise SkipTest('Optional package cuda_ndarray not available')
+    raise SkipTest('Optional package cuda disabled')
 import numpy
 def test_host_to_device():
......
@@ -15,7 +15,7 @@ import numpy
 from nose.plugins.skip import SkipTest
 import theano.sandbox.cuda as cuda_ndarray
 if cuda_ndarray.enable_cuda == False:
-    raise SkipTest('Optional package cuda_ndarray not available')
+    raise SkipTest('Optional package cuda disabled')
 import theano.sandbox.cuda as tcn
......
@@ -9,7 +9,7 @@ import numpy
 from nose.plugins.skip import SkipTest
 import theano.sandbox.cuda as cuda_ndarray
 if cuda_ndarray.enable_cuda == False:
-    raise SkipTest('Optional package cuda_ndarray not available')
+    raise SkipTest('Optional package cuda disabled')
 import theano.compile.mode
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment