提交 74eb81d0 authored 作者: Frederic Bastien's avatar Frederic Bastien

create a new flag init_gpu_device that selects the gpu to use, but doesn't…

create a new flag init_gpu_device that selects the gpu to use, but doesn't automatically move computation to it.
上级 c2db6174
...@@ -78,7 +78,7 @@ import scalar ...@@ -78,7 +78,7 @@ import scalar
import gradient import gradient
import gof import gof
if config.device.startswith('gpu'): if config.device.startswith('gpu') or config.init_gpu_device.startswith('gpu'):
import theano.sandbox.cuda import theano.sandbox.cuda
## import scalar_opt ## import scalar_opt
......
...@@ -18,10 +18,15 @@ AddConfigVar('floatX', ...@@ -18,10 +18,15 @@ AddConfigVar('floatX',
#gpu mean let the driver select the gpu. Needed in case of gpu in exclusive mode. #gpu mean let the driver select the gpu. Needed in case of gpu in exclusive mode.
#gpuX mean use the gpu number X. #gpuX mean use the gpu number X.
AddConfigVar('device', AddConfigVar('device',
"Default device for computations", "Default device for computations. If gpu, try to move computation to it when possible.",
EnumStr('cpu', 'gpu',*['gpu%i'%i for i in range(4)]) EnumStr('cpu', 'gpu',*['gpu%i'%i for i in range(4)])
) )
AddConfigVar('init_gpu_device',
"Gpu device to use for computations, but don't automatically try to move the computation to this device. Usefull to run the test on a specific gpu.",
EnumStr('', *['gpu%i'%i for i in range(4)])
)
AddConfigVar('force_device', AddConfigVar('force_device',
"Raise an error if we can't use the specified device", "Raise an error if we can't use the specified device",
BoolParam(False) BoolParam(False)
......
...@@ -130,7 +130,7 @@ if cuda_available: ...@@ -130,7 +130,7 @@ if cuda_available:
import cuda_ndarray import cuda_ndarray
def use(device, force=False): def use(device, force=False, move_to_gpu_automatically = True):
global cuda_enabled, cuda_initialization_error_message global cuda_enabled, cuda_initialization_error_message
if force and not cuda_available and device.startswith('gpu'): if force and not cuda_available and device.startswith('gpu'):
raise EnvironmentError("You forced use of device %s, but CUDA initialization failed " raise EnvironmentError("You forced use of device %s, but CUDA initialization failed "
...@@ -172,6 +172,7 @@ def use(device, force=False): ...@@ -172,6 +172,7 @@ def use(device, force=False):
elif use.device_number != device: elif use.device_number != device:
_logger.warning("WARNING: ignoring call to use(%s), GPU number %i is already in use." %(str(device), use.device_number)) _logger.warning("WARNING: ignoring call to use(%s), GPU number %i is already in use." %(str(device), use.device_number))
if move_to_gpu_automatically:
optdb.add_tags('gpu', optdb.add_tags('gpu',
'fast_run', 'fast_run',
'inplace') 'inplace')
...@@ -201,3 +202,6 @@ def handle_shared_float32(tf): ...@@ -201,3 +202,6 @@ def handle_shared_float32(tf):
if config.device.startswith('gpu'): if config.device.startswith('gpu'):
use(config.device, config.force_device) use(config.device, config.force_device)
elif config.init_gpu_device:
print "Will init the gpu to use a specific gpu device. This don't move automatically cpu code to gpu. For that try the theano flags device."
use(config.init_gpu_device, config.force_device, False)
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论