提交 1c9ac3df authored 作者: Frederic's avatar Frederic

Test the nvidia driver on reduction when loading the gpu backend.

Raise an exception if the test fails. This is because some nvidia driver versions don't correctly handle our reduction code.
上级 05b2b619
......@@ -99,6 +99,11 @@ import gof
if config.device.startswith('gpu') or config.init_gpu_device.startswith('gpu'):
import theano.sandbox.cuda
# We can't test the driver during import of theano.sandbox.cuda as
# this causes a circular import dependency. So we also test it manually
# after the import.
import theano.sandbox.cuda.tests.test_driver
theano.sandbox.cuda.tests.test_driver.test_nvidia_driver1()
# Use config.numpy to call numpy.seterr
import numpy
......
import atexit, logging, os, shutil, stat, sys
import numpy
import theano
from theano.compile import optdb
from theano.gof.cmodule import get_lib_extension
from theano.configparser import config, AddConfigVar, StrParam
......@@ -133,6 +137,9 @@ if cuda_available:
os.symlink(cuda_ndarray_so, libcuda_ndarray_so)
try:
# This only tests if the cuda driver is available and if there
# is at least one GPU that supports cuda. This does not select a
# device.
gpu_init()
cuda_available = True
cuda_initialization_error_message = ""
......@@ -184,7 +191,8 @@ def use(device,
force=False,
default_to_move_computation_to_gpu=True,
move_shared_float32_to_gpu=True,
enable_cuda=True):
enable_cuda=True,
test_driver=True):
"""
Error and warning about CUDA should be displayed only when this
function is called. We need to be able to load this module only
......@@ -246,10 +254,13 @@ def use(device,
try:
if device != 'gpu':
gpu_init(device)
use.device_number = device
if test_driver:
import theano.sandbox.cuda.tests.test_driver
theano.sandbox.cuda.tests.test_driver.test_nvidia_driver1()
if move_shared_float32_to_gpu:
handle_shared_float32(True)
use.device_number = device
if enable_cuda:
cuda_enabled = True
......@@ -302,8 +313,10 @@ def handle_shared_float32(tf):
else:
raise NotImplementedError('removing our handler')
# We can't test the driver during import here as this causes a circular
# import dependency. So we also test it in the file theano/__init__.py
if config.device.startswith('gpu'):
use(device=config.device, force=config.force_device)
use(device=config.device, force=config.force_device, test_driver=False)
elif config.init_gpu_device:
assert config.device == "cpu", (
"We can use the Theano flag init_gpu_device"
......@@ -318,4 +331,4 @@ elif config.init_gpu_device:
force=config.force_device,
default_to_move_computation_to_gpu=False,
move_shared_float32_to_gpu=False,
enable_cuda=False)
enable_cuda=False, test_driver=False)
import numpy
import theano
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda_ndarray
# Skip the whole module when the optional cuda backend is unavailable;
# `not x` is the idiomatic form of the `x == False` comparison.
if not cuda_ndarray.cuda_available:
    raise SkipTest('Optional package cuda disabled')
import theano.sandbox.cuda as cuda
import theano.sandbox.cuda.basic_ops as B
# Pick a GPU-enabled compilation mode for the tests. FAST_COMPILE skips
# most graph optimizations (including the moves to the GPU), so we
# substitute FAST_RUN in that case to make sure gpu ops are introduced.
if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
def test_nvidia_driver1():
    """Check that the installed nvidia driver computes reductions correctly.

    Some nvidia driver versions give bad results for reduction (GpuSum).
    This sums a random float32 vector on the GPU and compares against the
    numpy result; a mismatch raises an Exception telling the user to
    install the driver shipped alongside the cuda package.
    """
    a = numpy.random.rand(10000).astype("float32")
    A = cuda.shared_constructor(a)
    f = theano.function(inputs=[], outputs=A.sum(), mode=mode_with_gpu)
    topo = f.maker.env.toposort()
    # Expect exactly two nodes: the GpuSum and the transfer back to host.
    assert len(topo) == 2
    assert sum(isinstance(node.op, B.GpuSum) for node in topo) == 1
    if not numpy.allclose(f(), a.sum()):
        # NOTE: the original message concatenated "reduction." directly
        # with "Installing" (missing separator); fixed here.
        raise Exception("The nvidia driver version installed with the OS "
                        "does not give correct results for reductions. "
                        "Installing the nvidia driver available on the same "
                        "download page as the cuda package will fix the "
                        "problem: http://developer.nvidia.com/cuda-downloads")
def test_nvidia_driver2():
    """Check that manually creating a shared variable on the gpu makes
    theano initialize the gpu device.

    The driver should always be tested during theano's initialization
    of the gpu device, so after the call `use.device_number` must be set.
    """
    data = numpy.random.rand(10000).astype("float32")
    cuda.shared_constructor(data)
    assert theano.sandbox.cuda.use.device_number is not None
def test_nvidia_driver3():
    """Check that building a function with a gpu op makes theano
    initialize the gpu device.

    The driver should always be tested during theano's initialization
    of the gpu device, so after compilation `use.device_number` must be
    set.
    """
    var = cuda.fvector()
    f = theano.function([var], var + 1, mode=mode_with_gpu)
    topo = f.maker.env.toposort()
    # Generator expression: no need to materialize a list just for any().
    assert any(isinstance(node.op, cuda.GpuElemwise) for node in topo)
    assert theano.sandbox.cuda.use.device_number is not None
# TODO: make sure the test_nvidia_driver tests are executed when we manually
# make a CudaNdarray like this: cuda.CudaNdarray.zeros((5, 4))
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论