Commit 79f84e0f authored by Frederic

[CRASH] skip cudnn tests on gpu that are too old.

上级 01e852ae
......@@ -7,7 +7,7 @@ from theano.gof.type import CDataType
from theano.compat import PY3
from theano.compat.six import StringIO
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda import GpuOp
from theano.sandbox.cuda import GpuOp, active_device_number, device_properties
from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
gpu_contiguous)
from theano.sandbox.cuda.blas import GpuConv
......@@ -16,6 +16,23 @@ from theano.sandbox.cuda.nnet import GpuSoftmax
from theano.sandbox.cuda.nvcc_compiler import NVCC_compiler
def dnn_available():
    """Return True if cuDNN can be used on the active GPU.

    The answer is computed once and cached on the function object
    (``dnn_available.avail``); ``dnn_available.msg`` holds a
    human-readable reason when cuDNN is unavailable.
    """
    if dnn_available.avail is not None:
        # Cached from a previous call.
        return dnn_available.avail
    dev = active_device_number()
    if device_properties(dev)['major'] < 3:
        # cuDNN needs a GPU of compute capability >= 3.0.
        dnn_available.msg = "Device not supported by cuDNN"
        dnn_available.avail = False
    else:
        # Probe the linker for libcudnn; the message is only
        # meaningful when the probe fails.
        dnn_available.msg = "Can not find the cuDNN library"
        dnn_available.avail = theano.gof.cmodule.GCC_compiler.try_flags(
            ["-l", "cudnn"])
    return dnn_available.avail


# Cache slots: None means "not yet checked".
dnn_available.avail = None
dnn_available.msg = None
class DnnBase(GpuOp):
"""
Creates a handle for cudnn and pulls in the cudnn libraries and headers.
......
......@@ -578,8 +578,8 @@ def test_gemm_valid():
def test_dnn_valid():
    """Yield the 'valid'-mode convolution cases run through cuDNN.

    Skipped when the GPU is too old for cuDNN or the library is absent.
    """
    props = cuda.device_properties(cuda.active_device_number())
    if props['major'] < 3:
        raise SkipTest('Current GPU too old')
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    cudnn_mode = theano_mode.including("cudnn")
    for case in _test_valid(GpuDnnConv, mode=cudnn_mode):
        yield case
......@@ -659,8 +659,8 @@ def test_gemm_full():
def test_dnn_full():
    """Yield the 'full'-mode convolution cases run through cuDNN.

    Skipped when the GPU is too old for cuDNN or the library is absent.
    """
    props = cuda.device_properties(cuda.active_device_number())
    if props['major'] < 3:
        raise SkipTest('Current GPU too old')
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    cudnn_mode = theano_mode.including("cudnn")
    for case in _test_full(GpuDnnConv, mode=cudnn_mode):
        yield case
......@@ -711,8 +711,8 @@ def test_gemm_subsample():
def test_dnn_subsample():
    """Yield the subsampled convolution cases run through cuDNN.

    Skipped when the GPU is too old for cuDNN or the library is absent.
    """
    props = cuda.device_properties(cuda.active_device_number())
    if props['major'] < 3:
        raise SkipTest('Current GPU too old')
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    cudnn_mode = theano_mode.including('cudnn')
    for case in _test_subsample(GpuDnnConv, cudnn_mode):
        yield case
......@@ -909,6 +909,10 @@ def conv_grad(mode, bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsample, op):
def test_conv_grads():
if cuda.device_properties(cuda.active_device_number())['major'] < 3:
ops = [gemm_op]
else:
ops = [gemm_op, dnn_op]
for mode in 'valid', 'full':
for bs in [1, 5]:
for ch in [4]:
......@@ -918,7 +922,7 @@ def test_conv_grads():
for rFlt1 in [1, 2]:
for rFlt2 in [1, 2]:
for subsample in (1, 1), (1, 2), (2, 2):
for op in [gemm_op, dnn_op]:
for op in ops:
yield (conv_grad, mode, bs, ch, nf,
rImg1, rImg2, rFlt1, rFlt2,
subsample, op)
......
......@@ -301,6 +301,9 @@ class test_SoftMax(unittest.TestCase):
self._cmp(0, 10, f, f_gpu)
def test_cudnn_softmax(self):
if not cuda.dnn.dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
def cmp(n, m, f, f_gpu):
data = numpy.arange(n * m, dtype='float32').reshape(n, m)
gdata = numpy.asarray(data)[:, :, None, None]
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论