提交 9a7c799b authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #6146 from slefrancois/pr_win

add windows pr tests
......@@ -17,6 +17,9 @@ export PATH=/usr/local/cuda/bin:${PATH}
export DYLD_LIBRARY_PATH=/usr/local/cuda/lib:${DYLD_LIBRARY_PATH}
export CPLUS_INCLUDE_PATH=/usr/local/cuda/include:${HOME}/cuda/include:${CPLUS_INCLUDE_PATH}
# CUDNN
export CUDNNPATH=${HOME}/cuda
# Build libgpuarray
GPUARRAY_CONFIG="Release"
DEVICE=cuda
......@@ -61,4 +64,4 @@ set -x
# Fast run and float32
FILE=${BUILDBOT_DIR}/theano_python_fastrun_f32_tests.xml
NAME=mac_fastrun_f32
THEANO_FLAGS=$THEANO_FLAGS,base_compiledir=$BASECOMPILEDIR,mode=FAST_RUN,warn.ignore_bug_before=all,on_opt_error=raise,on_shape_error=raise,floatX=float32,dnn.library_path=${HOME}/cuda/lib,gcc.cxxflags="-L${LIBDIR}/lib" python bin/theano-nose ${THEANO_PARAM} ${XUNIT}${FILE} ${SUITE}${NAME}
\ No newline at end of file
THEANO_FLAGS=$THEANO_FLAGS,base_compiledir=$BASECOMPILEDIR,mode=FAST_RUN,warn.ignore_bug_before=all,on_opt_error=raise,on_shape_error=raise,floatX=float32,dnn.base_path=${CUDNNPATH},gcc.cxxflags="-L${LIBDIR}/lib" python bin/theano-nose ${THEANO_PARAM} ${XUNIT}${FILE} ${SUITE}${NAME}
\ No newline at end of file
REM CUDNN PATH
set CUDNNPATH=C:\lib\cuda
REM Set conda python, cuda, cmake path
set PATH=%PATH%;C:\ProgramData\Miniconda2;%CUDNNPATH%\bin;C:\Program Files\CMake\bin
set BUILDBOT_DIR=%WORKSPACE%\nightly_build
set COMPILEDIR=C:\\Jenkins\\theano_cache\\buildbot_windows
set COMPILEDIR=C:\Jenkins\theano_cache\buildbot_windows
REM Set test reports using nosetests xunit
set XUNIT=--with-xunit --xunit-file=
......@@ -13,6 +19,7 @@ REM Build libgpuarray
set GPUARRAY_CONFIG="Release"
set DEVICE=cuda
set LIBDIR=%WORKSPACE%\local
set PATH=%PATH%;%LIBDIR%\bin
REM Make fresh clones of libgpuarray (with no history since we don't need it)
rmdir libgpuarray /s/q
......@@ -23,18 +30,14 @@ rmdir %LIBDIR% /s/q
mkdir %LIBDIR%
REM Build libgpuarray
set PATH=%PATH%;C:\Program Files\CMake\bin
mkdir libgpuarray\build
cd libgpuarray\build
cmake .. -DCMAKE_BUILD_TYPE=%GPUARRAY_CONFIG% -G "NMake Makefiles"
cmake .. -DCMAKE_BUILD_TYPE=%GPUARRAY_CONFIG% -G "NMake Makefiles" -DCMAKE_INSTALL_PREFIX=%LIBDIR%
nmake
cmake --build . --target install
cd ..\..
REM Copy lib and export paths
C:\Windows\System32\robocopy /E libgpuarray C:\lib\libgpuarray > nul
set PATH=%PATH%;C:\libgpuarray\lib;C:\lib\cuda\bin
REM Set conda python path
REM Set conda gcc path
set PATH=%PATH%;C:\ProgramData\Miniconda2;C:\ProgramData\Miniconda2\Library\mingw-w64\bin;C:\ProgramData\Miniconda2\Library\usr\bin;C:\ProgramData\Miniconda2\Library\bin;C:\ProgramData\Miniconda2\Scripts
REM Build the pygpu modules
......@@ -52,5 +55,5 @@ echo "Directory of stdout/stderr %BUILDBOT_DIR%"
REM Fast run and float32
set FILE=%BUILDBOT_DIR%\theano_python2_fastrun_f32_tests.xml
set NAME=win_fastrun_f32
set THEANO_FLAGS=%THEANO_FLAGS%,compiledir=%COMPILEDIR%,mode=FAST_RUN,warn.ignore_bug_before=all,on_opt_error=raise,on_shape_error=raise,floatX=float32,dnn.include_path=C:\\lib\\cuda\\include,dnn.library_path=C:\\lib\\cuda\\lib\\x64,gcc.cxxflags='-I"C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\include" -I"C:\\lib\\libgpuarray\\src" -L"C:\\Program Files\\NVIDIA GPU Computing Toolkit\CUDA\\v8.0\\lib\\x64" -LC:\\lib\\libgpuarray\\lib'
set THEANO_FLAGS=%THEANO_FLAGS%,compiledir=%COMPILEDIR:\=\\%,mode=FAST_RUN,warn.ignore_bug_before=all,on_opt_error=raise,on_shape_error=raise,floatX=float32,dnn.base_path="%CUDNNPATH%",gcc.cxxflags='-I%LIBDIR:\=\\%\\include -L%LIBDIR:\=\\%\\lib'
python bin\theano-nose %THEANO_PARAM% %XUNIT%%FILE% %SUITE%%NAME%
......@@ -18,6 +18,9 @@ export PATH=/usr/local/cuda/bin:${PATH}
export DYLD_LIBRARY_PATH=/usr/local/cuda/lib:${DYLD_LIBRARY_PATH}
export CPLUS_INCLUDE_PATH=/usr/local/cuda/include:${HOME}/cuda/include:${CPLUS_INCLUDE_PATH}
# CUDNN
export CUDNNPATH=${HOME}/cuda
# Build libgpuarray
GPUARRAY_CONFIG="Release"
DEVICE=cuda
......@@ -53,5 +56,5 @@ python -c 'import pygpu; print(pygpu.__file__)'
# Testing theano
THEANO_PARAM="theano --with-timer --timer-top-n 10 --with-xunit --xunit-file=theano_mac_pr_tests.xml"
FLAGS=init_gpu_device=$DEVICE,gpuarray.preallocate=1000,mode=FAST_RUN,on_opt_error=raise,on_shape_error=raise,cmodule.age_thresh_use=604800,base_compiledir=$BASECOMPILEDIR,dnn.library_path=$HOME/cuda/lib,gcc.cxxflags="-L${LIBDIR}/lib"
FLAGS=init_gpu_device=$DEVICE,gpuarray.preallocate=1000,mode=FAST_RUN,on_opt_error=raise,on_shape_error=raise,cmodule.age_thresh_use=604800,base_compiledir=$BASECOMPILEDIR,dnn.base_path=${CUDNNPATH},gcc.cxxflags="-L${LIBDIR}/lib"
THEANO_FLAGS=${FLAGS} python bin/theano-nose ${THEANO_PARAM}
REM CUDNN PATH
set CUDNNPATH=C:\lib\cuda
REM Set conda python, cuda, cmake path
set PATH=%PATH%;C:\ProgramData\Miniconda2;%CUDNNPATH%\bin;C:\Program Files\CMake\bin
REM Set cache dir and copy from master
set COMPILEDIR=%WORKSPACE%\cache
C:\Windows\System32\robocopy /E /purge C:\Jenkins\theano_cache\buildbot_windows %COMPILEDIR% > nul
set THEANO_FLAGS=init_gpu_device=cuda
REM Build libgpuarray
set GPUARRAY_CONFIG="Release"
set DEVICE=cuda
set LIBDIR=%WORKSPACE%\local
set PATH=%PATH%;%LIBDIR%\bin
REM Make fresh clones of libgpuarray (with no history since we don't need it)
rmdir libgpuarray /s/q
git clone --depth 1 "https://github.com/Theano/libgpuarray.git"
REM Clean up previous installs (to make sure no old files are left)
rmdir %LIBDIR% /s/q
mkdir %LIBDIR%
REM Build libgpuarray
rmdir libgpuarray\build /s/q
mkdir libgpuarray\build
cd libgpuarray\build
cmake .. -DCMAKE_BUILD_TYPE=%GPUARRAY_CONFIG% -G "NMake Makefiles" -DCMAKE_INSTALL_PREFIX=%LIBDIR%
nmake
cmake --build . --target install
cd ..\..
REM Add conda gcc toolchain path
set PATH=%PATH%;C:\ProgramData\Miniconda2\Library\mingw-w64\bin;C:\ProgramData\Miniconda2\Library\usr\bin;C:\ProgramData\Miniconda2\Library\bin;C:\ProgramData\Miniconda2\Scripts
REM Build the pygpu modules
cd libgpuarray
python setup.py build_ext --inplace
mkdir %LIBDIR%\lib\python
set PYTHONPATH=%PYTHONPATH%;%LIBDIR%\lib\python
REM Then install
python setup.py install --home=%LIBDIR%
cd ..
set THEANO_PARAM=theano --with-timer --timer-top-n 10 --with-xunit --xunit-file=theano_win_pr_tests.xml
set NAME=pr_win
set THEANO_FLAGS=%THEANO_FLAGS%,mode=FAST_RUN,floatX=float32,on_opt_error=raise,on_shape_error=raise,cmodule.age_thresh_use=604800,compiledir=%COMPILEDIR:\=\\%,dnn.base_path="%CUDNNPATH%",gcc.cxxflags='-I%LIBDIR:\=\\%\\include -L%LIBDIR:\=\\%\\lib'
python bin\theano-nose %THEANO_PARAM% --xunit-testsuite-name=%NAME%
......@@ -232,6 +232,41 @@ AddConfigVar('gpuarray.single_stream',
in_c_key=False)
def get_cuda_root():
    """Locate the root of the CUDA installation.

    Checks the ``CUDA_ROOT`` and ``CUDA_PATH`` environment variables
    first, then falls back to scanning ``PATH`` for the directory that
    contains the nvcc compiler and returns that directory's parent
    (i.e. the install root).

    Returns
    -------
    str
        The CUDA root directory, or '' when CUDA cannot be located.
    """
    # We look for the cuda path since we need headers from there
    for var in ("CUDA_ROOT", "CUDA_PATH"):
        v = os.getenv(var, "")
        if v:
            return v
    s = os.getenv("PATH")
    if not s:
        return ''
    for path_dir in s.split(os.path.pathsep):
        # On Windows the compiler binary is "nvcc.exe", so check both names.
        if (os.path.exists(os.path.join(path_dir, "nvcc")) or
                os.path.exists(os.path.join(path_dir, "nvcc.exe"))):
            return os.path.dirname(os.path.abspath(path_dir))
    return ''
AddConfigVar('cuda.root',
"Location of the cuda installation",
StrParam(get_cuda_root),
in_c_key=False)
def default_cuda_include():
    """Default CUDA include directory: ``<cuda.root>/include``, or ''."""
    root = theano.config.cuda.root
    if not root:
        return ''
    return os.path.join(root, 'include')
AddConfigVar('cuda.include_path',
"Location of the cuda includes",
StrParam(default_cuda_include),
in_c_key=False)
def safe_no_dnn_workmem(workmem):
"""
Make sure the user is not attempting to use dnn.conv.workmem`.
......@@ -325,39 +360,68 @@ AddConfigVar('dnn.conv.precision',
in_c_key=False)
def get_cuda_root():
v = os.getenv('CUDA_ROOT', "")
if v:
return v
s = os.getenv("PATH")
if not s:
return ''
for dir in s.split(os.path.pathsep):
if os.path.exists(os.path.join(dir, "nvcc")):
return os.path.dirname(os.path.abspath(dir))
# We want to default to the cuda root if cudnn is installed there
def default_dnn_base_path():
    """Default cuDNN base path: the CUDA root, if cudnn.h is installed there."""
    root = get_cuda_root()
    if not root:
        return ''
    # The include directory doesn't change location between OSes.
    header = os.path.join(root, 'include', 'cudnn.h')
    return root if os.path.exists(header) else ''
def default_dnn_include_path():
    """Default cudnn header directory: ``<cuda root>/include``, or ''."""
    root = get_cuda_root()
    if not root:
        return ''
    return os.path.join(root, 'include')
AddConfigVar('dnn.base_path',
"Install location of cuDNN.",
StrParam(default_dnn_base_path),
in_c_key=False)
def default_dnn_inc_path():
    """Default cudnn header directory: ``<dnn.base_path>/include``, or ''."""
    base = theano.config.dnn.base_path
    return os.path.join(base, 'include') if base != '' else ''
AddConfigVar('dnn.include_path',
"Location of the cudnn header (defaults to the cuda root)",
# We keep the default here since cudnn needs headers from cuda
StrParam(default_dnn_include_path()),
# Added elsewhere in the c key only when needed.
"Location of the cudnn header",
StrParam(default_dnn_inc_path),
in_c_key=False)
def default_dnn_lib_path():
    """Default cudnn link-library directory.

    ``<dnn.base_path>/lib`` on most platforms, ``<dnn.base_path>/lib/x64``
    on Windows, '' when no base path is configured.
    """
    base = theano.config.dnn.base_path
    if base == '':
        return ''
    parts = [base, 'lib']
    if sys.platform == 'win32':
        # On Windows the import library lives under lib\x64.
        parts.append('x64')
    return os.path.join(*parts)
AddConfigVar('dnn.library_path',
"Location of the cudnn library (defaults to the cuda root)",
StrParam(''),
# Added elsewhere in the c key only when needed.
"Location of the cudnn link library.",
StrParam(default_dnn_lib_path),
in_c_key=False)
def default_dnn_bin_path():
if type(theano.config.dnn).base_path.is_default:
return ''
else:
if theano.config.dnn.base_path != '':
if sys.platform == 'win32':
return os.path.join(theano.config.dnn.base_path, 'bin')
else:
return theano.config.dnn.library_path
return ''
AddConfigVar('dnn.bin_path',
"Location of the cuDNN load library "
"(on non-windows platforms, "
"this is the same as dnn.library_path)",
StrParam(default_dnn_bin_path),
in_c_key=False)
AddConfigVar('dnn.enabled',
"'auto', use cuDNN if available, but silently fall back"
" to not using it if not present."
......
......@@ -1065,7 +1065,8 @@ class CLinker(link.Linker):
ret += x.c_header_dirs()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
# filter out empty strings/None
return [r for r in utils.uniq(ret) if r]
def libraries(self):
"""
......@@ -1107,7 +1108,8 @@ class CLinker(link.Linker):
ret += x.c_lib_dirs()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
# filter out empty strings/None
return [r for r in utils.uniq(ret) if r]
def __compile__(self, input_storage=None, output_storage=None,
storage_map=None, keep_lock=False):
......
......@@ -658,28 +658,21 @@ class CDataType(Type):
"""
__props__ = ('ctype', 'freefunc', 'headers', 'header_dirs',
'libraries', 'lib_dirs', 'extra_support_code',
'version')
'compile_args', 'version')
def __init__(self, ctype, freefunc=None, headers=None, header_dirs=None,
libraries=None, lib_dirs=None, extra_support_code="",
version=None):
def __init__(self, ctype, freefunc=None, headers=(), header_dirs=(),
libraries=(), lib_dirs=(), compile_args=(),
extra_support_code="", version=None):
assert isinstance(ctype, string_types)
self.ctype = ctype
if freefunc is not None:
assert isinstance(freefunc, string_types)
self.freefunc = freefunc
if headers is None:
headers = ()
self.headers = tuple(headers)
if header_dirs is None:
header_dirs = ()
self.header_dirs = tuple(header_dirs)
if libraries is None:
libraries = ()
self.libraries = tuple(libraries)
if lib_dirs is None:
lib_dirs = ()
self.lib_dirs = tuple(lib_dirs)
self.compile_args = tuple(compile_args)
self.extra_support_code = extra_support_code
self._fn = None
self.version = None
......@@ -787,6 +780,9 @@ if (py_%(name)s == NULL) { %(freefunc)s(%(name)s); }
def c_lib_dirs(self):
return self.lib_dirs
def c_compile_args(self):
return self.compile_args
def c_code_cache_version(self):
v = (3, )
if self.version is not None:
......
......@@ -72,16 +72,16 @@ def _dnn_lib():
if _dnn_lib.handle is None:
import ctypes.util
if config.dnn.library_path != "":
if config.dnn.bin_path != "":
if sys.platform == 'darwin':
dnn_handle = _load_lib(os.path.join(config.dnn.library_path, 'libcudnn.dylib'))
dnn_handle = _load_lib(os.path.join(config.dnn.bin_path, 'libcudnn.dylib'))
elif sys.platform == 'win32':
for name in WIN32_CUDNN_NAMES:
dnn_handle = _load_lib(os.path.join(config.dnn.library_path, name))
dnn_handle = _load_lib(os.path.join(config.dnn.bin_path, name))
if dnn_handle is not None:
break
else:
dnn_handle = _load_lib(os.path.join(config.dnn.library_path, 'libcudnn.so'))
dnn_handle = _load_lib(os.path.join(config.dnn.bin_path, 'libcudnn.so'))
else:
lib_name = ctypes.util.find_library('cudnn')
if lib_name is None and sys.platform == 'win32':
......@@ -135,12 +135,13 @@ if ((err = cudnnCreate(&_handle)) != CUDNN_STATUS_SUCCESS) {
}
"""
params = ["-l", "cudnn", "-I" + os.path.dirname(__file__)]
path_wrapper = "\"" if os.name == 'nt' else ""
params = ["-l", "cudnn"]
params.extend(['-I%s%s%s' % (path_wrapper, os.path.dirname(__file__), path_wrapper)])
if config.dnn.include_path:
params.extend(['-I%s%s%s' % (path_wrapper, config.dnn.include_path, path_wrapper)])
if config.cuda.include_path:
params.extend(['-I%s%s%s' % (path_wrapper, config.cuda.include_path, path_wrapper)])
if config.dnn.library_path:
params.extend(['-L%s%s%s' % (path_wrapper, config.dnn.library_path, path_wrapper)])
# Do not run here the test program. It would run on the
......@@ -223,6 +224,24 @@ def dnn_available(context_name):
dnn_available.msg = None
def CUDNNDataType(name, freefunc=None):
    """Build a CDataType for a cuDNN opaque type, wired to the cudnn library.

    Adds the cudnn header/library search paths from the config and, when
    ``dnn.bin_path`` is set, an rpath so the cudnn shared library is found
    at load time.
    """
    bin_path = config.dnn.bin_path
    compile_args = []
    if bin_path:
        # Non-darwin linkers get the quoted path form.
        rpath = bin_path if sys.platform == 'darwin' else '"%s"' % bin_path
        compile_args.append('-Wl,-rpath,' + rpath)
    return CDataType(name, freefunc,
                     headers=['cudnn.h'],
                     header_dirs=[config.dnn.include_path,
                                  config.cuda.include_path],
                     libraries=['cudnn'],
                     lib_dirs=[config.dnn.library_path],
                     compile_args=compile_args,
                     version=version(raises=False))
class DnnVersion(Op):
__props__ = ()
......@@ -230,22 +249,20 @@ class DnnVersion(Op):
return ['cudnn.h']
def c_header_dirs(self):
return [config.dnn.include_path] if config.dnn.include_path else []
return [config.dnn.include_path, config.cuda.include_path]
def c_libraries(self):
return ['cudnn']
def c_lib_dirs(self):
if config.dnn.library_path:
return [config.dnn.library_path]
return []
return [config.dnn.library_path]
def c_compile_args(self):
if config.dnn.library_path:
if config.dnn.bin_path:
if sys.platform == 'darwin':
return ['-Wl,-rpath,' + config.dnn.library_path]
return ['-Wl,-rpath,' + config.dnn.bin_path]
else:
return ['-Wl,-rpath,"' + config.dnn.library_path + '"']
return ['-Wl,-rpath,"' + config.dnn.bin_path + '"']
return []
def c_support_code(self):
......@@ -305,12 +322,7 @@ def version(raises=True):
return version.v
version.v = None
handle_type = CDataType('cudnnHandle_t', 'cudnnDestroy',
headers=['cudnn.h'],
header_dirs=[config.dnn.include_path],
libraries=['cudnn'],
lib_dirs=[config.dnn.library_path],
version=version(raises=False))
handle_type = CUDNNDataType('cudnnHandle_t', 'cudnnDestroy')
# Get cuDNN definitions to be used.
cudnn = cudnn_defs.get_definitions(version(raises=False))
......@@ -368,25 +380,21 @@ class DnnBase(COp):
'gpuarray_helper.h']
def c_header_dirs(self):
dirs = [os.path.dirname(__file__), pygpu.get_include()]
if config.dnn.include_path:
dirs.append(config.dnn.include_path)
return dirs
return [os.path.dirname(__file__), pygpu.get_include(),
config.dnn.include_path, config.cuda.include_path]
def c_libraries(self):
return ['cudnn', 'gpuarray']
def c_lib_dirs(self):
if config.dnn.library_path:
return [config.dnn.library_path]
return []
return [config.dnn.library_path]
def c_compile_args(self):
if config.dnn.library_path:
if config.dnn.bin_path:
if sys.platform == 'darwin':
return ['-Wl,-rpath,' + config.dnn.library_path]
return ['-Wl,-rpath,' + config.dnn.bin_path]
else:
return ['-Wl,-rpath,"' + config.dnn.library_path + '"']
return ['-Wl,-rpath,"' + config.dnn.bin_path + '"']
return []
def c_code_cache_version(self):
......@@ -418,7 +426,8 @@ class GpuDnnConvDesc(COp):
return ['cudnn.h', 'cudnn_helper.h']
def c_header_dirs(self):
return [os.path.dirname(__file__), config.dnn.include_path]
return [os.path.dirname(__file__), config.dnn.include_path,
config.cuda.include_path]
def c_libraries(self):
return ['cudnn']
......@@ -426,6 +435,14 @@ class GpuDnnConvDesc(COp):
def c_lib_dirs(self):
return [config.dnn.library_path]
def c_compile_args(self):
if config.dnn.bin_path:
if sys.platform == 'darwin':
return ['-Wl,-rpath,' + config.dnn.bin_path]
else:
return ['-Wl,-rpath,"' + config.dnn.bin_path + '"']
return []
def do_constant_folding(self, node):
return False
......@@ -466,9 +483,8 @@ class GpuDnnConvDesc(COp):
kern_shape = theano.tensor.basic.cast(kern_shape, 'int64')
node = Apply(self, [kern_shape],
[CDataType("cudnnConvolutionDescriptor_t",
freefunc="cudnnDestroyConvolutionDescriptor",
version=version(raises=False))()])
[CUDNNDataType("cudnnConvolutionDescriptor_t",
freefunc="cudnnDestroyConvolutionDescriptor")()])
# DebugMode cannot compare the values of CDataType variables, so by
# default it returns False all the time. To prevent DebugMode from
# complaining because of the MergeOptimizer, we make this variable
......@@ -1278,9 +1294,8 @@ class GpuDnnPoolDesc(Op):
def make_node(self):
node = Apply(self, [],
[CDataType("cudnnPoolingDescriptor_t",
freefunc="cudnnDestroyPoolingDescriptor",
version=version(raises=False))()])
[CUDNNDataType("cudnnPoolingDescriptor_t",
freefunc="cudnnDestroyPoolingDescriptor")()])
# DebugMode cannot compare the values of CDataType variables, so by
# default it returns False all the time. To prevent DebugMode from
# complaining because of the MergeOptimizer, we make this variable
......@@ -1938,9 +1953,8 @@ class GpuDnnBatchNormGrad(DnnBase):
return [shape[0], shape[2], shape[2]]
gpudata_type = CDataType('gpudata *', 'gpudata_release')
dropoutdesc_type = CDataType('cudnnDropoutDescriptor_t',
'cudnnDestroyDropoutDescriptor',
version=version(raises=False))
dropoutdesc_type = CUDNNDataType('cudnnDropoutDescriptor_t',
'cudnnDestroyDropoutDescriptor')
class GpuDnnDropoutOp(DnnBase):
......@@ -2008,9 +2022,8 @@ def dropout(x, dropout=0.0, seed=4242):
y, odesc = GpuDnnDropoutOp()(x, desc)
return y, desc, odesc, states
rnndesc_type = CDataType('cudnnRNNDescriptor_t',
'cudnnDestroyRNNDescriptor',
version=version(raises=False))
rnndesc_type = CUDNNDataType('cudnnRNNDescriptor_t',
'cudnnDestroyRNNDescriptor')
def as_i32(v):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论