Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
1e500bd6
提交
1e500bd6
authored
9月 29, 2015
作者:
Arnaud Bergeron
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Move the cuda and dnn flags in configdefaults since the latter depends on the former.
Otherwise it is possible to get into a situation where we use the dnn flags, but the cuda ones aren't defined and stuff breaks.
上级
201b4610
显示空白字符变更
内嵌
并排
正在显示
5 个修改的文件
包含
101 行增加
和
116 行删除
+101
-116
configdefaults.py
theano/configdefaults.py
+98
-0
dnn.py
theano/sandbox/cuda/dnn.py
+0
-2
nvcc_compiler.py
theano/sandbox/cuda/nvcc_compiler.py
+3
-70
dnn_flags.py
theano/sandbox/dnn_flags.py
+0
-42
dnn.py
theano/sandbox/gpuarray/dnn.py
+0
-2
没有找到文件。
theano/configdefaults.py
浏览文件 @
1e500bd6
...
@@ -118,6 +118,104 @@ AddConfigVar(
...
@@ -118,6 +118,104 @@ AddConfigVar(
in_c_key
=
False
)
in_c_key
=
False
)
def default_cuda_root():
    """Compute the default value of the ``cuda.root`` config flag.

    Preference order: the CUDA_ROOT environment variable when non-empty,
    otherwise the parent of the first directory on PATH that contains an
    ``nvcc`` executable.  Returns '' when neither is available.
    """
    env_root = os.getenv('CUDA_ROOT', "")
    if env_root:
        return env_root
    search_path = os.getenv("PATH")
    if not search_path:
        return ''
    for entry in search_path.split(os.path.pathsep):
        # nvcc normally lives in <cuda_root>/bin, so report entry's parent.
        if os.path.exists(os.path.join(entry, "nvcc")):
            return os.path.split(entry)[0]
    return ''
# Register the cuda.root flag.  Its default is computed lazily by
# default_cuda_root (CUDA_ROOT env var, else the parent of the nvcc
# found on PATH); it is excluded from the C cache key.
AddConfigVar('cuda.root',
             """directory with bin/, lib/, include/ for cuda utilities.
This directory is included via -L and -rpath when linking
dynamically compiled modules. If AUTO and nvcc is in the
path, it will use one of nvcc parent directory. Otherwise
/usr/local/cuda will be used. Leave empty to prevent extra
linker directives. Default: environment variable "CUDA_ROOT"
or else "AUTO".
""",
             StrParam(default_cuda_root),
             in_c_key=False)
def filter_nvcc_flags(s):
    """Validate and normalize the value of the ``nvcc.flags`` flag.

    Splits *s* on spaces, drops empty tokens, and rejects any token that
    does not start with '-' (i.e. a value separated from its parameter by
    a space).  Returns the surviving tokens re-joined with single spaces.
    """
    assert isinstance(s, str)
    tokens = [tok for tok in s.split(' ') if tok]
    bad = [tok for tok in tokens if not tok.startswith("-")]
    if bad:
        raise ValueError(
            "Theano nvcc.flags support only parameter/value pairs without"
            " space between them. e.g.: '--machine 64' is not supported,"
            " but '--machine=64' is supported. Please add the '=' symbol."
            " nvcc.flags value is '%s'" % s)
    return ' '.join(tokens)
# -- nvcc compiler flags ---------------------------------------------------

AddConfigVar('nvcc.flags',
             "Extra compiler flags for nvcc",
             ConfigParam("", filter_nvcc_flags),
             # Not needed in c key as it is already added.
             # We remove it as we don't make the md5 of config to change
             # if theano.sandbox.cuda is loaded or not.
             in_c_key=False)

AddConfigVar('nvcc.compiler_bindir',
             "If defined, nvcc compiler driver will seek g++ and gcc"
             " in this directory",
             StrParam(""),
             in_c_key=False)

AddConfigVar('nvcc.fastmath',
             "",
             BoolParam(False),
             # Not needed in c key as it is already added.
             # We remove it as we don't make the md5 of config to change
             # if theano.sandbox.cuda is loaded or not.
             in_c_key=False)

# -- cudnn convolution algorithm selection ---------------------------------
# workmem/workmem_bwd are kept only for backward compatibility; the
# algo_fwd/algo_bwd flags below supersede them.

AddConfigVar('dnn.conv.workmem',
             "This flag is deprecated; use dnn.conv.algo_fwd.",
             EnumStr(''),
             in_c_key=False)

AddConfigVar('dnn.conv.workmem_bwd',
             "This flag is deprecated; use dnn.conv.algo_bwd.",
             EnumStr(''),
             in_c_key=False)

AddConfigVar('dnn.conv.algo_fwd',
             "Default implementation to use for CuDNN forward convolution.",
             # First value ('small') is the default.
             EnumStr('small', 'none', 'large', 'fft', 'guess_once',
                     'guess_on_shape_change', 'time_once',
                     'time_on_shape_change'),
             in_c_key=False)

AddConfigVar('dnn.conv.algo_bwd',
             "Default implementation to use for CuDNN backward convolution.",
             # First value ('none') is the default.
             EnumStr('none', 'deterministic', 'fft', 'guess_once',
                     'guess_on_shape_change', 'time_once',
                     'time_on_shape_change'),
             in_c_key=False)
def default_dnn_path(suffix):
    """Build a zero-argument callable giving the default cudnn path.

    The returned function resolves ``config.cuda.root`` lazily at call
    time: it yields '' when no cuda root is configured, otherwise
    ``os.path.join(config.cuda.root, suffix)``.
    """
    def resolve(suffix=suffix):
        if config.cuda.root == '':
            return ''
        return os.path.join(config.cuda.root, suffix)
    return resolve
# cudnn header/library locations default to subdirectories of the cuda
# root, resolved lazily via default_dnn_path so they track later changes
# to config.cuda.root.
AddConfigVar('dnn.include_path',
             "Location of the cudnn header (defaults to the cuda root)",
             StrParam(default_dnn_path('include')))

AddConfigVar('dnn.library_path',
             # Fixed copy-pasted description: this flag points at the
             # link library, not the header.
             "Location of the cudnn link library (defaults to the cuda root)",
             StrParam(default_dnn_path('lib64')))
# This flag determines whether or not to raise error/warning message if
# This flag determines whether or not to raise error/warning message if
# there is a CPU Op in the computational graph.
# there is a CPU Op in the computational graph.
AddConfigVar
(
AddConfigVar
(
...
...
theano/sandbox/cuda/dnn.py
浏览文件 @
1e500bd6
...
@@ -27,8 +27,6 @@ from theano.sandbox.cuda import gpu_seqopt, register_opt
...
@@ -27,8 +27,6 @@ from theano.sandbox.cuda import gpu_seqopt, register_opt
from
theano.sandbox.cuda.nvcc_compiler
import
NVCC_compiler
from
theano.sandbox.cuda.nvcc_compiler
import
NVCC_compiler
import
theano.sandbox.dnn_flags
def
dnn_available
():
def
dnn_available
():
if
dnn_available
.
avail
is
None
:
if
dnn_available
.
avail
is
None
:
...
...
theano/sandbox/cuda/nvcc_compiler.py
浏览文件 @
1e500bd6
...
@@ -8,6 +8,7 @@ import warnings
...
@@ -8,6 +8,7 @@ import warnings
import
numpy
import
numpy
from
theano
import
config
from
theano.compat
import
decode
,
decode_iter
from
theano.compat
import
decode
,
decode_iter
from
theano.gof
import
local_bitwidth
from
theano.gof
import
local_bitwidth
from
theano.gof.utils
import
hash_from_file
from
theano.gof.utils
import
hash_from_file
...
@@ -19,67 +20,6 @@ from theano.misc.windows import output_subprocess_Popen
...
@@ -19,67 +20,6 @@ from theano.misc.windows import output_subprocess_Popen
_logger
=
logging
.
getLogger
(
"theano.sandbox.cuda.nvcc_compiler"
)
_logger
=
logging
.
getLogger
(
"theano.sandbox.cuda.nvcc_compiler"
)
from
theano.configparser
import
(
config
,
AddConfigVar
,
StrParam
,
BoolParam
,
ConfigParam
)
AddConfigVar
(
'nvcc.compiler_bindir'
,
"If defined, nvcc compiler driver will seek g++ and gcc"
" in this directory"
,
StrParam
(
""
),
in_c_key
=
False
)
user_provided_cuda_root
=
True
def
default_cuda_root
():
global
user_provided_cuda_root
v
=
os
.
getenv
(
'CUDA_ROOT'
,
""
)
user_provided_cuda_root
=
False
if
v
:
return
v
return
find_cuda_root
()
AddConfigVar
(
'cuda.root'
,
"""directory with bin/, lib/, include/ for cuda utilities.
This directory is included via -L and -rpath when linking
dynamically compiled modules. If AUTO and nvcc is in the
path, it will use one of nvcc parent directory. Otherwise
/usr/local/cuda will be used. Leave empty to prevent extra
linker directives. Default: environment variable "CUDA_ROOT"
or else "AUTO".
"""
,
StrParam
(
default_cuda_root
),
in_c_key
=
False
)
def
filter_nvcc_flags
(
s
):
assert
isinstance
(
s
,
str
)
flags
=
[
flag
for
flag
in
s
.
split
(
' '
)
if
flag
]
if
any
([
f
for
f
in
flags
if
not
f
.
startswith
(
"-"
)]):
raise
ValueError
(
"Theano nvcc.flags support only parameter/value pairs without"
" space between them. e.g.: '--machine 64' is not supported,"
" but '--machine=64' is supported. Please add the '=' symbol."
" nvcc.flags value is '
%
s'"
%
s
)
return
' '
.
join
(
flags
)
AddConfigVar
(
'nvcc.flags'
,
"Extra compiler flags for nvcc"
,
ConfigParam
(
""
,
filter_nvcc_flags
),
# Not needed in c key as it is already added.
# We remove it as we don't make the md5 of config to change
# if theano.sandbox.cuda is loaded or not.
in_c_key
=
False
)
AddConfigVar
(
'nvcc.fastmath'
,
""
,
BoolParam
(
False
),
# Not needed in c key as it is already added.
# We remove it as we don't make the md5 of config to change
# if theano.sandbox.cuda is loaded or not.
in_c_key
=
False
)
nvcc_path
=
'nvcc'
nvcc_path
=
'nvcc'
nvcc_version
=
None
nvcc_version
=
None
...
@@ -115,14 +55,6 @@ def is_nvcc_available():
...
@@ -115,14 +55,6 @@ def is_nvcc_available():
return
False
return
False
def find_cuda_root():
    """Locate a CUDA installation by scanning PATH for ``nvcc``.

    Returns the parent of the first PATH entry containing an nvcc
    executable, or None when PATH is unset/empty or nvcc is not found.
    """
    search_path = os.getenv("PATH")
    if not search_path:
        return
    for candidate in search_path.split(os.path.pathsep):
        if os.path.exists(os.path.join(candidate, "nvcc")):
            # nvcc normally lives in <cuda_root>/bin.
            return os.path.split(candidate)[0]
rpath_defaults
=
[]
rpath_defaults
=
[]
...
@@ -357,7 +289,8 @@ class NVCC_compiler(Compiler):
...
@@ -357,7 +289,8 @@ class NVCC_compiler(Compiler):
# provided a cuda.root flag, we need to add one, but
# provided a cuda.root flag, we need to add one, but
# otherwise, we don't add it. See gh-1540 and
# otherwise, we don't add it. See gh-1540 and
# https://wiki.debian.org/RpathIssue for details.
# https://wiki.debian.org/RpathIssue for details.
if
(
user_provided_cuda_root
and
if
(
not
type
(
config
.
cuda
)
.
root
.
is_default
and
os
.
path
.
exists
(
os
.
path
.
join
(
config
.
cuda
.
root
,
'lib'
))):
os
.
path
.
exists
(
os
.
path
.
join
(
config
.
cuda
.
root
,
'lib'
))):
rpaths
.
append
(
os
.
path
.
join
(
config
.
cuda
.
root
,
'lib'
))
rpaths
.
append
(
os
.
path
.
join
(
config
.
cuda
.
root
,
'lib'
))
...
...
theano/sandbox/dnn_flags.py
deleted
100644 → 0
浏览文件 @
201b4610
"""
This module contains the configuration flags for cudnn support.
Those are shared between the cuda and gpuarray backend which is why
they are in this file.
"""
import
os.path
from
theano.configparser
import
AddConfigVar
,
EnumStr
,
StrParam
from
theano
import
config
AddConfigVar
(
'dnn.conv.workmem'
,
"This flag is deprecated; use dnn.conv.algo_fwd."
,
EnumStr
(
''
),
in_c_key
=
False
)
AddConfigVar
(
'dnn.conv.workmem_bwd'
,
"This flag is deprecated; use dnn.conv.algo_bwd."
,
EnumStr
(
''
),
in_c_key
=
False
)
AddConfigVar
(
'dnn.conv.algo_fwd'
,
"Default implementation to use for CuDNN forward convolution."
,
EnumStr
(
'small'
,
'none'
,
'large'
,
'fft'
,
'guess_once'
,
'guess_on_shape_change'
,
'time_once'
,
'time_on_shape_change'
),
in_c_key
=
False
)
AddConfigVar
(
'dnn.conv.algo_bwd'
,
"Default implementation to use for CuDNN backward convolution."
,
EnumStr
(
'none'
,
'deterministic'
,
'fft'
,
'guess_once'
,
'guess_on_shape_change'
,
'time_once'
,
'time_on_shape_change'
),
in_c_key
=
False
)
AddConfigVar
(
'dnn.include_path'
,
"Location of the cudnn header (defaults to the cuda root)"
,
StrParam
(
lambda
:
os
.
path
.
join
(
config
.
cuda
.
root
,
'include'
)))
AddConfigVar
(
'dnn.library_path'
,
"Location of the cudnn header (defaults to the cuda root)"
,
StrParam
(
lambda
:
os
.
path
.
join
(
config
.
cuda
.
root
,
'lib64'
)))
theano/sandbox/gpuarray/dnn.py
浏览文件 @
1e500bd6
...
@@ -28,8 +28,6 @@ from .nnet import GpuSoftmax
...
@@ -28,8 +28,6 @@ from .nnet import GpuSoftmax
from
.opt
import
gpu_seqopt
,
register_opt
,
conv_groupopt
,
op_lifter
from
.opt
import
gpu_seqopt
,
register_opt
,
conv_groupopt
,
op_lifter
from
.opt_util
import
alpha_merge
,
output_merge
from
.opt_util
import
alpha_merge
,
output_merge
# We need to import this to define the flags.
from
theano.sandbox
import
dnn_flags
# noqa
def
dnn_available
():
def
dnn_available
():
...
...
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论