Commit d6903d00
Authored Sep 21, 2016 by Arnaud Bergeron

Some changes from review.

Parent: ab154880
Showing 7 changed files with 57 additions and 29 deletions (+57 -29):

  theano/compile/tests/test_nanguardmode.py   +3   -5
  theano/configparser.py                      +3   -1
  theano/gof/type.py                          +15  -0
  theano/gpuarray/dnn.py                      +21  -10
  theano/gpuarray/tests/rnn_support.py        +1   -1
  theano/gpuarray/tests/test_dnn.py           +0   -3
  theano/gpuarray/type.py                     +14  -9
theano/compile/tests/test_nanguardmode.py

@@ -14,11 +14,9 @@ import theano.tensor as T
 def test_NanGuardMode():
-    """
-    Tests if NanGuardMode is working by feeding in numpy.inf and numpy.nans
-    intentionally. A working implementation should be able to capture all
-    the abnormalties.
-    """
+    # Tests if NanGuardMode is working by feeding in numpy.inf and numpy.nans
+    # intentionally. A working implementation should be able to capture all
+    # the abnormalties.
     x = T.matrix()
     w = theano.shared(numpy.random.randn(5, 7).astype(theano.config.floatX))
     y = T.dot(x, w)
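For context, NanGuardMode is the compilation mode this test exercises: it aborts as soon as a NaN, Inf, or unreasonably large value flows through the compiled function. A minimal, hedged sketch of its use (the real test builds the same graph but checks the raised error more carefully, and the exact exception type depends on the Theano version):

    import numpy
    import theano
    import theano.tensor as T
    from theano.compile.nanguardmode import NanGuardMode

    x = T.matrix()
    w = theano.shared(numpy.random.randn(5, 7).astype(theano.config.floatX))
    y = T.dot(x, w)

    # Compile with NanGuardMode so bad values raise instead of propagating.
    fun = theano.function([x], y,
                          mode=NanGuardMode(nan_is_error=True,
                                            inf_is_error=True,
                                            big_is_error=True))

    infa = numpy.full((3, 5), numpy.inf, dtype=theano.config.floatX)
    try:
        fun(infa)  # feeding inf should be detected
    except Exception as e:  # exception type varies by Theano version
        print('caught:', e)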
theano/configparser.py

@@ -90,10 +90,11 @@ theano_cfg.read(config_files)
 theano_raw_cfg = ConfigParser.RawConfigParser()
 theano_raw_cfg.read(config_files)

 class change_flags(object):
     """
     Use this as a decorator or context manager to change the value of
-    Theano config variable.
+    Theano config variables.

     Useful during tests.
     """
 ...
@@ -112,6 +113,7 @@ class change_flags(object):
         def res(*args, **kwargs):
             with self:
                 return f(*args, **kwargs)
+        return res

     def __enter__(self):
         self.old_vals = {}
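For readers who have not met it, change_flags temporarily overrides Theano config flags around a with-block or around a decorated function and restores the previous values afterwards. A small hedged sketch of both forms (the flag chosen here is only an example):

    import theano
    from theano.configparser import change_flags

    # Decorator form: the override is active only while check() runs.
    @change_flags(compute_test_value='ignore')
    def check():
        return theano.config.compute_test_value  # 'ignore' in here

    # Context-manager form: the old value is restored on exit.
    with change_flags(compute_test_value='ignore'):
        assert theano.config.compute_test_value == 'ignore'

    print(check())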
theano/gof/type.py

@@ -670,6 +670,12 @@ class CDataType(Type):
         return data

     def _get_func(self):
+        """
+        Return a function that makes a value from an integer.
+
+        The integer value is assumed to be a valid pointer for the
+        type and no check is done to ensure that.
+        """
         from theano.scalar import get_scalar_type

         if self._fn is None:
 ...
@@ -680,6 +686,15 @@ class CDataType(Type):
         return self._fn

     def make_value(self, ptr):
+        """
+        Make a value of this type.
+
+        Parameters
+        ----------
+        ptr : int
+            Integer representation of a valid pointer value
+
+        """
         return self._get_func()(ptr)

     def c_declare(self, name, sub, check_input=True):
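The new docstrings describe the intent: _get_func builds (and caches) a converter from an integer to a value of the C type, and make_value applies it without validating the pointer. A purely illustrative sketch of the call pattern; the ctype and the pointer value below are invented for the example:

    from theano.gof.type import CDataType

    # An opaque C handle type; 'void *' and the NULL pointer are placeholders.
    handle_type = CDataType(ctype='void *', freefunc=None)

    raw_ptr = 0  # stand-in for an integer pointer obtained from a C library
    value = handle_type.make_value(raw_ptr)  # no validity check is performed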
theano/gpuarray/dnn.py

@@ -58,11 +58,12 @@ def _dnn_lib():
         lib_name = ctypes.util.find_library('cudnn')
         if lib_name is None and sys.platform == 'win32':
             # Update these names when new versions of cudnn are supported.
-            lib_name = ctypes.util.find_library('cudnn64_5.dll')
-            if lib_name is None:
-                lib_name = ctypes.util.find_library('cudnn64_4.dll')
+            for name in ['cudnn64_5.dll', 'cudnn64_4.dll']:
+                lib_name = ctypes.util.find_library(name)
+                if lib_name:
+                    break
         if lib_name is None:
-            raise RuntimeError('Could not find cudnn library')
+            raise RuntimeError('Could not find cudnn library (maybe your are using a )')
         _dnn_lib.handle = ctypes.cdll.LoadLibrary(lib_name)
         cudnn = _dnn_lib.handle
         cudnn.cudnnCreate.argtypes = [ctypes.POINTER(ctypes.c_void_p)]
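The loop form above makes supporting an additional cuDNN release a one-line change to the name list rather than another nested if. A hedged illustration ('cudnn64_6.dll' is a hypothetical future name, not something this commit adds):

    import ctypes.util

    # Probe the newest DLL name first, oldest last; stop at the first hit.
    lib_name = None
    for name in ['cudnn64_6.dll', 'cudnn64_5.dll', 'cudnn64_4.dll']:
        lib_name = ctypes.util.find_library(name)
        if lib_name:
            break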
@@ -1978,8 +1979,10 @@ class _RNNSplitParams(DnnBase):
     def make_node(self, w, desc, layer, isize, typecode):
         w = as_gpuarray_variable(w, infer_context_name(w))
+        assert w.ndim == 1
         layer = as_scalar(layer).astype('int32')
         isize = as_tensor_variable(isize).astype('uint64')
+        assert isize.ndim == 2
         typecode = as_scalar(typecode).astype('int32')
         _1d = GpuArrayType(w.type.dtype, [False],
                            context_name=w.type.context_name)
 ...
@@ -2087,6 +2090,7 @@ class _RNNSplitParams(DnnBase):
         nshp[0] = PyGpuArray_DIM(%(w)s, 0);
         nshp[1] = 1;
         """ % kw

         def get_params(id, m, b):
             kw2 = kw.copy()
             kw2['id'] = id
 ...
@@ -2152,7 +2156,7 @@ class _RNNSplitParams(DnnBase):
         %(m)s->ga.strides[1] = %(m)s->ga.dimensions[0] * gpuarray_get_elsize(%(m)s->ga.typecode);
             """ % kw2

         for i in range(len(outputs) // 2):
             code += get_params(i, outputs[2 * i], outputs[(2 * i) + 1])
         code += """
 ...
@@ -2189,16 +2193,16 @@ class GpuDnnRNNOp(DnnBase):
         elif direction_mode == 'unidirectional':
             self.num_dirs = 1
         else:
-            raise ValueError('direction_mode')
+            raise ValueError('direction_mode is invalid (got %s)' % (direction_mode,))

     def dnn_context(self, node):
         return node.outputs[1].type.context_name

     def make_node(self, desc, w, x, hx, cx=None):
         if cx is None:
-            context_name = infer_context_name(w, x, hx, cx)
+            context_name = infer_context_name(w, x, hx)
         else:
-            context_name = infer_context_name(w, x, hx)
+            context_name = infer_context_name(w, x, hx, cx)
         w = as_gpuarray_variable(w, context_name)
         x = as_gpuarray_variable(x, context_name)
 ...
@@ -2232,8 +2236,15 @@ class GpuDnnRNNOp(DnnBase):
         reserve, y, hy = outputs[:3]
         _, dy, dhy = output_grads[:3]
         dcy = output_grads[3] if len(output_grads) == 4 else None
-        # If both dy and dhy are disconnected, then this will error
-        # out, but it is indeed an error.
+        # Since the op return two outputs which contain essentially
+        # the same information, the user will most likely only use one
+        # of them. This leads to the situation that the other is
+        # considered "disconnected" by theano in the gradient.
+        # However we know that this isn't really the case so we fix it
+        # up here.
+        # If both dy and dhy are disconnected the fixup will fail, but
+        # that's ok as in that case we really are disconnected.
         if isinstance(dy.type, DisconnectedType):
             dy = as_gpuarray_variable(dhy[-1],
                                       context_name=dhy.type.context_name)
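The rewritten comment documents why the gradient code patches up a DisconnectedType: y and hy carry largely the same information, so callers usually use only one of them and Theano then reports the other's gradient as disconnected. A schematic sketch of the pattern, not the op's actual grad code (rebuild stands in for as_gpuarray_variable):

    from theano.gradient import DisconnectedType

    def fixup_disconnected(dy, dhy, rebuild):
        # If dy is "disconnected" but dhy is not, rebuild a usable dy from
        # the last step of dhy; if both are disconnected the fixup cannot
        # help and the gradient really is disconnected.
        if isinstance(dy.type, DisconnectedType):
            dy = rebuild(dhy[-1])
        return dy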
theano/gpuarray/tests/rnn_support.py

-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, division
 import theano
 import theano.tensor as T
theano/gpuarray/tests/test_dnn.py

@@ -1448,11 +1448,8 @@ def test_dnn_rnn_gru():
     # test code
     X = T.tensor3('X')
-    X.tag.test_value = numpy.zeros((timesteps, batch_size, input_dim), dtype=theano.config.floatX)
     Y = T.tensor3('Y')
-    Y.tag.test_value = numpy.zeros((timesteps, batch_size, hidden_dim), dtype=theano.config.floatX)
     h0 = T.tensor3('h0')
-    h0.tag.test_value = numpy.zeros((depth, batch_size, hidden_dim), dtype=theano.config.floatX)
     rnnb = dnn.RNNBlock(theano.config.floatX, hidden_dim, depth, 'gru')
     psize = rnnb.get_param_size([batch_size, input_dim])
theano/gpuarray/type.py

@@ -20,7 +20,6 @@ except ImportError:
     pygpu = None

 _context_reg = {}
-_props_map = {}


 def move_to_gpu(data):
 ...
@@ -91,6 +90,20 @@ def get_context(name):
     return _context_reg[name]


+def list_contexts():
+    """
+    Return an iterable of all the registered context names.
+    """
+    return _context_reg.keys()
+
+
+# Mappings of properties to contexts. Please never use this if you
+# can avoid it.
+# This is basically a way to store "global" variables that depend on
+# the context.
+_props_map = {}
+
+
 def _get_props(name):
     ctx = get_context(name)
     return _props_map[ctx]
 ...
@@ -104,14 +117,6 @@ def set_prop(name, k, v):
     _get_props(name)[k] = v


-def list_contexts():
-    """
-    Return an iterable of all the registered context names.
-    """
-    return _context_reg.keys()
-
-
 # Private method
 def _name_for_ctx(ctx):
     for k, v in iteritems(_context_reg):
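The moved block groups the public context helpers together: list_contexts exposes the registered names, and the relocated comment warns that _props_map is only a per-context stash of "global" values. A small hedged sketch of the public side (it assumes at least one GPU context has already been registered during device initialization):

    from theano.gpuarray.type import list_contexts, get_context

    # Enumerate whatever contexts have been registered.
    for name in list_contexts():
        ctx = get_context(name)  # raises if the name is not registered
        print(name, ctx)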