testgroup / pytensor · Commits

Commit 63306840, authored Feb 21, 2017 by Reyhane Askari:
new centralized caching mechanisme

Parent: cc9b423a
Showing 9 changed files with 109 additions and 47 deletions (+109 -47):
theano/gof/tests/test_utils.py     +15   -0
theano/gof/tests/test_vm.py         +0   -3
theano/gof/utils.py                +68   -0
theano/gpuarray/basic_ops.py        +4   -4
theano/gpuarray/dnn.py             +10   -9
theano/tensor/opt.py                +9   -1
theano/tensor/tests/test_utils.py   +2   -2
theano/tensor/utils.py              +0  -26
theano/tensor/var.py                +1   -2
theano/gof/tests/test_utils.py

 from __future__ import absolute_import, print_function, division
 import theano
+from theano.gof.utils import CacheClass
 from theano.gof.utils import (give_variables_names, remove, unique)
...
@@ -61,3 +62,17 @@ def test_stack_trace():
         assert len(v.tag.trace[0]) == 2
     finally:
         theano.config.traceback.limit = orig
+
+
+class CachingClassExample(CacheClass, theano.Op):
+    __props__ = ('value',)
+    __cache_instance__ = True
+
+    def __init__(self, value):
+        self.value = value
+
+
+def test_caching():
+    obj_1 = CachingClassExample(3)
+    obj_2 = CachingClassExample(3)
+    assert obj_1 is obj_2
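
Note: the new test only covers the equal-argument case. A minimal follow-up sketch (not part of the commit, assuming the CachingClassExample defined above) shows the complementary behaviour implied by a cache key that includes the constructor arguments:

    # Hypothetical extension of test_caching, not in the commit:
    # different constructor arguments give different cached instances.
    obj_3 = CachingClassExample(3)
    obj_4 = CachingClassExample(4)
    assert obj_3 is not obj_4               # different key -> different instance
    assert obj_3 is CachingClassExample(3)  # same key -> same cached instance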
theano/gof/tests/test_vm.py

...
@@ -386,9 +386,6 @@ if run_memory_usage_tests:
     class RunOnce(theano.Op):

-        __props__ = ("nb_run",)

         def __init__(self):
             self.nb_run = 0
...
theano/gof/utils.py

...
@@ -9,6 +9,7 @@ from six.moves import StringIO
 from theano import config
 from theano.compat import PY3
+from theano.misc.frozendict import frozendict


 def simple_extract_stack(f=None, limit=None, skips=[]):
...
@@ -211,6 +212,49 @@ class MetaObject(type):
         return type.__new__(cls, name, bases, dct)


+class MetaObject_caching(MetaObject):
+    _cache = {}
+
+    def __call__(cls, *args, **kwargs):
+        key = [cls]
+        for arg in args:
+            arg = make_hashable(arg)
+            key.append(arg)
+        for k, v in sorted(kwargs.items()):
+            key.append(k)
+            v = make_hashable(v)
+            key.append(v)
+        key = tuple(key)
+        if key not in cls._cache:
+            cls._cache[key] = super(MetaObject_caching, cls).__call__(*args, **kwargs)
+        return cls._cache[key]
+
+
+class CacheClass(with_metaclass(MetaObject_caching, object)):
+    pass
+
+
+def make_hashable(argument):
+    try:
+        hash(argument)
+        return argument
+    except:
+        if isinstance(argument, (list, tuple)):
+            argument = list(argument)
+            for index, instance in enumerate(argument):
+                argument[index] = make_hashable(instance)
+            argument = tuple(argument)
+        elif isinstance(argument, dict):
+            for k, v in argument.items():
+                argument[k] = make_hashable(v)
+            argument = frozendict(argument)
+        elif isinstance(argument, slice):
+            argument = argument.__reduce__()
+        elif isinstance(argument, np.ndarray):
+            argument = hash_from_ndarray(argument)
+    return argument
+
+
 class object2(with_metaclass(MetaObject, object)):
     __slots__ = []
...
@@ -565,6 +609,30 @@ else:
     return hashlib.md5(np.getbuffer(msg)).hexdigest()


+def hash_from_ndarray(data):
+    """
+    Return a hash from an ndarray.
+
+    It takes care of the data, shapes, strides and dtype.
+
+    """
+    # We need to hash the shapes and strides as hash_from_code only hashes
+    # the data buffer. Otherwise, this will cause problem with shapes like:
+    # (1, 0) and (2, 0) and problem with inplace transpose.
+    # We also need to add the dtype to make the distinction between
+    # uint32 and int32 of zeros with the same shape and strides.
+
+    # python hash are not strong, so I always use md5 in order not to have a
+    # too long hash, I call it again on the concatenation of all parts.
+    if not data.flags["C_CONTIGUOUS"]:
+        # hash_from_code needs a C-contiguous array.
+        data = np.ascontiguousarray(data)
+    return hash_from_code(hash_from_code(data) +
+                          hash_from_code(str(data.shape)) +
+                          hash_from_code(str(data.strides)) +
+                          hash_from_code(str(data.dtype)))
+
+
 def hash_from_file(file_path):
     """
     Return the MD5 hash of a file.
...
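
For context, the caching above works at the metaclass level: MetaObject_caching.__call__ intercepts every instantiation of a CacheClass subclass, builds a key from the class plus its positional and keyword arguments, and returns the previously built instance when that key has been seen before. A minimal standalone sketch of the same pattern (plain Python 3, illustrative names only, without the make_hashable normalisation):

    class CachingMeta(type):
        # Illustrative only: one instance per (class, args, sorted kwargs) key.
        _cache = {}

        def __call__(cls, *args, **kwargs):
            key = (cls, args, tuple(sorted(kwargs.items())))
            if key not in CachingMeta._cache:
                CachingMeta._cache[key] = super(CachingMeta, cls).__call__(*args, **kwargs)
            return CachingMeta._cache[key]


    class Point(metaclass=CachingMeta):
        def __init__(self, x, y):
            self.x, self.y = x, y


    assert Point(1, 2) is Point(1, 2)      # same arguments -> one shared instance
    assert Point(1, 2) is not Point(2, 1)  # different arguments -> new instance

The patched implementation additionally normalises unhashable arguments (lists, dicts, slices, ndarrays) through make_hashable before keying, which is why the frozendict import and hash_from_ndarray move into this module.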
theano/gpuarray/basic_ops.py

...
@@ -12,7 +12,7 @@ from theano.tensor.basic import (
                                  Alloc, AllocEmpty, alloc_validate_shape, Join, Split)

 from theano.gof import HideC, COp
-from theano.gof.utils import MethodNotDefined
+from theano.gof.utils import MethodNotDefined, CacheClass

 from collections import deque
...
@@ -629,7 +629,7 @@ class HostFromGpu(Op):
 host_from_gpu = HostFromGpu()


-class GpuFromHost(Op):
+class GpuFromHost(CacheClass, Op):
     """
     Transfer data to GPU.
...
@@ -783,7 +783,7 @@ class GpuToGpu(Op):
         return (1,)


-class GpuAlloc(HideC, Alloc):
+class GpuAlloc(CacheClass, HideC, Alloc):
     """
     Allocate initialized memory on the GPU.
...
@@ -945,7 +945,7 @@ class GpuAlloc(HideC, Alloc):
         return True


-class GpuAllocEmpty(HideC, AllocEmpty):
+class GpuAllocEmpty(CacheClass, HideC, AllocEmpty):
     """
     Allocate uninitialized memory on the GPU.
...
theano/gpuarray/dnn.py

...
@@ -13,6 +13,7 @@ from theano.scalar import as_scalar, constant, Log, get_scalar_type
 from theano.tensor import as_tensor_variable
 from theano.gradient import DisconnectedType, grad_not_implemented
 from theano.gof import Optimizer, local_optimizer, COp
+from theano.gof.utils import CacheClass
 from theano.gof.cmodule import GCC_compiler
 from theano.gof.type import CDataType, Generic
 from theano.compile import optdb
...
@@ -348,7 +349,7 @@ def version(raises=True):
 version.v = None


-class GpuDnnConvDesc(COp):
+class GpuDnnConvDesc(CacheClass, COp):
     """
     This Op builds a convolution descriptor for use in the other convolution
...
@@ -639,7 +640,7 @@ class GpuDnnConv(DnnBase):
         return [shape[2]]


-class GpuDnnConvGradW(DnnBase):
+class GpuDnnConvGradW(CacheClass, DnnBase):
     """
     The convolution gradient with respect to the weights.
...
@@ -770,7 +771,7 @@ class GpuDnnConvGradW(DnnBase):
         return [shape[2]]


-class GpuDnnConvGradI(DnnBase):
+class GpuDnnConvGradI(CacheClass, DnnBase):
     """
     The convolution gradient with respect to the inputs.
...
@@ -1137,9 +1138,8 @@ def dnn_gradweight(img, topgrad, kerns_shp, border_mode='valid',
     precision = get_precision(precision, [img, topgrad])

     desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                           conv_mode=conv_mode, precision=precision)(
         kerns_shp)
-    out = GpuAllocEmpty(dtype=img.dtype, context_name=ctx_name)(*kerns_shp)
+    out = GpuAllocEmpty(ctx_name, dtype=img.dtype)(*kerns_shp)
     return GpuDnnConvGradW()(img, topgrad, out, desc)
...
@@ -1151,6 +1151,7 @@ def dnn_gradweight3d(img, topgrad, kerns_shp, border_mode='valid',
     return dnn_gradweight(img, topgrad, kerns_shp, border_mode,
                           subsample, conv_mode, precision)


 def dnn_gradinput(kerns, topgrad, img_shp, border_mode='valid',
                   subsample=(1, 1), conv_mode='conv', precision=None):
     """
...
@@ -1165,9 +1166,8 @@ def dnn_gradinput(kerns, topgrad, img_shp, border_mode='valid',
     precision = get_precision(precision, [kerns, topgrad])

     desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                           conv_mode=conv_mode, precision=precision)(
         kerns.shape)
-    out = GpuAllocEmpty(dtype=kerns.dtype, context_name=ctx_name)(*img_shp)
+    out = GpuAllocEmpty(ctx_name, kerns.dtype)(*img_shp)
     return GpuDnnConvGradI()(kerns, topgrad, out, desc)
...
@@ -1179,6 +1179,7 @@ def dnn_gradinput3d(kerns, topgrad, img_shp, border_mode='valid',
     return dnn_gradinput(kerns, topgrad, img_shp, border_mode, subsample,
                          conv_mode, precision)


 class GpuDnnPoolDesc(Op):
     """
...
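
The two GpuAllocEmpty call sites switch from keyword to positional arguments for the context. The commit does not say why; a plausible reading (an inference from the metaclass above, not stated in the diff) is that the cache key records positional and keyword arguments separately, so mixed calling conventions for the same configuration would create duplicate cache entries. A small sketch of that effect, with hypothetical names:

    # Hypothetical illustration of the key scheme used by MetaObject_caching:
    # (class,) followed by positional args, then sorted keyword items.
    def cache_key(cls, *args, **kwargs):
        key = [cls]
        key.extend(args)
        for k, v in sorted(kwargs.items()):
            key.extend((k, v))
        return tuple(key)

    k1 = cache_key('GpuAllocEmpty', 'ctx', dtype='float32')
    k2 = cache_key('GpuAllocEmpty', context_name='ctx', dtype='float32')
    assert k1 != k2  # same logical Op, two distinct cache entries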
theano/tensor/opt.py

...
@@ -346,6 +346,7 @@ class InplaceElemwiseOptimizer(Optimizer):
                     inplace_pattern = dict(baseline)
                     inplace_pattern[candidate_output] = candidate_input
+                    inplace_pattern = theano.misc.frozendict.frozendict(inplace_pattern)
                     try:
                         if hasattr(op.scalar_op, "make_new_inplace"):
                             new_scal = op.scalar_op.make_new_inplace(
...
@@ -7270,7 +7271,14 @@ your code will run correctly, but may be slower.""")
                 # Do not call make_node to have test_value
                 n = maker(node, C)(*inputs).owner
                 assert len(n.outputs) == 1
-                assert node.outputs[0].dtype == n.outputs[0].dtype
+                assert node.outputs[0].dtype == n.outputs[0].dtype, (
+                    "node.outputs[0].dtype: %r" % node.outputs[0].dtype,
+                    "n.outputs[0].dtype: %r" % n.outputs[0].dtype,
+                    "n.op: %r" % n.op, "node.op: %r" % node.op,
+                    "n.outputs %r" % n.outputs, "n: %r" % n,
+                    "node.outputs %r" % node.outputs, "node %r" % node,
+                    "inputs: %r" % n.inputs, "s_inputs: %r" % s_inputs,
+                    "s_new_out: %r" % s_new_out, "OP: %r" % OP)
                 if len(n.inputs) > max_nb_input:
                     _logger.info('loop fusion failed because Op would exceed'
...
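
Wrapping inplace_pattern in a frozendict matters because a plain dict is unhashable and therefore cannot take part in the cache key that MetaObject_caching builds when the Elemwise Op is constructed, whereas an immutable mapping can. A minimal sketch of the distinction (the tuple-of-items stand-in below only illustrates immutability; it is not how theano.misc.frozendict is implemented):

    pattern = {0: 0}

    try:
        hash(pattern)                        # plain dict: TypeError, unhashable
    except TypeError:
        pass

    frozen = tuple(sorted(pattern.items()))  # immutable, hashable stand-in
    cache = {('Elemwise', frozen): 'cached op instance'}
    assert ('Elemwise', tuple(sorted({0: 0}.items()))) in cache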
theano/tensor/tests/test_utils.py

...
@@ -4,8 +4,8 @@ import unittest
 import numpy

 import theano
-from theano.tensor.utils import (hash_from_ndarray, shape_of_variables)
+from theano.tensor.utils import shape_of_variables
+from theano.gof.utils import hash_from_ndarray


 def test_hash_from_ndarray():
     hashs = []
...
theano/tensor/utils.py

 from __future__ import absolute_import, print_function, division
-import numpy
 import theano
 from theano.compat import izip
-from theano.gof.utils import hash_from_code
-
-
-def hash_from_ndarray(data):
-    """
-    Return a hash from an ndarray.
-
-    It takes care of the data, shapes, strides and dtype.
-
-    """
-    # We need to hash the shapes and strides as hash_from_code only hashes
-    # the data buffer. Otherwise, this will cause problem with shapes like:
-    # (1, 0) and (2, 0) and problem with inplace transpose.
-    # We also need to add the dtype to make the distinction between
-    # uint32 and int32 of zeros with the same shape and strides.
-
-    # python hash are not strong, so I always use md5 in order not to have a
-    # too long hash, I call it again on the concatenation of all parts.
-    if not data.flags["C_CONTIGUOUS"]:
-        # hash_from_code needs a C-contiguous array.
-        data = numpy.ascontiguousarray(data)
-    return hash_from_code(hash_from_code(data) +
-                          hash_from_code(str(data.shape)) +
-                          hash_from_code(str(data.strides)) +
-                          hash_from_code(str(data.dtype)))


 def shape_of_variables(fgraph, input_shapes):
...
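
hash_from_ndarray is deleted here because it moved to theano/gof/utils.py (see above), presumably so make_hashable can call it without importing theano.tensor. The docstring's point, that the buffer alone is not enough and that shape, strides and dtype must be folded in, can be illustrated with a standalone sketch using hashlib directly (this is not Theano's hash_from_code):

    import hashlib
    import numpy as np

    def md5_hex(raw):
        # Helper for the sketch only.
        return hashlib.md5(raw).hexdigest()

    a = np.zeros((1, 0), dtype='int32')
    b = np.zeros((2, 0), dtype='int32')
    # Both buffers are empty, so hashing only the data cannot tell them apart;
    # folding in shape, strides and dtype does.
    assert md5_hex(a.tobytes()) == md5_hex(b.tobytes())
    sig_a = md5_hex(a.tobytes() + repr((a.shape, a.strides, str(a.dtype))).encode())
    sig_b = md5_hex(b.tobytes() + repr((b.shape, b.strides, str(b.dtype))).encode())
    assert sig_a != sig_b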
theano/tensor/var.py

...
@@ -12,8 +12,7 @@ import theano
 from theano.compat import PY3
 from theano.scalar import ComplexError, IntegerDivisionError
 from theano.gof import Constant, Variable
-from theano.gof.utils import hashtype
-from theano.tensor.utils import hash_from_ndarray
+from theano.gof.utils import hashtype, hash_from_ndarray
 from theano.tensor.type import TensorType
 from theano.configparser import config
...