testgroup / pytensor · Commits

Commit f000e752, authored Dec 14, 2015 by abergeron

Merge pull request #3682 from nouiz/abs_conv

Better error message and docstring

Parents: 6ec28cf2, f5143371

Showing 6 changed files with 52 additions and 21 deletions
doc/library/tensor/basic.txt          +1   -1
theano/misc/check_blas.py             +1   -0
theano/sandbox/gpuarray/basic_ops.py  +1   -0
theano/tensor/nnet/abstract_conv.py   +42  -17
theano/tensor/nnet/conv.py            +3   -2
theano/tensor/var.py                  +4   -1
doc/library/tensor/basic.txt
@@ -510,7 +510,7 @@ TensorVariable
     .. method:: diagonal(offset=0, axis1=0, axis2=1)
     .. method:: astype(dtype)
     .. method:: take(indices, axis=None, mode='raise')
     .. method:: copy()
+        Return a new symbolic variable that is a copy of the variable. Does not copy the tag.
     .. method:: norm(L, axis=None)
     .. method:: nonzero(self, return_matrix=False)
     .. method:: nonzero_values(self)
theano/misc/check_blas.py
@@ -262,6 +262,7 @@ if __name__ == "__main__":
 GTX Titan Black     0.64s  0.64s
 GTX Titan(D15U-50)
 GTX 780
+GTX 980 Ti          0.41s
 GTX 980
 GTX 970
 GTX 680             1.57s
theano/sandbox/gpuarray/basic_ops.py
@@ -422,6 +422,7 @@ class GpuFromHost(Op):
             }
         } else {
             Py_XDECREF(%(out)s);
+            // This method will release the GIL when needed.
             %(out)s = pygpu_fromhostdata(PyArray_DATA(%(name)s_tmp),
                                          get_typecode((PyObject *)PyArray_DESCR(%(name)s_tmp)),
                                          PyArray_NDIM(%(name)s_tmp),
theano/tensor/nnet/abstract_conv.py
@@ -103,10 +103,10 @@ def conv2d(input,
            border_mode='valid',
            subsample=(1, 1),
            filter_flip=True):
     """
-    This function will build the symbolic graph for convolving a
-    mini-batch of a stack of 2D inputs with a set of 2D filters. The
-    implementation is modelled after Convolutional Neural Networks
+    This function will build the symbolic graph for convolving a mini-batch of a
+    stack of 2D inputs with a set of 2D filters. The implementation is modelled
+    after Convolutional Neural Networks
     (CNN).

     :type input: symbolic 4D tensor
     :param input: mini-batch of feature map stacks, of shape
@@ -153,11 +153,20 @@ def conv2d(input,
     :param filter_flip: If ``True``, will flip the filter rows and columns
         before sliding them over the input. This operation is normally referred
         to as a convolution, and this is the default. If ``False``, the filters
-        are not flipped and the operation is referred to as a cross-correlation.
+        are not flipped and the operation is referred to as a
+        cross-correlation.

     :rtype: symbolic 4D tensor
     :return: set of feature maps generated by convolutional layer. Tensor is
         of shape (batch size, output channels, output rows, output columns)

+    :note: If CuDNN is available, it will be used on the
+        GPU. Otherwise, it is the *CorrMM* convolution that will be used
+        "caffe style convolution".
+
+    :note: This is only supported in Theano 0.8 or the development
+        version until it is released.
     """

     conv_op = AbstractConv2d(imshp=input_shape,
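For context, a minimal usage sketch of the interface documented in the hunk above (not part of the commit; variable names and shapes are illustrative, and only the keyword arguments shown in the docstring are used):

    import theano
    import theano.tensor as T
    from theano.tensor.nnet.abstract_conv import conv2d

    # mini-batch of feature map stacks and a set of 2D filters
    images = T.tensor4('images')    # (batch size, input channels, rows, cols)
    filters = T.tensor4('filters')  # (output channels, input channels, filter rows, filter cols)

    output = conv2d(images, filters,
                    border_mode='valid',
                    subsample=(1, 1),
                    filter_flip=True)

    f = theano.function([images, filters], output)

At this point the graph only contains the abstract op; as the new note says, the optimizer later substitutes cuDNN on the GPU when available, or the CorrMM ("caffe style") convolution otherwise.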
@@ -169,9 +178,10 @@ def conv2d(input,
 class BaseAbstractConv2d(Op):
-    """Base class for AbstractConv
-    Define an abstract convolution op that will be replaced with the appropriate implementation
+    """
+    Base class for AbstractConv
+
+    Define an abstract convolution op that will be replaced with the
+    appropriate implementation

     :type imshp: None, tuple/list of len 4 of int or Constant variable
     :param imshp: The shape of the input parameter.

@@ -211,7 +221,9 @@ class BaseAbstractConv2d(Op):
     :param filter_flip: If ``True``, will flip the filter rows and columns
         before sliding them over the input. This operation is normally referred
         to as a convolution, and this is the default. If ``False``, the filters
-        are not flipped and the operation is referred to as a cross-correlation.
+        are not flipped and the operation is referred to as a
+        cross-correlation.
     """

     check_broadcast = False
     __props__ = ('border_mode', 'subsample', 'filter_flip', 'imshp', 'kshp')
@@ -270,7 +282,8 @@ class AbstractConv2d(BaseAbstractConv2d):
                  subsample=(1, 1),
                  filter_flip=True):
         super(AbstractConv2d, self).__init__(imshp, kshp,
                                              border_mode, subsample, filter_flip)

     def make_node(self, img, kern):
         if img.type.ndim != 4:

@@ -319,7 +332,9 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
                  subsample=(1, 1),
                  filter_flip=True):
         super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp,
                                                          border_mode, subsample, filter_flip)

     # Update shape/height_width
     def make_node(self, img, topgrad, shape):

@@ -336,7 +351,8 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
         return Apply(self, [img, topgrad, shape], [output])

     def perform(self, node, inp, out_):
         raise NotImplementedError('AbstractConv2d_gradWeight theano optimization failed')

     def grad(self, inp, grads):
         bottom, top = inp[:2]
@@ -344,7 +360,10 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
         d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
                                              self.border_mode,
                                              self.subsample,
                                              self.filter_flip)(weights, top, bottom.shape[-2:])
         d_top = AbstractConv2d(self.imshp,
                                self.kshp,
                                self.border_mode,

@@ -373,7 +392,9 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d):
                  subsample=(1, 1),
                  filter_flip=True):
         super(AbstractConv2d_gradInputs, self).__init__(imshp, kshp,
                                                         border_mode, subsample, filter_flip)

     # Update shape/height_width
     def make_node(self, kern, topgrad, shape):

@@ -390,16 +411,20 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d):
         return Apply(self, [kern, topgrad, shape], [output])

     def perform(self, node, inp, out_):
         raise NotImplementedError('AbstractConv2d_gradWeight theano optimization failed')

     def grad(self, inp, grads):
         weights, top = inp[:2]
         bottom, = grads
         d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,
                                                self.border_mode,
                                                self.subsample)(bottom, top, weights.shape[-2:])
         d_top = AbstractConv2d(self.imshp, self.kshp,
                                self.border_mode, self.subsample)(bottom, weights)
         d_height_width = (theano.gradient.DisconnectedType()(),)
         return (d_weights, d_top) + d_height_width
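A short sketch of how the gradient ops edited above come into play (illustrative, not part of the commit): differentiating a graph built with conv2d is expressed through AbstractConv2d_gradInputs and AbstractConv2d_gradWeights nodes, which the optimizer later replaces with a concrete implementation.

    import theano
    import theano.tensor as T
    from theano.tensor.nnet.abstract_conv import conv2d

    x = T.tensor4('x')   # (batch size, input channels, rows, cols)
    w = T.tensor4('w')   # (output channels, input channels, filter rows, filter cols)
    out = conv2d(x, w, border_mode='valid', subsample=(1, 1), filter_flip=True)

    # Gradients w.r.t. the image and the filters; these go through the
    # AbstractConv2d_grad* ops whose grad()/make_node() methods are shown above.
    g_x, g_w = theano.grad(out.sum(), [x, w])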
theano/tensor/nnet/conv.py
@@ -881,8 +881,9 @@ class ConvOp(OpenMPOp):
         if self.dx not in (1, 2) or self.dy not in (1, 2):
             raise NotImplementedError(
-                "ERROR: We disable ConvOp.grad now when dx or "
-                "dy are different from 1 and 2, as there is a bug in it.")
+                "ERROR: We disable ConvOp.grad now when output_mode is not"
+                " 'valid' and dx or dy are greater than 2, as there is a bug"
+                " in it. See `abstract_conv2d <>`_ for a version that support this.")
         all_shape = self.has_all_shape(self.imshp, self.kshp,
                                        self.nkern, self.bsize)
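The rewritten message narrows the restriction: ConvOp.grad is only disabled when output_mode is not 'valid' and the subsampling steps dx/dy exceed 2, and it points at the abstract interface instead. An illustrative sketch of the case the message refers to (assumed behaviour, based only on the message text; not part of the commit):

    import theano
    import theano.tensor as T
    from theano.tensor.nnet.abstract_conv import conv2d

    x = T.tensor4('x')
    w = T.tensor4('w')
    # 'full' border mode with a subsampling step of 3: the legacy ConvOp
    # refuses to build this gradient, and the new error message points at
    # the abstract conv2d interface as the suggested alternative.
    out = conv2d(x, w, border_mode='full', subsample=(3, 3))
    g_x = theano.grad(out.sum(), x)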
theano/tensor/var.py
@@ -535,7 +535,10 @@ class _tensor_py_operators(object):
     # COPYING
     def copy(self, name=None):
-        """Copy a variable and optionally assign a name."""
+        """Return a symbolic copy and optionally assign a name.
+
+        Does not copy the tags.
+        """
         copied_variable = theano.tensor.basic.tensor_copy(self)
         copied_variable.name = name
         return copied_variable
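A small sketch of the behaviour the new docstring describes (illustrative, not part of the commit):

    import theano.tensor as T

    x = T.vector('x')
    y = x.copy(name='x_copy')       # new symbolic variable, optionally renamed
    assert y.name == 'x_copy'
    assert y.owner.inputs[0] is x   # y is a symbolic copy (tensor_copy) of x
    # y.tag is a fresh tag object; the tags of x are not copied over.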