提交 c739cc86 authored 作者: Iban Harlouchet's avatar Iban Harlouchet

numpydoc for theano/sandbox/gpuarray/dnn.py

上级 45f18bad
...@@ -160,6 +160,7 @@ def c_set_tensor4d(var, desc, err, fail): ...@@ -160,6 +160,7 @@ def c_set_tensor4d(var, desc, err, fail):
class DnnBase(COp): class DnnBase(COp):
""" """
Creates a handle for cudnn and pulls in the cudnn libraries and headers. Creates a handle for cudnn and pulls in the cudnn libraries and headers.
""" """
# dnn does not know about broadcasting, so we do not need to assert # dnn does not know about broadcasting, so we do not need to assert
# the input broadcasting pattern. # the input broadcasting pattern.
...@@ -230,11 +231,11 @@ class DnnVersion(Op): ...@@ -230,11 +231,11 @@ class DnnVersion(Op):
def version(): def version():
"""return the current cuDNN version we compile with. """
Return the current cuDNN version we compile with.
This returns a tuple with the header version and the library This returns a tuple with the header version and the library version we link
version we link with. For older cudnn version without version with. For older cudnn version without version information, we return -1.
information, we return -1.
""" """
if not dnn_available(): if not dnn_available():
...@@ -252,12 +253,14 @@ version.v = None ...@@ -252,12 +253,14 @@ version.v = None
class GpuDnnConvDesc(Op): class GpuDnnConvDesc(Op):
"""This Op builds a convolution descriptor for use in the other """
convolution operations. This Op builds a convolution descriptor for use in the other convolution
operations.
see the doc of :func:`dnn_conv` for a description of the parameters See the doc of :func:`dnn_conv` for a description of the parameters
""" """
__props__ = ('border_mode', 'subsample', 'conv_mode') __props__ = ('border_mode', 'subsample', 'conv_mode')
def c_headers(self): def c_headers(self):
...@@ -408,17 +411,21 @@ class GpuDnnConv(DnnBase, COp): ...@@ -408,17 +411,21 @@ class GpuDnnConv(DnnBase, COp):
""" """
The forward convolution. The forward convolution.
:param image: Parameters
:param kernel: ----------
:param descr: the convolution descriptor image
kernel
descr
The convolution descriptor.
workmem
Either 'none', 'small' or 'large'. Default is the value of
:attr:`config.dnn.conv.workmem`.
""" """
__props__ = ('workmem', 'inplace') __props__ = ('workmem', 'inplace')
def __init__(self, workmem=None, inplace=False): def __init__(self, workmem=None, inplace=False):
"""
:param workmem: either 'none', 'small' or 'large'. Default is
the value of :attr:`config.dnn.conv.workmem`.
"""
COp.__init__(self, ["dnn_base.c", "dnn_conv_base.c", "dnn_fwd.c"], COp.__init__(self, ["dnn_base.c", "dnn_conv_base.c", "dnn_fwd.c"],
"APPLY_SPECIFIC(conv_fwd)") "APPLY_SPECIFIC(conv_fwd)")
if workmem is None: if workmem is None:
...@@ -489,8 +496,9 @@ class GpuDnnConv(DnnBase, COp): ...@@ -489,8 +496,9 @@ class GpuDnnConv(DnnBase, COp):
def get_out_shape(ishape, kshape, border_mode, subsample): def get_out_shape(ishape, kshape, border_mode, subsample):
""" """
This function computes the output shape for a convolution with This function computes the output shape for a convolution with
the specified parameters. `ishape` and `kshape` can be symbolic the specified parameters. `ishape` and `kshape` can be symbolic
or scalar. or scalar.
""" """
b = ishape[0] # Number of inputs b = ishape[0] # Number of inputs
h = ishape[2] # Height of input feature maps h = ishape[2] # Height of input feature maps
...@@ -524,11 +532,15 @@ class GpuDnnConvGradW(DnnBase, COp): ...@@ -524,11 +532,15 @@ class GpuDnnConvGradW(DnnBase, COp):
""" """
The convolution gradient with respect to the weights. The convolution gradient with respect to the weights.
:param image: Parameters
:param kernel: ----------
:param descr: the convolution descriptor image
kernel
descr
The convolution descriptor.
""" """
__props__ = ('inplace',) __props__ = ('inplace',)
def __init__(self, inplace=False): def __init__(self, inplace=False):
...@@ -596,11 +608,15 @@ class GpuDnnConvGradI(DnnBase): ...@@ -596,11 +608,15 @@ class GpuDnnConvGradI(DnnBase):
""" """
The convolution gradient with respect to the inputs. The convolution gradient with respect to the inputs.
:param image: Parameters
:param kernel: ----------
:param descr: the convolution descriptor image
kernel
descr
The convolution descriptor.
""" """
__props__ = ('inplace',) __props__ = ('inplace',)
def __init__(self, inplace=False): def __init__(self, inplace=False):
...@@ -667,14 +683,22 @@ def dnn_conv(img, kerns, border_mode='valid', subsample=(1, 1), ...@@ -667,14 +683,22 @@ def dnn_conv(img, kerns, border_mode='valid', subsample=(1, 1),
The memory layout to use is 'bc01', that is 'batch', 'channel', The memory layout to use is 'bc01', that is 'batch', 'channel',
'first dim', 'second dim' in that order. 'first dim', 'second dim' in that order.
:param img: images to do the convolution over Parameters
:param kerns: convolution filters ----------
:param border_mode: one of 'valid', 'full'; additionally, the padding size img
could be directly specified by an integer or a pair of integers Images to do the convolution over.
:param subsample: perform subsampling of the output (default: (1, 1)) kerns
:param conv_mode: perform convolution (kernels flipped) or cross-correlation. Convolution filters.
One of 'conv', 'cross'. (default: 'conv') border_mode
:param direction_hint: Used by graph optimizers to change algorithm choice. One of 'valid', 'full'; additionally, the padding size
could be directly specified by an integer or a pair of integers.
subsample
Perform subsampling of the output (default: (1, 1)).
conv_mode
Perform convolution (kernels flipped) or cross-correlation.
One of 'conv', 'cross' (default: 'conv').
direction_hint
Used by graph optimizers to change algorithm choice.
By default, GpuDnnConv will be used to carry out the convolution. By default, GpuDnnConv will be used to carry out the convolution.
If border_mode is 'valid', subsample is (1,1) and direction_hint is If border_mode is 'valid', subsample is (1,1) and direction_hint is
'bprop weights', it will use GpuDnnConvGradW. 'bprop weights', it will use GpuDnnConvGradW.
...@@ -682,15 +706,15 @@ def dnn_conv(img, kerns, border_mode='valid', subsample=(1, 1), ...@@ -682,15 +706,15 @@ def dnn_conv(img, kerns, border_mode='valid', subsample=(1, 1),
*not* 'forward!', it will use GpuDnnConvGradI. *not* 'forward!', it will use GpuDnnConvGradI.
This parameter is used internally by graph optimizers and may be This parameter is used internally by graph optimizers and may be
removed at any time without a deprecation period. You have been warned. removed at any time without a deprecation period. You have been warned.
:param workmem: Specify the amount of working memory allowed. workmem
More memory is usually faster. One of 'none', 'small' or Specify the amount of working memory allowed. More memory is usually
'large'. (default is None which takes its value from faster. One of 'none', 'small' or 'large' (default is None which takes
:attr:`config.dnn.conv.workmem`) its value from :attr:`config.dnn.conv.workmem`).
.. warning:: The cuDNN library only works with GPUs that have a compute
capability of 3.0 or higher. This means that older GPUs will not
work with this Op.
:warning: The cuDNN library only works with GPUs that have a compute
capability of 3.0 or higher. This means that older GPUs will not
work with this Op.
""" """
fgraph = getattr(img, 'fgraph', None) or getattr(kerns, 'fgraph', None) fgraph = getattr(img, 'fgraph', None) or getattr(kerns, 'fgraph', None)
if (border_mode == 'valid' and subsample == (1, 1) and if (border_mode == 'valid' and subsample == (1, 1) and
...@@ -749,14 +773,21 @@ class GpuDnnPoolDesc(Op): ...@@ -749,14 +773,21 @@ class GpuDnnPoolDesc(Op):
This Op builds a pooling descriptor for use in the other This Op builds a pooling descriptor for use in the other
pooling operations. pooling operations.
:param ws: windows size Parameters
:param stride: (dx, dy) ----------
:param mode: 'max', 'average_inc_pad' or 'average_exc_pad' ws
The old deprecated name 'average' corresponds to 'average_inc_pad' Windows size.
:param pad: (padX, padY) padding information. stride
(dx, dy).
mode : {'max', 'average_inc_pad', 'average_exc_pad'}
The old deprecated name 'average' corresponds to 'average_inc_pad'.
pad
(padX, padY) padding information.
padX is the size of the left and right borders, padX is the size of the left and right borders,
padY is the size of the top and bottom borders. padY is the size of the top and bottom borders.
""" """
__props__ = ('ws', 'stride', 'mode', 'pad') __props__ = ('ws', 'stride', 'mode', 'pad')
def c_headers(self): def c_headers(self):
...@@ -859,9 +890,15 @@ class GpuDnnPool(DnnBase): ...@@ -859,9 +890,15 @@ class GpuDnnPool(DnnBase):
""" """
Pooling. Pooling.
:param img: the image 4d tensor. Parameters
:param desc: the pooling descriptor. ----------
img
The image 4d tensor.
desc
The pooling descriptor.
""" """
__props__ = () __props__ = ()
def make_node(self, img, desc): def make_node(self, img, desc):
...@@ -1029,11 +1066,19 @@ class GpuDnnPoolGrad(DnnBase): ...@@ -1029,11 +1066,19 @@ class GpuDnnPoolGrad(DnnBase):
""" """
The pooling gradient. The pooling gradient.
:param inp: the input of the pooling. Parameters
:param out: the output of the pooling in the forward. ----------
:param inp_grad: same size as out, but is the corresponding gradient information. inp
:param desc: The pooling descriptor. The input of the pooling.
out
The output of the pooling in the forward.
inp_grad
Same size as out, but is the corresponding gradient information.
desc
The pooling descriptor.
""" """
__props__ = () __props__ = ()
def make_node(self, inp, out, inp_grad, desc): def make_node(self, inp, out, inp_grad, desc):
...@@ -1209,19 +1254,28 @@ def dnn_pool(img, ws, stride=(1, 1), mode='max', pad=(0, 0)): ...@@ -1209,19 +1254,28 @@ def dnn_pool(img, ws, stride=(1, 1), mode='max', pad=(0, 0)):
The memory layout to use is 'bc01', that is 'batch', 'channel', The memory layout to use is 'bc01', that is 'batch', 'channel',
'first dim', 'second dim' in that order. 'first dim', 'second dim' in that order.
:param img: images to do the pooling over Parameters
:param ws: subsampling window size ----------
:param stride: subsampling stride (default: (1, 1)) img
:param mode: one of 'max', 'average_inc_pad' or 'average_exc_pad' Images to do the pooling over.
(default: 'max') ws
:param pad: (padX, padY) padding information. Subsampling window size.
stride
Subsampling stride (default: (1, 1)).
mode : {'max', 'average_inc_pad', 'average_exc_pad'}
pad
(padX, padY) padding information.
padX is the size of the left and right borders, padX is the size of the left and right borders,
padY is the size of the top and bottom borders. padY is the size of the top and bottom borders.
:warning: The cuDNN library only works with GPUs that have a compute .. warning:: The cuDNN library only works with GPUs that have a compute
capability of 3.0 or higher. This means that older GPUs will not capability of 3.0 or higher. This means that older GPUs will not
work with this Op. work with this Op.
:note: This Op implements the ignore_border=True of max_pool_2d.
Notes
-----
This Op implements the ignore_border=True of max_pool_2d.
""" """
img = gpu_contiguous(img) img = gpu_contiguous(img)
desc = GpuDnnPoolDesc(ws=ws, stride=stride, mode=mode, pad=pad)() desc = GpuDnnPoolDesc(ws=ws, stride=stride, mode=mode, pad=pad)()
...@@ -1232,12 +1286,18 @@ class GpuDnnSoftmaxBase(DnnBase): ...@@ -1232,12 +1286,18 @@ class GpuDnnSoftmaxBase(DnnBase):
""" """
Op for the cuDNN Softmax. Op for the cuDNN Softmax.
:param tensor_format: Whether the data format is 'bc01' or 'b01c'. Parameters
:param algo: 'fast' or 'accurate' indicating whether computations should be ----------
tensor_format
Whether the data format is 'bc01' or 'b01c'.
algo
'fast' or 'accurate' indicating whether computations should be
optimized for speed or accuracy respectively. optimized for speed or accuracy respectively.
:param mode: 'instance' or 'channel' indicating whether the softmax should mode
be computed per image across 'c01' or per spatial location '01' per 'instance' or 'channel' indicating whether the softmax should be
computed per image across 'c01' or per spatial location '01' per
image across 'c'. image across 'c'.
""" """
__props__ = ('tensor_format', 'mode', 'algo') __props__ = ('tensor_format', 'mode', 'algo')
...@@ -1381,13 +1441,18 @@ class GpuDnnSoftmax(GpuDnnSoftmaxBase): ...@@ -1381,13 +1441,18 @@ class GpuDnnSoftmax(GpuDnnSoftmaxBase):
""" """
Op for the cuDNN Softmax. Op for the cuDNN Softmax.
:param tensor_format: Whether the data format is 'bc01' or 'b01c'. tensor_format
:param algo: 'fast' or 'accurate' indicating whether computations should be Whether the data format is 'bc01' or 'b01c'.
algo
'fast' or 'accurate' indicating whether computations should be
optimized for speed or accuracy respectively. optimized for speed or accuracy respectively.
:param mode: 'instance' or 'channel' indicating whether the softmax should mode
be computed per image across 'c01' or per spatial location '01' per 'instance' or 'channel' indicating whether the softmax should be
computed per image across 'c01' or per spatial location '01' per
image across 'c'. image across 'c'.
""" """
direction = 'forward' direction = 'forward'
softmax_inputs = ['softmax_input'] softmax_inputs = ['softmax_input']
...@@ -1442,12 +1507,18 @@ class GpuDnnSoftmaxGrad(GpuDnnSoftmaxBase): ...@@ -1442,12 +1507,18 @@ class GpuDnnSoftmaxGrad(GpuDnnSoftmaxBase):
""" """
Op for the cuDNN SoftmaxGrad. Op for the cuDNN SoftmaxGrad.
:param tensor_format: Whether the data format is 'bc01' or 'b01c'. Parameters
:param algo: 'fast' or 'accurate' indicating whether computations should be ----------
tensor_format
Whether the data format is 'bc01' or 'b01c'.
algo
'fast' or 'accurate' indicating whether computations should be
optimized for speed or accuracy respectively. optimized for speed or accuracy respectively.
:param mode: 'instance' or 'channel' indicating whether the softmax should mode
'instance' or 'channel' indicating whether the softmax should
be computed per image across 'c01' or per spatial location '01' per be computed per image across 'c01' or per spatial location '01' per
image across 'c'. image across 'c'.
""" """
direction = 'backward' direction = 'backward'
softmax_inputs = ['softmax_gout', 'softmax_input'] softmax_inputs = ['softmax_gout', 'softmax_input']
...@@ -1713,7 +1784,10 @@ def local_softmax_dnn(node): ...@@ -1713,7 +1784,10 @@ def local_softmax_dnn(node):
class NoCuDNNRaise(Optimizer): class NoCuDNNRaise(Optimizer):
def apply(self, fgraph): def apply(self, fgraph):
""" Raise a RuntimeError if cudnn can't be used""" """
Raise a RuntimeError if cudnn can't be used.
"""
if not dnn_available(): if not dnn_available():
# Make an assert error as we want Theano to fail, not # Make an assert error as we want Theano to fail, not
# just skip this optimization. # just skip this optimization.
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论