提交 e16273c9 authored 作者: Iban Harlouchet's avatar Iban Harlouchet

numpydoc for theano/tensor/nnet/conv3d2d.py

上级 61280f0a
...@@ -6,10 +6,13 @@ import theano.sandbox.cuda as cuda ...@@ -6,10 +6,13 @@ import theano.sandbox.cuda as cuda
def get_diagonal_subtensor_view(x, i0, i1): def get_diagonal_subtensor_view(x, i0, i1):
"""Helper function for DiagonalSubtensor and """
IncDiagonalSubtensor Helper function for DiagonalSubtensor and IncDiagonalSubtensor.
Notes
-----
It returns a partial view of x, not a partial copy.
:note: it return a partial view of x, not a partial copy.
""" """
# We have to cast i0 and i0 to int because python 2.4 (and maybe later) # We have to cast i0 and i0 to int because python 2.4 (and maybe later)
# do not support indexing with 0-dim, 'int*' ndarrays. # do not support indexing with 0-dim, 'int*' ndarrays.
...@@ -27,13 +30,24 @@ def get_diagonal_subtensor_view(x, i0, i1): ...@@ -27,13 +30,24 @@ def get_diagonal_subtensor_view(x, i0, i1):
class DiagonalSubtensor(Op): class DiagonalSubtensor(Op):
"""Return a form a nd diagonal subtensor. """
Return a form of an n-d diagonal subtensor.
:param x: n-d tensor
:param i0: axis index in x Parameters
:param i1: axis index in x ----------
:note: Work on the GPU. x
n-d tensor
i0
Axis index in x
i1
Axis index in x
Notes
-----
Work on the GPU.
Extended summary
----------------
``x`` is some n-dimensional tensor, but this Op only deals with a ``x`` is some n-dimensional tensor, but this Op only deals with a
matrix-shaped slice, using axes i0 and i1. Without loss of matrix-shaped slice, using axes i0 and i1. Without loss of
generality, suppose that ``i0`` picks out our ``row`` dimension, generality, suppose that ``i0`` picks out our ``row`` dimension,
...@@ -73,6 +87,7 @@ class DiagonalSubtensor(Op): ...@@ -73,6 +87,7 @@ class DiagonalSubtensor(Op):
see what's necessary at that point. see what's necessary at that point.
""" """
__props__ = ("inplace",) __props__ = ("inplace",)
def __str__(self): def __str__(self):
...@@ -111,8 +126,10 @@ diagonal_subtensor = DiagonalSubtensor(False) ...@@ -111,8 +126,10 @@ diagonal_subtensor = DiagonalSubtensor(False)
class IncDiagonalSubtensor(Op): class IncDiagonalSubtensor(Op):
""" """
The gradient of DiagonalSubtensor The gradient of DiagonalSubtensor.
""" """
__props__ = ("inplace",) __props__ = ("inplace",)
def __str__(self): def __str__(self):
...@@ -153,26 +170,39 @@ inc_diagonal_subtensor = IncDiagonalSubtensor(False) ...@@ -153,26 +170,39 @@ inc_diagonal_subtensor = IncDiagonalSubtensor(False)
def conv3d(signals, filters, def conv3d(signals, filters,
signals_shape=None, filters_shape=None, signals_shape=None, filters_shape=None,
border_mode='valid'): border_mode='valid'):
"""Convolve spatio-temporal filters with a movie. """
Convolve spatio-temporal filters with a movie.
It flips the filters. It flips the filters.
:param signals: timeseries of images whose pixels have color channels. Parameters
shape: [Ns, Ts, C, Hs, Ws] ----------
:param filters: spatio-temporal filters signals
shape: [Nf, Tf, C, Hf, Wf] Timeseries of images whose pixels have color channels.
:param signals_shape: None or a tuple/list with the shape of signals Shape: [Ns, Ts, C, Hs, Ws].
:param filters_shape: None or a tuple/list with the shape of filters filters
:param border_mode: The only one tested is 'valid'. Spatio-temporal filters.
Shape: [Nf, Tf, C, Hf, Wf].
:note: Another way to define signals: (batch, time, in channel, row, column) signals_shape
Another way to define filters: (out channel,time,in channel, row, column) None or a tuple/list with the shape of signals.
:note: For the GPU, you can use this implementation or filters_shape
:func:`conv3d_fft <theano.sandbox.cuda.fftconv.conv3d_fft>`. None or a tuple/list with the shape of filters.
border_mode
:see: Someone made a script that shows how to swap the axes between The only one tested is 'valid'.
both 3d convolution implementations in Theano. See the last
`attachment <https://groups.google.com/d/msg/theano-users/1S9_bZgHxVw/0cQR9a4riFUJ>`_. Notes
-----
Another way to define signals: (batch, time, in channel, row, column)
Another way to define filters: (out channel,time,in channel, row, column)
For the GPU, you can use this implementation or
:func:`conv3d_fft <theano.sandbox.cuda.fftconv.conv3d_fft>`.
See Also
--------
Someone made a script that shows how to swap the axes between
both 3d convolution implementations in Theano. See the last
`attachment <https://groups.google.com/d/msg/theano-users/1S9_bZgHxVw/0cQR9a4riFUJ>`_.
""" """
...@@ -264,7 +294,8 @@ def conv3d(signals, filters, ...@@ -264,7 +294,8 @@ def conv3d(signals, filters,
def make_gpu_optimizer(op, to_gpu): def make_gpu_optimizer(op, to_gpu):
"""This function create optimizer that move some inputs to the GPU """
This function creates an optimizer that moves some inputs to the GPU
for op that work on both CPU and GPU. for op that work on both CPU and GPU.
The op object is created by calling op(), so good default value The op object is created by calling op(), so good default value
...@@ -272,8 +303,12 @@ def make_gpu_optimizer(op, to_gpu): ...@@ -272,8 +303,12 @@ def make_gpu_optimizer(op, to_gpu):
We suppose the same op work with CPU and GPU inputs. We suppose the same op work with CPU and GPU inputs.
:param op: the op that support GPU inputs Parameters
:param to_gpu: a list of op inputs that are moved to the GPU. ----------
op
The op that supports GPU inputs.
to_gpu
A list of op inputs that are moved to the GPU.
""" """
@theano.gof.local_optimizer([op, cuda.gpu_from_host]) @theano.gof.local_optimizer([op, cuda.gpu_from_host])
...@@ -281,6 +316,7 @@ def make_gpu_optimizer(op, to_gpu): ...@@ -281,6 +316,7 @@ def make_gpu_optimizer(op, to_gpu):
""" """
op(host_from_gpu()) -> host_from_gpu(op) op(host_from_gpu()) -> host_from_gpu(op)
gpu_from_host(op) -> op(gpu_from_host) gpu_from_host(op) -> op(gpu_from_host)
""" """
if isinstance(node.op, op): if isinstance(node.op, op):
# op(host_from_gpu()) -> host_from_gpu(op) # op(host_from_gpu()) -> host_from_gpu(op)
...@@ -314,7 +350,7 @@ if cuda.cuda_available: ...@@ -314,7 +350,7 @@ if cuda.cuda_available:
@theano.gof.local_optimizer([DiagonalSubtensor, IncDiagonalSubtensor]) @theano.gof.local_optimizer([DiagonalSubtensor, IncDiagonalSubtensor])
def local_inplace_DiagonalSubtensor(node): def local_inplace_DiagonalSubtensor(node):
"""" also work for IncDiagonalSubtensor """ """Also works for IncDiagonalSubtensor."""
if (isinstance(node.op, (DiagonalSubtensor, IncDiagonalSubtensor)) and if (isinstance(node.op, (DiagonalSubtensor, IncDiagonalSubtensor)) and
not node.op.inplace): not node.op.inplace):
new_op = node.op.__class__(inplace=True) new_op = node.op.__class__(inplace=True)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论