提交 5c31ea6b authored 作者: Pascal Lamblin's avatar Pascal Lamblin

Merge pull request #3885 from abergeron/faster_travis

Faster travis
...@@ -23,19 +23,25 @@ before_install: ...@@ -23,19 +23,25 @@ before_install:
- export PATH=/home/travis/miniconda2/bin:$PATH - export PATH=/home/travis/miniconda2/bin:$PATH
- conda update --yes conda - conda update --yes conda
addons:
apt_packages:
- texlive-latex-recommended
- texlive-latex-extra
- texlive-fonts-recommended
- dvipng
install: install:
- if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then conda create --yes -q -n pyenv mkl python=2.6 numpy=1.7.1 scipy=0.11 nose=1.3.0 pyparsing=1.5 pip flake8=2.3 six=1.9.0 pep8=1.6.2 pyflakes=0.8.1; fi - if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then conda create --yes -q -n pyenv mkl python=2.6 numpy=1.7.1 scipy=0.11 nose=1.3.0 pyparsing=1.5 pip flake8=2.3 six=1.9.0 pep8=1.6.2 pyflakes=0.8.1 sphinx; fi
- if [[ $TRAVIS_PYTHON_VERSION == '3.3' ]]; then conda create --yes -q -n pyenv mkl python=3.3 numpy=1.9.1 scipy=0.14.0 nose=1.3.4 pyparsing=1.5 pip flake8=2.3 six=1.9.0 pep8=1.6.2 pyflakes=0.8.1; fi - if [[ $TRAVIS_PYTHON_VERSION == '3.3' ]]; then conda create --yes -q -n pyenv mkl python=3.3 numpy=1.9.1 scipy=0.14.0 nose=1.3.4 pyparsing=1.5 pip flake8=2.3 six=1.9.0 pep8=1.6.2 pyflakes=0.8.1 sphinx; fi
- source activate pyenv - source activate pyenv
- if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then pip install pydot; fi - if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then pip install pydot; fi
- pip install . --no-deps - pip install . --no-deps
- pip install -U Sphinx
# command to run tests # command to run tests
env: env:
- PART="theano/sandbox theano/sparse theano/scalar theano/tensor/nnet theano/scan_module" - PART="theano/tests/test_flake8.py" DOC=1
- PART="theano/tensor/tests/test_basic.py theano/tensor/signal theano/compile theano/gof theano/misc theano/tests theano/compat" - PART="-e theano/tests/test_flake8.py theano/compat theano/compile theano/d3viz theano/gof theano/misc theano/sandbox theano/scalar theano/scan_module theano/sparse theano/tests theano/typed_list"
- PART="-e test_basic.py theano/tensor/tests theano/d3viz" - PART="theano/tensor"
matrix: matrix:
fast_finish: true fast_finish: true
...@@ -44,8 +50,6 @@ matrix: ...@@ -44,8 +50,6 @@ matrix:
env: PART="." THEANO_FLAGS="mode=FAST_COMPILE" env: PART="." THEANO_FLAGS="mode=FAST_COMPILE"
- python: "2.6" - python: "2.6"
env: PART="." THEANO_FLAGS="mode=FAST_COMPILE,floatX=float32" env: PART="." THEANO_FLAGS="mode=FAST_COMPILE,floatX=float32"
- python: 2.6
env: PART="." DOC=1
script: script:
- export THEANO_FLAGS=$THEANO_FLAGS,warn.ignore_bug_before=all,on_opt_error=raise,on_shape_error=raise,gcc.cxxflags=-pipe - export THEANO_FLAGS=$THEANO_FLAGS,warn.ignore_bug_before=all,on_opt_error=raise,on_shape_error=raise,gcc.cxxflags=-pipe
...@@ -60,13 +64,9 @@ script: ...@@ -60,13 +64,9 @@ script:
- cd $(python -c 'import theano; import os; print(os.path.split(theano.__file__)[0])') - cd $(python -c 'import theano; import os; print(os.path.split(theano.__file__)[0])')
- echo "$PART" - echo "$PART"
- cd -; cd Theano - cd -; cd Theano
- if [[ $DOC == "1" ]]; then python doc/scripts/docgen.py --nopdf; else theano-nose -v $PART; fi - theano-nose -v $PART
- if [[ $DOC == "1" ]]; then python doc/scripts/docgen.py --test; fi - if [[ $DOC == "1" ]]; then python doc/scripts/docgen.py --nopdf --check; fi
# - theano-cache list # - if [[ $DOC == "1" ]]; then python doc/scripts/docgen.py --test --check; fi
#after_script:
after_failure: after_failure:
- cat /home/travis/.pip/pip.log - cat /home/travis/.pip/pip.log
#after_success:
cache: apt
...@@ -132,7 +132,7 @@ The following ResultBase represents a double (we only care about the C part). ...@@ -132,7 +132,7 @@ The following ResultBase represents a double (we only care about the C part).
.. code-block:: python .. code-block:: python
class Double(ResultBase): class Double(ResultBase):
<snip> # <snip>
def c_declare(self): def c_declare(self):
return "double %(name)s;" return "double %(name)s;"
def c_init(self): def c_init(self):
...@@ -153,7 +153,7 @@ The following ResultBase represents addition of two nonnegative doubles (we only ...@@ -153,7 +153,7 @@ The following ResultBase represents addition of two nonnegative doubles (we only
.. code-block:: python .. code-block:: python
class Add(Op): class Add(Op):
<snip> # <snip>
def c_var_names(self): def c_var_names(self):
return "[['x', 'y'], ['z']]" return "[['x', 'y'], ['z']]"
def c_validate_update(self): def c_validate_update(self):
...@@ -170,7 +170,7 @@ Generating a C function ...@@ -170,7 +170,7 @@ Generating a C function
For the example Op, the generated C function will typically look like this: For the example Op, the generated C function will typically look like this:
.. code-block:: python .. code-block:: cpp
void add(PyObject* storage_x, PyObject* storage_y, PyObject* storage_z) { void add(PyObject* storage_x, PyObject* storage_y, PyObject* storage_z) {
PyObject* py_x = PyList_GET_ITEM(storage_x, 0); Py_XINCREF(py_x); // automatic PyObject* py_x = PyList_GET_ITEM(storage_x, 0); Py_XINCREF(py_x); // automatic
...@@ -232,16 +232,16 @@ Here is a sketch of the struct equivalent of the previous function: ...@@ -232,16 +232,16 @@ Here is a sketch of the struct equivalent of the previous function:
double z; // z.c_declare double z; // z.c_declare
void init(PyObject* storage_x, PyObject* storage_y, PyObject* storage_z) { void init(PyObject* storage_x, PyObject* storage_y, PyObject* storage_z) {
<set the struct members of the same names> // <set the struct members of the same names>
<init the struct members corresponding to z> // <init the struct members corresponding to z>
} }
void cleanup(void) { void cleanup(void) {
<cleanup z> // <cleanup z>
} }
void run(void) { void run(void) {
<same code as before minus z's cleanup> // <same code as before minus z's cleanup>
} }
add() { this->init(); } add() { this->init(); }
......
...@@ -16,7 +16,7 @@ if __name__ == '__main__': ...@@ -16,7 +16,7 @@ if __name__ == '__main__':
opts, args = getopt.getopt( opts, args = getopt.getopt(
sys.argv[1:], sys.argv[1:],
'o:f:', 'o:f:',
['rst', 'help', 'nopdf', 'cache', 'test']) ['rst', 'help', 'nopdf', 'cache', 'check', 'test'])
options.update(dict([x, y or True] for x, y in opts)) options.update(dict([x, y or True] for x, y in opts))
if options['--help']: if options['--help']:
print('Usage: %s [OPTIONS] [files...]' % sys.argv[0]) print('Usage: %s [OPTIONS] [files...]' % sys.argv[0])
...@@ -25,6 +25,7 @@ if __name__ == '__main__': ...@@ -25,6 +25,7 @@ if __name__ == '__main__':
print(' --rst: only compile the doc (requires sphinx)') print(' --rst: only compile the doc (requires sphinx)')
print(' --nopdf: do not produce a PDF file from the doc, only HTML') print(' --nopdf: do not produce a PDF file from the doc, only HTML')
print(' --test: run all the code samples in the documentation') print(' --test: run all the code samples in the documentation')
print(' --check: treat warnings as errors')
print(' --help: this help') print(' --help: this help')
print('If one or more files are specified after the options then only ' print('If one or more files are specified after the options then only '
'those files will be built. Otherwise the whole tree is ' 'those files will be built. Otherwise the whole tree is '
...@@ -54,17 +55,21 @@ if __name__ == '__main__': ...@@ -54,17 +55,21 @@ if __name__ == '__main__':
pythonpath = os.pathsep.join([throot, pythonpath]) pythonpath = os.pathsep.join([throot, pythonpath])
sys.path[0:0] = [throot] # We must not use os.environ. sys.path[0:0] = [throot] # We must not use os.environ.
def call_sphinx(builder, workdir, extraopts=None): def call_sphinx(builder, workdir):
import sphinx import sphinx
if extraopts is None: if options['--check']:
extraopts = ['-W'] extraopts = ['-W']
else:
extraopts = []
if not options['--cache'] and files is None: if not options['--cache'] and files is None:
extraopts.append('-E') extraopts.append('-E')
docpath = os.path.join(throot, 'doc') docpath = os.path.join(throot, 'doc')
inopt = [docpath, workdir] inopt = [docpath, workdir]
if files is not None: if files is not None:
inopt.extend(files) inopt.extend(files)
sphinx.build_main(['', '-b', builder] + extraopts + inopt) ret = sphinx.build_main(['', '-b', builder] + extraopts + inopt)
if ret != 0:
sys.exit(ret)
if options['--all'] or options['--rst']: if options['--all'] or options['--rst']:
mkdir("doc") mkdir("doc")
...@@ -92,5 +97,6 @@ if __name__ == '__main__': ...@@ -92,5 +97,6 @@ if __name__ == '__main__':
mkdir("doc") mkdir("doc")
sys.path[0:0] = [os.path.join(throot, 'doc')] sys.path[0:0] = [os.path.join(throot, 'doc')]
call_sphinx('doctest', '.') call_sphinx('doctest', '.')
# To go back to the original current directory. # To go back to the original current directory.
os.chdir(currentdir) os.chdir(currentdir)
...@@ -997,7 +997,7 @@ down computation on the GPU, but it is enabled by default on the CPU). ...@@ -997,7 +997,7 @@ down computation on the GPU, but it is enabled by default on the CPU).
Then you must start Python inside GDB and in it start your Python Then you must start Python inside GDB and in it start your Python
process (e.g. theano-nose): process (e.g. theano-nose):
.. code-block:: .. code-block:: sh
$gdb python $gdb python
(gdb)r bin/theano-nose theano/ (gdb)r bin/theano-nose theano/
......
...@@ -124,12 +124,15 @@ if PY3: ...@@ -124,12 +124,15 @@ if PY3:
Examples Examples
-------- --------
::
with open(fname, 'rb') as fp: with open(fname, 'rb') as fp:
if PY3: if PY3:
u = CompatUnpickler(fp, encoding="latin1") u = CompatUnpickler(fp, encoding="latin1")
else: else:
u = CompatUnpickler(fp) u = CompatUnpickler(fp)
mat = u.load() mat = u.load()
""" """
pass pass
...@@ -145,13 +148,15 @@ else: ...@@ -145,13 +148,15 @@ else:
Examples Examples
-------- --------
::
with open(fname, 'rb') as fp: with open(fname, 'rb') as fp:
if PY3: if PY3:
u = CompatUnpickler(fp, encoding="latin1") u = CompatUnpickler(fp, encoding="latin1")
else: else:
u = CompatUnpickler(fp) u = CompatUnpickler(fp)
mat = u.load() mat = u.load()
""" """
pass pass
......
...@@ -790,7 +790,7 @@ shape ...@@ -790,7 +790,7 @@ shape
Returns Returns
------- -------
matrix sparse matrix
A sparse matrix having the properties specified by the inputs. A sparse matrix having the properties specified by the inputs.
Notes Notes
...@@ -820,7 +820,7 @@ shape ...@@ -820,7 +820,7 @@ shape
Returns Returns
------- -------
matrix sparse matrix
A sparse matrix having the properties specified by the inputs. A sparse matrix having the properties specified by the inputs.
Notes Notes
...@@ -1053,7 +1053,7 @@ x ...@@ -1053,7 +1053,7 @@ x
Returns Returns
------- -------
matrix theano.tensor.matrix
A dense matrix, the same as `x`. A dense matrix, the same as `x`.
Notes Notes
...@@ -1121,7 +1121,7 @@ x ...@@ -1121,7 +1121,7 @@ x
Returns Returns
------- -------
matrix sparse matrix
The same as `x` in a sparse csr matrix format. The same as `x` in a sparse csr matrix format.
""" """
...@@ -1137,7 +1137,7 @@ x ...@@ -1137,7 +1137,7 @@ x
Returns Returns
------- -------
matrix sparse matrix
The same as `x` in a sparse csc matrix format. The same as `x` in a sparse csc matrix format.
""" """
...@@ -1187,7 +1187,7 @@ index ...@@ -1187,7 +1187,7 @@ index
Returns Returns
------- -------
matrix sparse matrix
The corresponding rows in `x`. The corresponding rows in `x`.
""" """
...@@ -1284,7 +1284,7 @@ index ...@@ -1284,7 +1284,7 @@ index
Returns Returns
------- -------
vector theano.tensor.vector
The corresponding elements in `x`. The corresponding elements in `x`.
""" """
...@@ -1436,7 +1436,8 @@ index ...@@ -1436,7 +1436,8 @@ index
Returns Returns
------- -------
The corresponding slice in `x`. sparse matrix
The corresponding slice in `x`.
Notes Notes
...@@ -1511,7 +1512,7 @@ index ...@@ -1511,7 +1512,7 @@ index
Returns Returns
------- -------
scalar theano.tensor.scalar
The corresponding item in `x`. The corresponding item in `x`.
Notes Notes
...@@ -1566,7 +1567,7 @@ x ...@@ -1566,7 +1567,7 @@ x
Returns Returns
------- -------
matrix sparse matrix
`x` transposed. `x` transposed.
Notes Notes
...@@ -1617,7 +1618,7 @@ x ...@@ -1617,7 +1618,7 @@ x
Returns Returns
------- -------
matrix sparse matrix
-`x`. -`x`.
Notes Notes
...@@ -1930,7 +1931,7 @@ x ...@@ -1930,7 +1931,7 @@ x
Returns Returns
------- -------
vector theano.tensor.vector
A dense vector representing the diagonal elements. A dense vector representing the diagonal elements.
Notes Notes
...@@ -1985,7 +1986,7 @@ x ...@@ -1985,7 +1986,7 @@ x
Returns Returns
------- -------
matrix sparse matrix
A sparse matrix having `x` as diagonal. A sparse matrix having `x` as diagonal.
Notes Notes
...@@ -2044,7 +2045,7 @@ x ...@@ -2044,7 +2045,7 @@ x
Returns Returns
------- -------
matrix sparse matrix
The same as `x` with indices sorted. The same as `x` with indices sorted.
Notes Notes
...@@ -2854,7 +2855,7 @@ y ...@@ -2854,7 +2855,7 @@ y
Returns Returns
------- -------
matrix matrix variable
`x` == `y` `x` == `y`
Notes Notes
...@@ -2875,7 +2876,7 @@ y ...@@ -2875,7 +2876,7 @@ y
Returns Returns
------- -------
matrix matrix variable
`x` != `y` `x` != `y`
Notes Notes
...@@ -2896,7 +2897,7 @@ y ...@@ -2896,7 +2897,7 @@ y
Returns Returns
------- -------
matrix matrix variable
`x` < `y` `x` < `y`
Notes Notes
...@@ -2917,7 +2918,7 @@ y ...@@ -2917,7 +2918,7 @@ y
Returns Returns
------- -------
matrix matrix variable
`x` > `y` `x` > `y`
Notes Notes
...@@ -2937,6 +2938,7 @@ y ...@@ -2937,6 +2938,7 @@ y
Returns Returns
------- -------
matrix variable
`x` <= `y` `x` <= `y`
Notes Notes
...@@ -2957,7 +2959,7 @@ y ...@@ -2957,7 +2959,7 @@ y
Returns Returns
------- -------
matrix matrix variable
`x` >= `y` `x` >= `y`
Notes Notes
...@@ -3199,7 +3201,7 @@ x ...@@ -3199,7 +3201,7 @@ x
Returns Returns
------- -------
matrix sparse matrix
Exactly `x` but with a data attribute exempt of zeros. Exactly `x` but with a data attribute exempt of zeros.
Notes Notes
...@@ -4112,7 +4114,7 @@ p ...@@ -4112,7 +4114,7 @@ p
Returns Returns
------- -------
matrix sparse matrix
A dense matrix containing the dot product of `x` by `y`.T only A dense matrix containing the dot product of `x` by `y`.T only
where `p` is 1. where `p` is 1.
......
...@@ -132,68 +132,80 @@ def conv2d_grad_wrt_inputs(output_grad, ...@@ -132,68 +132,80 @@ def conv2d_grad_wrt_inputs(output_grad,
used by the convolution, such that the output_grad is upsampled used by the convolution, such that the output_grad is upsampled
to the input shape. to the input shape.
:type output_grad: symbolic 4D tensor. Parameters
:param output_grad: mini-batch of feature map stacks, of shape ----------
(batch size, input channels, input rows, input columns). output_grad : symbolic 4D tensor
This is the tensor that will be upsampled or the output mini-batch of feature map stacks, of shape (batch size, input
gradient of the convolution whose gradient will be taken channels, input rows, input columns). This is the tensor that
with respect to the input of the convolution. will be upsampled or the output gradient of the convolution
See the optional parameter ``output_grad_shape``. whose gradient will be taken with respect to the input of the
convolution. See the optional parameter
:type filters: symbolic 4D tensor. ``output_grad_shape``.
:param filters: set of filters used in CNN layer of shape filters : symbolic 4D tensor
(output channels, input channels, filter rows, filter columns). set of filters used in CNN layer of shape (output channels,
See the optional parameter ``filter_shape``. input channels, filter rows, filter columns). See the
optional parameter ``filter_shape``.
:type output_grad_shape: None, tuple/list of len 4 of int or output_grad_shape : list of 4 symbolic or real ints
Constant variable. The shape of the output_grad parameter. Optional, possibly
:param output_grad_shape: The shape of the output_grad parameter. used to choose an optimal implementation. You can give
Optional, possibly used to choose an optimal implementation. ``None`` for any element of the list to specify that this
You can give ``None`` for any element of the list to specify that this
element is not known at compile time. element is not known at compile time.
input_shape : list of 2 symbolic or real ints
The shape (row and column size) of the input (upsampled)
parameter. Not Optional, since given the output_grad_shape
and the subsample values, multiple input_shape may be
plausible.
filter_shape : list of 4 symbolic or real ints
The shape of the filters parameter. Optional, possibly used
to choose an optimal implementation. You can give ``None``
for any element of the list to specify that this element is
not known at compile time.
border_mode : str, int or tuple of two int
Either of the following:
:type input_shape: tuple/list of len 2 of int or Constant variable. ``'valid'``
:param input_shape: The shape (row and column size) of the apply filter wherever it completely overlaps with the
input (upsampled) parameter. input. Generates output of shape: input shape - filter
Not Optional, since given the output_grad_shape and the subsample values, shape + 1
multiple input_shape may be plausible.
:type filter_shape: None, tuple/list of len 4 of int or Constant variable ``'full'``
:param filter_shape: The shape of the filters parameter. apply filter wherever it partly overlaps with the input.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
:type border_mode: str, int or tuple of two int
:param border_mode: Either of the following:
* ``'valid'``: apply filter wherever it completely overlaps with the
input. Generates output of shape: input shape - filter shape + 1
* ``'full'``: apply filter wherever it partly overlaps with the input.
Generates output of shape: input shape + filter shape - 1 Generates output of shape: input shape + filter shape - 1
* ``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid ``'half'``
convolution. For filters with an odd number of rows and columns, this pad input with a symmetric border of ``filter rows // 2``
leads to the output shape being equal to the input shape. rows and ``filter columns // 2`` columns, then perform a
* ``int``: pad input with a symmetric border of zeros of the given valid convolution. For filters with an odd number of rows
and columns, this leads to the output shape being equal to
the input shape.
``int``
pad input with a symmetric border of zeros of the given
width, then perform a valid convolution. width, then perform a valid convolution.
* ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows
and ``int2`` columns, then perform a valid convolution.
:type subsample: tuple of len 2, the subsampling used in the forward pass ``(int1, int2)``
of the convolutional operation. pad input with a symmetric border of ``int1`` rows and
:param subsample: factor by which to subsample the output. ``int2`` columns, then perform a valid convolution.
Also called strides elsewhere.
:type filter_flip: bool subsample : tuple of len 2
:param filter_flip: If ``True``, will flip the filter rows and columns The subsampling used in the forward pass. Also called strides
before sliding them over the input. This operation is normally referred elsewhere.
to as a convolution, and this is the default. If ``False``, the filters filter_flip : bool
are not flipped and the operation is referred to as a If ``True``, will flip the filter rows and columns before
cross-correlation. sliding them over the input. This operation is normally
referred to as a convolution, and this is the default. If
``False``, the filters are not flipped and the operation is
referred to as a cross-correlation.
Returns
-------
symbolic 4D tensor
set of feature maps generated by convolutional layer. Tensor
is of shape (batch size, output channels, output rows, output
columns)
:rtype: symbolic 4D tensor. Notes
:return: set of feature maps generated by convolutional layer. Tensor is -----
of shape (batch size, output channels, output rows, output columns)
:note: If CuDNN is available, it will be used on the :note: If CuDNN is available, it will be used on the
GPU. Otherwise, it is the *CorrMM* convolution that will be used GPU. Otherwise, it is the *CorrMM* convolution that will be used
...@@ -224,71 +236,81 @@ def conv2d_grad_wrt_weights(input, ...@@ -224,71 +236,81 @@ def conv2d_grad_wrt_weights(input,
"""This function will build the symbolic graph for getting the """This function will build the symbolic graph for getting the
gradient of the output of a convolution (output_grad) w.r.t its weights. gradient of the output of a convolution (output_grad) w.r.t its weights.
:type input: symbolic 4D tensor. Parameters
:param input: mini-batch of feature map stacks, of shape ----------
(batch size, input channels, input rows, input columns). input : symbolic 4D tensor
This is the input of the convolution in the forward pass. mini-batch of feature map stacks, of shape (batch size, input
channels, input rows, input columns). This is the input of
:type output_grad: symbolic 4D tensor. the convolution in the forward pass.
:param output_grad: mini-batch of feature map stacks, of shape output_grad : symbolic 4D tensor
(batch size, input channels, input rows, input columns). mini-batch of feature map stacks, of shape (batch size, input
This is the gradient of the output of convolution. channels, input rows, input columns). This is the gradient of
the output of convolution.
:type filters: symbolic 4D tensor. filters : symbolic 4D tensor.
:param filters: set of filters used in CNN layer of shape set of filters used in CNN layer of shape (output channels,
(output channels, input channels, filter rows, filter columns). input channels, filter rows, filter columns). See the
See the optional parameter ``filter_shape``. optional parameter ``filter_shape``.
output_grad_shape : list of 4 ints or Constant variables
:type output_grad_shape: None, tuple/list of len 4 of int The shape of the input parameter. Optional, possibly used to
or Constant variable. choose an optimal implementation. You can give ``None`` for
:param output_grad_shape: The shape of the input parameter. any element of the list to specify that this element is not
Optional, possibly used to choose an optimal implementation. known at compile time.
You can give ``None`` for any element of the list to specify that this input_shape : list of 2 ints or Constant variables
element is not known at compile time. The shape of the input parameter. This parameter indicates
the row and column size of the input in the forward pass.
:type input_shape: tuple/list of len 2 of int or Constant variable.
:param input_shape: The shape of the input parameter.
This parameter indicates the row and column size of the input
in the forward pass.
Optional, possibly used to choose an optimal implementation. Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this You can give ``None`` for any element of the list to specify
element is not known at compile time. that this element is not known at compile time.
filter_shape : list of 4 ints or Constant variables
The shape of the filters parameter. Not Optional, since given
the output_grad_shape and the input_shape, multiple
filter_shape may be plausible.
border_mode : str, int or tuple of two ints
Either of the following:
:type filter_shape: None, tuple/list of len 4 of int or Constant variable. ``'valid'``
:param filter_shape: The shape of the filters parameter. apply filter wherever it completely overlaps with the
Not Optional, since given the output_grad_shape and the input_shape, input. Generates output of shape: input shape - filter
multiple filter_shape may be plausible. shape + 1
:type border_mode: str, int or tuple of two int ``'full'``
:param border_mode: Either of the following: apply filter wherever it partly overlaps with the input.
* ``'valid'``: apply filter wherever it completely overlaps with the
input. Generates output of shape: input shape - filter shape + 1
* ``'full'``: apply filter wherever it partly overlaps with the input.
Generates output of shape: input shape + filter shape - 1 Generates output of shape: input shape + filter shape - 1
* ``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid ``'half'``
convolution. For filters with an odd number of rows and columns, this pad input with a symmetric border of ``filter rows // 2``
leads to the output shape being equal to the input shape. rows and ``filter columns // 2`` columns, then perform a
* ``int``: pad input with a symmetric border of zeros of the given valid convolution. For filters with an odd number of rows
and columns, this leads to the output shape being equal to
the input shape.
``int``
pad input with a symmetric border of zeros of the given
width, then perform a valid convolution. width, then perform a valid convolution.
* ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows
and ``int2`` columns, then perform a valid convolution.
:type subsample: tuple of len 2, the subsampling used in the forward pass ``(int1, int2)``
of the convolutional operation. pad input with a symmetric border of ``int1`` rows and
:param subsample: factor by which to subsample the output. ``int2`` columns, then perform a valid convolution.
Also called strides elsewhere.
:type filter_flip: bool subsample : tuple of len 2
:param filter_flip: If ``True``, will flip the filter rows and columns The subsampling used in the forward pass of the convolutional
before sliding them over the input. This operation is normally referred operation. Also called strides elsewhere.
to as a convolution, and this is the default. If ``False``, the filters filter_flip : bool
are not flipped and the operation is referred to as a If ``True``, will flip the filter rows and columns before
cross-correlation. sliding them over the input. This operation is normally
referred to as a convolution, and this is the default. If
``False``, the filters are not flipped and the operation is
referred to as a cross-correlation.
Returns
-------
symbolic 4D tensor
set of feature maps generated by convolutional layer. Tensor
is of shape (batch size, output channels, output rows, output
columns)
:rtype: symbolic 4D tensor. Notes
:return: set of feature maps generated by convolutional layer. Tensor is -----
of shape (batch size, output channels, output rows, output columns)
:note: If CuDNN is available, it will be used on the :note: If CuDNN is available, it will be used on the
GPU. Otherwise, it is the *CorrMM* convolution that will be used GPU. Otherwise, it is the *CorrMM* convolution that will be used
......
...@@ -30,6 +30,7 @@ def max_pool_2d_same_size(input, patch_size): ...@@ -30,6 +30,7 @@ def max_pool_2d_same_size(input, patch_size):
of non-overlapping patches of size (patch_size[0],patch_size[1]) to zero, of non-overlapping patches of size (patch_size[0],patch_size[1]) to zero,
keeping only the maximum values. The output has the same dimensions as keeping only the maximum values. The output has the same dimensions as
the input. the input.
Parameters Parameters
---------- ----------
input : 4-D theano tensor of input images input : 4-D theano tensor of input images
...@@ -37,6 +38,7 @@ def max_pool_2d_same_size(input, patch_size): ...@@ -37,6 +38,7 @@ def max_pool_2d_same_size(input, patch_size):
patch_size : tuple of length 2 patch_size : tuple of length 2
Size of the patch (patch height, patch width). Size of the patch (patch height, patch width).
(2,2) will retain only one non-zero value per patch of 4 values. (2,2) will retain only one non-zero value per patch of 4 values.
""" """
output = Pool(patch_size, True)(input) output = Pool(patch_size, True)(input)
outs = MaxPoolGrad(patch_size, True)(input, output, output) outs = MaxPoolGrad(patch_size, True)(input, output, output)
...@@ -49,6 +51,7 @@ def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0), ...@@ -49,6 +51,7 @@ def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
Takes as input a N-D tensor, where N >= 2. It downscales the input image by Takes as input a N-D tensor, where N >= 2. It downscales the input image by
the specified factor, by keeping only the maximum value of non-overlapping the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1]) patches of size (ds[0],ds[1])
Parameters Parameters
---------- ----------
input : N-D theano tensor of input images input : N-D theano tensor of input images
...@@ -64,13 +67,14 @@ def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0), ...@@ -64,13 +67,14 @@ def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
next pool region. If st is None, it is considered equal to ds next pool region. If st is None, it is considered equal to ds
(no overlap on pooling regions). (no overlap on pooling regions).
padding : tuple of two ints padding : tuple of two ints
(pad_h, pad_w), pad zeros to extend beyond four borders (pad_h, pad_w), pad zeros to extend beyond four borders of the
of the images, pad_h is the size of the top and bottom margins, images, pad_h is the size of the top and bottom margins, and
and pad_w is the size of the left and right margins. pad_w is the size of the left and right margins.
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'} mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
Operation executed on each window. `max` and `sum` always exclude Operation executed on each window. `max` and `sum` always exclude
the padding in the computation. `average` gives you the choice to the padding in the computation. `average` gives you the choice to
include or exclude it. include or exclude it.
""" """
if input.ndim < 2: if input.ndim < 2:
raise NotImplementedError('pool_2d requires a dimension >= 2') raise NotImplementedError('pool_2d requires a dimension >= 2')
...@@ -121,15 +125,17 @@ class Pool(Op): ...@@ -121,15 +125,17 @@ class Pool(Op):
For N-dimensional tensors, consider that the last two dimensions span For N-dimensional tensors, consider that the last two dimensions span
images. This Op downsamples these images by taking the max, sum or average images. This Op downsamples these images by taking the max, sum or average
over different patch. over different patch.
The constructor takes the max, sum or average or different input patches. The constructor takes the max, sum or average or different input patches.
Parameters Parameters
---------- ----------
ds : list or tuple of two ints ds : list or tuple of two ints
Downsample factor over rows and column. Downsample factor over rows and column.
ds indicates the pool region size. ds indicates the pool region size.
ignore_border : bool ignore_border : bool
If ds doesn't divide imgshape, do we include an extra row/col of partial If ds doesn't divide imgshape, do we include an extra row/col
downsampling (False) or ignore it (True). of partial downsampling (False) or ignore it (True).
st : list or tuple of two ints or None st : list or tuple of two ints or None
Stride size, which is the number of shifts over rows/cols to get the Stride size, which is the number of shifts over rows/cols to get the
next pool region. If st is None, it is considered equal to ds next pool region. If st is None, it is considered equal to ds
...@@ -141,6 +147,7 @@ class Pool(Op): ...@@ -141,6 +147,7 @@ class Pool(Op):
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'} mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
('average_inc_pad' excludes the padding from the count, ('average_inc_pad' excludes the padding from the count,
'average_exc_pad' include it) 'average_exc_pad' include it)
""" """
__props__ = ('ds', 'ignore_border', 'st', 'padding', 'mode') __props__ = ('ds', 'ignore_border', 'st', 'padding', 'mode')
...@@ -150,6 +157,7 @@ class Pool(Op): ...@@ -150,6 +157,7 @@ class Pool(Op):
""" """
Return the shape of the output from this op, for input of given Return the shape of the output from this op, for input of given
shape and flags. shape and flags.
Parameters Parameters
---------- ----------
imgshape : tuple, list, or similar of integer or scalar Theano variable imgshape : tuple, list, or similar of integer or scalar Theano variable
...@@ -168,12 +176,14 @@ class Pool(Op): ...@@ -168,12 +176,14 @@ class Pool(Op):
(pad_h, pad_w), pad zeros to extend beyond four borders (pad_h, pad_w), pad zeros to extend beyond four borders
of the images, pad_h is the size of the top and bottom margins, of the images, pad_h is the size of the top and bottom margins,
and pad_w is the size of the left and right margins. and pad_w is the size of the left and right margins.
Returns Returns
------- -------
list list
The shape of the output from this op, for input of given shape. The shape of the output from this op, for input of given shape.
This will have the same length as imgshape, but with last two This will have the same length as imgshape, but with last two
elements reduced as per the downsampling & ignore_border flags. elements reduced as per the downsampling & ignore_border flags.
""" """
if len(imgshape) < 2: if len(imgshape) < 2:
raise TypeError('imgshape must have at least two elements ' raise TypeError('imgshape must have at least two elements '
...@@ -528,27 +538,36 @@ class PoolGrad(Op): ...@@ -528,27 +538,36 @@ class PoolGrad(Op):
def out_shape(imgshape, ds, ignore_border=False, st=None, padding=(0, 0)): def out_shape(imgshape, ds, ignore_border=False, st=None, padding=(0, 0)):
"""Return the shape of the output from this op, for input of given """Return the shape of the output from this op, for input of given
shape and flags. shape and flags.
:param imgshape: the shape of a tensor of images. The last two elements
are interpreted as the number of rows, and the number of cols. Parameters
:type imgshape: tuple, list, or similar of integer or ----------
scalar Theano variable. imgshape : tuple of integers or scalar Theano variables
:param ds: downsample factor over rows and columns the shape of a tensor of images. The last two elements are
this parameter indicates the size of the pooling region interpreted as the number of rows, and the number of cols.
:type ds: list or tuple of two ints ds : tuple of two ints
:param st: the stride size. This is the distance between the pooling downsample factor over rows and columns this parameter
indicates the size of the pooling region
st : tuple of two ints
the stride size. This is the distance between the pooling
regions. If it is set to None, it equals ds. regions. If it is set to None, it equals ds.
:type st: list or tuple of two ints ignore_border : bool
:param ignore_border: if ds doesn't divide imgshape, do we include an if ds doesn't divide imgshape, do we include an extra
extra row/col of partial downsampling (False) or ignore it (True). row/col of partial downsampling (False) or ignore it
:type ignore_border: bool (True).
:param padding: (pad_h, pad_w), pad zeros to extend beyond four borders padding : tuple of two ints
of the images, pad_h is the size of the top and bottom margins, (pad_h, pad_w), pad zeros to extend beyond four borders of
and pad_w is the size of the left and right margins. the images, pad_h is the size of the top and bottom
:type padding: tuple of two ints margins, and pad_w is the size of the left and right
:rtype: list margins.
:returns: the shape of the output from this op, for input of given
shape. This will have the same length as imgshape, but with last Returns
two elements reduced as per the downsampling & ignore_border flags. -------
list :
the shape of the output from this op, for input of given
shape. This will have the same length as imgshape, but
with last two elements reduced as per the downsampling &
ignore_border flags.
""" """
if len(imgshape) < 2: if len(imgshape) < 2:
raise TypeError('imgshape must have at least two elements ' raise TypeError('imgshape must have at least two elements '
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论