提交 ae36be01 authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #5331 from gvtulder/f-sphinx-latex_font_size

Sphinx doc/conf: latex_font_size is deprecated
......@@ -222,11 +222,16 @@ def linkcode_resolve(domain, info):
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
latex_elements = {
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter',
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '11pt'
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
#latex_preamble = '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
......@@ -245,9 +250,6 @@ latex_logo = 'images/theano_logo_allblue_200x46.png'
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
......
......@@ -1582,7 +1582,7 @@ Linear Algebra
:param Y: right term
:type X: symbolic tensor
:type Y: symbolic tensor
:rtype: symbolic matrix or vector
:rtype: `symbolic matrix or vector`
:return: the inner product of `X` and `Y`.
.. function:: outer(X, Y)
......
......@@ -945,6 +945,7 @@ def dnn_conv(img, kerns, border_mode='valid', subsample=(1, 1),
and 'float64'. Default is the value of
:attr:`config.dnn.conv.precision`.
.. warning:: The cuDNN library only works with GPUs that have a compute
capability of 3.0 or higher. This means that older GPUs will not
work with this Op.
......@@ -1064,6 +1065,7 @@ def dnn_conv3d(img, kerns, border_mode='valid', subsample=(1, 1, 1),
and 'float64'. Default is the value of
:attr:`config.dnn.conv.precision`.
.. warning:: The cuDNN library only works with GPUs that have a compute
capability of 3.0 or higher. This means that older GPUs will not
work with this Op.
......@@ -1497,6 +1499,7 @@ def dnn_pool(img, ws, stride=None, mode='max', pad=None):
(padX, padY) or (padX, padY, padZ)
default: (0, 0) or (0, 0, 0)
.. warning:: The cuDNN library only works with GPUs that have a compute
capability of 3.0 or higher. This means that older GPUs will not
work with this Op.
......
......@@ -173,7 +173,7 @@ def Rop(f, wrt, eval_points):
described by `f`
:type eval_points: Variable or list of Variables
evaluation points for each of the variables in `wrt`
:rtype: Variable or list/tuple of Variables depending on type of f
:rtype: :class:`~theano.gof.Variable` or list/tuple of Variables depending on type of f
:return: symbolic expression such that
R_op[i] = sum_j ( d f[i] / d wrt[j]) eval_point[j]
where the indices in that expression are magic multidimensional
......@@ -320,7 +320,7 @@ def Lop(f, wrt, eval_points, consider_constant=None,
:type eval_points: Variable or list of Variables
evaluation points for each of the variables in `f`
:rtype: Variable or list/tuple of Variables depending on type of f
:rtype: :class:`~theano.gof.Variable` or list/tuple of Variables depending on type of f
:return: symbolic expression such that
L_op[i] = sum_i ( d f[i] / d wrt[j]) eval_point[i]
where the indices in that expression are magic multidimensional
......@@ -372,10 +372,10 @@ def grad(cost, wrt, consider_constant=None,
Parameters
----------
cost : scalar (0-dimensional) tensor variable or None
cost : :class:`~theano.gof.Variable` scalar (0-dimensional) tensor variable or None
Value with respect to which we are differentiating. May be
`None` if known_grads is provided.
wrt : variable or list of variables
wrt : :class:`~theano.gof.Variable` or list of Variables
term[s] for which we want gradients
consider_constant : list of variables
expressions not to backpropagate through
......@@ -646,7 +646,7 @@ def subgraph_grad(wrt, end, start=None, cost=None, details=False):
to the variables in `end` (they are used as known_grad in
theano.grad).
:type cost: scalar (0-dimensional) variable
:type cost: :class:`~theano.gof.Variable` scalar (0-dimensional) variable
:param cost:
Additional costs for which to compute the gradients. For
example, these could be weight decay, an l1 constraint, MSE,
......
......@@ -60,7 +60,7 @@ def debugprint(obj, depth=-1, print_type=False,
used_ids=None):
"""Print a computation graph as text to stdout or a file.
:type obj: Variable, Apply, or Function instance
:type obj: :class:`~theano.gof.Variable`, Apply, or Function instance
:param obj: symbolic thing to print
:type depth: integer
:param depth: print graph to this depth (-1 for unlimited)
......
......@@ -2029,6 +2029,7 @@ def dnn_pool(img, ws, stride=None, mode='max', pad=None):
pad_d is the number of zero-valued pixels added to each of the front
and back borders (3D pooling only).
.. warning:: The cuDNN library only works with GPUs that have a compute
capability of 3.0 or higher. This means that older GPUs will not
work with this Op.
......
......@@ -358,6 +358,7 @@ def cumsum(x, axis=None):
The axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
.. versionadded:: 0.7
"""
......@@ -483,6 +484,7 @@ def cumprod(x, axis=None):
The axis along which the cumulative product is computed.
The default (None) is to compute the cumprod over the flattened array.
.. versionadded:: 0.7
"""
......@@ -554,6 +556,7 @@ def diff(x, n=1, axis=-1):
axis
The axis along which the difference is taken, default is the last axis.
.. versionadded:: 0.6
"""
......@@ -582,6 +585,7 @@ def bincount(x, weights=None, minlength=None, assert_nonneg=False):
every input x is nonnegative.
Optional.
.. versionadded:: 0.6
"""
......@@ -788,7 +792,8 @@ def repeat(x, repeats, axis=None):
----------
x
Input data, tensor variable.
repeats : int, scalar or tensor variable
repeats
int, scalar or tensor variable
axis : int, optional
See Also
......
......@@ -864,7 +864,7 @@ def bilinear_upsampling(input,
mini-batch of feature map stacks, of shape (batch size,
input channels, input rows, input columns) that will be upsampled.
ratio: int or Constant or Scalar Tensor of int* dtype
ratio: `int or Constant or Scalar Tensor of int* dtype`
the ratio by which the input is upsampled in the 2D space (row and
col size).
......
......@@ -2181,7 +2181,7 @@ def relu(x, alpha=0):
----------
x : symbolic tensor
Tensor to compute the activation function for.
alpha : scalar or tensor, optional
alpha : `scalar or tensor, optional`
Slope for negative input, usually between 0 and 1. The default value
of 0 will lead to the standard rectifier, 1 will lead to
a linear activation function, and any value in between will give a
......
......@@ -278,15 +278,15 @@ Note
Parameters
----------
a : (M, M) symbolic matrix
a : `(M, M) symbolic matrix`
A square matrix
b : (M,) or (M, N) symbolic vector or matrix
b : `(M,) or (M, N) symbolic vector or matrix`
Right hand side matrix in ``a x = b``
Returns
-------
x : (M, ) or (M, N) symbolic vector or matrix
x : `(M, ) or (M, N) symbolic vector or matrix`
x will have the same shape as b
"""
# lower and upper triangular solves
......
......@@ -314,6 +314,7 @@ class _tensor_py_operators(object):
The length of the shape. Passing None here means for
Theano to try and guess the length of `shape`.
.. warning:: This has a different signature than numpy's
ndarray.reshape!
In numpy you do not need to wrap the shape arguments
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论