Commit e789f49c authored by Razvan Pascanu

tutorial fix

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://web.resource.org/cc/"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="345.86591"
height="115.13724"
id="svg2"
sodipodi:version="0.32"
inkscape:version="0.45.1"
sodipodi:docbase="/home/olivier/hg/theano"
sodipodi:docname="theano_logo.svg"
inkscape:output_extension="org.inkscape.output.svg.inkscape"
version="1.0"
inkscape:export-filename="/home/olivier/hg/theano/theano_logo_big.png"
inkscape:export-xdpi="273.58655"
inkscape:export-ydpi="273.58655">
<defs
id="defs4" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
gridtolerance="10000"
guidetolerance="10"
objecttolerance="10"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.979899"
inkscape:cx="248.50886"
inkscape:cy="97.530852"
inkscape:document-units="px"
inkscape:current-layer="layer1"
inkscape:window-width="1680"
inkscape:window-height="1030"
inkscape:window-x="0"
inkscape:window-y="0"
showguides="true"
inkscape:guide-bbox="true" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(-219.06115,-88.23416)">
<path
id="path5572"
d="M 245.99986,202.38198 C 235.76172,199.76305 230.3317,195.18454 224.56469,184.30815 C 220.37775,176.41173 219.14676,170.92373 219.06742,159.80009 C 219.02952,154.48681 219.14363,153.33451 219.96737,150.71192 L 220.91072,147.70853 L 222.03485,150.91475 C 223.32792,154.60284 224.5932,157.2101 225.42491,157.90035 C 225.91931,158.31066 226.04839,157.45384 226.31509,151.99127 C 226.48664,148.47733 226.74177,144.59829 226.88203,143.3712 C 227.13637,141.14611 227.14306,141.13711 229.37079,140.02194 C 233.6165,137.89661 241.51289,137.62549 255.7355,139.11671 C 262.25557,139.80033 276.27711,139.65881 278.302,138.88894 C 280.15154,138.18575 280.55926,136.52884 280.07117,131.69921 C 279.49474,125.99537 279.0548,124.08561 277.22091,119.32634 C 272.4649,106.98367 264.75123,100.69911 254.31572,100.6648 C 244.91721,100.6339 237.20308,106.18784 232.64521,116.26692 C 228.63554,125.13371 226.84755,134.63837 225.79128,152.70119 L 225.49476,157.77183 L 224.6018,156.08339 C 220.32764,148.00176 218.55416,134.3005 220.39244,123.56361 C 221.81624,115.24763 224.72248,108.02444 229.43922,101.07873 C 233.51167,95.08179 239.33503,91.22689 247.37024,89.20891 C 252.54529,87.90924 256.08615,87.90924 261.2612,89.20891 C 269.29641,91.22689 275.11977,95.08179 279.19222,101.07873 C 283.85913,107.95107 286.81123,115.24029 288.1872,123.28884 C 289.11587,128.72102 289.26704,136.96138 288.48572,139.5625 C 287.80095,141.84221 282.75423,149.25874 282.58446,148.23482 C 282.51467,147.81394 282.66002,147.09129 282.90745,146.62895 C 283.60255,145.33016 282.97412,144.79606 281.91813,145.78812 C 281.09814,146.55845 280.95497,146.57992 280.4772,146.00425 C 279.46931,144.78981 279.09827,146.0508 280.02317,147.54731 C 281.09294,149.27824 281.11194,149.86163 280.09855,149.86163 C 279.6655,149.86163 279.2114,150.02307 279.08945,150.2204 C 278.12451,151.78171 263.15706,152.14918 251.27333,150.90331 C 242.48708,149.98217 235.49959,150.17874 233.86598,151.393 C 232.52086,152.39282 230.73981,155.92513 230.13832,158.78596 C 229.56685,161.50406 229.89814,169.75383 230.71167,173.06316 C 231.53272,176.40313 234.44347,181.26714 237.48117,184.37536 C 245.97324,193.06457 259.99042,193.16426 268.52866,184.59618 C 272.82158,180.28826 276.28725,173.36771 275.26986,171.13477 C 275.01206,170.56897 274.80113,169.46845 274.80113,168.68918 C 274.80113,167.27252 276.03299,164.34881 276.84003,163.85004 C 277.97809,163.14668 279.2633,160.34344 279.2633,158.56453 C 279.2633,156.50464 279.81574,155.1351 280.64665,155.1351 C 281.94053,155.1351 281.78744,149.84815 280.42796,147.58266 C 279.38328,145.84176 279.47773,145.48404 280.68309,146.61641 C 281.46075,147.34699 281.69721,147.42235 281.69721,146.93962 C 281.69721,146.59338 282.00521,146.05957 282.38164,145.75336 C 282.9932,145.2559 283.02559,145.28301 282.68588,146.00793 C 282.47678,146.45415 282.35906,148.62448 282.4243,150.8309 C 282.5319,154.47038 282.63024,154.91126 283.48431,155.58307 C 284.25335,156.18799 284.4647,156.82757 284.6386,159.07597 C 284.78839,161.01273 285.24037,162.64716 286.16384,164.59151 C 287.23183,166.84012 287.43789,167.69463 287.27043,169.18035 C 287.15459,170.2081 286.70684,171.3939 286.24597,171.89349 C 285.2295,172.99536 281.11174,180.12521 280.69642,181.50246 C 279.94371,183.99856 277.41503,189.23736 275.76462,191.71994 C 273.21329,195.55768 270.45935,197.86457 265.70147,200.14953 C 258.59319,203.56326 253.06615,204.18955 245.99986,202.38198 z "
style="fill:#000000;fill-opacity:1" />
<text
xml:space="preserve"
style="font-size:15.53327274px;font-style:normal;font-weight:normal;fill:#7799ee;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
x="285.01266"
y="186.09427"
id="text5574"
transform="scale(1.0402212,0.961334)"><tspan
sodipodi:role="line"
id="tspan5576"
x="285.01266"
y="186.09427"
style="font-size:93.19962311px;font-weight:normal;fill:#7799ee;fill-opacity:1;font-family:MgOpen Modata"
dx="0 -4.2857141 -6.4285722 -5 -5.7142901 -6.0714293"
dy="0 0 -1.3672954 0.35714287 1.0101526 -1.0101526">Theano</tspan></text>
</g>
</svg>
@@ -41,13 +41,13 @@ As a developer, you should clone this repository like this:
.. code-block:: bash
hg clone 'http://username:password@pylearn.org/hg/Theano'
hg clone 'http://username:password@hg.assembla.com/theano' Theano
You can also clone the code anonymously:
.. code-block:: bash
hg clone http://pylearn.org/hg/Theano
hg clone http://hg.assembla.com/theano Theano
Setting up your environment
===========================
@@ -15,11 +15,13 @@
TODO
.. _libdoc_compile_function:
compile.function
================
This page is about :api:`theano.function
<theano.compile.function_module.function>`, the interface for compiling
This page is about `theano.function`, the interface for compiling
graphs into callable objects.
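As a quick sketch of its use (this example and its imports are illustrative, not part of the original page):

>>> import theano.tensor as T
>>> from theano import function
>>> x, y = T.dscalars('x', 'y')   # sketch: two symbolic float64 scalars
>>> f = function([x, y], x + y)
>>> f(2, 3)
array(5.0)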
The signature for this function is:
@@ -402,6 +404,11 @@ For a finer level of control over which optimizations are applied, and whether
C or python implementations are used, read :api:`compile.mode.Mode`.
.. _compile_debugMode:
DebugMode ??
.. toctree::
function
.. currentmodule:: tensor
.. _libdoc_tensor_type:
TensorType
==========
@@ -9,6 +13,10 @@ TensorType
.. method:: quux()
.. _libdoc_tensor_creation:
Creation
========
@@ -72,6 +80,9 @@ Basic indexing.
Advanced indexing.
.. _libdoc_tensor_elementwise:
Elementwise
===========
@@ -84,9 +95,57 @@ Logic Functions
Mathematical
------------
.. _libdoc_tensor_broadcastable:
Broadcasting in Theano vs. Numpy
--------------------------------
Broadcasting is a mechanism which allows tensors with
different numbers of dimensions to be added or multiplied
together by (virtually) replicating the smaller tensor along
the dimensions that it is lacking.
In a nutshell, broadcasting is the mechanism by which a scalar
may be added to a matrix, a vector to a matrix or a scalar to
a vector.
.. figure:: bcast.png
Broadcasting a row matrix. T and F respectively stand for
True and False and indicate along which dimensions we allow
broadcasting.
If the second argument were a vector, its shape would be
``(2,)`` and its broadcastable pattern ``(F,)``. They would
be automatically expanded to the **left** to match the
dimensions of the matrix (adding ``1`` to the shape and ``T``
to the pattern), resulting in ``(1, 2)`` and ``(T, F)``.
It would then behave just like the example above.
Unlike numpy, which does broadcasting dynamically, Theano needs
to know, for any operation which supports broadcasting, which
dimensions will need to be broadcasted. When applicable, this
information is given in the :ref:`type` of a *Variable*.
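A short sketch of inspecting these patterns (assuming ``import theano.tensor as T``; the variable names are illustrative):

>>> import theano.tensor as T
>>> r = T.row('r')            # sketch: a row has broadcastable pattern (T, F)
>>> r.type.broadcastable
(True, False)
>>> m = T.matrix('m')
>>> m.type.broadcastable
(False, False)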
See also:
* :ref:`How broadcasting is used in Theano's tensor types <tensortypes>`
* `SciPy documentation about numpy's broadcasting <http://www.scipy.org/EricsBroadcastingDoc>`_
* `OnLamp article about numpy's broadcasting <http://www.onlamp.com/pub/a/python/2000/09/27/numerically.html>`_
Linear Algebra
==============
Fourier Transforms
==================
[James has some code for this, but hasn't gotten it into the source tree yet.]
This diff is collapsed.
@@ -11,6 +11,7 @@ Theano's strength is in expressing symbolic calculations involving tensors.
There are many types of symbolic expressions for tensors. For everyone's
sanity, they are grouped into the following sections:
.. toctree::
:maxdepth: 1
@@ -8,8 +8,8 @@ Baby steps - Adding two numbers together
Adding two scalars
==================
So, to get us started and get a feel of what we're working with, let's
make a simple function: add two numbers together. Here is how you do
So, to get us started with Theano and get a feel for what we're working with,
let's make a simple function: add two numbers together. Here is how you do
it:
>>> x = T.dscalar('x')
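>>> # sketch of the remaining steps, assuming ``from theano import function``
>>> y = T.dscalar('y')
>>> z = x + y
>>> f = function([x, y], z)
>>> f(16.3, 12.1)
array(28.4)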
@@ -26,17 +26,31 @@ array(28.4)
Let's break this down into several steps. The first step is to define
two symbols, or Variables, representing the quantities that you want
to add. Note that from now on, we will use the term :term:`Variable`
to mean "symbol" (in other words, ``x``, ``y``, ``z`` are all Variable
objects). The output of the function ``f`` is a ``numpy.ndarray``
with zero dimensions.
two symbols (*Variables*) representing the quantities that you want
to add. Note that from now on, we will use the term
*Variable* to mean "symbol" (in other words,
``x``, ``y``, ``z`` are all *Variable* objects). The output of the function
``f`` is a ``numpy.ndarray`` with zero dimensions.
If you are following along and typing into an interpreter, you may have
noticed that there was a slight delay in executing the ``function``
instruction. Behind the scenes, ``f`` was being compiled into C code.
.. TODO: help
.. note::
A *Variable* is the main data structure you work with when
using Theano. The symbolic inputs that you operate on are
*Variables* and what you get from applying various operations to
these inputs are also *Variables*. For example, when I type
>>> x = theano.tensor.ivector()
>>> y = -x
``x`` and ``y`` are both Variables, i.e. instances of the
``theano.gof.graph.Variable`` class. The
type of both ``x`` and ``y`` is ``theano.tensor.ivector``.
-------------------------------------------
@@ -47,11 +61,11 @@ instruction. Behind the scenes, ``f`` was being compiled into C code.
In Theano, all symbols must be typed. In particular, ``T.dscalar``
is the type we assign to "0-dimensional arrays (`scalar`) of doubles
(`d`)". It is a Theano :term:`Type`.
(`d`)". It is a Theano :ref:`type`.
``dscalar`` is not a class. Therefore, neither ``x`` nor ``y``
are actually instances of ``dscalar``. They are instances of
:api:`TensorVariable <theano.tensor.basic.TensorVariable>`. ``x`` and ``y``
:ref:`TensorVariable <libdoc_tensor_type>`. ``x`` and ``y``
are, however, assigned the theano Type ``dscalar`` in their ``type``
field, as you can see here:
@@ -64,8 +78,10 @@ TensorType(float64, scalar)
>>> x.type == T.dscalar
True
You can learn more about the structures in Theano in :ref:`graphstructures`.
By calling ``T.dscalar`` with a string argument, you create a
:term:`Variable` representing a floating-point scalar quantity with the
*Variable* representing a floating-point scalar quantity with the
given name. If you provide no argument, the symbol will be unnamed. Names
are not required, but they can help debugging.
@@ -77,8 +93,8 @@ The second step is to combine ``x`` and ``y`` into their sum ``z``:
>>> z = x + y
``z`` is yet another :term:`Variable` which represents the addition of
``x`` and ``y``. You can use the :api:`pp <theano.printing.pp>`
``z`` is yet another *Variable* which represents the addition of
``x`` and ``y``. You can use the :ref:`pp <libdoc_printing>`
function to pretty-print out the computation associated to ``z``.
>>> print pp(z)
@@ -93,7 +109,7 @@ and giving ``z`` as output:
>>> f = function([x, y], z)
The first argument to ``function`` is a list of :term:`Variables <Variable>`
The first argument to :ref:`function <libdoc_compile_function>` is a list of Variables
that will be provided as inputs to the function. The second argument
is a single Variable *or* a list of Variables. For either case, the second
argument is what we want to see as output when we apply the function.
@@ -130,7 +146,7 @@ array([[ 11., 22.],
It is possible to add scalars to matrices, vectors to matrices,
scalars to vectors, etc. The behavior of these operations is defined
by :term:`broadcasting`.
by :ref:`broadcasting <libdoc_tensor_broadcastable>`.
The following types are available:
@@ -110,9 +110,7 @@ put logic inside of the print_eval function that would, for example, only
print something out if a certain kind of Op was used, at a certain program
position, or if a particular value shows up in one of the inputs or outputs.
This can be a really powerful debugging tool. Read about more things you can
do with :api:`link.WrapLinkerMany`.
.. TODO: documentation for link.WrapLinkerMany
Note well the call to ``fn`` inside the call to ``print_eval``; without it,
the graph wouldn't get computed at all!
This can be a really powerful debugging tool. Note the call to ``fn`` inside the call to ``print_eval``; without it, the graph wouldn't get computed at all!
@@ -7,7 +7,7 @@ Using DebugMode
The DebugMode evaluation mode (available via ``mode='DEBUG_MODE'``,
:api:`DebugMode`) includes a number of self-checks and assertions that
see :ref:`this <function_mode>`) includes a number of self-checks and assertions that
can help to diagnose several kinds of programmer errors that can lead
to incorrect output.
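For instance, a minimal sketch of turning it on (the variable and graph here are illustrative):

>>> import theano
>>> import theano.tensor as T
>>> x = T.dscalar('x')
>>> f = theano.function([x], 10 * x, mode='DEBUG_MODE')  # sketch: every call is self-checked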
@@ -41,7 +41,7 @@ In the example above, there is no way to guarantee that a future call to say,
If you instantiate DebugMode using the constructor ``compile.DebugMode``
rather than the keyword ``DEBUG_MODE`` you can configure its behaviour via
constructor arguments. See :api:`DebugMode` for details.
constructor arguments. See :ref:`DebugMode <compile_debugMode>` for details.
The keyword version of DebugMode (which you get by using ``mode='DEBUG_MODE'``)
is quite strict, and can raise several different Exception types.
@@ -56,7 +56,6 @@ This error is typically not raised directly.
However, you can use ``except DebugModeError: ...`` to catch any of the more
specific types of Exception.
For detailed documentation see :api:`DebugModeError`.
BadCLinkerOutput
@@ -66,7 +65,6 @@ This exception means that python (``perform``) and c (``c_code``) for an Op
didn't compute the same thing, even though they were supposed to.
The problem might be a bug in either ``perform`` or ``c_code`` (or both).
For detailed documentation see :api:`BadCLinkerOutput`.
BadOptimization
@@ -82,7 +80,6 @@ exception object will indicate which optimization was at fault.
The exception object also contains information such as a snapshot of the
before/after graph where the optimization introduced the error.
For detailed documentation see :api:`BadOptimization`.
BadDestroyMap
@@ -93,7 +90,6 @@ supposed to. If either the ``perform`` or ``c_code`` implementation of an Op
might modify any input, it has to advertise that fact via the ``destroy_map``
attribute.
For detailed documentation on the Exception, see :api:`BadDestroyMap`.
For detailed documentation on the ``destroy_map`` attribute, see :ref:`inplace`.
@@ -105,7 +101,6 @@ This happens when an Op's perform() or c_code() creates an alias or alias-like
dependency between an input and an output... and it didn't warn the
optimization system via the ``view_map`` attribute.
For detailed documentation on the Exception, see :api:`BadViewMap`.
For detailed documentation on the ``view_map`` attribute, see :ref:`views`.
@@ -119,7 +114,6 @@ steps are ordered by ``id(object)`` somehow, such as via the default object
hash function. A stochastic optimization invalidates the pattern of work
whereby we debug in DEBUG_MODE and then run the full-size jobs in FAST_RUN.
For detailed documentation see :api:`StochasticOrder`.
@@ -136,6 +130,5 @@ introduced into the computations. It indicates which Op created the first
NaN. These floating-point values can be allowed by passing the
``check_isfinite=False`` argument to DebugMode.
For detailed documentation see :api:`InvalidValueError`.
@@ -22,7 +22,7 @@ the logistic curve, which is given by:
A plot of the logistic function, with x on the x-axis and s(x) on the
y-axis.
You want to compute the function :term:`elementwise` on matrices of
You want to compute the function :ref:`elementwise <libdoc_tensor_elementwise>` on matrices of
doubles, which means that you want to apply this function to each
individual element of the matrix.
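A minimal sketch (assuming ``T`` and ``function`` are imported as in the earlier examples):

>>> x = T.dmatrix('x')
>>> s = 1 / (1 + T.exp(-x))      # sketch: logistic applied elementwise
>>> logistic = function([x], s)
>>> logistic([[0, 1], [-1, -2]])
array([[ 0.5       ,  0.73105858],
       [ 0.26894142,  0.11920292]])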
@@ -58,7 +58,7 @@ Computing more than one thing at the same time
==============================================
Theano supports functions with multiple outputs. For example, we can
compute the :term:`elementwise` difference, absolute difference, and
compute the :ref:`elementwise <libdoc_tensor_elementwise>` difference, absolute difference, and
squared difference between two matrices ``a`` and ``b`` at the same time:
>>> a, b = T.dmatrices('a', 'b')
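>>> # continuing the sketch, assuming ``from theano import function``:
>>> diff = a - b
>>> abs_diff = abs(diff)
>>> diff_squared = diff**2
>>> f = function([a, b], [diff, abs_diff, diff_squared])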
@@ -134,16 +134,17 @@ array([[ 0.25 , 0.19661193],
The resulting function computes the gradient of its first argument
with respect to the second. In this way, Theano can be used for
`automatic differentiation`_.
`automatic differentiation <http://en.wikipedia.org/wiki/Automatic_differentiation>`_.
.. note::
The variable of ``T.grad`` has the same dimensions as the
second argument. This is exactly like the first derivative if the
first argument is a scalar or a tensor of size 1 but not if it is
larger. For more information on the semantics when the first
argument has a larger size and details about the implementation,
see :api:`tensor.grad`.
The second argument of ``T.grad`` can be a list, in which case the
output is also a list. The order in both lists is important: element
*i* of the output list is the gradient of the first argument of
``T.grad`` with respect to the *i*-th element of the list given as the second argument.
The first argument of ``T.grad`` has to be a scalar (a tensor
of size 1). For more information on the semantics of the arguments of
``T.grad`` and details about the implementation, see :ref:`this <libdoc_gradient>`.
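As an illustrative sketch (assuming the usual imports), differentiating ``x**2``:

>>> x = T.dscalar('x')
>>> y = x ** 2
>>> gy = T.grad(y, x)           # sketch: symbolic gradient, dy/dx = 2*x
>>> f = function([x], gy)
>>> f(4)
array(8.0)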
Setting a default value for an argument
@@ -291,8 +292,9 @@ the substitutions have to work in any order.
Mode
====
The ``mode`` parameter to :api:`theano.function` controls how the
inputs-to-outputs graph is transformed into a callable object.
The ``mode`` parameter to :ref:`theano.function <libdoc_compile_function>`
controls how the inputs-to-outputs graph is transformed into a callable
object.
Theano defines the following modes by name:
@@ -304,15 +306,11 @@ Theano defines the following modes by name:
The default mode is typically ``FAST_RUN``, but it can be controlled via
the environment variable ``THEANO_DEFAULT_MODE``, which can in turn be
overridden by setting :api:`theano.compile.mode.default_mode` directly,
overridden by setting `theano.compile.mode.default_mode` directly,
which can in turn be overridden by passing the keyword argument to
:api:`theano.function`.
:ref:`theano.function <libdoc_compile_function>`.
For a finer level of control over which optimizations are applied, and
whether C or python implementations are used, read
:api:`compile.mode.Mode`.
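For example, a sketch of selecting a mode explicitly (``x``, ``y``, ``z`` as in the earlier examples):

>>> f = function([x, y], z, mode='FAST_COMPILE')  # sketch: skip most optimizations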
.. _automatic differentiation: http://en.wikipedia.org/wiki/Automatic_differentiation
@@ -10,7 +10,7 @@ Let's start an interactive session and import Theano.
>>> from theano import *
Many of the symbols you will need to use are in the ``tensor`` subpackage
of theano. Let's import that subpackage under a handy name. I like
of Theano. Let's import that subpackage under a handy name. I like
``T`` (and many tutorials use this convention).
>>> import theano.tensor as T
@@ -8,10 +8,9 @@ NumPy refresher
Here are some quick guides to NumPy:
* `Numpy quick guide for Matlab users <http://www.scipy.org/NumPy_for_Matlab_Users>`__
* `More detailed table showing the NumPy equivalent of Matlab commands <http://www.scribd.com/doc/26685/Matlab-Python-and-R>`__
* `Numpy User Guide <http://docs.scipy.org/doc/numpy/user/index.html>`__
* `More detailed Numpy tutorial <http://www.scipy.org/Tentative_NumPy_Tutorial>`__
.. TODO [DefineBroadcasting Broadcasting]
.. Broadcastable - Implicitly assume that all previous entries are true.
.. [TODO: More doc, e.g. see _test_tensor.py]
@@ -20,8 +19,10 @@ Matrix conventions for machine learning
Rows are horizontal and columns are vertical.
Every row is an example. Therefore, inputs[10,5] is a matrix of 10 examples with 5 dimensions per.
So to make a NN out of it, multiply by a weight matrix of size (5, #hid).
Every row is an example. Therefore, inputs[10,5] is a matrix of 10 examples
where each example has dimension 5. If this were the input of a
neural network, then the weights from the input to the first hidden
layer would represent a matrix of size (5, #hid).
If I have an array:
@@ -43,3 +44,22 @@ To access the entry in the 3rd row (row #2) and the 1st column (column #0):
To remember this, keep in mind that we read left-to-right, top-to-bottom,
so each thing that is contiguous is a row. That is, there are 3 rows
and 2 columns.
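A short numpy sketch of this convention (the array here is illustrative):

>>> import numpy
>>> a = numpy.asarray([[1., 2], [3, 4], [5, 6]])
>>> a.shape                     # 3 rows, 2 columns
(3, 2)
>>> a[2, 0]                     # 3rd row, 1st column
5.0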
Broadcasting
============
Numpy does *broadcasting* of arrays of different shapes during
arithmetic operations. What this means in general is that the smaller
array is *broadcasted* across the larger array so that they have
compatible shapes. The example below shows an instance of
*broadcasting*:
>>> a = numpy.asarray([1.0, 2.0, 3.0])
>>> b = 2.0
>>> a * b
array([2., 4., 6.])
The smaller array ``b`` in this case is *broadcasted* to the same size
as ``a`` during the multiplication. This trick is often useful in
simplifying how expressions are written. More details about *broadcasting*
can be found in the `numpy user guide <http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`__.
@@ -28,7 +28,7 @@ Predefined types
----------------
Predefined types are
located in the :api:`theano.tensor` package. The name of the types follow
located in the :ref:`theano.tensor <libdoc_tensor>` package. The names of the types follow
a recipe:
``<dtype><dimensionality>``
@@ -48,26 +48,26 @@ d double floating point 64
Dimensionality is one of:
====== ====== ========================================== =============================================
code   shape  Rows :term:`broadcastable <broadcasting>`? Columns :term:`broadcastable <broadcasting>`?
====== ====== ========================================== =============================================
scalar []     Yes                                        Yes
vector [n]    Yes                                        N/A (vectors are used like row vectors)
row    [1, n] Yes                                        No
col    [m, 1] No                                         Yes
matrix [m, n] No                                         No
====== ====== ========================================== =============================================
====== ====== ======================================================== ===========================================================
code   shape  Rows :ref:`broadcastable <libdoc_tensor_broadcastable>`? Columns :ref:`broadcastable <libdoc_tensor_broadcastable>`?
====== ====== ======================================================== ===========================================================
scalar []     Yes                                                      Yes
vector [n]    Yes                                                      N/A (vectors are used like row vectors)
row    [1, n] Yes                                                      No
col    [m, 1] No                                                       Yes
matrix [m, n] No                                                       No
====== ====== ======================================================== ===========================================================
So, if you want a row of 32-bit floats, it is available
as :api:`theano.tensor.frow <theano.tensor.basic.frow>`.
as :ref:`theano.tensor.frow <libdoc_tensor_type>`.
If you want a matrix of signed 32-bit integers, it is available as
:api:`theano.tensor.imatrix <theano.tensor.basic.imatrix>`.
:ref:`theano.tensor.imatrix <libdoc_tensor_type>`.
Each of the types described above can be constructed by two methods:
a singular version (e.g., :api:`dmatrix <theano.tensor.basic.dmatrix>`)
and a plural version (:api:`dmatrices <theano.tensor.dmatrices>`).
a singular version (e.g., :ref:`dmatrix <libdoc_tensor_creation>`)
and a plural version (:ref:`dmatrices <libdoc_tensor_creation>`).
When called, the singular version takes a single
argument which is the name of the :term:`Variable` we want to make and it
argument which is the name of the *Variable* we want to make and it
makes a single Variable of that type. The plural version can either take
an integer or several strings. If an integer is provided, the method
will return that many Variables and if strings are provided, it will
@@ -91,7 +91,7 @@ Custom tensor types
If you wish to use a type of tensor which is not already available here
(for example, a 3D tensor) you can build an appropriate type using
:api:`theano.tensor.TensorType <theano.tensor.basic.TensorType>`.
:ref:`theano.tensor.TensorType <libdoc_tensor_type>`.
The first argument you pass is the `dtype` and the second is the
`broadcastable pattern`.
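For example, a sketch of building a 3D tensor type (assuming ``import theano.tensor as T``; the names are illustrative):

>>> import theano.tensor as T
>>> dtensor3 = T.TensorType('float64', (False, False, False))  # sketch: no broadcastable dims
>>> x = dtensor3('x')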
@@ -116,10 +116,10 @@ complex128 complex 128 (two float64)
.. note::
Even though :api:`theano.tensor` does not define any type
Even though :ref:`theano.tensor <libdoc_tensor>` does not define any type
using ``complex`` dtypes (``complex64`` or ``complex128``),
you can define them explicitly with
:api:`TensorType <theano.tensor.basic.TensorType>` (see example
:ref:`TensorType <libdoc_tensor_type>` (see example
below). However, few operations are fully supported for complex
types: as of version 0.1, only elementary operations (``+-*/``)
have C implementations. Additionally, complex types have received
@@ -128,8 +128,7 @@ The broadcastable pattern indicates both the number of dimensions and
The broadcastable pattern indicates both the number of dimensions and
whether a particular dimension must have length 1.
Here is a table mapping the :term:`broadcastable
<broadcasting>` pattern to what kind of tensor it encodes:
Here is a table mapping the :ref:`broadcastable <libdoc_tensor_broadcastable>` pattern to what kind of tensor it encodes:
===================== =================================
pattern               interpretation
This diff is collapsed.