提交 54fad171 authored 作者: Razvan Pascanu's avatar Razvan Pascanu

Fixed several issues I encountered (missing or confusing imports, missing

borrow argument for In, gradient of a matrix ...)
上级 f601a0ff
...@@ -162,7 +162,7 @@ it, it's best to publish it somewhere. ...@@ -162,7 +162,7 @@ it, it's best to publish it somewhere.
.. code-block:: python .. code-block:: python
def c_init(self, name, sub): def c_init(name, sub):
return """ return """
%(name)s = 0.0; %(name)s = 0.0;
""" % dict(name = name) """ % dict(name = name)
...@@ -191,7 +191,7 @@ called, without knowing for sure which of the two. ...@@ -191,7 +191,7 @@ called, without knowing for sure which of the two.
.. code-block:: python .. code-block:: python
def c_extract(self, name, sub): def c_extract(name, sub):
return """ return """
if (!PyFloat_Check(py_%(name)s)) { if (!PyFloat_Check(py_%(name)s)) {
PyErr_SetString(PyExc_TypeError, "expected a float"); PyErr_SetString(PyExc_TypeError, "expected a float");
...@@ -229,7 +229,7 @@ API) and we put it in our double variable that we declared previously. ...@@ -229,7 +229,7 @@ API) and we put it in our double variable that we declared previously.
.. code-block:: python .. code-block:: python
def c_sync(self, name, sub): def c_sync(name, sub):
return """ return """
Py_XDECREF(py_%(name)s); Py_XDECREF(py_%(name)s);
py_%(name)s = PyFloat_FromDouble(%(name)s); py_%(name)s = PyFloat_FromDouble(%(name)s);
...@@ -291,7 +291,7 @@ than sorry. ...@@ -291,7 +291,7 @@ than sorry.
.. code-block:: python .. code-block:: python
def c_cleanup(self, name, sub): def c_cleanup(name, sub):
return "" return ""
double.c_cleanup = c_cleanup double.c_cleanup = c_cleanup
...@@ -339,6 +339,9 @@ and call it: ...@@ -339,6 +339,9 @@ and call it:
.. theano/tests/test_tutorial.py:T_extending.test_extending_2 .. theano/tests/test_tutorial.py:T_extending.test_extending_2
.. code-block:: python .. code-block:: python
from theano import function
from theano.tensor import double
x, y, z = double('x'), double('y'), double('z') x, y, z = double('x'), double('y'), double('z')
a = add(x, y) a = add(x, y)
...@@ -430,7 +433,7 @@ Final version ...@@ -430,7 +433,7 @@ Final version
class Double(gof.Type): class Double(gof.Type):
def filter(self, x, strict=False): def filter(self, x, strict=False, allow_downcast = False):
if strict and not isinstance(x, float): if strict and not isinstance(x, float):
raise TypeError('Expected a float!') raise TypeError('Expected a float!')
return float(x) return float(x)
......
...@@ -469,6 +469,8 @@ Here are a few examples of how to use a Query on optdb to produce an ...@@ -469,6 +469,8 @@ Here are a few examples of how to use a Query on optdb to produce an
Optimizer: Optimizer:
.. code-block:: python .. code-block:: python
from theano.compile import optdb
# This is how the optimizer for the fast_run mode is defined # This is how the optimizer for the fast_run mode is defined
fast_run = optdb.query(Query(include = ['fast_run'])) fast_run = optdb.query(Query(include = ['fast_run']))
......
...@@ -113,7 +113,7 @@ must define ``filter`` and shall override ``values_eq_approx``. ...@@ -113,7 +113,7 @@ must define ``filter`` and shall override ``values_eq_approx``.
# Note that we shadow Python's function ``filter`` with this # Note that we shadow Python's function ``filter`` with this
# definition. # definition.
def filter(x, strict=False): def filter(x, strict=False, allow_downcast = False):
if strict: if strict:
if isinstance(x, float): if isinstance(x, float):
return x return x
...@@ -278,7 +278,7 @@ Final version ...@@ -278,7 +278,7 @@ Final version
class Double(gof.Type): class Double(gof.Type):
def filter(self, x, strict=False): def filter(self, x, strict=False, allow_downcast = False):
if strict and not isinstance(x, float): if strict and not isinstance(x, float):
raise TypeError('Expected a float!') raise TypeError('Expected a float!')
return float(x) return float(x)
......
...@@ -149,7 +149,7 @@ logistic is: :math:`ds(x)/dx = s(x) \cdot (1 - s(x))`. ...@@ -149,7 +149,7 @@ logistic is: :math:`ds(x)/dx = s(x) \cdot (1 - s(x))`.
.. theano/tests/test_tutorial.py:T_examples.test_examples_5 .. theano/tests/test_tutorial.py:T_examples.test_examples_5
>>> x = T.dmatrix('x') >>> x = T.dmatrix('x')
>>> s = 1 / (1 + T.exp(-x)) >>> s = T.sum(1 / (1 + T.exp(-x)))
>>> gs = T.grad(s, x) >>> gs = T.grad(s, x)
>>> dlogistic = function([x], gs) >>> dlogistic = function([x], gs)
>>> dlogistic([[0, 1], [-1, -2]]) >>> dlogistic([[0, 1], [-1, -2]])
...@@ -321,7 +321,7 @@ for the purpose of one particular function. ...@@ -321,7 +321,7 @@ for the purpose of one particular function.
.. theano/tests/test_tutorial.py:T_examples.test_examples_8 .. theano/tests/test_tutorial.py:T_examples.test_examples_8
>>> fn_of_state = state * 2 + inc >>> fn_of_state = state * 2 + inc
>>> foo = lscalar() # the type (lscalar) must match the shared variable we >>> foo = T.lscalar() # the type (lscalar) must match the shared variable we
>>> # are replacing with the ``givens`` list >>> # are replacing with the ``givens`` list
>>> skip_shared = function([inc, foo], fn_of_state, >>> skip_shared = function([inc, foo], fn_of_state,
givens=[(state, foo)]) givens=[(state, foo)])
...@@ -394,7 +394,7 @@ not affected by calling the returned function. So for example, calling ...@@ -394,7 +394,7 @@ not affected by calling the returned function. So for example, calling
``g`` multiple times will return the same numbers. ``g`` multiple times will return the same numbers.
>>> g_val0 = g() # different numbers from f_val0 and f_val1 >>> g_val0 = g() # different numbers from f_val0 and f_val1
>>> g_val0 = g() # same numbers as g_val0 !!! >>> g_val1 = g() # same numbers as g_val0 !!!
An important remark is that a random variable is drawn at most once during any An important remark is that a random variable is drawn at most once during any
single function execution. So the ``nearly_zeros`` function is guaranteed to single function execution. So the ``nearly_zeros`` function is guaranteed to
......
...@@ -88,7 +88,7 @@ file and run it. ...@@ -88,7 +88,7 @@ file and run it.
r = f() r = f()
print 'Looping %d times took'%iters, time.time() - t0, 'seconds' print 'Looping %d times took'%iters, time.time() - t0, 'seconds'
print 'Result is', r print 'Result is', r
print 'Used the','cpu' if any( [isinstance(x.op,T.Elemwise) for x in f.maker.env.toposort()]) else 'gpu' print 'Used the','cpu' if numpy.any( [isinstance(x.op,T.Elemwise) for x in f.maker.env.toposort()]) else 'gpu'
The program just computes the exp() of a bunch of random numbers. The program just computes the exp() of a bunch of random numbers.
Note that we use the `shared` function to Note that we use the `shared` function to
...@@ -145,7 +145,7 @@ after the T.exp(x) is replaced by a GPU version of exp(). ...@@ -145,7 +145,7 @@ after the T.exp(x) is replaced by a GPU version of exp().
print 'Looping %d times took'%iters, time.time() - t0, 'seconds' print 'Looping %d times took'%iters, time.time() - t0, 'seconds'
print 'Result is', r print 'Result is', r
print 'Numpy result is', numpy.asarray(r) print 'Numpy result is', numpy.asarray(r)
print 'Used the','cpu' if any( [isinstance(x.op,T.Elemwise) for x in f.maker.env.toposort()]) else 'gpu' print 'Used the','cpu' if numpy.any( [isinstance(x.op,T.Elemwise) for x in f.maker.env.toposort()]) else 'gpu'
The output from this program is The output from this program is
...@@ -200,7 +200,7 @@ that it has the un-wanted side-effect of really slowing things down. ...@@ -200,7 +200,7 @@ that it has the un-wanted side-effect of really slowing things down.
print 'Looping %d times took'%iters, time.time() - t0, 'seconds' print 'Looping %d times took'%iters, time.time() - t0, 'seconds'
print 'Result is', r print 'Result is', r
print 'Numpy result is', numpy.asarray(r) print 'Numpy result is', numpy.asarray(r)
print 'Used the','cpu' if any( [isinstance(x.op,T.Elemwise) for x in f.maker.env.toposort()]) else 'gpu' print 'Used the','cpu' if numpy.any( [isinstance(x.op,T.Elemwise) for x in f.maker.env.toposort()]) else 'gpu'
Running this version of the code takes just under 0.05 seconds, over 140x faster than Running this version of the code takes just under 0.05 seconds, over 140x faster than
the CPU implementation! the CPU implementation!
......
...@@ -184,7 +184,7 @@ class In(SymbolicInput): ...@@ -184,7 +184,7 @@ class In(SymbolicInput):
# try to keep it synchronized. # try to keep it synchronized.
def __init__(self, variable, name=None, value=None, update=None, def __init__(self, variable, name=None, value=None, update=None,
mutable=None, strict=False, allow_downcast=False, autoname=True, mutable=None, strict=False, allow_downcast=False, autoname=True,
implicit=None): implicit=None, borrow=False):
if implicit is None: if implicit is None:
implicit = (isinstance(value, gof.Container) or implicit = (isinstance(value, gof.Container) or
isinstance(value, SharedVariable)) isinstance(value, SharedVariable))
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论