Commit d2d8f01b authored by Frederic

pep8

Parent 1b0f3caa
......@@ -36,7 +36,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
W = T.matrix("W")
b_sym = T.vector("b_sym")
results, updates = theano.scan(lambda v:T.tanh(T.dot(v, W)+b_sym), sequences=X)
results, updates = theano.scan(lambda v: T.tanh(T.dot(v, W) + b_sym), sequences=X)
compute_elementwise = theano.function(inputs=[X, W, b_sym], outputs=[results])
# test values
......@@ -51,7 +51,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
print np.tanh(x.dot(w) + b)
**Scan Example: Computing the sequence x(t) = tanh(x(t-1).dot(W) + y(t).dot(U) + p(T-t).dot(V))**
**Scan Example: Computing the sequence x(t) = tanh(x(t - 1).dot(W) + y(t).dot(U) + p(T - t).dot(V))**
.. code-block:: python
import theano
......@@ -67,8 +67,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
V = T.matrix("V")
P = T.matrix("P")
results, updates = theano.scan(lambda
y,p,x_tm1:T.tanh(T.dot(x_tm1, W)+T.dot(y, U)+T.dot(p, V)),
results, updates = theano.scan(lambda y, p, x_tm1: T.tanh(T.dot(x_tm1, W) + T.dot(y, U) + T.dot(p, V)),
sequences=[Y, P[::-1]], outputs_info=[X])
compute_seq = theano.function(inputs=[X, W, Y, U, P, V], outputs=[results])
......@@ -89,7 +88,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
x_res = np.zeros((5, 2))
x_res[0] = np.tanh(x.dot(w) + y[0].dot(u) + p[4].dot(v))
for i in range(1, 5):
x_res[i] = np.tanh(x_res[i-1].dot(w) + y[i].dot(u) + p[4-i].dot(v))
x_res[i] = np.tanh(x_res[i - 1].dot(w) + y[i].dot(u) + p[4 - i].dot(v))
**Scan Example: Computing norms of lines of X**
......@@ -100,7 +99,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
# define tensor variable
X = T.matrix("X")
results, updates = theano.scan(lambda x_i:T.sqrt((x_i**2).sum()), sequences=[X])
results, updates = theano.scan(lambda x_i: T.sqrt((x_i ** 2).sum()), sequences=[X])
compute_norm_lines = theano.function(inputs=[X], outputs=[results])
# test value
......@@ -108,7 +107,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
print compute_norm_lines(x)[0]
# comparison with numpy
print np.sqrt((x**2).sum(1))
print np.sqrt((x ** 2).sum(1))
**Scan Example: Computing norms of columns of X**
......@@ -119,7 +118,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
# define tensor variable
X = T.matrix("X")
results, updates = theano.scan(lambda x_i:T.sqrt((x_i**2).sum()), sequences=[X.T])
results, updates = theano.scan(lambda x_i: T.sqrt((x_i ** 2).sum()), sequences=[X.T])
compute_norm_cols = theano.function(inputs=[X], outputs=[results])
# test value
......@@ -127,7 +126,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
print compute_norm_cols(x)[0]
# comparison with numpy
print np.sqrt((x**2).sum(0))
print np.sqrt((x ** 2).sum(0))
**Scan Example: Computing trace of X**
......@@ -139,7 +138,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
# define tensor variable
X = T.matrix("X")
results, updates = theano.scan(lambda i, j, t_f:T.cast(X[i, j]+t_f, floatX),
results, updates = theano.scan(lambda i, j, t_f: T.cast(X[i, j] + t_f, floatX),
sequences=[T.arange(X.shape[0]), T.arange(X.shape[1])],
outputs_info=np.asarray(0., dtype=floatX))
result = results[-1]
......@@ -153,7 +152,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
# comparison with numpy
print np.diagonal(x).sum()
**Scan Example: Computing the sequence x(t) = x(t-2).dot(U) + x(t-1).dot(V) + tanh(x(t-1).dot(W) + b)**
**Scan Example: Computing the sequence x(t) = x(t - 2).dot(U) + x(t - 1).dot(V) + tanh(x(t - 1).dot(W) + b)**
.. code-block:: python
import theano
......@@ -168,16 +167,16 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
V = T.matrix("V")
n_sym = T.iscalar("n_sym")
results, updates = theano.scan(lambda x_tm2, x_tm1:T.dot(x_tm2, U) + T.dot(x_tm1, V) + T.tanh(T.dot(x_tm1, W) + b_sym),
results, updates = theano.scan(lambda x_tm2, x_tm1: T.dot(x_tm2, U) + T.dot(x_tm1, V) + T.tanh(T.dot(x_tm1, W) + b_sym),
n_steps=n_sym, outputs_info=[dict(initial=X, taps=[-2, -1])])
compute_seq2 = theano.function(inputs=[X, U, V, W, b_sym, n_sym], outputs=[results])
# test values
x = np.zeros((2, 2)) # the initial value must be able to return x[-2]
x[1, 1] = 1
w = 0.5*np.ones((2, 2))
u = 0.5*(np.ones((2, 2))-np.eye(2))
v = 0.5*np.ones((2, 2))
w = 0.5 * np.ones((2, 2))
u = 0.5 * (np.ones((2, 2)) - np.eye(2))
v = 0.5 * np.ones((2, 2))
n = 10
b = np.ones((2))
......@@ -189,8 +188,8 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
x_res[1] = x[1].dot(u) + x_res[0].dot(v) + numpy.tanh(x_res[0].dot(w) + b)
x_res[2] = x_res[0].dot(u) + x_res[1].dot(v) + numpy.tanh(x_res[1].dot(w) + b)
for i in range(2, 10):
x_res[i] = (x_res[i-2].dot(u) + x_res[i-1].dot(v) +
numpy.tanh(x_res[i-1].dot(w) + b))
x_res[i] = (x_res[i - 2].dot(u) + x_res[i - 1].dot(v) +
numpy.tanh(x_res[i - 1].dot(w) + b))
**Scan Example: Computing the Jacobian of y = tanh(v.dot(A)) wrt x**
......@@ -203,7 +202,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
v = T.vector()
A = T.matrix()
y = T.tanh(T.dot(v, A))
results, updates = theano.scan(lambda i:T.grad(y[i], v), sequences=[T.arange(y.shape[0])])
results, updates = theano.scan(lambda i: T.grad(y[i], v), sequences=[T.arange(y.shape[0])])
compute_jac_t = theano.function([A, v], [results], allow_input_downcast=True) # shape (d_out, d_in)
# test values
......@@ -213,7 +212,7 @@ The full documentation can be found in the library: :ref:`Scan <lib_scan>`.
print compute_jac_t(w, x)[0]
# compare with numpy
print ((1 - np.tanh(x.dot(w))**2)*w).T
print ((1 - np.tanh(x.dot(w)) ** 2) * w).T
Note that we need to iterate over the indices of ``y`` and not over the elements of ``y``. The reason is that scan create a placeholder variable for its internal function and this placeholder variable does not have the same dependencies than the variables that will replace it.
......@@ -228,14 +227,14 @@ Note that we need to iterate over the indices of ``y`` and not over the elements
k = theano.shared(0)
n_sym = T.iscalar("n_sym")
results, updates = theano.scan(lambda:{k:(k+1)}, n_steps=n_sym)
results, updates = theano.scan(lambda: {k: (k + 1)}, n_steps=n_sym)
accumulator = theano.function([n_sym], [], updates=updates, allow_input_downcast=True)
k.get_value()
accumulator(5)
k.get_value()
**Scan Example: Computing tanh(v.dot(W) + b)*d where b is binomial**
**Scan Example: Computing tanh(v.dot(W) + b) * d where b is binomial**
.. code-block:: python
import theano
......@@ -251,7 +250,7 @@ Note that we need to iterate over the indices of ``y`` and not over the elements
trng = T.shared_randomstreams.RandomStreams(1234)
d=trng.binomial(size=W[1].shape)
results, updates = theano.scan(lambda v:T.tanh(T.dot(v, W)+b_sym)*d, sequences=X)
results, updates = theano.scan(lambda v: T.tanh(T.dot(v, W) + b_sym) * d, sequences=X)
compute_with_bnoise = theano.function(inputs=[X, W, b_sym], outputs=[results],
updates=updates, allow_input_downcast=True)
x = np.eye(10, 2)
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment