提交 a3d76ad2 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Fixup doc/library/*

上级 b1f7979f
......@@ -182,27 +182,38 @@ method to access values by indexing a Function directly by typing
To show some examples of these access methods...
.. testcode::
a, b, c = T.scalars('xys') # set the internal names of graph nodes
# Note that the name of c is 's', not 'c'!
fn = function([a, b, ((c, c+a+b), 10.0)], [])
>>> from theano import tensor as T, function
>>> a, b, c = T.scalars('xys') # set the internal names of graph nodes
>>> # Note that the name of c is 's', not 'c'!
>>> fn = function([a, b, ((c, c+a+b), 10.0)], [])
#the value associated with c is accessible in 3 ways
assert fn['s'] is fn.value[c]
assert fn['s'] is fn.container[c].value
>>> # the value associated with c is accessible in 3 ways
>>> fn['s'] is fn.value[c]
True
>>> fn['s'] is fn.container[c].value
True
assert fn['s'] == 10.0
fn(1, 2)
assert fn['s'] == 13.0
fn.s = 99.0
fn(1, 0)
assert fn['s'] == 100.0
fn.value[c] = 99.0
fn(1,0)
assert fn['s'] == 100.0
assert fn['s'] == fn.value[c]
assert fn['s'] == fn.container[c].value
>>> fn['s']
array(10.0)
>>> fn(1, 2)
[]
>>> fn['s']
array(13.0)
>>> fn['s'] = 99.0
>>> fn(1, 0)
[]
>>> fn['s']
array(100.0)
>>> fn.value[c] = 99.0
>>> fn(1,0)
[]
>>> fn['s']
array(100.0)
>>> fn['s'] == fn.value[c]
True
>>> fn['s'] == fn.container[c].value
True
Input Shortcuts
......@@ -224,31 +235,41 @@ Every element of the inputs list will be upgraded to an In instance if necessary
Example:
.. testcode::
import theano
from theano import tensor as T
from theano.compile.io import In
x = T.scalar()
y = T.scalar('y')
z = T.scalar('z')
w = T.scalar('w')
fn = theano.function(inputs = [x, y, In(z, value=42), ((w, w+x), 0)],
outputs = x + y + z)
# the first two arguments are required and the last two are
# optional and initialized to 42 and 0, respectively.
# The last argument, w, is updated with w + x each time the
# function is called.
fn(1) # illegal because there are two required arguments
fn(1, 2) # legal, z is 42, w goes 0 -> 1 (because w <- w + x), returns array(45.0)
fn(1, y = 2) # legal, z is 42, w goes 1 -> 2, returns array(45.0)
fn(x = 1, y = 2) # illegal because x was not named
fn(1, 2, 3) # legal, z is 3, w goes 2 -> 3, returns array(6.0)
fn(1, z = 3, y = 2) # legal, z is 3, w goes 3 -> 4, returns array(6.0)
fn(1, 2, w = 400) # legal, z is 42 again, w goes 400 -> 401, returns array(45.0)
fn(1, 2) # legal, z is 42, w goes 401 -> 402, returns array(45.0)
>>> import theano
>>> from theano import tensor as T
>>> from theano.compile.io import In
>>> x = T.scalar()
>>> y = T.scalar('y')
>>> z = T.scalar('z')
>>> w = T.scalar('w')
>>> fn = theano.function(inputs=[x, y, In(z, value=42), ((w, w+x), 0)],
... outputs=x + y + z)
>>> # the first two arguments are required and the last two are
>>> # optional and initialized to 42 and 0, respectively.
>>> # The last argument, w, is updated with w + x each time the
>>> # function is called.
>>> fn(1) # illegal because there are two required arguments # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Missing required input: y
>>> fn(1, 2) # legal, z is 42, w goes 0 -> 1 (because w <- w + x)
array(45.0)
>>> fn(1, y=2) # legal, z is 42, w goes 1 -> 2
array(45.0)
>>> fn(x=1, y=2) # illegal because x was not named # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Unknown input or state: x. The function has 3 named inputs (y, z, w), and 1 unnamed input which thus cannot be accessed through keyword argument (use 'name=...' in a variable's constructor to give it a name).
>>> fn(1, 2, 3) # legal, z is 3, w goes 2 -> 3
array(6.0)
>>> fn(1, z=3, y=2) # legal, z is 3, w goes 3 -> 4
array(6.0)
>>> fn(1, 2, w=400) # legal, z is 42 again, w goes 400 -> 401
array(45.0)
>>> fn(1, 2) # legal, z is 42, w goes 401 -> 402
array(45.0)
In the example above, ``z`` has value 42 when no value is explicitly given.
This default value is potentially used at every function invocation, because
......@@ -285,20 +306,25 @@ If a single ``Variable`` or ``Out`` instance is given as argument, then the comp
If a list of ``Variable`` or ``Out`` instances is given as argument, then the compiled function will return a list of their values.
.. testcode::
x, y, s = T.matrices('xys')
# print a list of 2 ndarrays
fn1 = theano.function([x], [x+x, Out((x+x).T, borrow=True)])
print fn1(numpy.asarray([[1,0],[0,1]]))
# print a list of 1 ndarray
fn2 = theano.function([x], [x+x])
print fn2(numpy.asarray([[1,0],[0,1]]))
# print an ndarray
fn3 = theano.function([x], outputs=x+x)
print fn3(numpy.asarray([[1,0],[0,1]]))
>>> import numpy
>>> from theano.compile.io import Out
>>> x, y, s = T.matrices('xys')
>>> # print a list of 2 ndarrays
>>> fn1 = theano.function([x], [x+x, Out((x+x).T, borrow=True)])
>>> fn1(numpy.asarray([[1,0],[0,1]]))
[array([[ 2., 0.],
[ 0., 2.]]), array([[ 2., 0.],
[ 0., 2.]])]
>>> # print a list of 1 ndarray
>>> fn2 = theano.function([x], [x+x])
>>> fn2(numpy.asarray([[1,0],[0,1]]))
[array([[ 2., 0.],
[ 0., 2.]])]
>>> # print an ndarray
>>> fn3 = theano.function([x], outputs=x+x)
>>> fn3(numpy.asarray([[1,0],[0,1]]))
array([[ 2., 0.],
[ 0., 2.]])
......@@ -47,6 +47,14 @@ example, if we pass the following values to ``fun``:
(numpy.asarray(100.) ** 1000000).astype(theano.config.floatX), (3, 5))
fun(infa)
.. testoutput::
:hide:
:options: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: ...
It will raise an AssertionError indicating that Inf value is detected while
executing the function.
......
......@@ -25,7 +25,7 @@ process.
Creating a ProfileMode Instance
-------------------------------
First create a ProfileMode instance.
First create a ProfileMode instance.
>>> import theano
>>> from theano import ProfileMode
......@@ -63,6 +63,12 @@ Compiling your Graph with ProfileMode
Once the ProfileMode instance is created, simply compile your graph as you
would normally, by specifying the mode parameter.
.. testsetup::
import theano
input1, input2 = theano.tensor.scalars(2)
output1 = input1+input2
>>> # with functions
>>> f = theano.function([input1,input2],[output1], mode=profmode)
......@@ -77,13 +83,13 @@ of its time.
This is best shown through an example.
Let's use the example of logistic
regression. (Code for this example is in the file
``benchmark/regression/regression.py``.)
``benchmark/regression/regression.py``.)
Compiling the module with ProfileMode and calling ``profmode.print_summary()``
generates the following output:
.. testcode::
.. code-block:: python
"""
ProfileMode.print_summary()
---------------------------
......@@ -142,7 +148,7 @@ generates the following output:
The Apply-wise summary prints the timing information for the worst
offending Apply nodes. This corresponds to individual Op applications
within your graph which take the longest to execute (so if you use dot
twice, you will see two entries there).
twice, you will see two entries there).
In the Op-wise summary, the execution times of all Apply nodes
executing the same Op are grouped together and the total execution
......@@ -187,7 +193,7 @@ Reference
Print three summaries to stdout that show where cpu time is spent during theano function executions (for all functions using this object instance).
:param n_apply_to_print: the number of apply nodes to print.
:param n_apply_to_print: the number of apply nodes to print.
The default 15, but can be configured via ``ProfileMode.n_ops_to_print`` in :envvar:`THEANO_FLAGS`.
:param n_ops_to_print: the number of ops to print.
......@@ -199,10 +205,10 @@ Reference
""" As print_summary, but print the difference on two different profile mode.
TODO: Also, we don't print the Apply-wise summary as it doesn't work for now.
TODO: make comparison with GPU code.
:param other: the other instance of ProfileMode that we want to be compared to.
:param n_apply_to_print: the number of apply nodes to print.
:param n_apply_to_print: the number of apply nodes to print.
The default 15, but can be configured via ``ProfileMode.n_ops_to_print`` in :envvar:`THEANO_FLAGS`.
:param n_ops_to_print: the number of ops to print.
......
......@@ -55,7 +55,7 @@
Each registered constructor ``ctor`` will be called like this:
.. testcode::
.. code-block:: python
ctor(value, name=name, strict=strict, **kwargs)
......
......@@ -254,28 +254,39 @@ Another useful feature of scan, is that it can handle shared variables.
For example, if we want to implement a Gibbs chain of length 10 we would do
the following:
.. testcode::
.. testsetup:: scan1
W = theano.shared(W_values) # we assume that ``W_values`` contains the
# initial values of your weight matrix
import theano
import numpy
W_values = numpy.random.random((2, 2))
bvis_values = numpy.random.random((2,))
bhid_values = numpy.random.random((2,))
bvis = theano.shared(bvis_values)
bhid = theano.shared(bhid_values)
.. testcode:: scan1
trng = T.shared_randomstreams.RandomStreams(1234)
import theano
from theano import tensor as T
def OneStep(vsample) :
hmean = T.nnet.sigmoid(theano.dot(vsample, W) + bhid)
hsample = trng.binomial(size=hmean.shape, n=1, p=hmean)
vmean = T.nnet.sigmoid(theano.dot(hsample, W.T) + bvis)
return trng.binomial(size=vsample.shape, n=1, p=vmean,
dtype=theano.config.floatX)
W = theano.shared(W_values) # we assume that ``W_values`` contains the
# initial values of your weight matrix
sample = theano.tensor.vector()
bvis = theano.shared(bvis_values)
bhid = theano.shared(bhid_values)
values, updates = theano.scan(OneStep, outputs_info=sample, n_steps=10)
trng = T.shared_randomstreams.RandomStreams(1234)
gibbs10 = theano.function([sample], values[-1], updates=updates)
def OneStep(vsample) :
hmean = T.nnet.sigmoid(theano.dot(vsample, W) + bhid)
hsample = trng.binomial(size=hmean.shape, n=1, p=hmean)
vmean = T.nnet.sigmoid(theano.dot(hsample, W.T) + bvis)
return trng.binomial(size=vsample.shape, n=1, p=vmean,
dtype=theano.config.floatX)
sample = theano.tensor.vector()
values, updates = theano.scan(OneStep, outputs_info=sample, n_steps=10)
gibbs10 = theano.function([sample], values[-1], updates=updates)
The first, and probably most crucial observation is that the updates
......@@ -286,7 +297,11 @@ update dictionary to your function, you will always get the same 10
sets of random numbers. You can even use the ``updates`` dictionary
afterwards. Look at this example :
.. testcode::
.. testsetup:: scan2
import theano
.. testcode:: scan2
a = theano.shared(1)
values, updates = theano.scan(lambda: {a: a+1}, n_steps=10)
......@@ -295,15 +310,22 @@ In this case the lambda expression does not require any input parameters
and returns an update dictionary which tells how ``a`` should be updated
after each step of scan. If we write :
.. testcode::
.. testcode:: scan2
b = a + 1
c = updates[a] + 1
f = theano.function([], [b, c], updates=updates)
print b
print c
print a.value
print(b)
print(c)
print(a.get_value())
.. testoutput:: scan2
:hide:
Elemwise{add,no_inplace}.0
Elemwise{add,no_inplace}.0
1
We will see that because ``b`` does not use the updated version of
``a``, it will be 2, ``c`` will be 12, while ``a.value`` is ``11``.
......@@ -324,7 +346,7 @@ execution. To pass the shared variables to Scan you need to put them in a list
and give it to the ``non_sequences`` argument. Here is the Gibbs sampling code
updated:
.. testcode::
.. testcode:: scan1
W = theano.shared(W_values) # we assume that ``W_values`` contains the
# initial values of your weight matrix
......@@ -367,7 +389,7 @@ to be ensured by the user. Otherwise, it will result in an error.
Using the previous Gibbs sampling example:
.. testcode::
.. testcode:: scan1
# The new scan, using strict=True
values, updates = theano.scan(fn=OneStep,
......@@ -404,7 +426,12 @@ In this case we have a sequence over which we need to iterate ``u``,
and two outputs ``x`` and ``y``. To implement this with scan we first
construct a function that computes one iteration step :
.. testcode::
.. testsetup:: scan3
import theano
from theano import tensor as T
.. testcode:: scan3
def oneStep(u_tm4, u_t, x_tm3, x_tm1, y_tm1, W, W_in_1, W_in_2, W_feedback, W_out):
......@@ -427,9 +454,15 @@ an order, but also variables, since this is how scan figures out what should
be represented by what. Given that we have all
the Theano variables needed we construct our RNN as follows :
.. testcode::
.. testcode:: scan3
W = T.matrix()
W_in_1 = T.matrix()
W_in_2 = T.matrix()
W_feedback = T.matrix()
W_out = T.matrix()
u = T.matrix() # it is a sequence of vectors
u = T.matrix() # it is a sequence of vectors
x0 = T.matrix() # initial state of x has to be a matrix, since
# it has to cover x[-3]
y0 = T.vector() # y0 is just a vector since scan has only to provide
......
......@@ -83,7 +83,7 @@ TODO: Give examples on how to use these things! They are pretty complicated.
its ``version`` parameter to ``'no_fft'``. To enable it for just
one Theano function:
.. testcode::
.. code-block:: python
mode = theano.compile.get_default_mode()
mode = mode.including('conv_fft')
......@@ -144,7 +144,7 @@ TODO: Give examples on how to use these things! They are pretty complicated.
CUDA >= 5.0, scikits.cuda >= 0.5.0 and PyCUDA to run.
To enable for just one Theano function:
.. testcode::
.. code-block:: python
mode = theano.compile.get_default_mode()
mode = mode.including('conv3d_fft', 'convgrad3d_fft', 'convtransp3d_fft')
......
......@@ -43,12 +43,12 @@
Example:
.. testcode::
import theano.tensor as T
x,y,b = T.dvectors('x','y','b')
x, y, b = T.dvectors('x', 'y', 'b')
W = T.dmatrix('W')
y = T.nnet.sigmoid(T.dot(W,x) + b)
y = T.nnet.sigmoid(T.dot(W, x) + b)
.. note:: The underlying code will return an exact 0 or 1 if an
element of x is too small or too big.
......@@ -126,7 +126,7 @@
optimize this by inserting the softmax op itself. The code of
the softmax op is more numerically stable by using this code:
.. testcode::
.. code-block:: python
e_x = exp(x - x.max(axis=1, keepdims=True))
out = e_x / e_x.sum(axis=1, keepdims=True)
......@@ -159,8 +159,9 @@
.. testcode::
x, y, b = T.dvectors('x', 'y', 'b')
x, y, b, c = T.dvectors('x', 'y', 'b', 'c')
W = T.dmatrix('W')
V = T.dmatrix('V')
h = T.nnet.sigmoid(T.dot(W, x) + b)
x_recons = T.nnet.sigmoid(T.dot(V, h) + c)
recon_cost = T.nnet.binary_crossentropy(x_recons, x).mean()
......@@ -193,6 +194,11 @@
correct class (which is typically the training criterion in
classification settings).
.. testsetup::
import theano
o = theano.tensor.ivector()
.. testcode::
y = T.nnet.softmax(T.dot(W, x) + b)
......
......@@ -167,259 +167,6 @@ class T_using_gpu(unittest.TestCase):
f = theano.function([x], PyCUDADoubleOp()(x))
xv = numpy.ones((4, 5), dtype="float32")
assert numpy.allclose(f(xv), xv*2)
# print numpy.asarray(f(xv))
# Used in T_fibby
class Fibby(theano.Op):
    """
    An arbitrarily generalized Fibonacci sequence:
    out[i] = out[i - 1] * out[i - 2] + x[i] for i >= 2,
    with out[0] and out[1] taken directly from x.
    """
    # No parameters: two Fibby instances are always equal/interchangeable.
    __props__ = ()

    def make_node(self, x):
        # Coerce the input into a symbolic tensor variable; only 1-d
        # sequences are supported.
        var = theano.tensor.as_tensor_variable(x)
        assert var.ndim == 1
        # using var.type() is dangerous, it copies x's broadcasting
        # behaviour
        return theano.Apply(self, inputs=[var], outputs=[var.type()])

    def perform(self, node, inputs, output_storage):
        # Python implementation: copy the input, then fill positions >= 2
        # with the recurrence.
        (x,) = inputs
        out = x.copy()
        output_storage[0][0] = out
        for idx in range(2, len(x)):
            out[idx] = out[idx - 1] * out[idx - 2] + x[idx]

    def c_code(self, node, name, inames, onames, sub):
        # C implementation mirroring perform().  The %(x)s/%(y)s/%(fail)s
        # placeholders are substituted from locals() below, so these local
        # names must match the template.
        (x,) = inames
        (y,) = onames
        fail = sub['fail']
        return """
        Py_XDECREF(%(y)s);
        %(y)s = (PyArrayObject*)PyArray_FromArray(
            %(x)s, 0, NPY_ARRAY_ENSURECOPY);
        if (!%(y)s)
            %(fail)s;
        {//New scope needed to make compilation work
            dtype_%(y)s * y = (dtype_%(y)s*)PyArray_DATA(%(y)s);
            dtype_%(x)s * x = (dtype_%(x)s*)PyArray_DATA(%(x)s);
            for (int i = 2; i < PyArray_DIMS(%(x)s)[0]; ++i)
                y[i] = y[i-1]*y[i-2] + x[i];
        }
        """ % locals()

    def c_code_cache_version(self):
        # Bump this whenever the C code above changes, so cached compiled
        # modules are invalidated.
        return (1,)
class T_scan(unittest.TestCase):
    """Runnable copies of the scan (loop) tutorial examples.

    NOTE: the code below intentionally mirrors the tutorial verbatim;
    do not restyle it independently of the tutorial text.
    """
    # All tests here belong to
    # http://deeplearning.net/software/theano/tutorial/loop.html
    # Theano/doc/tutorial/loop.txt
    # Any change you do here also add it to the tutorial !

    def test_elemwise(self):
        """Apply tanh(v.W + b_sym) to each row of X with scan."""
        # defining the tensor variables
        X = T.matrix("X")
        W = T.matrix("W")
        b_sym = T.vector("b_sym")
        results, updates = theano.scan(lambda v: T.tanh(T.dot(v, W) + b_sym),
                                       sequences=X)
        compute_elementwise = theano.function(inputs=[X, W, b_sym],
                                              outputs=[results])
        # test values
        x = numpy.eye(2, dtype=theano.config.floatX)
        w = numpy.ones((2, 2), dtype=theano.config.floatX)
        b = numpy.ones((2), dtype=theano.config.floatX)
        b[1] = 2
        print("Scan results:", compute_elementwise(x, w, b)[0])
        # comparison with numpy
        print("Numpy results:", numpy.tanh(x.dot(w) + b))

    def test_sequence(self):
        """Recurrence over two sequences (one reversed) and one output tap."""
        # define tensor variables
        X = T.vector("X")
        W = T.matrix("W")
        b_sym = T.vector("b_sym")
        U = T.matrix("U")
        Y = T.matrix("Y")
        V = T.matrix("V")
        P = T.matrix("P")
        # P[::-1] iterates P backwards; x_tm1 is the previous output (tap -1).
        results, updates = theano.scan(
            lambda y, p, x_tm1: T.tanh(T.dot(x_tm1, W) +
                                       T.dot(y, U) + T.dot(p, V)),
            sequences=[Y, P[::-1]], outputs_info=[X])
        compute_seq = theano.function(inputs=[X, W, Y, U, P, V],
                                      outputs=[results])
        # test values
        x = numpy.zeros((2), dtype=theano.config.floatX)
        x[1] = 1
        w = numpy.ones((2, 2), dtype=theano.config.floatX)
        y = numpy.ones((5, 2), dtype=theano.config.floatX)
        y[0, :] = -3
        u = numpy.ones((2, 2), dtype=theano.config.floatX)
        p = numpy.ones((5, 2), dtype=theano.config.floatX)
        p[0, :] = 3
        v = numpy.ones((2, 2), dtype=theano.config.floatX)
        print("Scan results", compute_seq(x, w, y, u, p, v)[0])
        # comparison with numpy
        x_res = numpy.zeros((5, 2), dtype=theano.config.floatX)
        x_res[0] = numpy.tanh(x.dot(w) + y[0].dot(u) + p[4].dot(v))
        for i in range(1, 5):
            x_res[i] = numpy.tanh(x_res[i-1].dot(w) +
                                  y[i].dot(u) + p[4-i].dot(v))
        print("Numpy results:", x_res)

    def test_norm(self):
        """Row-wise and column-wise L2 norms computed with scan."""
        # define tensor variable
        X = T.matrix("X")
        results, updates = theano.scan(lambda x_i: T.sqrt((x_i**2).sum()),
                                       sequences=[X])
        compute_norm_lines = theano.function(inputs=[X], outputs=[results])
        # iterating over X.T scans the columns instead of the rows
        results, updates = theano.scan(lambda x_i: T.sqrt((x_i**2).sum()),
                                       sequences=[X.T])
        compute_norm_cols = theano.function(inputs=[X], outputs=[results])
        # test value
        x = numpy.diag(numpy.arange(1, 6, dtype=theano.config.floatX), 1)
        print("Scan results:", compute_norm_lines(x)[0], \
              compute_norm_cols(x)[0])
        # comparison with numpy
        print("Numpy results:", numpy.sqrt((x**2).sum(1)), \
              numpy.sqrt((x**2).sum(0)))

    def test_trace(self):
        """Accumulate X[i, i] along the diagonal with scan (trace of X)."""
        # define tensor variable
        X = T.matrix("X")
        # t_f is the running total; outputs_info seeds it with a scalar 0.
        results, updates = theano.scan(lambda i, j, t_f: T.cast(X[i, j] +
                                       t_f, theano.config.floatX),
                                       sequences=[T.arange(X.shape[0]),
                                                  T.arange(X.shape[1])],
                                       outputs_info=numpy.asarray(
                                           0., dtype=theano.config.floatX))
        # only the last partial sum is the full trace
        result = results[-1]
        compute_trace = theano.function(inputs=[X], outputs=[result])
        # test value
        x = numpy.eye(5, dtype=theano.config.floatX)
        x[0] = numpy.arange(5, dtype=theano.config.floatX)
        print("Scan results:", compute_trace(x)[0])
        # comparison with numpy
        print("Numpy results:", numpy.diagonal(x).sum())

    def test_taps(self):
        """Second-order recurrence using output taps -2 and -1."""
        # define tensor variables
        X = T.matrix("X")
        W = T.matrix("W")
        b_sym = T.vector("b_sym")
        U = T.matrix("U")
        V = T.matrix("V")
        n_sym = T.iscalar("n_sym")
        results, updates = theano.scan(
            lambda x_tm2, x_tm1: T.dot(x_tm2, U) + T.dot(x_tm1, V) + T.tanh(T.dot(x_tm1, W) + b_sym),
            n_steps=n_sym,
            outputs_info=[dict(initial=X, taps=[-2, -1])])
        compute_seq2 = theano.function(inputs=[X, U, V, W, b_sym, n_sym],
                                       outputs=[results])
        # test values
        x = numpy.zeros((2, 2), dtype=theano.config.floatX)
        # the initial value must be able to return x[-2]
        x[1, 1] = 1
        w = 0.5 * numpy.ones((2, 2), dtype=theano.config.floatX)
        u = 0.5 * (numpy.ones((2, 2), dtype=theano.config.floatX) -
                   numpy.eye(2, dtype=theano.config.floatX))
        v = 0.5 * numpy.ones((2, 2), dtype=theano.config.floatX)
        n = 10
        b = numpy.ones((2), dtype=theano.config.floatX)
        print("Scan results:", compute_seq2(x, u, v, w, b, n))
        # comparison with numpy
        x_res = numpy.zeros((10, 2), dtype=theano.config.floatX)
        x_res[0] = x[0].dot(u) + x[1].dot(v) + numpy.tanh(x[1].dot(w) + b)
        x_res[1] = x[1].dot(u) + x_res[0].dot(v) \
            + numpy.tanh(x_res[0].dot(w) + b)
        x_res[2] = x_res[0].dot(u) + x_res[1].dot(v) \
            + numpy.tanh(x_res[1].dot(w) + b)
        for i in range(2, 10):
            x_res[i] = (x_res[i-2].dot(u) + x_res[i-1].dot(v) +
                        numpy.tanh(x_res[i-1].dot(w) + b))
        print("Numpy results:", x_res)

    def test_jacobian(self):
        """Build the Jacobian of y = tanh(v.A) row by row with scan + grad."""
        # define tensor variables
        v = T.vector()
        A = T.matrix()
        y = T.tanh(T.dot(v, A))
        # one gradient row per output component of y
        results, updates = theano.scan(lambda i: T.grad(y[i], v),
                                       sequences=[T.arange(y.shape[0])])
        compute_jac_t = theano.function([A, v], [results],
                                        allow_input_downcast=True)  # shape (d_out, d_in)
        # test values
        x = numpy.eye(5)[0]
        w = numpy.eye(5, 3)
        w[2] = numpy.ones((3))
        print("Scan results:", compute_jac_t(w, x)[0])
        # compare with numpy
        print("Numpy results:", ((1 - numpy.tanh(x.dot(w))**2)*w).T)

    def test_accumulator(self):
        """Increment a shared variable n_sym times via scan's update dict."""
        # define shared variables
        k = theano.shared(0)
        n_sym = T.iscalar("n_sym")
        # the lambda returns only an updates dictionary, no outputs
        results, updates = theano.scan(lambda: {k: (k + 1)}, n_steps=n_sym)
        accumulator = theano.function([n_sym], [], updates=updates,
                                      allow_input_downcast=True)
        print("Before 5 steps:", k.get_value())
        accumulator(5)
        print("After 5 steps:", k.get_value())

    def test_random(self):
        """Use a shared random stream inside scan (binomial dropout mask)."""
        # define tensor variables
        X = T.matrix("X")
        W = T.matrix("W")
        b_sym = T.vector("b_sym")
        # define shared random stream
        trng = T.shared_randomstreams.RandomStreams(1234)
        d = trng.binomial(size=W[1].shape)
        results, updates = theano.scan(lambda v: T.tanh(T.dot(v, W) + b_sym) * d,
                                       sequences=X)
        # updates must be passed on so the random state advances each call
        compute_with_bnoise = theano.function(inputs=[X, W, b_sym],
                                              outputs=[results],
                                              updates=updates,
                                              allow_input_downcast=True)
        x = numpy.eye(10, 2)
        w = numpy.ones((2, 2))
        b = numpy.ones((2))
        print(compute_with_bnoise(x, w, b))
class T_typedlist(unittest.TestCase):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论