提交 90b64005 authored 作者: Frederic Bastien's avatar Frederic Bastien

pep8

上级 bb34c8c2
......@@ -72,14 +72,14 @@ Python in one slide
# PYTHON SYNTAX EXAMPLE
#######################
a = 1 # no type declaration required!
b = (1,2,3) # tuple of three int literals
c = [1,2,3] # list of three int literals
b = (1, 2, 3) # tuple of three int literals
c = [1, 2, 3] # list of three int literals
d = {'a': 5, b: None} # dictionary of two elements
# N.B. string literal, None
print d['a'] # square brackets index
# -> 5
print d[(1,2,3)] # new tuple == b, retrieves None
print d[(1, 2, 3)] # new tuple == b, retrieves None
# -> None
print d[6]
# raises KeyError Exception
......@@ -186,23 +186,23 @@ Training an MNIST-ready classification neural network in pure NumPy might look l
batchsize = 100
for i in xrange(1000):
x_i = x[i*batchsize:(i+1)*batchsize]
y_i = y[i*batchsize:(i+1)*batchsize]
x_i = x[i * batchsize: (i + 1) * batchsize]
y_i = y[i * batchsize: (i + 1) * batchsize]
hidin = np.dot(x_i, w) + b
hidout = np.tanh(hidin)
outin = np.dot(hidout, v) + c
outout = (np.tanh(outin)+1)/2.0
outout = (np.tanh(outin) + 1) / 2.0
g_outout = outout - y_i
err = 0.5 * np.sum(g_outout**2)
err = 0.5 * np.sum(g_outout ** 2)
g_outin = g_outout * outout * (1.0 - outout)
g_hidout = np.dot(g_outin, v.T)
g_hidin = g_hidout * (1 - hidout**2)
g_hidin = g_hidout * (1 - hidout ** 2)
b -= lr * np.sum(g_hidin, axis=0)
c -= lr * np.sum(g_outin, axis=0)
......@@ -229,40 +229,42 @@ you have GPU (I'm skipping some dtype-details which we'll come back to).
# Neural Network on MNIST
#########################
import theano as T
import theano.tensor as TT
import numpy as np
import theano
import theano.tensor as tensor
x = np.load('data_x.npy')
y = np.load('data_y.npy')
# symbol declarations
sx = TT.matrix()
sy = TT.matrix()
w = T.shared(np.random.normal(avg=0, std=.1,
size=(784, 500)))
b = T.shared(np.zeros(500))
v = T.shared(np.zeros((500, 10)))
c = T.shared(np.zeros(10))
sx = tensor.matrix()
sy = tensor.matrix()
w = theano.shared(np.random.normal(avg=0, std=.1,
size=(784, 500)))
b = theano.shared(np.zeros(500))
v = theano.shared(np.zeros((500, 10)))
c = theano.shared(np.zeros(10))
# symbolic expression-building
hid = TT.tanh(TT.dot(sx, w) + b)
out = TT.tanh(TT.dot(hid, v) + c)
err = 0.5 * TT.sum(out - sy)**2
gw, gb, gv, gc = TT.grad(err, [w,b,v,c])
hid = tensor.tanh(tensor.dot(sx, w) + b)
out = tensor.tanh(tensor.dot(hid, v) + c)
err = 0.5 * tensor.sum(out - sy) ** 2
gw, gb, gv, gc = tensor.grad(err, [w, b, v, c])
# compile a fast training function
train = T.function([sx, sy], err,
train = theano.function([sx, sy], err,
updates={
w:w - lr * gw,
b:b - lr * gb,
v:v - lr * gv,
c:c - lr * gc})
w: w - lr * gw,
b: b - lr * gb,
v: v - lr * gv,
c: c - lr * gc})
# now do the computations
batchsize = 100
for i in xrange(1000):
x_i = x[i*batchsize:(i+1)*batchsize]
y_i = y[i*batchsize:(i+1)*batchsize]
x_i = x[i * batchsize: (i + 1) * batchsize]
y_i = y[i * batchsize: (i + 1) * batchsize]
err_i = train(x_i, y_i)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论