提交 b2dd6567 authored 作者: Frederic Bastien's avatar Frederic Bastien

made more general the PROFILE_MODE handling.

上级 1fee9bf4
...@@ -13,6 +13,15 @@ import logging ...@@ -13,6 +13,15 @@ import logging
logging.getLogger('theano.gradient').setLevel(logging.INFO) logging.getLogger('theano.gradient').setLevel(logging.INFO)
def get_mode():
    """Return a ProfileMode instance when profiling is enabled, else None.

    Profiling is considered enabled when ``theano.compile.default_mode``
    is the string ``"PROFILE_MODE"``; any other default mode yields
    ``None`` so callers fall back to Theano's default compilation mode.
    """
    if theano.compile.default_mode == "PROFILE_MODE":
        return theano.compile.ProfileMode()
    return None
def print_mode(mode):
    """Best-effort print of a profiling summary.

    Parameters
    ----------
    mode : object or None
        Typically a ``theano.compile.ProfileMode`` instance, or ``None``
        when profiling is disabled (see ``get_mode``).  When non-None,
        its ``print_summary()`` method is invoked.

    Any failure while printing is swallowed so that profiling output can
    never break a benchmark run.
    """
    try:
        # 'is not None' (identity check) is the idiomatic and safe way
        # to test for None, instead of '!= None'.
        if mode is not None:
            mode.print_summary()
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit still propagate; summary printing stays best-effort.
        pass
def run_nnet(use_gpu): def run_nnet(use_gpu):
n_batch = 16 n_batch = 16
n_in = 1024 n_in = 1024
...@@ -42,7 +51,7 @@ def run_nnet(use_gpu): ...@@ -42,7 +51,7 @@ def run_nnet(use_gpu):
params = [w, b, v, c] params = [w, b, v, c]
gparams = tensor.grad(loss, params) gparams = tensor.grad(loss, params)
mode = theano.compile.ProfileMode() mode = get_mode()
print 'building pfunc ...' print 'building pfunc ...'
train = pfunc([x,y,lr], [loss], mode=mode, updates=[(p, p-g) for p,g in zip(params, gparams)]) train = pfunc([x,y,lr], [loss], mode=mode, updates=[(p, p-g) for p,g in zip(params, gparams)])
...@@ -56,7 +65,8 @@ def run_nnet(use_gpu): ...@@ -56,7 +65,8 @@ def run_nnet(use_gpu):
for i in xrange(100): for i in xrange(100):
rval = train(xval, yval, lr) rval = train(xval, yval, lr)
mode.print_summary()
print_mode(mode)
return rval return rval
def test_run_nnet(): def test_run_nnet():
...@@ -97,7 +107,7 @@ def run_conv_nnet1(shared_fn): ...@@ -97,7 +107,7 @@ def run_conv_nnet1(shared_fn):
params = [w, b, v, c] params = [w, b, v, c]
gparams = tensor.grad(loss, params) gparams = tensor.grad(loss, params)
mode = theano.compile.ProfileMode() mode = get_mode()
print 'building pfunc ...' print 'building pfunc ...'
train = pfunc([x,y,lr], [loss], mode=mode, updates=[(p, p-g) for p,g in zip(params, gparams)]) train = pfunc([x,y,lr], [loss], mode=mode, updates=[(p, p-g) for p,g in zip(params, gparams)])
...@@ -111,7 +121,8 @@ def run_conv_nnet1(shared_fn): ...@@ -111,7 +121,8 @@ def run_conv_nnet1(shared_fn):
for i in xrange(10): for i in xrange(10):
rval = train(xval, yval, lr) rval = train(xval, yval, lr)
mode.print_summary() print 'training done'
print_mode(mode)
return rval return rval
def test_conv_nnet1(): def test_conv_nnet1():
...@@ -161,7 +172,7 @@ def run_conv_nnet2(shared_fn): # pretend we are training LeNet for MNIST ...@@ -161,7 +172,7 @@ def run_conv_nnet2(shared_fn): # pretend we are training LeNet for MNIST
params = [w0, b0, w1, b1, v, c] params = [w0, b0, w1, b1, v, c]
gparams = tensor.grad(loss, params) gparams = tensor.grad(loss, params)
mode = theano.compile.ProfileMode() mode = get_mode()
print 'building pfunc ...' print 'building pfunc ...'
train = pfunc([x,y,lr], [loss], mode=mode, updates=[(p, p-g) for p,g in zip(params, gparams)]) train = pfunc([x,y,lr], [loss], mode=mode, updates=[(p, p-g) for p,g in zip(params, gparams)])
...@@ -175,10 +186,8 @@ def run_conv_nnet2(shared_fn): # pretend we are training LeNet for MNIST ...@@ -175,10 +186,8 @@ def run_conv_nnet2(shared_fn): # pretend we are training LeNet for MNIST
for i in xrange(10): for i in xrange(10):
rval = train(xval, yval, lr) rval = train(xval, yval, lr)
try:
mode.print_summary() print_mode(mode)
except:
pass
return rval return rval
def test_conv_nnet2(): def test_conv_nnet2():
...@@ -228,7 +237,7 @@ def run_conv_nnet2_classif(shared_fn): # pretend we are training LeNet for MNIST ...@@ -228,7 +237,7 @@ def run_conv_nnet2_classif(shared_fn): # pretend we are training LeNet for MNIST
params = [w0, b0, w1, b1, v, c] params = [w0, b0, w1, b1, v, c]
gparams = tensor.grad(loss, params) gparams = tensor.grad(loss, params)
mode = theano.compile.ProfileMode() mode = get_mode()
print 'building pfunc ...' print 'building pfunc ...'
train = pfunc([x,y,lr], [loss], mode=mode, updates=[(p, p-g) for p,g in zip(params, gparams)]) train = pfunc([x,y,lr], [loss], mode=mode, updates=[(p, p-g) for p,g in zip(params, gparams)])
...@@ -242,10 +251,7 @@ def run_conv_nnet2_classif(shared_fn): # pretend we are training LeNet for MNIST ...@@ -242,10 +251,7 @@ def run_conv_nnet2_classif(shared_fn): # pretend we are training LeNet for MNIST
for i in xrange(10): for i in xrange(10):
rval = train(xval, yval, lr) rval = train(xval, yval, lr)
try: print_mode(mode)
mode.print_summary()
except:
pass
return rval return rval
def test_conv_nnet2_classif(): def test_conv_nnet2_classif():
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论