提交 58a29a81 authored 作者: Frederic Bastien's avatar Frederic Bastien

Print less output and make it easy to skip executing the C version, for speed.

上级 1b5c9a71
......@@ -10,7 +10,7 @@ from .type_support import filter as type_support_filter
import logging
_logger_name = 'theano_cuda_ndarray.basic_ops'
_logger = logging.getLogger(_logger_name)
_logger.setLevel(logging.DEBUG)
_logger.setLevel(logging.INFO)
_logger.addHandler(logging.StreamHandler()) #TO REMOVE
def warning(*msg):
_logger.warning(_logger_name+'WARNING: '+' '.join(str(m) for m in msg))
......
......@@ -192,8 +192,8 @@ def run_conv_nnet2(shared_fn): # pretend we are training LeNet for MNIST
print 'building pfunc ...'
train = pfunc([x,y,lr], [loss], mode=mode, updates=[(p, p-g) for p,g in zip(params, gparams)])
for i, n in enumerate(train.maker.env.toposort()):
print i, n
# for i, n in enumerate(train.maker.env.toposort()):
# print i, n
xval = numpy.asarray(numpy.random.rand(*shape_img), dtype='float32')
yval = numpy.asarray(numpy.random.rand(n_batch,n_out), dtype='float32')#int32 make all 0...
......@@ -205,11 +205,13 @@ def run_conv_nnet2(shared_fn): # pretend we are training LeNet for MNIST
return rval
def test_conv_nnet2():
numpy.random.seed(23456)
rval_cpu = run_conv_nnet2(shared)
numpy.random.seed(23456)
rval_gpu = run_conv_nnet2(tcn.shared_constructor)
assert numpy.allclose(rval_cpu, rval_gpu,rtol=1e-4,atol=1e-4)
if True:
numpy.random.seed(23456)
rval_cpu = run_conv_nnet2(shared)
print rval_cpu[0], rval_gpu[0],rval_cpu[0]-rval_gpu[0]
assert numpy.allclose(rval_cpu, rval_gpu,rtol=1e-4,atol=1e-4)
def run_conv_nnet2_classif(shared_fn): # pretend we are training LeNet for MNIST
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论