提交 a3758920 authored 作者: James Bergstra's avatar James Bergstra

merge

......@@ -156,12 +156,16 @@ def local_gpu_conv(node):
conv(host_from_gpu) -> host_from_gpu(conv)
"""
def GpuConvOp_from_ConvOp(op):
    """Build a GpuConv op equivalent to the CPU ConvOp ``op``.

    Copies the border mode, the (dx, dy) subsampling factors, and the
    logical image/kernel shape and alignment settings from ``op``.

    :param op: a ConvOp instance to translate to the GPU.
    :returns: the new GpuConv instance.
    """
    ret = GpuConv(border_mode=op.out_mode,
                  subsample=(op.dx, op.dy),
                  logical_img_hw=op.imshp_logical[1:3],
                  logical_kern_hw=op.kshp_logical,
                  logical_kern_align_top=op.kshp_logical_top_aligned)
    # HACK: propagate the flop count so the profiler can report the
    # number of MFlops for the GPU op as well.
    if hasattr(op, 'flops'):
        ret.flops = op.flops
    return ret
if node.op == gpu_from_host:
host_input = node.inputs[0]
......
......@@ -16,11 +16,8 @@ logging.getLogger('theano.gradient').setLevel(logging.INFO)
def get_mode():
    """Return a fresh ProfileMode when profiling is requested, else None."""
    if theano.compile.default_mode == "PROFILE_MODE":
        return theano.compile.ProfileMode()
    return None
def print_mode(mode):
    """Print a profiling summary when ``mode`` is a ProfileMode.

    Any other mode (including None) is silently ignored, so callers can
    pass whatever get_mode() returned without checking it first.
    """
    # isinstance() is False for None, so no separate `mode is not None`
    # guard is needed.
    if isinstance(mode, theano.compile.ProfileMode):
        mode.print_summary()
def run_nnet(use_gpu):
#n_batch = 16
......@@ -159,8 +156,8 @@ def run_conv_nnet2(shared_fn): # pretend we are training LeNet for MNIST
n_train=30
logical_hid_shape = tcn.blas.GpuConv.logical_output_shape_2d((32, 32), (5, 5), 'valid')
logical_hid_shape1 = tcn.blas.GpuConv.logical_output_shape_2d((logical_hid_shape[0]/2, logical_hid_shape[1]/2), (5, 5), 'valid')
logical_hid_shape = tcn.blas.GpuConv.logical_output_shape_2d(tuple(shape_img[2:]),tuple(shape_kern[2:]), 'valid')
logical_hid_shape1 = tcn.blas.GpuConv.logical_output_shape_2d((logical_hid_shape[0]/2, logical_hid_shape[1]/2), tuple(shape_kern1[2:]), 'valid')
n_hid = n_kern1 * logical_hid_shape1[0] * logical_hid_shape1[1]
n_out = 10
......@@ -177,6 +174,9 @@ def run_conv_nnet2(shared_fn): # pretend we are training LeNet for MNIST
conv_op = theano.sandbox.conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern, n_batch, 1, 1)
conv_op1 = theano.sandbox.conv.ConvOp((n_kern,logical_hid_shape[0]/2, logical_hid_shape[1]/2), shape_kern1[2:], n_kern1, n_batch, 1, 1)
conv_op.set_flops()
conv_op1.set_flops()
hid = tensor.tanh(conv_op(x, w0)+b0)
hid1 = tensor.tanh(conv_op1(hid[:,:,::2,::2], w1) + b1)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论