提交 b3a4f1bb 作者:Pascal Lamblin

Import theano.tensor.nnet.conv instead of theano.sandbox.conv (disable warning)

上级 594527d0
......@@ -337,7 +337,7 @@ def local_gpu_softmax(node):
return False
#### Convolution, maxpooling
import theano.sandbox.conv
from theano.tensor.nnet import conv
@register_opt()
@local_optimizer([])
def local_gpu_conv(node):
......@@ -367,12 +367,12 @@ def local_gpu_conv(node):
if node.op == gpu_from_host:
#gpu_from_host(conv) -> gpu_conv(gpu_from_host)
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op, theano.sandbox.conv.ConvOp):
if host_input.owner and isinstance(host_input.owner.op, conv.ConvOp):
gpu_conv = GpuConvOp_from_ConvOp(host_input.owner.op)
img, kern = host_input.owner.inputs
return [gpu_conv(gpu_from_host(img), gpu_from_host(kern))]
if isinstance(node.op, theano.sandbox.conv.ConvOp):
if isinstance(node.op, conv.ConvOp):
#conv(host_from_gpu) -> host_from_gpu(gpu_conv)
img, kern = node.inputs
img_on_gpu = (img.owner and img.owner.op == host_from_gpu)
......
......@@ -6,7 +6,7 @@ from theano.compile.pfunc import pfunc
from theano import tensor
import theano.tensor.nnet
import theano.sandbox.conv
import theano.tensor.nnet.conv as conv
import theano.tensor.signal.downsample as downsample
import numpy
......@@ -132,7 +132,7 @@ def run_conv_nnet1(use_gpu):
y = tensor.fmatrix('y')
lr = tensor.fscalar('lr')
conv_op = theano.sandbox.conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern, n_batch, 1, 1)
conv_op = conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern, n_batch, 1, 1)
conv_op.set_flops()
hid = tensor.tanh(conv_op(x, w)+b.dimshuffle((0,'x','x')))
......@@ -215,8 +215,8 @@ def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST
y = tensor.fmatrix('y')
lr = tensor.fscalar('lr')
conv_op = theano.sandbox.conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern, n_batch, 1, 1)
conv_op1 = theano.sandbox.conv.ConvOp((n_kern,logical_hid_shape[0]/2, logical_hid_shape[1]/2), shape_kern1[2:], n_kern1, n_batch, 1, 1)
conv_op = conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern, n_batch, 1, 1)
conv_op1 = conv.ConvOp((n_kern,logical_hid_shape[0]/2, logical_hid_shape[1]/2), shape_kern1[2:], n_kern1, n_batch, 1, 1)
conv_op.set_flops()
conv_op1.set_flops()
......@@ -299,9 +299,9 @@ def run_conv_nnet2_classif(use_gpu, isize, ksize, n_batch, n_iter,
y = tensor.fmatrix('y')
lr = tensor.fscalar('lr')
conv_op = theano.sandbox.conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern,
n_batch, 1, 1, verbose=verbose, version=version)
conv_op1 = theano.sandbox.conv.ConvOp(
conv_op = conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern,
n_batch, 1, 1, verbose=verbose, version=version)
conv_op1 = conv.ConvOp(
(n_kern,logical_hid_shape[0]/2, logical_hid_shape[1]/2),
shape_kern1[2:], n_kern1, n_batch, 1, 1,verbose=verbose, version=version)
conv_op.set_flops()
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论