Commit adf81d30 authored by vdumoulin

Merge pull request #3679 from shabanian/tensor_signal_pep8

Renamed max_pool_2d to pool_2d and DownsampleFactorMax to Pool
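For code that tracks this rename, a minimal before/after sketch (names and keyword arguments are taken from the hunks below; this is a usage sketch, not part of the commit):

import theano
import theano.tensor as T
# was: from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.signal.pool import pool_2d

x = T.tensor4('x')
# pool_2d keeps the old max_pool_2d call signature; max pooling is still
# the default mode, with 'average_inc_pad' etc. available as before.
y = pool_2d(x, ds=(2, 2), ignore_border=True)
f = theano.function([x], y)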
@@ -12,8 +12,8 @@ from theano.compile import optdb
 from theano.compile.ops import shape_i
 from theano.tensor.nnet import SoftmaxGrad
 from theano.tensor.nnet.abstract_conv import get_conv_output_shape
-from theano.tensor.signal.downsample import (
-    DownsampleFactorMax, MaxPoolGrad, AveragePoolGrad)
+from theano.tensor.signal.pool import (
+    Pool, MaxPoolGrad, AveragePoolGrad)
 from theano.sandbox.cuda.type import CudaNdarrayType
 from theano.sandbox.cuda import GpuOp
@@ -2299,11 +2299,11 @@ if True:
         return [dnn_pool(gpu_contiguous(img), ds, ds)]

     @register_opt('cudnn')
-    @local_optimizer([DownsampleFactorMax])
+    @local_optimizer([Pool])
     def local_pool_dnn_alternative(node):
         if not dnn_available():
             return
-        if isinstance(node.op, DownsampleFactorMax):
+        if isinstance(node.op, Pool):
             if not node.op.ignore_border:
                 return
             img, = node.inputs
...
@@ -137,14 +137,14 @@ register_opt(name='local_gpu_reshape_chain')(
 # This is a partial list of CPU ops that can be in some circonstance
 # moved to the GPU. This list is used by an optimization.
 # Hopefully, we can keep this list up to date.
-import theano.tensor.signal.downsample
+import theano.tensor.signal.pool
 import theano.tensor.nnet.neighbours
 cpu_ops_moved_to_gpu = [
     tensor.blas.Dot22, tensor.blas.Dot22Scalar, tensor.blas.Gemm,
     tensor.blas.Gemv, tensor.blas.Ger, tensor.nnet.conv.ConvOp,
-    tensor.signal.downsample.DownsampleFactorMax,
-    tensor.signal.downsample.MaxPoolGrad,
-    tensor.signal.downsample.AveragePoolGrad,
+    tensor.signal.pool.Pool,
+    tensor.signal.pool.MaxPoolGrad,
+    tensor.signal.pool.AveragePoolGrad,
     theano.tensor.nnet.neighbours.Images2Neibs,
     tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias,
     tensor.nnet.CrossentropySoftmax1HotWithBiasDx,
@@ -1848,13 +1848,13 @@ gpu_optimizer.register("convtransp3d_gemm", local_convtransp3d_gemm)

 # Pooling
-import theano.tensor.signal.downsample as downsample
+import theano.tensor.signal.pool as pool


 @register_opt()
-@local_optimizer([downsample.DownsampleFactorMax])
+@local_optimizer([pool.Pool])
 def local_gpu_downsample_factor_max(node):
-    if (isinstance(node.op, downsample.DownsampleFactorMax)
+    if (isinstance(node.op, pool.Pool)
             and node.op.ds == node.op.st):
         assert node.op.__props__ == ('ds', 'ignore_border', 'st', 'padding',
@@ -1868,9 +1868,9 @@ def local_gpu_downsample_factor_max(node):

 @register_opt()
-@local_optimizer([downsample.MaxPoolGrad])
+@local_optimizer([pool.MaxPoolGrad])
 def local_gpu_downsample_factor_max_grad(node):
-    if (isinstance(node.op, downsample.MaxPoolGrad) and
+    if (isinstance(node.op, pool.MaxPoolGrad) and
             node.op.ds == node.op.st):
         assert node.op.__props__ == ('ds', 'ignore_border', 'st', 'padding',
@@ -1890,9 +1890,9 @@ def local_gpu_downsample_factor_max_grad(node):

 @register_opt()
-@local_optimizer([downsample.DownsampleFactorMaxGradGrad])
+@local_optimizer([pool.DownsampleFactorMaxGradGrad])
 def local_gpu_downsample_factor_max_grad_grad(node):
-    if isinstance(node.op, downsample.DownsampleFactorMaxGradGrad):
+    if isinstance(node.op, pool.DownsampleFactorMaxGradGrad):
         assert node.op.__props__ == ('ds', 'ignore_border', 'st',
                                      'padding', 'mode')
         if node.op.padding != (0, 0) or node.op.mode != 'max':
...
@@ -16,7 +16,7 @@ if cuda_ndarray.cuda_available == False:

 import theano.sandbox.cuda as tcn
-from theano.tensor.signal.downsample import (DownsampleFactorMax,
+from theano.tensor.signal.pool import (Pool,
         DownsampleFactorMaxGrad, DownsampleFactorMaxGradGrad)
 import theano.compile.mode
@@ -280,7 +280,7 @@ class TestBlasStridesGpu(TestBlasStrides):

 if 0:
     # This is commented out because it doesn't make sense...
-    # tcn.blas has no op called DownsampleFactorMax
+    # tcn.blas has no op called Pool
     # tcn.blas has an op called GpuDownsampleFactorMax, but that op requires arguments that are
     # CudaNdarrayType variables... so rethink this test?
     def test_maxpool():
@@ -290,7 +290,7 @@ if 0:
                 [[[[6, 8, 9], [ 16, 18, 19], [ 21, 23, 24]]]])]:
             for border, ret in [(True, r_true), (False, r_false)]:
                 ret = numpy.array(ret)
-                a = tcn.blas.DownsampleFactorMax((2, 2), border)
+                a = tcn.blas.Pool((2, 2), border)
                 dmatrix4 = tensor.TensorType("float32", (False, False, False, False))
                 b = dmatrix4()
                 f = pfunc([b], [a(b)], mode=mode_with_gpu)
@@ -347,7 +347,7 @@ def test_downsample():
                 continue
         for ignore_border in (True, False):
             # print 'test_downsample', shp, ds, ignore_border
-            ds_op = DownsampleFactorMax(ds, ignore_border=ignore_border)
+            ds_op = Pool(ds, ignore_border=ignore_border)
             a = tcn.shared_constructor(my_rand(*shp), 'a')
             f = pfunc([], ds_op(tensor.as_tensor_variable(a)),
@@ -357,7 +357,7 @@ def test_downsample():
             assert any([isinstance(node.op,
                                    tcn.blas.GpuDownsampleFactorMax)
                         for node in f.maker.fgraph.toposort()])
-            assert any([isinstance(node.op, DownsampleFactorMax)
+            assert any([isinstance(node.op, Pool)
                         for node in f2.maker.fgraph.toposort()])
             assert numpy.allclose(f(), f2())
...
@@ -9,8 +9,8 @@ from six import StringIO
 import theano.tensor as T
 import theano.tests.unittest_tools as utt
 from theano.sandbox.neighbours import images2neibs
-from theano.tensor.signal.downsample import max_pool_2d
-from theano.tensor.signal.downsample import MaxPoolGrad, AveragePoolGrad
+from theano.tensor.signal.pool import pool_2d
+from theano.tensor.signal.pool import MaxPoolGrad, AveragePoolGrad
 import theano.sandbox.cuda.dnn as dnn
 from theano.sandbox.cuda.basic_ops import GpuAllocEmpty, gpu_alloc_empty
 from theano.sandbox.cuda import float32_shared_constructor as shared
@@ -256,7 +256,7 @@ def test_pooling():
                 # Not implemented
                 continue
             # We will check that the opt introduced it.
-            out1 = max_pool_2d(x, (ws, ws),
-                               st=(stride, stride),
-                               ignore_border=True,
-                               padding=pad, mode=mode)
+            out1 = pool_2d(x, (ws, ws),
+                           st=(stride, stride),
+                           ignore_border=True,
+                           padding=pad, mode=mode)
@@ -294,7 +294,7 @@ def test_pooling():
         # This test the CPU grad + opt + GPU implemtentation
         def fn(x):
-            return max_pool_2d(x, (ws, ws), ignore_border=True,
-                               padding=pad, mode=mode)
+            return pool_2d(x, (ws, ws), ignore_border=True,
+                           padding=pad, mode=mode)
         theano.tests.unittest_tools.verify_grad(fn, [data],
                                                 cast_to_output_type=False,
@@ -325,7 +325,7 @@ def test_pooling():
         g_out = fg(data)

         # Compare again the CPU result
-        out = max_pool_2d(x, (ws, ws),
-                          padding=pad,
-                          ignore_border=True, mode=mode)
+        out = pool_2d(x, (ws, ws),
+                      padding=pad,
+                      ignore_border=True, mode=mode)
         fc = theano.function([x], theano.grad(out.sum(), x),
@@ -453,7 +453,7 @@ def test_pooling_opt():
     f = theano.function(
         [x],
-        max_pool_2d(x, ds=(2, 2), mode='average_inc_pad', ignore_border=True),
+        pool_2d(x, ds=(2, 2), mode='average_inc_pad', ignore_border=True),
         mode=mode_with_gpu)

     assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
@@ -463,7 +463,7 @@ def test_pooling_opt():
     f = theano.function(
         [x],
-        T.grad(max_pool_2d(x, ds=(2, 2), mode='average_inc_pad',
-                           ignore_border=True).sum(), x),
+        T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad',
+                       ignore_border=True).sum(), x),
         mode=mode_with_gpu.including("cudnn"))
@@ -618,7 +618,7 @@ def test_dnn_tag():
     try:
         f = theano.function(
             [x],
-            max_pool_2d(x, ds=(2, 2), ignore_border=True),
+            pool_2d(x, ds=(2, 2), ignore_border=True),
             mode=mode_with_gpu.including("cudnn"))
     except (AssertionError, RuntimeError):
         assert not cuda.dnn.dnn_available()
...
@@ -14,7 +14,7 @@ from theano.compile.pfunc import pfunc
 from theano import tensor
 from theano import config
 import theano.tensor.nnet.conv as conv
-import theano.tensor.signal.downsample as downsample
+import theano.tensor.signal.pool as pool
 import theano.sandbox.cuda as tcn
 import theano.tests.unittest_tools as utt
@@ -372,7 +372,7 @@ def build_conv_nnet2_classif(use_gpu, isize, ksize, n_batch,
         (n_kern, logical_hid_shape[0] // 2, logical_hid_shape[1] // 2),
         shape_kern1[2:], n_kern1, n_batch, 1, 1, verbose=verbose, version=version)

-    ds_op = downsample.DownsampleFactorMax((2, 2), ignore_border=False)
+    ds_op = pool.Pool((2, 2), ignore_border=False)
     if downsample_ops:
         hid = tensor.tanh(ds_op(conv_op(x, w0) + b0.dimshuffle((0, 'x', 'x'))))
     else:
@@ -612,7 +612,7 @@ def test_lenet_32():  # CIFAR10 / Shapeset

 def test_lenet_32_long():  # CIFAR10 / Shapeset
-    # this tests the gradient of downsample on the GPU,
+    # this tests the gradient of pool on the GPU,
     # which does not recieve specific testing
     cmp_run_conv_nnet2_classif(seed, 32, 5, 30, n_train=50,
                                ignore_error=ignore_error, gpu_only=gpu_only,
...
@@ -17,9 +17,8 @@ from theano.tensor.nnet.abstract_conv import (AbstractConv2d,
                                               AbstractConv2d_gradWeights,
                                               AbstractConv2d_gradInputs,
                                               get_conv_output_shape)
-from theano.tensor.signal.downsample import (DownsampleFactorMax,
-                                             MaxPoolGrad, AveragePoolGrad)
+from theano.tensor.signal.pool import (
+    Pool, MaxPoolGrad, AveragePoolGrad)
 from . import pygpu
 from .type import get_context, gpu_context_type, list_contexts, GpuArrayType
 from .basic_ops import (as_gpuarray_variable, infer_context_name,
@@ -1383,7 +1382,7 @@ def local_dnn_convi_output_merge(node, *inputs):

 @register_opt('cudnn')
-@op_lifter([DownsampleFactorMax])
+@op_lifter([Pool])
 def local_pool_dnn_alternative(node, ctx_name):
     if not dnn_available(ctx_name):
         return
...
@@ -9,8 +9,8 @@ from six import StringIO
 import theano.tensor as T
 import theano.tests.unittest_tools as utt
 from theano.sandbox.neighbours import images2neibs
-from theano.tensor.signal.downsample import max_pool_2d
-from theano.tensor.signal.downsample import MaxPoolGrad, AveragePoolGrad
+from theano.tensor.signal.pool import pool_2d
+from theano.tensor.signal.pool import MaxPoolGrad, AveragePoolGrad
 from .. import dnn
 from ..basic_ops import GpuAllocEmpty
@@ -185,7 +185,7 @@ def test_pooling():
                 # Not implemented
                 continue
             # We will check that the opt introduced it.
-            out1 = max_pool_2d(x, (ws, ws),
-                               st=(stride, stride),
-                               ignore_border=True,
-                               padding=pad, mode=mode)
+            out1 = pool_2d(x, (ws, ws),
+                           st=(stride, stride),
+                           ignore_border=True,
+                           padding=pad, mode=mode)
@@ -223,7 +223,7 @@ def test_pooling():
         # This test the CPU grad + opt + GPU implemtentation
         def fn(x):
-            return max_pool_2d(x, (ws, ws), ignore_border=True,
-                               padding=pad, mode=mode)
+            return pool_2d(x, (ws, ws), ignore_border=True,
+                           padding=pad, mode=mode)
         utt.verify_grad(fn, [data],
                         cast_to_output_type=False,
@@ -253,7 +253,7 @@ def test_pooling():
         g_out = fg(data)

         # Compare against the CPU result
-        out = max_pool_2d(x, (ws, ws),
-                          padding=pad,
-                          ignore_border=True, mode=mode)
+        out = pool_2d(x, (ws, ws),
+                      padding=pad,
+                      ignore_border=True, mode=mode)
         fc = theano.function([x], theano.grad(out.sum(), x),
@@ -276,7 +276,7 @@ def test_pooling_opt():
     f = theano.function(
         [x],
-        max_pool_2d(x, ds=(2, 2), mode='average_inc_pad',
-                    ignore_border=True),
+        pool_2d(x, ds=(2, 2), mode='average_inc_pad',
+                ignore_border=True),
         mode=mode_with_gpu)
@@ -287,7 +287,7 @@ def test_pooling_opt():
     f = theano.function(
         [x],
-        T.grad(max_pool_2d(x, ds=(2, 2), mode='average_inc_pad',
-                           ignore_border=True).sum(),
+        T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad',
+                       ignore_border=True).sum(),
                x),
         mode=mode_with_gpu.including("cudnn"))
@@ -315,7 +315,7 @@ def test_dnn_tag():
     try:
         f = theano.function(
             [x],
-            max_pool_2d(x, ds=(2, 2), ignore_border=True),
+            pool_2d(x, ds=(2, 2), ignore_border=True),
             mode=mode_with_gpu.including("cudnn"))
     except (AssertionError, RuntimeError):
         assert not dnn.dnn_available(test_ctx_name)
...
@@ -3,7 +3,7 @@ Contains an Op for convolving input images with a set of filters. This was
 developed especially for Convolutional Neural Networks.

 For related ops, including downsampling and subsampling, see
-tensor.signal and tensor.signal.downsample.
+tensor.signal and tensor.signal.pool.

 See especially conv2d().
 """
...
[Diff collapsed.]
[Diff collapsed.]
@@ -79,7 +79,7 @@ whitelist_flake8 = [
     "tensor/tests/test_blas_c.py",
     "tensor/tests/test_blas_scipy.py",
     "tensor/tests/test_mpi.py",
-    "tensor/signal/downsample.py",
+    "tensor/signal/pool.py",
     "tensor/signal/conv.py",
     "tensor/signal/tests/test_conv.py",
     "tensor/signal/tests/test_downsample.py",
...
@@ -21,7 +21,7 @@ import numpy
 from theano.gof import Op, Apply
 from theano.gradient import grad_undefined
 from theano.tests.unittest_tools import SkipTest
-from theano.tensor.signal.downsample import DownsampleFactorMax
+from theano.tensor.signal.pool import Pool
 from theano.tensor.nnet import conv
 '''
...
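Downstream code that must run on both sides of the rename can guard the import. A hedged compatibility sketch (the names come from this diff; whether the old downsample module survives as a deprecated shim in later releases is an assumption, not something shown here):

try:
    # New names introduced by this commit
    from theano.tensor.signal.pool import pool_2d, Pool
except ImportError:
    # Pre-rename Theano: fall back to the old module and alias the old names
    from theano.tensor.signal.downsample import max_pool_2d as pool_2d
    from theano.tensor.signal.downsample import DownsampleFactorMax as Pool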