提交 666b86b3 authored 作者: Frédéric Bastien's avatar Frédéric Bastien

Merge pull request #4041 from abergeron/fix_buildbot2

Fix the output shape in python to match the C code for DownsampleFactorMaxGradGrad.
...@@ -3,12 +3,23 @@ from . import pool ...@@ -3,12 +3,23 @@ from . import pool
import warnings import warnings
warnings.warn("downsample module has been moved to the pool module.") warnings.warn("downsample module has been moved to the pool module.")
max_pool2D = pool.max_pool2D
max_pool_2d_same_size = pool.max_pool_2d_same_size max_pool_2d_same_size = pool.max_pool_2d_same_size
max_pool_2d = pool.pool_2d max_pool_2d = pool.pool_2d
DownsampleFactorMax = pool.Pool DownsampleFactorMax = pool.Pool
PoolGrad = pool.PoolGrad PoolGrad = pool.PoolGrad
MaxPoolGrad = pool.MaxPoolGrad MaxPoolGrad = pool.MaxPoolGrad
AveragePoolGrad = pool.AveragePoolGrad AveragePoolGrad = pool.AveragePoolGrad
# This is for compatibility with pickled things. It should go away at
# some point.
class DownsampleFactorMaxGrad(object):
    """Backward-compatibility shim for the removed ``DownsampleFactorMaxGrad`` op.

    Old pickles may still reference this class. "Instantiating" it never
    creates a ``DownsampleFactorMaxGrad`` object: ``__new__`` returns a
    ``MaxPoolGrad`` for ``mode='max'`` and an ``AveragePoolGrad`` for every
    other mode, so unpickled graphs keep working against the new pool module.
    """

    # NOTE: __new__ is passed the class itself, so its first parameter is
    # conventionally named `cls` (the original misleadingly called it `self`).
    def __new__(cls, ds, ignore_border, st=None, padding=(0, 0), mode='max'):
        if mode == 'max':
            return MaxPoolGrad(ds=ds, ignore_border=ignore_border, st=st,
                               padding=padding)
        else:
            return AveragePoolGrad(ds=ds, ignore_border=ignore_border, st=st,
                                   padding=padding, mode=mode)
DownsampleFactorMaxGradGrad = pool.DownsampleFactorMaxGradGrad DownsampleFactorMaxGradGrad = pool.DownsampleFactorMaxGradGrad
local_average_pool_grad = pool.local_average_pool_grad
...@@ -15,14 +15,6 @@ import numpy ...@@ -15,14 +15,6 @@ import numpy
import theano import theano
from theano import gof, Op, tensor, Variable, Apply from theano import gof, Op, tensor, Variable, Apply
from theano.tensor.opt import register_canonicalize
def max_pool2D(*args, **kwargs):
    """Deprecated alias of ``pool_2d``; forwards all arguments unchanged.

    Emits a proper ``DeprecationWarning`` instead of printing to stderr,
    so callers can filter or escalate it with the ``warnings`` machinery.
    """
    import warnings
    warnings.warn("DEPRECATION: max_pool2D renamed to pool_2d",
                  DeprecationWarning, stacklevel=2)
    return pool_2d(*args, **kwargs)
def max_pool_2d_same_size(input, patch_size): def max_pool_2d_same_size(input, patch_size):
""" """
...@@ -633,9 +625,8 @@ class PoolGrad(Op): ...@@ -633,9 +625,8 @@ class PoolGrad(Op):
class MaxPoolGrad(PoolGrad): class MaxPoolGrad(PoolGrad):
def __init__(self, ds, ignore_border, st=None, padding=(0, 0)):
def __init__(self, ds, ignore_border, st=None, padding=(0, 0), mode='max'): PoolGrad.__init__(self, ds, ignore_border, st, padding, mode='max')
PoolGrad.__init__(self, ds, ignore_border, st, padding, mode)
def make_node(self, x, maxout, gz): def make_node(self, x, maxout, gz):
# make_node should only be called by the grad function of # make_node should only be called by the grad function of
...@@ -808,16 +799,18 @@ class MaxPoolGrad(PoolGrad): ...@@ -808,16 +799,18 @@ class MaxPoolGrad(PoolGrad):
def c_code_cache_version(self): def c_code_cache_version(self):
return (0, 7) return (0, 7)
DownsampleFactorMaxGrad = MaxPoolGrad
class AveragePoolGrad(PoolGrad): class AveragePoolGrad(PoolGrad):
def __init__(self, ds, ignore_border, st=None, padding=(0, 0),
def __init__(self, ds, ignore_border, st=None, padding=(0, 0), mode='average_inc_pad'): mode='average_inc_pad'):
assert mode in ['sum', 'average_inc_pad', 'average_exc_pad'] assert mode in ['sum', 'average_inc_pad', 'average_exc_pad']
PoolGrad.__init__(self, ds, ignore_border, st, padding, mode) PoolGrad.__init__(self, ds, ignore_border, st, padding, mode)
def make_node(self, x, gz): # There is an extra dummy parameter to match the parameter count
# of MaxPoolGrad. They have to keep the same interface because of
# the DownsampleFactorMaxGrad trick to keep old scripts working
# (see downsample.py for details on this).
def make_node(self, x, gz, dummy=None):
# make_node should only be called by the grad function of # make_node should only be called by the grad function of
# Pool, so these asserts should not fail. # Pool, so these asserts should not fail.
assert isinstance(x, Variable) and x.ndim == 4 assert isinstance(x, Variable) and x.ndim == 4
...@@ -934,8 +927,8 @@ class DownsampleFactorMaxGradGrad(Op): ...@@ -934,8 +927,8 @@ class DownsampleFactorMaxGradGrad(Op):
if len(x.shape) != 4: if len(x.shape) != 4:
raise NotImplementedError( raise NotImplementedError(
'DownsampleFactorMaxGradGrad requires 4D input for now') 'DownsampleFactorMaxGradGrad requires 4D input for now')
if (z[0] is None) or (z[0].shape != x.shape): if (z[0] is None) or (z[0].shape != maxout.shape):
z[0] = numpy.zeros(x.shape, dtype=x.dtype) z[0] = numpy.zeros(maxout.shape, dtype=x.dtype)
ggz = z[0] # grad wrt maxout_grad has the same shape as maxout ggz = z[0] # grad wrt maxout_grad has the same shape as maxout
# number of pooling output rows # number of pooling output rows
pr = ggz.shape[-2] pr = ggz.shape[-2]
...@@ -1060,19 +1053,3 @@ class DownsampleFactorMaxGradGrad(Op): ...@@ -1060,19 +1053,3 @@ class DownsampleFactorMaxGradGrad(Op):
def c_code_cache_version(self): def c_code_cache_version(self):
return (0, 1) return (0, 1)
@register_canonicalize('fast_compile')
@gof.local_optimizer([MaxPoolGrad])
def local_average_pool_grad(node):
    """Rewrite a MaxPoolGrad node carrying an averaging/sum mode into an
    AveragePoolGrad node.

    Kept to assure backward compatibility with the old
    DownsampleFactorMaxGrad, whose instances could carry non-max modes.
    Returns ``False`` (no replacement) for genuine max-mode nodes.
    """
    op = node.op
    if not isinstance(op, MaxPoolGrad):
        return False
    if op.mode not in ('sum', 'average_exc_pad', 'average_inc_pad'):
        return False
    # AveragePoolGrad takes (x, gz); the maxout input (node.inputs[1]) is
    # the dummy parameter and is dropped here.
    replacement = AveragePoolGrad(ds=op.ds,
                                  ignore_border=op.ignore_border,
                                  st=op.st,
                                  padding=op.padding,
                                  mode=op.mode)
    return [replacement(node.inputs[0], node.inputs[2])]
...@@ -10,8 +10,10 @@ import theano.tensor as tensor ...@@ -10,8 +10,10 @@ import theano.tensor as tensor
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
from theano.tensor.signal.pool import (Pool, pool_2d, from theano.tensor.signal.pool import (Pool, pool_2d,
MaxPoolGrad, AveragePoolGrad, MaxPoolGrad, AveragePoolGrad,
DownsampleFactorMaxGrad,
max_pool_2d_same_size) max_pool_2d_same_size)
from theano.tensor.signal.downsample import DownsampleFactorMaxGrad
from theano import function from theano import function
...@@ -802,20 +804,17 @@ class TestDownsampleFactorMax(utt.InferShapeTester): ...@@ -802,20 +804,17 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
padding=(0, 0))(image)], padding=(0, 0))(image)],
[image_val], Pool) [image_val], Pool)
def test_opt_max_to_average(self): def test_DownsampleFactorMaxGrad(self):
im = theano.tensor.tensor4() im = theano.tensor.tensor4()
maxout = theano.tensor.tensor4() maxout = theano.tensor.tensor4()
grad = theano.tensor.tensor4() grad = theano.tensor.tensor4()
compilation_mode = theano.compile.get_default_mode().including(
'local_average_pool_grad')
for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']: for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
f = theano.function([im, maxout, grad], f = theano.function([im, maxout, grad],
DownsampleFactorMaxGrad(ds=(3, 3), DownsampleFactorMaxGrad(ds=(3, 3),
ignore_border=False, ignore_border=False,
mode=mode)(im, maxout, grad), mode=mode)(im, maxout, grad),
mode=compilation_mode) on_unused_input='ignore')
if mode == 'max': if mode == 'max':
assert any(isinstance(n.op, MaxPoolGrad) assert any(isinstance(n.op, MaxPoolGrad)
......
...@@ -79,7 +79,6 @@ whitelist_flake8 = [ ...@@ -79,7 +79,6 @@ whitelist_flake8 = [
"tensor/tests/test_blas_c.py", "tensor/tests/test_blas_c.py",
"tensor/tests/test_blas_scipy.py", "tensor/tests/test_blas_scipy.py",
"tensor/tests/test_mpi.py", "tensor/tests/test_mpi.py",
"tensor/signal/pool.py",
"tensor/signal/conv.py", "tensor/signal/conv.py",
"tensor/signal/tests/test_conv.py", "tensor/signal/tests/test_conv.py",
"tensor/signal/tests/test_downsample.py", "tensor/signal/tests/test_downsample.py",
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论