Commit 28b99277 authored by Arnaud Bergeron

Remove the bad compat for pickled DownsampleFactorMaxGrad and use a better one.

Parent 2d29bb3c
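The "better" compat this commit introduces is a `__new__`-based shim: the old class name `DownsampleFactorMaxGrad` survives, but constructing it returns an instance of `MaxPoolGrad` or `AveragePoolGrad` depending on `mode`, instead of aliasing `DownsampleFactorMaxGrad = MaxPoolGrad` as before. A minimal standalone sketch of the pattern, using stand-in classes rather than the real Theano Ops:

    # Stand-in classes; the real Ops take more constructor arguments.
    class MaxPoolGrad(object):
        def __init__(self, ds, mode='max'):
            self.ds, self.mode = ds, mode

    class AveragePoolGrad(object):
        def __init__(self, ds, mode='average_inc_pad'):
            self.ds, self.mode = ds, mode

    class DownsampleFactorMaxGrad(object):
        # Keep the old name constructible, but dispatch to the class
        # that actually implements the requested mode.
        def __new__(cls, ds, mode='max'):
            if mode == 'max':
                return MaxPoolGrad(ds, mode)
            return AveragePoolGrad(ds, mode)

    assert isinstance(DownsampleFactorMaxGrad((2, 2)), MaxPoolGrad)
    assert isinstance(DownsampleFactorMaxGrad((2, 2), mode='sum'), AveragePoolGrad)

Because `__new__` returns an object that is not an instance of `DownsampleFactorMaxGrad`, Python skips that class's `__init__`, which is why the shim in the diff defines none of its own. How much this helps actual unpickling depends on how the instances were serialized, but any code that looks the class up by name and constructs it now gets the correct new Op.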
@@ -808,8 +808,6 @@ class MaxPoolGrad(PoolGrad):
     def c_code_cache_version(self):
         return (0, 7)
 
-DownsampleFactorMaxGrad = MaxPoolGrad
-
 
 
 class AveragePoolGrad(PoolGrad):
@@ -817,7 +815,7 @@ class AveragePoolGrad(PoolGrad):
         assert mode in ['sum', 'average_inc_pad', 'average_exc_pad']
         PoolGrad.__init__(self, ds, ignore_border, st, padding, mode)
 
-    def make_node(self, x, gz):
+    def make_node(self, x, gz, dummy=None):
         # make_node should only be called by the grad function of
         # Pool, so these asserts should not fail.
         assert isinstance(x, Variable) and x.ndim == 4
@@ -892,6 +890,18 @@ class AveragePoolGrad(PoolGrad):
                 st=self.st, padding=self.padding, mode=self.mode)(ggx)]
 
+
+# This is for compatibility with pickled things. It should go away at
+# some point.
+class DownsampleFactorMaxGrad(object):
+    def __new__(self, ds, ignore_border, st=None, padding=(0, 0), mode='max'):
+        if mode == 'max':
+            return MaxPoolGrad(ds=ds, ignore_border=ignore_border, st=st,
+                               padding=padding, mode='max')
+        else:
+            return AveragePoolGrad(ds=ds, ignore_border=ignore_border, st=st,
+                                   padding=padding, mode=mode)
+
 
 class DownsampleFactorMaxGradGrad(Op):
     __props__ = ('ds', 'ignore_border', 'st', 'padding', 'mode')
@@ -1060,19 +1070,3 @@ class DownsampleFactorMaxGradGrad(Op):
     def c_code_cache_version(self):
         return (0, 1)
-
-
-@register_canonicalize('fast_compile')
-@gof.local_optimizer([MaxPoolGrad])
-def local_average_pool_grad(node):
-    # To assure backward compatibility with
-    # DownsampleFactorMaxGrad
-    if (not isinstance(node.op, MaxPoolGrad) or node.op.mode not in
-            ['sum', 'average_exc_pad', 'average_inc_pad']):
-        return False
-    return [AveragePoolGrad(ds=node.op.ds,
-                            ignore_border=node.op.ignore_border,
-                            st=node.op.st,
-                            padding=node.op.padding,
-                            mode=node.op.mode)(node.inputs[0],
-                                               node.inputs[2])]
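Two details above are worth reading together (my inference from the hunks, not stated in the commit message): the deleted `local_average_pool_grad` optimizer achieved compatibility by rewriting already-built `MaxPoolGrad` nodes carrying `sum`/`average_*` modes into `AveragePoolGrad` during canonicalization, whereas the new shim dispatches at construction time and needs no optimizer registration at all. And the `dummy=None` parameter added to `AveragePoolGrad.make_node` appears to exist so that old-style three-argument calls, which passed `(x, maxout, gz)` to the unified op, no longer raise a `TypeError`. A toy illustration of that mechanism, with hypothetical names:

    # Hypothetical stand-in for make_node: the trailing defaulted
    # parameter absorbs a third positional argument from old callers.
    def make_node(x, gz, dummy=None):
        # Only x and gz are used to build the node; the extra
        # positional argument lands in dummy and is discarded.
        return ('apply', x, gz)

    make_node('x', 'gz')              # new two-argument call
    make_node('x', 'maxout', 'gz')    # old three-argument call still accepted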
@@ -802,20 +802,17 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
                                   padding=(0, 0))(image)],
                                 [image_val], Pool)
 
-    def test_opt_max_to_average(self):
+    def test_DownsampleFactorMaxGrad(self):
         im = theano.tensor.tensor4()
         maxout = theano.tensor.tensor4()
         grad = theano.tensor.tensor4()
-        compilation_mode = theano.compile.get_default_mode().including(
-            'local_average_pool_grad')
         for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
             f = theano.function([im, maxout, grad],
                                 DownsampleFactorMaxGrad(ds=(3, 3),
                                                         ignore_border=False,
                                                         mode=mode)(im, maxout, grad),
-                                mode=compilation_mode)
+                                on_unused_input='ignore')
             if mode == 'max':
                 assert any(isinstance(n.op, MaxPoolGrad)
...
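On the test side, `test_opt_max_to_average` becomes `test_DownsampleFactorMaxGrad`: it now exercises the shim directly instead of compiling with a mode that includes the deleted `local_average_pool_grad` optimization. The switch to `on_unused_input='ignore'` is needed because, for the non-max modes, the dispatched `AveragePoolGrad` node does not consume all three of `im`, `maxout`, and `grad`.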