Commit b03d5e01 authored by Nicolas Bouchard

Rewrite squeeze and delete SqueezeOp.

Parent 3ef4a040
......@@ -209,66 +209,22 @@ def bincount(x, weights=None, minlength=None):
return BinCountOp(minlength=minlength)(x, weights)
def squeeze(x):
    """Remove the broadcastable dimensions from the shape of an array.

    It returns the input array, but with the broadcastable dimensions
    removed. This is always `x` itself or a view into `x`, obtained via
    a `dimshuffle` that keeps only the non-broadcastable axes — no copy
    is made and no separate Op is needed.

    :param x: Input data, tensor variable.
    :return: `x` without its broadcastable dimensions.
    """
    # Keep exactly the axes that are not broadcastable; dropping a
    # broadcastable (length-1) axis is what numpy.squeeze does, but
    # here the decision is static (based on the type), not on runtime
    # shape, so the result's ndim is known at graph-construction time.
    view = x.dimshuffle([i for i in range(x.ndim)
                         if not x.broadcastable[i]])
    return view
class RepeatOp(theano.Op):
......
# Imports for the extra_ops test module. SqueezeOp no longer exists
# (squeeze is now implemented with dimshuffle), so only `squeeze` is
# imported from theano.tensor.extra_ops.
import numpy

import theano
from theano.tests import unittest_tools as utt
from theano.tensor.extra_ops import (BinCountOp, bincount, DiffOp, diff,
                                     squeeze, RepeatOp, repeat, Bartlett, bartlett,
                                     FillDiagonal, fill_diagonal)
from theano import tensor as T
from theano import config, tensor, function
class SqueezeTester(utt.InferShapeTester):
    """Tests for the `squeeze` helper (replacement for the old SqueezeOp).

    Each entry of `shape_list` is paired with the matching entry of
    `broadcast_list`: every axis of length 1 is declared broadcastable,
    so `squeeze` should drop exactly the axes numpy.squeeze drops.
    """
    shape_list = [(1, 3),
                  (1, 2, 3),
                  (1, 5, 1, 1, 6)]
    broadcast_list = [[True, False],
                      [True, False, False],
                      [True, False, True, True, False]]

    def setUp(self):
        super(SqueezeTester, self).setUp()
        # `squeeze` is a plain function now, not an Op class.
        self.op = squeeze

    def test_op(self):
        # Compiled result must match numpy.squeeze on each shape.
        for shape, broadcast in zip(self.shape_list, self.broadcast_list):
            data = numpy.random.random(size=shape).astype(theano.config.floatX)
            variable = tensor.TensorType(theano.config.floatX, broadcast)()

            f = theano.function([variable], self.op(variable))

            expected = numpy.squeeze(data)
            tested = f(data)

            assert numpy.allclose(tested, expected)

    def test_infer_shape(self):
        # squeeze is implemented via dimshuffle, so the graph node to
        # check shape inference against is tensor.DimShuffle.
        for shape, broadcast in zip(self.shape_list, self.broadcast_list):
            data = numpy.random.random(size=shape).astype(theano.config.floatX)
            variable = tensor.TensorType(theano.config.floatX, broadcast)()

            self._compile_and_check([variable],
                                    [self.op(variable)],
                                    [data],
                                    tensor.DimShuffle)

    def test_grad(self):
        # Gradient of a pure view should verify numerically.
        for shape, broadcast in zip(self.shape_list, self.broadcast_list):
            data = numpy.random.random(size=shape).astype(theano.config.floatX)

            utt.verify_grad(self.op, [data])
class TestRepeatOp(utt.InferShapeTester):
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment