Commit 9973e963 authored by Frédéric Bastien

Merge pull request #3009 from harlouci/trunk

Class Unique
import numpy as np
import numpy
import warnings
import theano
import theano
from theano.tensor import basic
from theano.tensor import nlinalg
from theano import gof, scalar
from theano.gradient import DisconnectedType
tensor = basic
......@@ -1006,3 +1005,65 @@ def to_one_hot(y, nb_class, dtype=None):
ret = theano.tensor.set_subtensor(ret[theano.tensor.arange(y.shape[0]), y],
1)
return ret
class Unique(theano.Op):
    """
    Wraps numpy.unique.

    Outputs always include the (sorted, flattened) unique values; extra
    int64 vector outputs are appended, in order, for each of
    `return_index`, `return_inverse` and `return_counts` that is True.

    This op is not implemented on the GPU.
    """
    __props__ = ("return_index", "return_inverse", "return_counts")

    def __init__(self, return_index=False, return_inverse=False,
                 return_counts=False):
        self.return_index = return_index
        self.return_inverse = return_inverse
        self.return_counts = return_counts
        # numpy.unique only grew return_counts in 1.9.0; fail early and
        # loudly instead of raising a TypeError at perform() time.
        numpy_ver = [int(n) for n in np.__version__.split('.')[:2]]
        if self.return_counts and numpy_ver < [1, 9]:
            raise RuntimeError(
                "Numpy version = " + np.__version__ +
                ". Option 'return_counts=True' works starting"
                " from version 1.9.0.")

    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        # First output: the unique values, always a 1-d vector of x's dtype.
        outputs = [basic.TensorType(broadcastable=[False], dtype=x.dtype)()]
        # Each optional output is a 1-d int64 index/count vector.
        typ = basic.TensorType(broadcastable=[False], dtype='int64')
        if self.return_index:
            outputs.append(typ())
        if self.return_inverse:
            outputs.append(typ())
        if self.return_counts:
            outputs.append(typ())
        return theano.Apply(self, [x], outputs)

    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        z = output_storage
        # Forward only the flags that are set; numpy then returns either a
        # bare array (no flags) or a tuple in the same order as make_node.
        param = {}
        if self.return_index:
            param['return_index'] = True
        if self.return_inverse:
            param['return_inverse'] = True
        if self.return_counts:
            param['return_counts'] = True
        outs = np.unique(x, **param)
        if param:
            for i, out in enumerate(outs):
                z[i][0] = out
        else:
            z[0][0] = outs

    def infer_shape(self, node, i0_shapes):
        ret = node.fgraph.shape_feature.default_infer_shape(node, i0_shapes)
        if self.return_inverse:
            # The inverse-index output always has one entry per element of
            # the (flattened) input; its position shifts by one when the
            # return_index output precedes it.
            shape = (basic.prod(i0_shapes[0]), )
            index = 2 if self.return_index else 1
            ret[index] = shape
        return ret
......@@ -5,14 +5,13 @@ import unittest
import theano
from theano.tests import unittest_tools as utt
from theano.tensor.extra_ops import (CumsumOp, cumsum, CumprodOp, cumprod,
BinCountOp, bincount, DiffOp, diff,
squeeze, compress, RepeatOp, repeat,
Bartlett, bartlett,
FillDiagonal, fill_diagonal,
FillDiagonalOffset, fill_diagonal_offset,
to_one_hot)
to_one_hot, Unique)
from theano import tensor as T
from theano import config, tensor, function
......@@ -661,3 +660,105 @@ def test_to_one_hot():
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]])
class test_Unique(utt.InferShapeTester):
    """Tests for the Unique op against numpy.unique as the reference."""

    @staticmethod
    def _numpy_ver():
        """Return [major, minor] of the installed numpy version.

        Defined here because the module does not otherwise define a
        ``numpy_ver`` global; the return_counts variants only exist from
        numpy 1.9.0 on.
        """
        return [int(n) for n in np.__version__.split('.')[:2]]

    def setUp(self):
        super(test_Unique, self).setUp()
        self.op_class = Unique
        # All combinations of (return_index, return_inverse).
        self.ops = [Unique(),
                    Unique(True),
                    Unique(False, True),
                    Unique(True, True)]
        if self._numpy_ver() >= [1, 9]:
            # return_counts variants, same order as the expected lists below.
            self.ops.extend([
                Unique(False, False, True),
                Unique(True, False, True),
                Unique(False, True, True),
                Unique(True, True, True)])

    def _check_against_numpy(self, x, inp):
        """Compile each op on symbolic input `x`, apply it to `inp`, and
        compare every output to the corresponding numpy.unique call."""
        # Wrap the bare-array case in a list so both cases zip uniformly.
        list_outs_expected = [[np.unique(inp)],
                              np.unique(inp, True),
                              np.unique(inp, False, True),
                              np.unique(inp, True, True)]
        if self._numpy_ver() >= [1, 9]:
            list_outs_expected.extend([
                np.unique(inp, False, False, True),
                np.unique(inp, True, False, True),
                np.unique(inp, False, True, True),
                np.unique(inp, True, True, True)])
        for op, outs_expected in zip(self.ops, list_outs_expected):
            f = theano.function(inputs=[x], outputs=op(x, return_list=True))
            outs = f(inp)
            # Compare the result computed to the expected value.
            for out, out_exp in zip(outs, outs_expected):
                utt.assert_allclose(out, out_exp)

    def test_basic_vector(self):
        """
        Basic test for a vector.
        Done by using the op and checking that it returns the right answer.
        """
        x = theano.tensor.vector()
        inp = np.asarray([2, 1, 3, 2], dtype=config.floatX)
        self._check_against_numpy(x, inp)

    def test_basic_matrix(self):
        """
        Basic test for a matrix.
        Done by using the op and checking that it returns the right answer.
        """
        x = theano.tensor.matrix()
        inp = np.asarray([[2, 1], [3, 2], [2, 3]], dtype=config.floatX)
        self._check_against_numpy(x, inp)

    def _check_infer_shape(self, x, inp):
        """Run _compile_and_check on the inverse-index output of every op
        that produces one (only that output's shape is non-default)."""
        for op in self.ops:
            if not op.return_inverse:
                continue
            # The inverse output sits after the index output when present.
            out = op(x)[2 if op.return_index else 1]
            self._compile_and_check([x], [out], [inp], self.op_class)

    def test_infer_shape_vector(self):
        """
        Testing the infer_shape with a vector.
        """
        x = theano.tensor.vector()
        inp = np.asarray([2, 1, 3, 2], dtype=config.floatX)
        self._check_infer_shape(x, inp)

    def test_infer_shape_matrix(self):
        """
        Testing the infer_shape with a matrix.
        """
        x = theano.tensor.matrix()
        inp = np.asarray([[2, 1], [3, 2], [2, 3]], dtype=config.floatX)
        self._check_infer_shape(x, inp)
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论