提交 ebf8cbd9 authored 作者: Iban Harlouchet's avatar Iban Harlouchet

Added examples to Unique + some flake8 for tensor/extra_ops.py

上级 4f968fb0
......@@ -4,7 +4,6 @@ import warnings
import theano
from theano.tensor import basic
from theano.tensor import nlinalg
from theano import gof, scalar
from theano.gradient import DisconnectedType
tensor = basic
......@@ -359,7 +358,7 @@ class BinCountOp(theano.Op):
def make_node(self, x, weights):
warnings.warn((
"Tile op is deprecated, use tile function instead."),
stacklevel=3)
stacklevel=3)
x = basic.as_tensor_variable(x)
......@@ -677,39 +676,39 @@ def repeat(x, repeats, axis=None):
if repeats.ndim == 1:
return RepeatOp(axis=axis)(x, repeats)
else:
if axis == None:
axis = 0
x = x.flatten()
if axis is None:
axis = 0
x = x.flatten()
else:
if axis >= x.ndim:
raise ValueError('Axis should not exceed x.ndim-1.')
if axis < 0:
axis = x.ndim+axis
axis = x.ndim + axis
shape = [x.shape[i] for i in xrange(x.ndim)]
# shape_ is the shape of the intermediate tensor which has
# an additional dimension comparing to x. We use alloc to
# allocate space for this intermediate tensor to replicate x
# along that additional dimension.
shape_ = shape[:]
shape_.insert(axis+1, repeats)
shape_.insert(axis + 1, repeats)
# shape is now the shape of output, where shape[axis] becomes
# shape[axis]*repeats.
shape[axis] = shape[axis]*repeats
shape[axis] = shape[axis] * repeats
# dims_ is the dimension of that intermediate tensor.
# dims_ is the dimension of that intermediate tensor.
dims_ = list(numpy.arange(x.ndim))
dims_.insert(axis+1, 'x')
dims_.insert(axis + 1, 'x')
# After the original tensor is duplicated along the additional
# dimension, we reshape it to the expected output shape, and
# dimension, we reshape it to the expected output shape, and
# return the output z.
z = tensor.alloc(x.dimshuffle(*dims_), *shape_).reshape(shape)
z = tensor.alloc(x.dimshuffle(*dims_), *shape_).reshape(shape)
return z
class Bartlett(gof.Op):
# See function bartlett for docstring
def __eq__(self, other):
......@@ -1006,62 +1005,76 @@ def to_one_hot(y, nb_class, dtype=None):
1)
return ret
class Unique(theano.Op):
"""
Wraps numpy.unique.
This op is not implemented on the GPU.
"""
This op is not implemented on the GPU.
Examples
========
>>> import numpy as np
>>> x = theano.tensor.vector()
>>> f = theano.function([x], Unique(True, True, False)(x))
>>> f([1,2.,3,4,3,2,1.])
[array([ 1., 2., 3., 4.]), array([0, 1, 2, 3]), array([0, 1, 2, 3, 2, 1, 0])]
>>> y = theano.tensor.matrix()
>>> g = theano.function([y], Unique(True, True, False)(y))
>>> g([[1, 1, 1.0], (2, 3, 3.0)])
[array([ 1., 2., 3.]), array([0, 3, 4]), array([0, 0, 0, 1, 2, 2])]
"""
__props__ = ("return_index", "return_inverse", "return_counts")
def __init__(self, return_index=False, return_inverse=False,
def __init__(self, return_index=False, return_inverse=False,
return_counts=False):
self.return_index = return_index
self.return_inverse = return_inverse
self.return_counts = return_counts
self.return_counts = return_counts
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
if self.return_counts == True and bool(numpy_ver < [1, 9]) :
if self.return_counts and bool(numpy_ver < [1, 9]):
raise RuntimeError(
"Numpy version = " + np.__version__ +
". Option 'return_counts=True' works starting"
" from version 1.9.0.")
def make_node(self, x):
x = basic.as_tensor_variable(x)
x = basic.as_tensor_variable(x)
outputs = [basic.TensorType(broadcastable=[False], dtype=x.dtype)()]
typ = basic.TensorType(broadcastable=[False], dtype='int64')
if self.return_index :
if self.return_index:
outputs.append(typ())
if self.return_inverse:
outputs.append(typ())
if self.return_inverse :
if self.return_counts:
outputs.append(typ())
if self.return_counts :
outputs.append(typ())
return theano.Apply(self, [x], outputs)
def perform(self, node, inputs, output_storage):
x = inputs[0]
z = output_storage
param = {}
if self.return_index :
param = {}
if self.return_index:
param['return_index'] = True
if self.return_inverse :
if self.return_inverse:
param['return_inverse'] = True
if self.return_counts:
param['return_counts'] = True
outs = np.unique(x,**param)
outs = np.unique(x, **param)
if ((not self.return_inverse) and
(not self.return_index) and
(not self.return_counts)):
z[0][0]=outs
else :
for i in range(len(outs)):
z[0][0] = outs
else:
for i in range(len(outs)):
z[i][0] = outs[i]
def infer_shape(self, node, i0_shapes):
ret = node.fgraph.shape_feature.default_infer_shape(node, i0_shapes)
if self.return_inverse :
if self.return_inverse:
shape = (basic.prod(i0_shapes[0]), )
if self.return_index :
if self.return_index:
ret[2] = shape
return ret
ret[1] = shape
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论