Commit ebf8cbd9 authored by Iban Harlouchet

Added examples to Unique + some flake8 for tensor/extra_ops.py

Parent: 4f968fb0
...@@ -4,7 +4,6 @@ import warnings ...@@ -4,7 +4,6 @@ import warnings
import theano import theano
from theano.tensor import basic from theano.tensor import basic
from theano.tensor import nlinalg
from theano import gof, scalar from theano import gof, scalar
from theano.gradient import DisconnectedType from theano.gradient import DisconnectedType
tensor = basic tensor = basic
...@@ -677,14 +676,14 @@ def repeat(x, repeats, axis=None): ...@@ -677,14 +676,14 @@ def repeat(x, repeats, axis=None):
if repeats.ndim == 1: if repeats.ndim == 1:
return RepeatOp(axis=axis)(x, repeats) return RepeatOp(axis=axis)(x, repeats)
else: else:
if axis == None: if axis is None:
axis = 0 axis = 0
x = x.flatten() x = x.flatten()
else: else:
if axis >= x.ndim: if axis >= x.ndim:
raise ValueError('Axis should not exceed x.ndim-1.') raise ValueError('Axis should not exceed x.ndim-1.')
if axis < 0: if axis < 0:
axis = x.ndim+axis axis = x.ndim + axis
shape = [x.shape[i] for i in xrange(x.ndim)] shape = [x.shape[i] for i in xrange(x.ndim)]
...@@ -693,15 +692,15 @@ def repeat(x, repeats, axis=None): ...@@ -693,15 +692,15 @@ def repeat(x, repeats, axis=None):
# allocate space for this intermediate tensor to replicate x # allocate space for this intermediate tensor to replicate x
# along that additional dimension. # along that additional dimension.
shape_ = shape[:] shape_ = shape[:]
shape_.insert(axis+1, repeats) shape_.insert(axis + 1, repeats)
# shape is now the shape of output, where shape[axis] becomes # shape is now the shape of output, where shape[axis] becomes
# shape[axis]*repeats. # shape[axis]*repeats.
shape[axis] = shape[axis]*repeats shape[axis] = shape[axis] * repeats
# dims_ is the dimension of that intermediate tensor. # dims_ is the dimension of that intermediate tensor.
dims_ = list(numpy.arange(x.ndim)) dims_ = list(numpy.arange(x.ndim))
dims_.insert(axis+1, 'x') dims_.insert(axis + 1, 'x')
# After the original tensor is duplicated along the additional # After the original tensor is duplicated along the additional
# dimension, we reshape it to the expected output shape, and # dimension, we reshape it to the expected output shape, and
...@@ -1006,11 +1005,25 @@ def to_one_hot(y, nb_class, dtype=None): ...@@ -1006,11 +1005,25 @@ def to_one_hot(y, nb_class, dtype=None):
1) 1)
return ret return ret
class Unique(theano.Op): class Unique(theano.Op):
""" """
Wraps numpy.unique. Wraps numpy.unique.
This op is not implemented on the GPU. This op is not implemented on the GPU.
Examples
========
>>> import numpy as np
>>> x = theano.tensor.vector()
>>> f = theano.function([x], Unique(True, True, False)(x))
>>> f([1,2.,3,4,3,2,1.])
[array([ 1., 2., 3., 4.]), array([0, 1, 2, 3]), array([0, 1, 2, 3, 2, 1, 0])]
>>> y = theano.tensor.matrix()
>>> g = theano.function([y], Unique(True, True, False)(y))
>>> g([[1, 1, 1.0], (2, 3, 3.0)])
[array([ 1., 2., 3.]), array([0, 3, 4]), array([0, 0, 0, 1, 2, 2])]
""" """
__props__ = ("return_index", "return_inverse", "return_counts") __props__ = ("return_index", "return_inverse", "return_counts")
...@@ -1020,7 +1033,7 @@ class Unique(theano.Op): ...@@ -1020,7 +1033,7 @@ class Unique(theano.Op):
self.return_inverse = return_inverse self.return_inverse = return_inverse
self.return_counts = return_counts self.return_counts = return_counts
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]] numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
if self.return_counts == True and bool(numpy_ver < [1, 9]) : if self.return_counts and bool(numpy_ver < [1, 9]):
raise RuntimeError( raise RuntimeError(
"Numpy version = " + np.__version__ + "Numpy version = " + np.__version__ +
". Option 'return_counts=True' works starting" ". Option 'return_counts=True' works starting"
...@@ -1030,11 +1043,11 @@ class Unique(theano.Op): ...@@ -1030,11 +1043,11 @@ class Unique(theano.Op):
x = basic.as_tensor_variable(x) x = basic.as_tensor_variable(x)
outputs = [basic.TensorType(broadcastable=[False], dtype=x.dtype)()] outputs = [basic.TensorType(broadcastable=[False], dtype=x.dtype)()]
typ = basic.TensorType(broadcastable=[False], dtype='int64') typ = basic.TensorType(broadcastable=[False], dtype='int64')
if self.return_index : if self.return_index:
outputs.append(typ()) outputs.append(typ())
if self.return_inverse : if self.return_inverse:
outputs.append(typ()) outputs.append(typ())
if self.return_counts : if self.return_counts:
outputs.append(typ()) outputs.append(typ())
return theano.Apply(self, [x], outputs) return theano.Apply(self, [x], outputs)
...@@ -1042,26 +1055,26 @@ class Unique(theano.Op): ...@@ -1042,26 +1055,26 @@ class Unique(theano.Op):
x = inputs[0] x = inputs[0]
z = output_storage z = output_storage
param = {} param = {}
if self.return_index : if self.return_index:
param['return_index'] = True param['return_index'] = True
if self.return_inverse : if self.return_inverse:
param['return_inverse'] = True param['return_inverse'] = True
if self.return_counts: if self.return_counts:
param['return_counts'] = True param['return_counts'] = True
outs = np.unique(x,**param) outs = np.unique(x, **param)
if ((not self.return_inverse) and if ((not self.return_inverse) and
(not self.return_index) and (not self.return_index) and
(not self.return_counts)): (not self.return_counts)):
z[0][0]=outs z[0][0] = outs
else : else:
for i in range(len(outs)): for i in range(len(outs)):
z[i][0] = outs[i] z[i][0] = outs[i]
def infer_shape(self, node, i0_shapes): def infer_shape(self, node, i0_shapes):
ret = node.fgraph.shape_feature.default_infer_shape(node, i0_shapes) ret = node.fgraph.shape_feature.default_infer_shape(node, i0_shapes)
if self.return_inverse : if self.return_inverse:
shape = (basic.prod(i0_shapes[0]), ) shape = (basic.prod(i0_shapes[0]), )
if self.return_index : if self.return_index:
ret[2] = shape ret[2] = shape
return ret return ret
ret[1] = shape ret[1] = shape
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment