提交 799714aa authored 作者: Adam Becker's avatar Adam Becker

flake8

上级 933cb859
from __future__ import absolute_import, print_function, division
import os
from string import Template
import pdb
import numpy as np
import theano
from theano import Apply
from theano.tensor import as_tensor_variable
from theano.tensor.sort import TopKOp
from .basic_ops import (GpuKernelBase, Kernel, infer_context_name,
as_gpuarray_variable, gpu_contiguous)
as_gpuarray_variable)
from .opt import register_opt, op_lifter, register_opt2
from .type import GpuArrayType
......@@ -34,6 +32,7 @@ class GpuTopKOp(GpuKernelBase, TopKOp):
'''
__props__ = TopKOp.__props__
def __init__(self, axis=-1, return_values=True, return_indices=False, idx_dtype='int64'):
GpuKernelBase.__init__(self)
TopKOp.__init__(
......@@ -57,8 +56,8 @@ class GpuTopKOp(GpuKernelBase, TopKOp):
# load kernel source
device_type = node.inputs[0].type.context.kind
knames = ['k_topk_dense', 'k_topk_dense_large']
kernel_ext = {b'cuda':'.cu', b'opencl':'.cl'}[device_type]
common_ext = {b'cuda':'.cuh', b'opencl':'.h'}[device_type]
kernel_ext = {b'cuda': '.cu', b'opencl': '.cl'}[device_type]
common_ext = {b'cuda': '.cuh', b'opencl': '.h'}[device_type]
kernel_src = {}
for kname in knames:
with open(os.path.join(
......@@ -294,4 +293,3 @@ def local_gpua_topkop(op, ctx_name, inputs, outputs):
rets = GpuTopKOp(
axis=axis, return_values=rv, return_indices=ri, idx_dtype=op.idx_dtype)(x, k)
return rets
......@@ -233,7 +233,10 @@ if hasattr(np, 'argpartition'):
elif op.return_values:
zi = np.expand_dims(
fn_argmax(x, axis=axis).astype(idx_dtype), axis)
idx2 = tuple(np.arange(s).reshape((s,) + (1,) * (ndim - i - 1)) if i != axis else zi for i, s in enumerate(x.shape))
idx2 = tuple(
np.arange(s).reshape(
(s,) + (1,) * (ndim - i - 1)
) if i != axis else zi for i, s in enumerate(x.shape))
zv = x[idx2]
return zv, zi.astype(idx_dtype)
else:
......@@ -270,7 +273,10 @@ if hasattr(np, 'argpartition'):
return zv
elif op.return_values:
zi = np.argpartition(x, -k, axis=axis)[idx]
idx2 = tuple(np.arange(s).reshape((s,)+(1,)*(ndim-i-1)) if i != axis else zi for i, s in enumerate(x.shape))
idx2 = tuple(
np.arange(s).reshape(
(s,) + (1,) * (ndim - i - 1)
) if i != axis else zi for i, s in enumerate(x.shape))
zv = x[idx2]
return zv, zi.astype(idx_dtype)
else:
......@@ -324,13 +330,7 @@ class TopKOp(theano.Op):
sorted: bool
Defaults to ``False``
If True, the result array would be incremental-sorted. Mutually exclusive with ``sparse``
sparse: bool
Defaults to ``False``
if ``True``, the output array will always have the same shape as input.
The non-top-k values will be replaced by zero.
If True, the result array would be incremental-sorted.
only_top_kth: bool
Defaults to ``False``
......@@ -341,10 +341,14 @@ class TopKOp(theano.Op):
# TODO c_code
__props__ = ('axis', 'return_values', 'return_indices', 'idx_dtype')
def __init__(self, axis=-1, return_indices=False, return_values=True, idx_dtype='int64'):
def __init__(
self,
axis=-1,
return_indices=False,
return_values=True,
idx_dtype='int64'):
assert isinstance(axis, int)
assert return_indices or return_values
self.axis = axis
......@@ -366,8 +370,8 @@ class TopKOp(theano.Op):
if self.return_values:
outs.append(inp.type())
if self.return_indices:
outs.append(
theano.tensor.TensorType(dtype=self.idx_dtype, broadcastable=bcast)())
outs.append(theano.tensor.TensorType(
dtype=self.idx_dtype, broadcastable=bcast)())
return theano.Apply(self, [inp, k], outs)
def perform(self, node, inputs, output_storage):
......@@ -382,12 +386,12 @@ class TopKOp(theano.Op):
elif self.return_values:
pzv = output_storage[0]
pzi = output_storage[1]
pzv[0], pzi[0] = _topk_py_impl(self, x, k, axis, node.outputs[1].dtype)
pzv[0], pzi[0] = _topk_py_impl(
self, x, k, axis, node.outputs[1].dtype)
else:
pzi = output_storage[0]
pzi[0] = _topk_py_impl(self, x, k, axis, node.outputs[0].dtype)
def infer_shape(self, node, inp_shapes):
_check_tensor_is_scalar(node.inputs[1])
shp = list(inp_shapes[0])
......@@ -405,6 +409,7 @@ class TopKOp(theano.Op):
shp = tuple(shp)
return [shp for i in [self.return_values, self.return_indices] if i]
def topk(x, k, axis=-1):
"""
Returns the k-largest elements along an axis.
......@@ -459,7 +464,11 @@ def argtopk(x, k, axis=-1, idx_dtype='int64'):
if axis is None:
x = theano.tensor.flatten(x)
axis = -1
return TopKOp(axis=axis, return_indices=True, return_values=False, idx_dtype=idx_dtype)(x, k)
return TopKOp(
axis=axis,
return_indices=True,
return_values=False,
idx_dtype=idx_dtype)(x, k)
def topk_and_argtopk(x, k, axis=-1, idx_dtype='int64'):
......@@ -473,4 +482,3 @@ def topk_and_argtopk(x, k, axis=-1, idx_dtype='int64'):
x = theano.tensor.flatten(x)
axis = -1
return TopKOp(axis=axis, return_indices=True, idx_dtype=idx_dtype)(x, k)
......@@ -21,10 +21,11 @@ _int_dtypes = (
'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64')
def gen_unique_vector(size, dtype):
    """Return a 1-D array of ``size`` pairwise-distinct random values.

    Parameters
    ----------
    size : int
        Number of elements in the returned vector.
    dtype : str or numpy dtype
        dtype the result is cast to (e.g. one of the int/uint dtypes or
        ``theano.config.floatX`` in the callers).

    Returns
    -------
    numpy.ndarray
        Shape ``(size,)`` array whose elements are all distinct, roughly
        centred around zero.
    """
    # Base points spaced 3 apart plus one shared random offset in [-1, 1):
    # the spacing keeps every element distinct even after truncation to an
    # integer dtype below.
    retval = np.arange(size) * 3. + np.random.uniform(-1., 1.)
    # Shuffle the values and shift them so the range straddles zero, then
    # cast to the requested dtype.
    return (retval[np.random.permutation(size)] - size * 1.5).astype(dtype)
class Test_sort(unittest.TestCase):
......@@ -270,7 +271,6 @@ class Test_TopK(unittest.TestCase):
assert yival == np.asarray([0], dtype=idx_dtype)
assert np.allclose(xval, yvval)
@utt.parameterized.expand(chain(
product(
(16, 61, 257),
......@@ -475,4 +475,3 @@ class TopKInferShapeTester(utt.InferShapeTester):
xval = gen_unique_vector(size, theano.config.floatX).reshape(shp)
self._compile_and_check(
[x], [yv, yi], [xval], TopKOp)
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论