提交 1765e4e7 authored 作者: Iban Harlouchet's avatar Iban Harlouchet

numpydoc for theano/tensor/subtensor.py

上级 306ee2c8
......@@ -39,6 +39,7 @@ sparse_module_ref = None
class AdvancedIndexingError(TypeError):
"""
Raised when Subtensor is asked to perform advanced indexing.
"""
def __init__(self, *args):
......@@ -52,6 +53,7 @@ class AdvancedIndexingError(TypeError):
def make_constant(args):
"""
Convert python literals to theano constants in subtensor arguments.
"""
def conv(a):
if a is None:
......@@ -68,13 +70,14 @@ def make_constant(args):
def get_idx_list(inputs, idx_list, get_count=False):
'''
"""
Given a list of inputs to the subtensor and its idx_list reorders
the inputs according to the idx list to get the right values.
If get_counts=True, instead returns the number of inputs consumed
during this process.
'''
"""
# The number of indices
n = len(inputs) - 1
......@@ -102,14 +105,15 @@ def get_idx_list(inputs, idx_list, get_count=False):
def get_canonical_form_slice(theslice, length):
'''
"""
Given a slice [start:stop:step] transform it into a canonical form
that respects the conventions imposed by python and numpy.
In a canonical form a slice is represented by a canonical form slice,
in which 0 <= start <= stop <= length and step > 0, and a flag which says
if the resulting set of numbers needs to be reversed or not.
'''
"""
from theano.tensor import switch, lt, ge, sgn
if isinstance(theslice, slice):
......@@ -252,7 +256,8 @@ def get_canonical_form_slice(theslice, length):
class Subtensor(Op):
"""Return a subtensor view
"""
Return a subtensor view.
The inputs array is the tensor x, followed by scalar integer types.
TODO: WRITEME: how are the scalar integer variables formatted?
......@@ -297,12 +302,15 @@ class Subtensor(Op):
@staticmethod
def collapse(idxs, cond):
"""
Parameters
----------
idxs : a list of indices or slices.
cond : a callable that returns a bool
idxs: a list of indices or slices.
cond: a callable that returns a bool
returns: idxs, with the slices flattened out into a list.
if cond is true for an entry, does not flatten it.
Returns
-------
idxs, with the slices flattened out into a list.
If cond is true for an entry, does not flatten it.
"""
ret = []
......@@ -323,12 +331,14 @@ class Subtensor(Op):
@staticmethod
def convert(entry, slice_ok=True):
"""
Change references to Variables into references to Types.
The "idx_list" field is unique to each Subtensor instance.
It is not unique to each Apply node, so it should not refer to
specific Variables. This method changes references to Variables
into references to Types.
specific Variables.
TODO: WRITEME: This method also accepts "entry" already being a Type;
when would that happen?
"""
invalid_scal_types = [scal.float64, scal.float32, scal.float16]
scal_types = [scal.int64, scal.int32, scal.int16, scal.int8]
......@@ -389,30 +399,33 @@ class Subtensor(Op):
only_process_constants=False):
"""
Return the idx_list with constant inputs replaced by their
python scalar equivalent. May raise
`theano.tensor.NotScalarConstantError` if the idx contains
python scalar equivalent.
May raise `theano.tensor.NotScalarConstantError` if the idx contains
non-constant entries.
If allow_partial is True, then entries that are not constant
will stay as their input variable rather than raising an
exception.
If allow_partial is True, then entries that are not constant will
stay as their input variable rather than raising an exception.
None entries are always left as-is.
Example usage (where v, a are appropriately typed theano variables):
>>> b = a[v, 1:3]
>>> b.owner.op.idx_list
(Scalar(int64), slice(Scalar(int64), Scalar(int64), None))
>>> b.owner.op.get_constant_idx(b.owner.inputs, allow_partial=True)
[v, slice(1, 3, None)]
>>> b.owner.op.get_constant_idx(b.owner.inputs)
NotScalarConstantError: v
Parameters
----------
only_process_constants
If True, we only attempt to obtain the value of an index/slice if
it's directly constant and don't try to dig through dimshuffles,
fills, allocs, and other to figure out its value.
Examples
--------
Example usage where v, a are appropriately typed theano variables:
>>> b = a[v, 1:3]
>>> b.owner.op.idx_list
(Scalar(int64), slice(Scalar(int64), Scalar(int64), None))
>>> b.owner.op.get_constant_idx(b.owner.inputs, allow_partial=True)
[v, slice(1, 3, None)]
>>> b.owner.op.get_constant_idx(b.owner.inputs)
NotScalarConstantError: v
:param only_process_constants: If True, we only attempt to obtain
the value of an index/slice if it's directly constant and don't
try to dig through dimshuffles, fills, allocs, and other to figure
out its value.
"""
real_idx = get_idx_list(inputs, self.idx_list)
......@@ -451,8 +464,13 @@ class Subtensor(Op):
def make_node(self, x, *inputs):
"""
x: the tensor to take a subtensor of
inputs: a list of theano Scalars
Parameters
----------
x
The tensor to take a subtensor of.
inputs
A list of theano Scalars.
"""
x = theano.tensor.as_tensor_variable(x)
inputs = tuple(self.my_as_scalar(a) for a in inputs)
......@@ -607,8 +625,8 @@ class Subtensor(Op):
@staticmethod
def default_helper_c_code_args():
"""
Returns a dictionary of default arguments to
helper_c_code
Returns a dictionary of default arguments to helper_c_code.
"""
return {"c_prefix": "PyArray",
......@@ -622,7 +640,8 @@ class Subtensor(Op):
The parameters c_prefix are there to allow reusing this
function on PyArray and CudaNdarray object.
This fct take as input the x,
This function takes as input the x.
"""
default_args = Subtensor.default_helper_c_code_args()
......@@ -986,16 +1005,25 @@ pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Subtensor),
def set_subtensor(x, y, inplace=False,
tolerate_inplace_aliasing=False):
"""Return x with the given subtensor overwritten by y.
"""
Return x with the given subtensor overwritten by y.
Example: To replicate the numpy expression "r[10:] = 5", type
Parameters
----------
x
Symbolic variable for the lvalue of = operation.
y
Symbolic variable for the rvalue of = operation.
tolerate_inplace_aliasing
See inc_subtensor for documentation.
Examples
--------
To replicate the numpy expression "r[10:] = 5", type
>>> r = ivector()
>>> new_r = set_subtensor(r[10:], 5)
:param x: symbolic variable for the lvalue of = operation
:param y: symbolic variable for the rvalue of = operation
:param tolerate_inplace_aliasing: see inc_subtensor for documentation.
"""
return inc_subtensor(x, y, inplace, set_instead_of_inc=True,
tolerate_inplace_aliasing=tolerate_inplace_aliasing)
......@@ -1003,22 +1031,32 @@ def set_subtensor(x, y, inplace=False,
def inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,
tolerate_inplace_aliasing=False):
"""Return x with the given subtensor incremented by y.
:param x: the symbolic result of a Subtensor operation.
:param y: the amount by which to increment the subtensor in question
:param inplace: Don't use. Theano will do it when possible.
:param set_instead_of_inc: If True, do a set_subtensor instead.
:param tolerate_inplace_aliasing: allow x and y to be views of a single
underlying array even while working inplace. For correct results,
x and y must not be overlapping views; if they overlap, the result
of this Op will generally be incorrect. This value has no effect if
inplace=False.
Example: To replicate the numpy expression "r[10:] += 5", type
"""
Return x with the given subtensor incremented by y.
Parameters
----------
x
The symbolic result of a Subtensor operation.
y
The amount by which to increment the subtensor in question.
inplace
Don't use. Theano will do it when possible.
set_instead_of_inc
If True, do a set_subtensor instead.
tolerate_inplace_aliasing
Allow x and y to be views of a single underlying array even while
working inplace. For correct results, x and y must not be overlapping
views; if they overlap, the result of this Op will generally be
incorrect. This value has no effect if inplace=False.
Examples
--------
To replicate the numpy expression "r[10:] += 5", type
>>> r = ivector()
>>> new_r = inc_subtensor(r[10:], 5)
"""
# First of all, y cannot have a higher dimension than x,
# nor have non-broadcastable dimensions where x is broadcastable.
......@@ -1159,7 +1197,8 @@ def inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,
class IncSubtensor(Op):
"""Increment a subtensor.
"""
Increment a subtensor.
This is like numpy's
......@@ -1167,8 +1206,12 @@ class IncSubtensor(Op):
It is used internally to implement the gradient on SubTensor.
:param set_instead_of_inc: if True set the subtensor to the value instead
of incrementing it by that value.
Parameters
----------
set_instead_of_inc
If True set the subtensor to the value instead of incrementing it by
that value.
"""
check_input = False
......@@ -1225,9 +1268,14 @@ class IncSubtensor(Op):
def make_node(self, x, y, *inputs):
"""
x: the tensor to increment
y: the value to increment by
inputs: TODO WRITEME
Parameters
----------
x
The tensor to increment.
y
The value to increment by.
inputs: TODO WRITEME
"""
x, y = map(theano.tensor.as_tensor_variable, [x, y])
if y.ndim > x.ndim:
......@@ -1411,8 +1459,10 @@ class IncSubtensor(Op):
)
def do_type_checking(self, node):
""" Should raise NotImplementedError if c_code does not support
"""
Should raise NotImplementedError if c_code does not support
the types involved in this node.
"""
if not isinstance(node.inputs[0].type, theano.tensor.TensorType):
......@@ -1427,13 +1477,18 @@ class IncSubtensor(Op):
def copy_of_x(self, x):
"""
:param x: a string giving the name of a C variable
pointing to an array
Parameters
----------
x
A string giving the name of a C variable pointing to an array.
Returns
-------
C code expression to make a copy of x.
:return: C code expression to make a copy of x
Base class uses PyArrayObject *, subclasses may override for
different types of arrays.
Base class uses PyArrayObject *, subclasses may override for
different types of arrays.
"""
# Parameters of PyArrary_FromAny are:
# array
......@@ -1448,12 +1503,16 @@ class IncSubtensor(Op):
def make_view_array(self, x, view_ndim):
"""
:param x: a string identifying an array to be viewed
:param view_ndim: a string specifying the number of dimensions
to have in the view
Parameters
----------
x
A string identifying an array to be viewed.
view_ndim
A string specifying the number of dimensions to have in the view.
This doesn't need to actually set up the view with the right indexing;
we'll do that manually later.
This doesn't need to actually set up the view with the
right indexing; we'll do that manually later.
"""
return """Py_INCREF(PyArray_DESCR(%(x)s));
......@@ -1471,22 +1530,35 @@ class IncSubtensor(Op):
""" % locals()
def get_helper_c_code_args(self):
""" Return a dictionary of arguments to pass to helper_c_code."""
"""
Return a dictionary of arguments to pass to helper_c_code.
"""
return Subtensor.default_helper_c_code_args()
def copy_into(self, view, source):
"""
view: string, C code expression for an array
source: string, C code expression for an array
Parameters
----------
view : string
C code expression for an array.
source : string
C code expression for an array.
Returns
-------
Returns a C code expression to copy source into view, and
return 0 on success.
returns a C code expression to copy source into view, and
return 0 on success
"""
return """PyArray_CopyInto(%(view)s, %(source)s)""" % locals()
def add_to_zview(self, name, x, fail):
""" Return C code to add x to zview. Should DECREF zview if the
add fails."""
"""
Return C code to add x to zview. Should DECREF zview if the
add fails.
"""
return """
PyArrayObject * add_rval = (PyArrayObject*)PyNumber_InPlaceAdd(
......@@ -1551,11 +1623,13 @@ class IncSubtensor(Op):
def _sum_grad_over_bcasted_dims(x, gx):
"""Sum of gx over dimensions to reproduce x.broadcastable.
"""
Sum of gx over dimensions to reproduce x.broadcastable.
This is useful to sum gradients over certain dimensions when
x has been broadcasted, and we need to sum the gradient contributions
over all duplications.
"""
if gx.broadcastable != x.broadcastable:
x_dim_added = gx.ndim - x.ndim
......@@ -1592,7 +1666,10 @@ def _sum_grad_over_bcasted_dims(x, gx):
class AdvancedSubtensor1(Op):
"""Implement x[ilist] where ilist is a vector of integers."""
"""
Implement x[ilist] where ilist is a vector of integers.
"""
# sparse_grad doesn't go in here since it only affects the output
# of the grad() method.
__props__ = ()
......@@ -1777,7 +1854,11 @@ advanced_subtensor1 = AdvancedSubtensor1()
class AdvancedIncSubtensor1(Op):
"""Increments a subtensor using advanced slicing (list of index)"""
"""
Increments a subtensor using advanced slicing (list of index).
"""
__props__ = ('inplace', 'set_instead_of_inc')
def __init__(self, inplace=False, set_instead_of_inc=False):
......@@ -1828,13 +1909,18 @@ class AdvancedIncSubtensor1(Op):
def copy_of_x(self, x):
"""
:param x: a string giving the name of a C variable
pointing to an array
Parameters
----------
x: string
Gives the name of a C variable pointing to an array.
Returns
-------
C code expression to make a copy of x.
:return: C code expression to make a copy of x
Base class uses PyArrayObject *, subclasses may override for
different types of arrays.
Base class uses PyArrayObject *, subclasses may override for
different types of arrays.
"""
# Parameters of PyArrary_FromAny are:
# array
......@@ -1994,6 +2080,7 @@ def adv_index_broadcastable_pattern(a, idx):
For this, we make a fake ndarray and a fake idx and call use ask numpy
the output. From this, we find the output broadcast pattern.
"""
def replace_slice(v):
......@@ -2021,8 +2108,11 @@ def adv_index_broadcastable_pattern(a, idx):
class AdvancedSubtensor(Op):
"""Return a subtensor copy, using advanced indexing.
"""
Return a subtensor copy, using advanced indexing.
"""
# Should be used by __getitem__ and __getslice__, as follow:
# AdvancedSubtensor()(self, *args),
# if args contains and advanced indexing pattern
......@@ -2094,13 +2184,16 @@ advanced_subtensor = AdvancedSubtensor()
class AdvancedIncSubtensor(Op):
"""Increments a subtensor using advanced indexing.
"""
Increments a subtensor using advanced indexing.
:note: We need the numpy.inplace_increment() function currently
numpy's PR 326 to be able to make an inplace version of this
op.
Notes
-----
We need the numpy.inplace_increment() function, currently in numpy's
PR 326, to be able to make an inplace version of this op.
"""
__props__ = ("inplace", "set_instead_of_inc")
def __init__(self, inplace=False, set_instead_of_inc=False):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论