Updated documentation

上级 07d44ce0
"""A simple class to store ndarray data """
"""A simple class to store L{numpy.ndarray} data """
from gof import Result, Op, utils, AbstractFunctionError
import numpy
......@@ -10,26 +10,28 @@ from copy import copy
###########################
class BaseTensor(Result):
"""Result to store numpy.ndarray or equivalent via .data
"""
L{Result} to store L{numpy.ndarray} or equivalent via .data
Attributes:
_dtype - numpy dtype string such as 'int64' or 'float64' (among others)
_broadcastable - tuple of ints in (0,1) saying which dimensions of this
tensor are guaranteed to be 1, and up for broadcasting
@type _dtype: numpy dtype string such as 'int64' or 'float64' (among others)
@type _broadcastable: tuple of ints in (0,1)
@ivar _broadcastable: which dimensions of this tensor are guaranteed
to be 1, and up for broadcasting.
Properties:
dtype - read-only access to _dtype, which should not be changed
broadcastable - read-only access to _broadcastable, which should not be changed
This class does not implement python operators and has no dependencies
on the Ops that use it.
on the L{Op}s that use it.
"""
def __init__(self, dtype, broadcastable, role=None, name=None):
"""Initialize a Tensor
"""Initialize a L{Tensor}
@todo: Initialize a L{Tensor} or a L{BaseTensor}? -jpt
Note:
This does not actually allocate any data.
@note: This does not actually allocate any data.
"""
# data is not given here. This may seem a bit strange, but when data was
......@@ -56,7 +58,7 @@ class BaseTensor(Result):
# filter
#
def filter(self, arr):
"""cast to an ndarray and ensure arr has correct rank, shape"""
"""cast to an L{numpy.ndarray} and ensure arr has correct rank, shape"""
if not (isinstance(arr, numpy.ndarray) \
and arr.dtype==self.dtype):
arr = numpy.asarray(arr, dtype = self.dtype)
......@@ -80,7 +82,7 @@ class BaseTensor(Result):
"""Return python - C type correspondence tuple for self.data
Return a tuple (python type, c type, numpy typenum) that corresponds to
self.dtype. It is for use in C code generation.
L{self.dtype}. It is for use in C code generation.
"""
#TODO: add more type correspondences for e.g. int32, int64, float32,
#complex64, etc.
......@@ -240,21 +242,20 @@ class BaseTensor(Result):
class BaseTensorOp(Op):
"""
A basic Op subclass that can be used to make Ops that operate on Tensors.
A basic L{Op} subclass that can be used to make L{Op}s that operate on L{Tensor}s.
It is not mandatory to inherit from this class, but it is practical.
BasicTensorOp is parametrized as follows:
* nin: number of inputs
* nout: number of outputs
* out_tensor_class: BaseTensor subclass used to instantiate the outputs
* input_wrapper: returns a Tensor from its argument
* propagate_dtype: returns a list of dtypes corresponding to the
output dtypes from a list of input dtypes (if an input is
not a Tensor, the passed value will be None)
* propagate_broadcastable: returns a list of tuples corresponding to
the output broadcastable flags from the input broadcastable
flags (if an input is not a Tensor, the passed value will be
None).
@ivar nin: number of inputs
@ivar nout: number of outputs
@ivar out_tensor_class: L{BaseTensor} subclass used to instantiate the outputs
- input_wrapper: returns a L{Tensor} from its argument
- propagate_dtype: returns a list of dtypes corresponding to the
output dtypes from a list of input dtypes (if an input is not a
L{Tensor}, the passed value will be None)
- propagate_broadcastable: returns a list of tuples corresponding
to the output broadcastable flags from the input broadcastable flags
(if an input is not a L{Tensor}, the passed value will be None).
"""
nin = -1 # nin == -1 means: arbitrary number of inputs
......@@ -265,7 +266,7 @@ class BaseTensorOp(Op):
@classmethod
def input_wrapper(cls, obj):
"""
Returns a Result from an arbitrary-typed input, if possible.
Returns a L{Result} from an arbitrary-typed input, if possible.
"""
if isinstance(obj, BaseResult):
return obj
......
......@@ -27,14 +27,14 @@ def _mark_indestructible(results):
r.indestructible = True
class Function:
"""An 'executable' compiled from a graph
"""
An 'executable' compiled from a graph
This class is meant to be used as a function: the idea is to use
__call__(*args) and it will compute your graph's function on the args and
return the value(s) corresponding to the output(s).
Attributes
fn - the return value of linker.make_function(False)
@ivar fn: the return value of L{linker.make_function}(False)
Additional Attributes if keep_locals == True
inputs - inputs in the env
......@@ -44,12 +44,11 @@ class Function:
linker - the linker allocated from env
env - The env passed to the linker
REMARK Re: Memory ownership, aliasing, re-use
-------------------------------------------
Note that the objects returned by Function.__call__(self, *args) are owned
@note: B{Re: Memory ownership, aliasing, re-use:}
The objects returned by L{Function.__call__}(self, *args) are owned
by self, and that in general these outputs might be overwritten (in-place)
by subsequent calls to self.__call__(*args). Why? This behaviour is
necessary for inplace operations to work, and Function's linker might re-use
by subsequent calls to L{self.__call__}(*args). Why? This behaviour is
necessary for inplace operations to work, and L{Function}'s linker might re-use
memory from one execution to the next in order to make each execution faster.
"""
......@@ -61,18 +60,18 @@ class Function:
unpack_single = True,
except_unreachable_input = True,
keep_locals = True):
""" Copy the graph, optimize, and link it.
Parameters:
inputs - a list of results to be this function's inputs
outputs - a list of results to be this function's outputs
features - features to add to the env
optimizer - an optimizer to apply to the copied graph, before linking
linker_cls - a callable that takes an env and returns a Linker
profiler - a Profiler for the produced function (only valid if the
"""
Copy the graph, optimize, and link it.
@param inputs: a list of results to be this function's inputs
@param outputs: a list of results to be this function's outputs
@param features: features to add to the env
@param optimizer: an optimizer to apply to the copied graph, before linking
@param linker_cls: a callable that takes an env and returns a Linker
@param profiler: a L{Profiler} for the produced function (only valid if the
linker_cls's make_function takes a profiler argument)
unpack_single - unpack return value lists of length 1
- see Linker.make_function
keep_locals - add the local variables from __init__ to the class
@param unpack_single: unpack return value lists of length 1. @see: L{Linker.make_function}
@param keep_locals: add the local variables from __init__ to the class
"""
_mark_indestructible(outputs)
......
......@@ -19,7 +19,9 @@ def astensor(data):
##################
class DimShuffle(Op, Viewer):
"""
@todo: DOCUMENTATION? --jpt
"""
def __init__(self, input, new_order, inplace = True):
input = astensor(input)
......@@ -122,6 +124,9 @@ class Transpose(DimShuffle):
#################
class Broadcast(Op, Destroyer):
"""
@todo: DOCUMENTATION? --jpt
"""
def __init__(self, scalar_opclass, inputs, inplace_pattern = {}):
......@@ -356,11 +361,12 @@ class CAReduce(Op):
CAReduce(scalar_op, inputs, dimensions_to_reduce = None, init = None, shortcut = False)
The number of inputs must be the difference between the number of
outputs of scalar_op and its number of inputs. CAReduce holds
outputs of scalar_op and its number of inputs. L{CAReduce} holds
scalar states, the accumulators, in proportion to the number of
outputs of scalar_op and it updates them iteratively:
outputs of scalar_op and it updates them iteratively::
for x, y, ... in input0, input1, ...
scalar_state <- scalar_op(scalar_state, x, y, ...)
scalar_state <- scalar_op(scalar_state, x, y, ...)
The initial states are init if provided (they must be scalars),
else if there are as many states as inputs, a sample from each
......@@ -373,7 +379,7 @@ class CAReduce(Op):
multiply/and will return 0 at first sight of 0 and 'or' will
return 1 at first sight of 1.
In order to optimize memory usage patterns, CAReduce makes zero
In order to optimize memory usage patterns, L{CAReduce} makes zero
guarantees on the order in which it iterates over the dimensions
and the elements of the array(s). Therefore, to ensure consistent
results, the scalar operation represented by the reduction must be
......
......@@ -118,18 +118,21 @@ def make_loop(loop_orders, dtypes, loop_tasks, sub):
Make a nested loop over several arrays and associate specific code
to each level of nesting.
loop_orders: list of N tuples of length M. Each value of each
@type loop_orders: list of N tuples of length M.
@param loop_orders: Each value of each
tuple can be either the index of a dimension to loop over or
the letter 'x' which means there is no looping to be done
over that variable at that point (in other words we broadcast
over that dimension). If an entry is an integer, it will become
an alias of the entry of that rank.
loop_tasks: list of M+1 pieces of code. The ith loop_task is code
@type loop_tasks: list of M+1 pieces of code.
@param loop_tasks: The ith loop_task is code
to be executed just before going to the next element of the
ith dimension. The last is code to be executed at the very end.
sub: a dictionary that maps 'lv#' to a suitable variable name.
@type sub: a dictionary.
@param sub: Maps 'lv#' to a suitable variable name.
The 'lvi' variable corresponds to the ith element of loop_orders.
"""
......
......@@ -84,7 +84,7 @@ name: Theano
css: white
# The documented project's URL.
url: http://lgcm:8000/testenv/
url: http://lgcm.iro.umontreal.ca:8000/theano/
# HTML code for the project link in the navigation bar. If left
# unspecified, the project link will be generated based on the
......@@ -101,7 +101,8 @@ url: http://lgcm:8000/testenv/
#help: my_helpfile.html
# Whether or not to include a frames-based table of contents.
frames: yes
#frames: yes
frames: no
# Whether each class should be listed in its own section when
# generating LaTeX or PDF output.
......
......@@ -84,7 +84,7 @@ name: Theano
css: white
# The documented project's URL.
url: http://lgcm:8000/testenv/
url: http://lgcm.iro.umontreal.ca:8000/theano/
# HTML code for the project link in the navigation bar. If left
# unspecified, the project link will be generated based on the
......@@ -101,7 +101,8 @@ url: http://lgcm:8000/testenv/
#help: my_helpfile.html
# Whether or not to include a frames-based table of contents.
frames: yes
#frames: yes
frames: no
# Whether each class should be listed in its own section when
# generating LaTeX or PDF output.
......@@ -127,7 +128,7 @@ separate-classes: no
# in the output. Graphs are generated using the Graphviz "dot"
# executable. Graph types include: "classtree", "callgraph",
# "umlclass". Use "all" to include all graph types
graph: all
graph: classtree
# The path to the Graphviz "dot" executable, used to generate
# graphs.
......
......@@ -51,15 +51,15 @@ def compile_dir():
class CodeBlock:
"""
Represents a computation unit composed of:
* declare -> C code that declares variables for use by the computation
* behavior -> C code that performs the computation
* cleanup -> C code that cleans up things allocated or incref-ed in behavior
Represents a computation unit composed of declare, behavior, and cleanup.
@ivar declare: C code that declares variables for use by the computation
@ivar behavior: C code that performs the computation
@ivar cleanup: C code that cleans up things allocated or incref-ed in behavior
"""
def __init__(self, declare, behavior, cleanup, sub):
"""
Initialize a CodeBlock with templatized declare, behavior and cleanup.
Initialize a L{CodeBlock} with templatized declare, behavior and cleanup.
The sub parameter will be used in the other arguments' templates. sub
should contain a key called 'id' that maps to an identifier for this block.
The identifier will be used to determine the failure code and a label
......@@ -83,7 +83,7 @@ def failure_code(sub):
def code_gen(blocks):
"""
From a list of CodeBlock instances, returns a string that executes them
From a list of L{CodeBlock} instances, returns a string that executes them
all in sequence. eg for (decl1, task1, cleanup1) and (decl2, task2, cleanup2)
the returned string will be of the form:
......@@ -114,7 +114,7 @@ def struct_gen(args, struct_builders, blocks, sub):
Generates a struct conforming to the following specifications:
* args -> all of the PyObject* type, stored in the struct
they represent the storage and must be length 1 python lists.
* struct_builders -> list of CodeBlock instances such that
* struct_builders -> list of L{CodeBlock} instances such that
* declarations are in the struct
* behavior is in the constructor
* cleanup is in the destructor
......@@ -302,7 +302,7 @@ def struct_result_codeblocks(result, policies, id, symbol_table, sub):
symbol_table -> a dict that maps results to variable names. It is not read
by this function but a variable name for the result is computed and added
to the table.
sub -> dictionary for use by CodeBlock.
sub -> dictionary for use by L{CodeBlock}.
"""
name = "V%i" % id
......
......@@ -24,12 +24,12 @@ class DestroyHandler(Listener, Constraint, Orderings, Tool):
all of its views have been processed.
Examples:
* (x += 1) + (x += 1) -> fails because the first += makes the second
- (x += 1) + (x += 1) -> fails because the first += makes the second
invalid
* x += transpose_view(x) -> fails because the input that is destroyed
- x += transpose_view(x) -> fails because the input that is destroyed
depends on an input that shares the same data
* (a += b) + (c += a) -> succeeds but we have to do c += a first
* (a += b) + (b += c) + (c += a) -> fails because there's a cyclical
- (a += b) + (c += a) -> succeeds but we have to do c += a first
- (a += b) + (b += c) + (c += a) -> fails because there's a cyclical
dependency (no possible ordering)
This feature allows some optimizations (eg sub += for +) to be applied
......@@ -75,8 +75,8 @@ class DestroyHandler(Listener, Constraint, Orderings, Tool):
def publish(self):
"""
Publishes the following on the env:
* destroyers(r) -> returns all Ops that destroy the result r
* destroy_handler -> self
- destroyers(r) -> returns all L{Op}s that destroy the result r
- destroy_handler -> self
"""
def __destroyers(r):
ret = self.destroyers.get(r, {})
......@@ -89,9 +89,9 @@ class DestroyHandler(Listener, Constraint, Orderings, Tool):
"""
Returns a path from r to the result that it is ultimately
a view of, i.e. path such that:
* path[-1] == r
* path[i] == parent[path[i+1]]
* parent[path[0]] == None
- path[-1] == r
- path[i] == parent[path[i+1]]
- parent[path[0]] == None
"""
path = self.paths.get(r, None)
if path:
......@@ -165,8 +165,9 @@ class DestroyHandler(Listener, Constraint, Orderings, Tool):
Does a depth-first search to find cycles in the graph of
computation given a directed connection from an op to
its __pre__ set.
* seq -> sequence of nodes visited up to now
* r -> current node
@type seq: sequence
@param seq: nodes visited up to now
@param r: current node
If r is found in seq, we have a cycle and it is added to
the set of cycles.
"""
......@@ -198,9 +199,9 @@ class DestroyHandler(Listener, Constraint, Orderings, Tool):
def get_maps(self, op):
"""
Returns vmap, dmap where:
* vmap -> {output : [inputs output is a view of]}
* dmap -> {output : [inputs that are destroyed by the Op
@return: (vmap, dmap) where:
- vmap -> {output : [inputs output is a view of]}
- dmap -> {output : [inputs that are destroyed by the Op
(and presumably returned as that output)]}
"""
try: vmap = op.view_map()
......@@ -398,10 +399,10 @@ class DestroyHandler(Listener, Constraint, Orderings, Tool):
def validate(self):
"""
Raises an InconsistencyError on any of the following conditions:
* Some results are destroyed by more than one Op
* There is a cycle of preconditions
* An Op attempts to destroy an indestructible result.
Raises an L{InconsistencyError} on any of the following conditions:
- Some results are destroyed by more than one L{Op}
- There is a cycle of preconditions
- An L{Op} attempts to destroy an indestructible result.
"""
if self.dups:
raise InconsistencyError("The following values are destroyed more than once: %s" % self.dups)
......@@ -415,9 +416,9 @@ class DestroyHandler(Listener, Constraint, Orderings, Tool):
def orderings(self):
"""
Returns a dict of {op : set(ops that must be computed before it)} according
to DestroyHandler.
to L{DestroyHandler}.
In particular, all the users of a destroyed result have priority over the
op that destroys the result.
L{Op} that destroys the result.
"""
ords = {}
for foundation, destroyers in self.destroyers.items():
......
......@@ -16,7 +16,7 @@ class Feature(object):
def __init__(self, env):
"""
Initializes the Feature's env field to the parameter
Initializes the L{Feature}'s env field to the parameter
provided.
"""
self.env = env
......@@ -24,31 +24,30 @@ class Feature(object):
class Listener(Feature):
"""
When registered by an env, each listener is informed of any op
When registered by an L{Env}, each listener is informed of any L{Op}
entering or leaving the subgraph (which happens at construction
time and whenever there is a replacement).
"""
def on_import(self, op):
"""
This method is called by the env whenever a new op is
This method is called by the L{Env} whenever a new L{Op} is
added to the graph.
"""
raise utils.AbstractFunctionError()
def on_prune(self, op):
"""
This method is called by the env whenever an op is
This method is called by the L{Env} whenever an L{Op} is
removed from the graph.
"""
raise utils.AbstractFunctionError()
def on_rewire(self, clients, r, new_r):
"""
clients -> (op, i) pairs such that op.inputs[i] is new_r
but used to be r
r -> the old result that was used by the ops in clients
new_r -> the new result that is now used by the ops in clients
@param clients: (op, i) pairs such that op.inputs[i] is new_r but used to be r
@param r: the old result that was used by the L{Op}s in clients
@param new_r: the new result that is now used by the L{Op}s in clients
Note that the change from r to new_r is done before this
method is called.
......@@ -58,14 +57,14 @@ class Listener(Feature):
class Constraint(Feature):
"""
When registered by an env, a Constraint can restrict the ops that
can be in the subgraph or restrict the ways ops interact with each
When registered by an L{Env}, a L{Constraint} can restrict the L{Op}s that
can be in the subgraph or restrict the ways L{Op}s interact with each
other.
"""
def validate(self):
"""
Raises an L{InconsistencyError} if the env is currently
Raises an L{InconsistencyError} if the L{Env} is currently
invalid from the perspective of this object.
"""
raise utils.AbstractFunctionError()
......@@ -73,29 +72,29 @@ class Constraint(Feature):
class Orderings(Feature):
"""
When registered by an env, an Orderings object can provide supplemental
When registered by an L{Env}, an L{Orderings} object can provide supplemental
ordering constraints to the subgraph's topological sort.
"""
def orderings(self):
"""
Returns {op: set(ops that must be evaluated before this op), ...}
This is called by env.orderings() and used in env.toposort() but
not in env.io_toposort().
This is called by L{Env.orderings}() and used in L{Env.toposort}() but
not in L{Env.io_toposort}().
"""
raise utils.AbstractFunctionError()
class Tool(Feature):
"""
A Tool can extend the functionality of an env so that, for example,
A L{Tool} can extend the functionality of an L{Env} so that, for example,
optimizations can have access to efficient ways to search the graph.
"""
def publish(self):
"""
This is only called once by the env, when the Tool is added.
Adds methods to env.
This is only called once by the L{Env}, when the L{Tool} is added.
Adds methods to L{Env}.
"""
raise utils.AbstractFunctionError()
......
......@@ -21,7 +21,8 @@ is_op = utils.attr_checker('inputs', 'outputs')
def inputs(o):
"""
o -> list of output Results
@type o: list
@param o: output L{Result}s
Returns the set of inputs necessary to compute the outputs in o
such that input.owner is None.
......@@ -41,11 +42,13 @@ def inputs(o):
def results_and_orphans(i, o, except_unreachable_input=False):
"""
i -> list of input Results
o -> list of output Results
@type i: list
@param i: input L{Result}s
@type o: list
@param o: output L{Result}s
Returns the pair (results, orphans). The former is the set of
Results that are involved in the subgraph that lies between i and
L{Result}s that are involved in the subgraph that lies between i and
o. This includes i, o, orphans(i, o) and all results of all
intermediary steps from i to o. The second element of the returned
pair is orphans(i, o).
......@@ -88,13 +91,15 @@ results_and_orphans.E_unreached = 'there were unreachable inputs'
def ops(i, o):
"""
i -> list of input Results
o -> list of output Results
@type i: list
@param i: input L{Result}s
@type o: list
@param o: output L{Result}s
Returns the set of ops that are contained within the subgraph
that lies between i and o, including the owners of the Results in
that lies between i and o, including the owners of the L{Result}s in
o and intermediary ops between i and o, but not the owners of the
Results in i.
L{Result}s in i.
"""
ops = set()
results, orphans = results_and_orphans(i, o)
......@@ -107,8 +112,10 @@ def ops(i, o):
def results(i, o):
"""
i -> list of input Results
o -> list of output Results
@type i: list
@param i: input L{Result}s
@type o: list
@param o: output L{Result}s
Returns the set of Results that are involved in the subgraph
that lies between i and o. This includes i, o, orphans(i, o)
......@@ -119,8 +126,10 @@ def results(i, o):
def orphans(i, o):
"""
i -> list of input Results
o -> list of output Results
@type i: list
@param i: input L{Result}s
@type o: list
@param o: output L{Result}s
Returns the set of Results which one or more Results in o depend
on but are neither in i nor in the subgraph that lies between
......@@ -133,9 +142,12 @@ def orphans(i, o):
def clone(i, o, copy_inputs = False):
"""
i -> list of input Results
o -> list of output Results
copy_inputs -> if True, the inputs will be copied (defaults to False)
@type i: list
@param i: input L{Result}s
@type o: list
@param o: output L{Result}s
@type copy_inputs: bool
@param copy_inputs: if True, the inputs will be copied (defaults to False)
Copies the subgraph contained between i and o and returns the
outputs of that copy (corresponding to o).
......@@ -146,14 +158,18 @@ def clone(i, o, copy_inputs = False):
def clone_get_equiv(i, o, copy_inputs_and_orphans = False):
"""
i -> list of input Results
o -> list of output Results
copy_inputs_and_orphans -> if True, the inputs and the orphans
will be replaced in the cloned graph by copies available in
the equiv dictionary returned by the function (copy_inputs
defaults to False)
Returns equiv a dictionary mapping each result and op in the
@type i: list
@param i: input L{Result}s
@type o: list
@param o: output L{Result}s
@type copy_inputs_and_orphans: bool
@param copy_inputs_and_orphans: if True, the inputs and the orphans
will be replaced in the cloned graph by copies available
in the equiv dictionary returned by the function
(copy_inputs_and_orphans defaults to False)
@rtype: a dictionary
@return: equiv mapping each L{Result} and L{Op} in the
graph delimited by i and o to a copy (akin to deepcopy's memo).
"""
......@@ -190,14 +206,17 @@ def clone_get_equiv(i, o, copy_inputs_and_orphans = False):
def io_toposort(i, o, orderings = {}):
"""
i -> list of input Results
o -> list of output Results
orderings -> {op: [requirements for op]} (defaults to {})
Returns an ordered list of Ops that belong in the subgraph between
i and o which respects the following constraints:
@type i: list
@param i: input L{Result}s
@type o: list
@param o: output L{Result}s
@param orderings: {op: [requirements for op]} (defaults to {})
@rtype: ordered list
@return: L{Op}s that belong in the subgraph between i and o which
respects the following constraints:
- all inputs in i are assumed to be already computed
- the Ops that compute an Op's inputs must be computed before it
- the L{Op}s that compute an L{Op}'s inputs must be computed before it
- the orderings specified in the optional orderings parameter must be satisfied
Note that this function does not take into account ordering information
......@@ -226,11 +245,15 @@ def as_string(i, o,
leaf_formatter = default_leaf_formatter,
node_formatter = default_node_formatter):
"""
i -> list of input Results
o -> list of output Results
leaf_formatter -> function that takes a result and returns a string to describe it
node_formatter -> function that takes an op and the list of strings corresponding
to its arguments and returns a string to describe it
@type i: list
@param i: input L{Result}s
@type o: list
@param o: output L{Result}s
@type leaf_formatter: function
@param leaf_formatter: takes a L{Result} and returns a string to describe it
@type node_formatter: function
@param node_formatter: takes an L{Op} and the list of strings
corresponding to its arguments and returns a string to describe it
Returns a string representation of the subgraph between i and o. If the same
op is used by several other ops, the first occurrence will be marked as
......
......@@ -12,8 +12,8 @@ def thunk_hook(type, value, trace):
This function is meant to replace excepthook and do some
special work if the exception value has a __thunk_trace__
field. In that case, it retrieves the field, which should
contain a trace as returned by traceback.extract_stack,
and prints it out on stderr.
contain a trace as returned by L{traceback.extract_stack},
and prints it out on L{stderr}.
The normal excepthook is then called.
"""
......@@ -51,10 +51,10 @@ class Linker:
This function must return a triplet (function, input_results, output_results)
where function is a thunk that operates on the returned results. If inplace
is True, the input_results and output_results lists will be the same as the
inputs and outputs of the graph provided to the Linker. Else, independent
inputs and outputs of the graph provided to the L{Linker}. Else, independent
results will be returned.
Example:
Example::
e = x + y
env = Env([x, y], [e])
fn, (new_x, new_y), (new_e, ) = MyLinker(env).make_thunk(inplace)
......@@ -69,12 +69,12 @@ class Linker:
def make_function(self, inplace = False, unpack_single = True, **kwargs):
"""
Returns a function that takes values corresponding to the inputs of the
env used by this Linker and returns values corresponding the the outputs
env used by this L{Linker} and returns values corresponding to the outputs
of that env. If inplace is True, the calculations will operate in the
same storage the env uses, else independent storage will be allocated
for the function.
Example:
Example::
e = x + y
env = Env([x, y], [e])
fn = MyLinker(env).make_function(inplace)
......@@ -111,8 +111,8 @@ class Linker:
class PerformLinker(Linker):
"""
Basic Linker subclass that calls the perform method on each op in
the env in the order given by env.toposort.
Basic L{Linker} subclass that calls the perform method on each L{Op} in
the L{Env} in the order given by L{Env.toposort}.
"""
def __init__(self, env):
......@@ -160,18 +160,18 @@ class Stats:
class Profiler:
"""
Collects performance statistics on a function on a per-op
or per-op-class basis.
Collects performance statistics on a function on a per-L{Op}
or per-L{Op}-class basis.
"""
def __init__(self, ignore = [], by_class = True):
"""
Creates a Profiler. If by_class is True, stats will
be collected for each Op class, adding the totals for
each occurrence of that Op in the computation. If
Creates a L{Profiler}. If by_class is True, stats will
be collected for each L{Op} class, adding the totals for
each occurrence of that L{Op} in the computation. If
by_class is False, each node will be timed individually.
All op classes or ops (depending on the value of by_class)
All L{Op} classes or L{Op}s (depending on the value of by_class)
listed in ignore will not be timed.
"""
self.ignore = ignore
......
"""
Contains the Op class, which is the base interface for all operations
Contains the L{Op} class, which is the base interface for all operations
compatible with gof's graph manipulation routines.
"""
......@@ -104,7 +104,6 @@ class Op(object):
"""
The point of this function is:
1. to save the subclass's __init__ function always having to set the role of the outputs
2. to prevent accidentally re-setting outputs, which would probably be a bug
"""
if not hasattr(self, '_outputs') or self._outputs is None:
......@@ -215,17 +214,17 @@ class Op(object):
def c_validate_update_cleanup(self, inputs, outputs, sub):
"""
Clean up things allocated by c_validate().
Clean up things allocated by L{c_validate}().
"""
raise AbstractFunctionError()
def c_code(self, inputs, outputs, sub):
"""
Returns templated C code that does the computation associated
to this Op. You may assume that input validation and output
to this L{Op}. You may assume that input validation and output
allocation have already been done.
You may use the variable names defined by c_var_names() in
You may use the variable names defined by L{c_var_names}() in
the templates.
"""
raise AbstractFunctionError()
......@@ -257,7 +256,7 @@ class Op(object):
def c_support_code(self):
"""
Return utility code for use by this Op. It may refer to support code
Return utility code for use by this L{Op}. It may refer to support code
defined for its input L{Result}s.
"""
raise AbstractFunctionError()
......
......@@ -10,24 +10,24 @@ import ext
class Optimizer:
"""
An Optimizer can be applied to an env to transform it.
An L{Optimizer} can be applied to an L{Env} to transform it.
It can represent an optimization or in general any kind
of transformation you could apply to an env.
of transformation you could apply to an L{Env}.
"""
def apply(self, env):
"""
Applies the optimization to the provided env. It may
use all the methods defined by the env. If the optimizer
needs to use a certain tool, such as an InstanceFinder,
it should set the __env_require__ field to a list of
what needs to be registered with the Env.
Applies the optimization to the provided L{Env}. It may use all
the methods defined by the L{Env}. If the L{Optimizer} needs
to use a certain tool, such as an L{InstanceFinder}, it should
set the L{__env_require__} field to a list of what needs to be
registered with the L{Env}.
"""
pass
def optimize(self, env):
"""
This is meant as a shortcut to:
This is meant as a shortcut to::
env.satisfy(opt)
opt.apply(env)
"""
......@@ -44,7 +44,7 @@ DummyOpt.__doc__ = "Does nothing."
class SeqOptimizer(Optimizer, list):
"""
Takes a list of Optimizer instances and applies them
Takes a list of L{Optimizer} instances and applies them
sequentially.
"""
......@@ -55,7 +55,7 @@ class SeqOptimizer(Optimizer, list):
def apply(self, env):
"""
Applies each optimizer in self in turn.
Applies each L{Optimizer} in self in turn.
"""
for optimizer in self:
optimizer.optimize(env)
......@@ -70,12 +70,12 @@ class SeqOptimizer(Optimizer, list):
class LocalOptimizer(Optimizer):
"""
Generic Optimizer class that considers local parts of
the env. It must be subclassed and should override the
Generic L{Optimizer} class that considers local parts of
the L{Env}. It must be subclassed and should override the
following two methods:
* candidates(env) -> returns a set of ops that can be
- candidates(env) -> returns a set of ops that can be
optimized
* apply_on_op(env, op) -> for each op in candidates,
- apply_on_op(env, op) -> for each op in candidates,
this function will be called to perform the actual
optimization.
"""
......@@ -105,8 +105,8 @@ class LocalOptimizer(Optimizer):
class OpSpecificOptimizer(LocalOptimizer):
"""
Generic optimizer that applies only to ops of a certain
type. The type in question is accessed through self.opclass.
Generic L{Optimizer} that applies only to ops of a certain
type. The type in question is accessed through L{self.opclass}.
opclass can also be a class variable of the subclass.
"""
......@@ -114,7 +114,7 @@ class OpSpecificOptimizer(LocalOptimizer):
def candidates(self, env):
"""
Returns all instances of self.opclass.
Returns all instances of L{self.opclass}.
"""
return env.get_instances_of(self.opclass)
......@@ -123,7 +123,7 @@ class OpSpecificOptimizer(LocalOptimizer):
class OpSubOptimizer(Optimizer):
"""
Replaces all ops of a certain type by ops of another type that
Replaces all L{Op}s of a certain type by L{Op}s of another type that
take the same inputs as what they are replacing.
e.g. OpSubOptimizer(add, sub) ==> add(div(x, y), add(y, x)) -> sub(div(x, y), sub(y, x))
......@@ -212,7 +212,7 @@ class OpRemover(Optimizer):
class PatternOptimizer(OpSpecificOptimizer):
"""
Replaces all occurrences of the input pattern by the output pattern.
Replaces all occurrences of the input pattern by the output pattern::
input_pattern ::= (OpClass, <sub_pattern1>, <sub_pattern2>, ...)
input_pattern ::= dict(pattern = <input_pattern>,
......
"""
Contains the Result class, which is the base interface for a
value that is the input or the output of an Op.
Contains the L{Result} class, which is the base interface for a
value that is the input or the output of an L{Op}.
"""
......@@ -21,7 +21,7 @@ __all__ = ['Result',
### CLEANUP - DO WE REALLY EVEN THE STATE ANYMORE? ###
class StateError(Exception):
"""The state of the Result is a problem"""
"""The state of the L{Result} is a problem"""
# Result state keywords
......@@ -35,7 +35,7 @@ class Computed : """Memory has been allocated, contents are the owner's output."
############################
class Result(object):
"""Base class for storing Op inputs and outputs
"""Base class for storing L{Op} inputs and outputs
Attributes:
_role - None or (owner, index) #or BrokenLink
......@@ -161,10 +161,10 @@ class Result(object):
"""
Raise an exception if the data is not of an acceptable type.
If a subclass overrides this function, __set_data will use it
If a subclass overrides this function, L{__set_data} will use it
to check that the argument can be used properly. This gives a
subclass the opportunity to ensure that the contents of
self._data remain sensible.
L{self._data} remain sensible.
Returns data or an appropriately wrapped data.
"""
......@@ -185,7 +185,7 @@ class Result(object):
def c_declare(self, name, sub):
"""
Declares variables that will be instantiated by c_data_extract.
Declares variables that will be instantiated by L{c_extract}.
"""
raise AbstractFunctionError()
......@@ -193,18 +193,21 @@ class Result(object):
"""
The code returned from this function must be templated using
"%(name)s", representing the name that the caller wants to
call this Result. The Python object self.data is in a
call this L{Result}. The Python object self.data is in a
variable called "py_%(name)s" and this code must set the
variables declared by c_declare to something representative
of py_%(name)s. If the data is improper, set an appropriate
exception and insert "%(fail)s".
@todo: Point out that template filling (via sub) is now performed
by this function. --jpt
"""
raise AbstractFunctionError()
def c_cleanup(self, name, sub):
"""
This returns C code that should deallocate whatever
c_data_extract allocated or decrease the reference counts. Do
L{c_extract} allocated or decrease the reference counts. Do
not decrease py_%(name)s's reference count.
"""
raise AbstractFunctionError()
......@@ -221,27 +224,27 @@ class Result(object):
def c_compile_args(self):
"""
Return a list of compile args recommended to manipulate this Result.
Return a list of compile args recommended to manipulate this L{Result}.
"""
raise AbstractFunctionError()
def c_headers(self):
"""
Return a list of header files that must be included from C to manipulate
this Result.
this L{Result}.
"""
raise AbstractFunctionError()
def c_libraries(self):
"""
Return a list of libraries to link against to manipulate this Result.
Return a list of libraries to link against to manipulate this L{Result}.
"""
raise AbstractFunctionError()
def c_support_code(self):
"""
Return utility code for use by this Result or Ops manipulating this
Result.
Return utility code for use by this L{Result} or L{Op}s manipulating this
L{Result}.
"""
raise AbstractFunctionError()
......
......@@ -12,7 +12,8 @@ hashgen.next = 0
class OmegaError(Exception): pass
class AbstractFunctionError(Exception):
"""To be raised by functions defined as part of an interface.
"""
To be raised by functions defined as part of an interface.
When the user sees such an error, it is because an important interface
function has been left out of an implementation class.
......@@ -94,9 +95,10 @@ def partial(func, *args, **keywords):
class ClsInit(type):
"""Class initializer for Op subclasses"""
"""Class initializer for L{Op} subclasses"""
def __init__(cls, name, bases, dct):
"""Validate and initialize the Op subclass 'cls'
"""
Validate and initialize the L{Op} subclass 'cls'
This function:
- changes class attributes input_names and output_names to be lists if they are single strings.
......
......@@ -19,20 +19,24 @@ def _pack_result(arg):
return arg
def grad_sources_inputs(sources, graph_inputs):
"""Return a dictionary mapping each result necessary for a source to its gradient
"""
@rtype: dictionary
@return: dictionary mapping each result necessary for a source to its gradient.
sources - a list of gradient sources (explained below)
graph_inputs - a list of results considered to be constant
@type sources: list
@param sources: gradient sources (explained below)
@type graph_inputs: list
@param graph_inputs: results considered to be constant
A gradient source is a pair (r, g_r), in which r is a result, and g_r is a
result that is a gradient wrt r.
This function traverses the graph backward from the 'r' sources,
calling op.grad(...) when it is provided by an op, and at least one of the
outputs of the op has an associated gradient.
calling L{Op.grad}(...) when it is provided by an L{Op}, and at least one of the
outputs of the L{Op} has an associated gradient.
The op.grad(...) functions may be called in several ways (for the
convenience of the op implementer) depending on the number of inputs and
The L{Op.grad}(...) functions may be called in several ways (for the
convenience of the L{Op} implementer) depending on the number of inputs and
outputs.
If there is one input and one output:
......@@ -47,13 +51,13 @@ def grad_sources_inputs(sources, graph_inputs):
If there are multiple inputs and outputs:
op.grad( op.inputs, [grad(o) for o in op.outputs[0]])
This function expects the op.grad(...) function to return the gradient
expression [results] associated with the inputs of the op. If the op has a
single input, it should return a single result; if the op has multiple
This function expects the L{Op.grad}(...) function to return the gradient
expression [results] associated with the inputs of the L{Op}. If the L{Op} has a
single input, it should return a single result; if the L{Op} has multiple
inputs, it should return a list of results corresponding to the gradients in
the same order as the inputs.
For each input wrt to which an op is not differentiable, it should return
For each input wrt to which an L{Op} is not differentiable, it should return
None instead of a result instance.
"""
......
......@@ -64,15 +64,15 @@ inplace_optimizer = InplaceOptimizer()
This variable is used in compile.prog as the optimizer for all programs built
using either compile.single, compile.to_func, and compile.prog.
if 0:
Old code::
if 0:
def optimizer(lst):
begin = gof.SeqOptimizer([])
end = gof.SeqOptimizer([gof.DummyRemover])
seq_opt = gof.SeqOptimizer(begin + lst + end)
return gof.PythonOpt(gof.MergeOptMerge(seq_opt))
if 0:
if 0:
optimizer_begin = gof.SeqOptimizer([opt for name, opt in [
['double_transpose_eliminator', pattern_opt((transpose, (transpose, 'x')), 'x')],
......@@ -88,6 +88,6 @@ if 0:
['add_to_iadd_reverse', pattern_opt((add_elemwise, 'x', 'y'),
(iadd_elemwise, 'y', 'x'))]]])
# ['remove_copies', gof.OpRemover(array_copy)],
# [None, gof.DummyRemover] # has to be at the end
# ['remove_copies', gof.OpRemover(array_copy)],
# [None, gof.DummyRemover] # has to be at the end
"""
......@@ -3,7 +3,7 @@ Classes for handling sparse matrices.
To read about different sparse formats, see U{http://www-users.cs.umn.edu/~saad/software/SPARSKIT/paper.ps}.
@todo Automatic methods for determining best sparse format?
@todo: Automatic methods for determining best sparse format?
"""
import copy #for __copy__
......@@ -199,8 +199,8 @@ class Dot(gof.op.Op):
self.grad_preserves_dense = grad_preserves_dense
def perform(self):
"""
@todo Verify that output is sufficiently sparse, and raise a warning if it is not
@todo Also determine that we are storing the output in the best storage format?
@todo: Verify that output is sufficiently sparse, and raise a warning if it is not
@todo: Also determine that we are storing the output in the best storage format?
"""
self.outputs[0].data = self.inputs[0].data.dot(self.inputs[1].data)
def grad(self, (x, y), (gz,)):
......@@ -216,7 +216,7 @@ class Dot(gof.op.Op):
return self.__class__(new_inputs[0], new_inputs[1], self.grad_preserves_dense)
def dot(x, y, grad_preserves_dense=True):
"""
@todo Maybe the triple-transposition formulation (when x is dense)
@todo: Maybe the triple-transposition formulation (when x is dense)
is slow. See if there is a direct way to do this.
"""
if hasattr(x, 'getnnz'): x = assparse(x)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论