提交 32136eb7 authored 作者: Olivier Breuleux's avatar Olivier Breuleux

merge

......@@ -27,6 +27,7 @@ __docformat__ = "restructuredtext en"
from gof import \
CLinker, OpWiseCLinker, DualLinker, Linker, LocalLinker, PerformLinker, Profiler, \
Container, \
InconsistencyError, Env, \
Apply, Result, Constant, Value, \
Op, \
......@@ -35,7 +36,12 @@ from gof import \
Type, Generic, generic, \
object2, utils
from compile import function, eval_outputs, fast_compute, OpFromGraph
from compile import \
SymbolicInput, SymbolicInputKit, In, \
SymbolicOutput, Out, \
Mode, \
predefined_modes, predefined_linkers, predefined_optimizers, \
FunctionMaker, function, OpFromGraph #, eval_outputs, fast_compute
import tensor
import tensor_random
......
差异被折叠。
......@@ -8,6 +8,10 @@ from sparse import _is_dense, _is_sparse, _is_dense_result, _is_sparse_result
from sparse import _mtypes, _mtype_to_str
import random
import gof
def eval_outputs(outputs):
    """Compile `outputs` into a zero-input function, run it once, and
    return the first computed value.

    Local replacement for the removed ``compile.eval_outputs`` helper.
    """
    fn = compile.function([], outputs)
    return fn()[0]
class T_transpose(unittest.TestCase):
def setUp(self):
......@@ -23,7 +27,7 @@ class T_transpose(unittest.TestCase):
self.failUnless(ta.type.dtype == 'float64', ta.type.dtype)
self.failUnless(ta.type.format == 'csr', ta.type.format)
vta = compile.eval_outputs([ta])
vta = eval_outputs([ta])
self.failUnless(vta.shape == (3,5))
def test_transpose_csr(self):
a = as_sparse(sparse.csr_matrix(sparse.speye(5,3)))
......@@ -34,7 +38,7 @@ class T_transpose(unittest.TestCase):
self.failUnless(ta.type.dtype == 'float64', ta.type.dtype)
self.failUnless(ta.type.format == 'csc', ta.type.format)
vta = compile.eval_outputs([ta])
vta = eval_outputs([ta])
self.failUnless(vta.shape == (3,5))
class T_Add(unittest.TestCase):
......@@ -60,7 +64,7 @@ class T_Add(unittest.TestCase):
self.failUnless(apb.type.format == aR.type.format, apb.type.format)
self.failUnless(apb.type.format == bR.type.format, apb.type.format)
val = compile.eval_outputs([apb])
val = eval_outputs([apb])
self.failUnless(val.shape == (3,2))
self.failUnless(numpy.all(val.todense() == (a + b).todense()))
self.failUnless(numpy.all(val.todense() == numpy.array([[1., 2], [3, 4], [5, 6]])))
......@@ -85,7 +89,7 @@ class T_Add(unittest.TestCase):
self.failUnless(apb.type.dtype == aR.type.dtype, apb.type.dtype)
self.failUnless(apb.type.dtype == bR.type.dtype, apb.type.dtype)
val = compile.eval_outputs([apb])
val = eval_outputs([apb])
self.failUnless(val.shape == (3, 2))
self.failUnless(numpy.all(val == (a + b)))
self.failUnless(numpy.all(val == numpy.array([[1., 2], [3, 4], [5, 6]])))
......@@ -110,7 +114,7 @@ class T_Add(unittest.TestCase):
self.failUnless(apb.type.dtype == aR.type.dtype, apb.type.dtype)
self.failUnless(apb.type.dtype == bR.type.dtype, apb.type.dtype)
val = compile.eval_outputs([apb])
val = eval_outputs([apb])
self.failUnless(val.shape == (3, 2))
self.failUnless(numpy.all(val == (a + b)))
self.failUnless(numpy.all(val == numpy.array([[1., 2], [3, 4], [5, 6]])))
......@@ -122,14 +126,14 @@ class T_conversion(unittest.TestCase):
def test0(self):
a = tensor.as_tensor(numpy.random.rand(5))
s = csc_from_dense(a)
val = compile.eval_outputs([s])
val = eval_outputs([s])
self.failUnless(str(val.dtype)=='float64')
self.failUnless(val.format == 'csc')
def test1(self):
a = tensor.as_tensor(numpy.random.rand(5))
s = csr_from_dense(a)
val = compile.eval_outputs([s])
val = eval_outputs([s])
self.failUnless(str(val.dtype)=='float64')
self.failUnless(val.format == 'csr')
......@@ -138,7 +142,7 @@ class T_conversion(unittest.TestCase):
s = t((2,5))
d = dense_from_sparse(s)
s[0,0] = 1.0
val = compile.eval_outputs([d])
val = eval_outputs([d])
self.failUnless(str(val.dtype)=='float64')
self.failUnless(numpy.all(val[0] == [1,0,0,0,0]))
......@@ -159,7 +163,7 @@ class _testCase_dot(unittest.TestCase):
zop = dot(x,xT)
self.failUnless(_is_sparse_result(zop))
z = compile.eval_outputs([zop])
z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
self.failUnless(z.shape == (500,500))
self.failUnless(type(z) is mtype)
......@@ -190,7 +194,7 @@ class _testCase_dot(unittest.TestCase):
zop = dot(x,y)
self.failUnless(_is_sparse_result(zop))
z = compile.eval_outputs([zop])
z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
self.failUnless(z.shape == (500,2))
self.failUnless(type(z) is mtype)
......@@ -227,7 +231,7 @@ class _testCase_dot(unittest.TestCase):
# zop = dot(y, x)
zop = transpose(dot(y, x))
self.failUnless(_is_sparse_result(zop))
z = compile.eval_outputs([zop])
z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
self.failUnless(z.shape == (500,2))
# self.failUnless(type(z) is mtype)
......
差异被折叠。
......@@ -107,11 +107,11 @@ class _test_greedy_distribute(unittest.TestCase):
a, b, c, d, x, y, z = matrices('abcdxyz')
e = (a/z + b/x) * x * z
g = Env([a,b,c,d,x,y,z], [e])
print pprint.pp.process(g.outputs[0])
##print pprint.pp.process(g.outputs[0])
mul_canonizer.optimize(g)
gof.TopoOptimizer(gof.LocalOptGroup(local_fill_cut, local_fill_lift), order = 'out_to_in').optimize(g)
gof.TopoOptimizer(gof.LocalOptGroup(local_greedy_distributor), order = 'out_to_in').optimize(g)
print pprint.pp.process(g.outputs[0])
##print pprint.pp.process(g.outputs[0])
......@@ -131,10 +131,10 @@ class _test_canonize(unittest.TestCase):
# e = x / y / x
e = (x / x) * (y / y)
g = Env([x, y, z, a, b, c, d], [e])
print pprint.pp.process(g.outputs[0])
##print pprint.pp.process(g.outputs[0])
mul_canonizer.optimize(g)
gof.TopoOptimizer(gof.LocalOptGroup(local_fill_cut, local_fill_lift), order = 'out_to_in').optimize(g)
print pprint.pp.process(g.outputs[0])
##print pprint.pp.process(g.outputs[0])
# def test_plusmin(self):
# x, y, z = inputs()
......
## TODO: REDO THESE TESTS
import unittest
from tensor_random import *
......@@ -7,7 +9,7 @@ import compile
def Uniform(s, n):
    # Build a NumpyGenerator that draws from numpy's uniform distribution.
    # NOTE(review): `s` is presumably a seed for the RandomState and `n`
    # a size/count parameter passed through to NumpyGenerator -- the
    # definition of NumpyGenerator is not visible here; confirm.
    return NumpyGenerator(s, n, numpy.random.RandomState.uniform)
class T_Random(unittest.TestCase):
class T_Random:#(unittest.TestCase):
def test0(self):
rng = Uniform(12345, 2)
......
差异被折叠。
......@@ -7,6 +7,7 @@ import scalar
from scalar import Scalar
import gof
from gof.python25 import all
from copy import copy
# tensor depends on elemwise to provide definitions for several ops
......@@ -231,6 +232,15 @@ class Elemwise(Op):
else:
self.ufunc = None
    def __getstate__(self):
        # Pickle support: copy the instance dict and drop the compiled
        # ufunc, which is rebuilt in __setstate__.  NOTE(review): numpy
        # frompyfunc results are presumably not picklable -- confirm.
        d = copy(self.__dict__)
        d.pop('ufunc')
        return d
    def __setstate__(self, d):
        # Restore the pickled attributes, then rebuild the ufunc that
        # __getstate__ removed, from the wrapped scalar op's impl and
        # its declared input/output arity.
        self.__dict__.update(d)
        self.ufunc = numpy.frompyfunc(self.scalar_op.impl, self.scalar_op.nin, self.scalar_op.nout)
def make_node(self, *inputs):
"""
If the inputs have different number of dimensions, their shape
......
......@@ -12,7 +12,7 @@ from graph import \
Apply, Result, Constant, Value, view_roots
from link import \
Linker, LocalLinker, PerformLinker, WrapLinker, Profiler
Container, Linker, LocalLinker, PerformLinker, WrapLinker, Profiler
from op import \
Op
......@@ -22,7 +22,8 @@ from opt import \
MergeOptimizer, MergeOptMerge, \
LocalOptimizer, local_optimizer, LocalOptGroup, LocalOpKeyOptGroup, \
OpSub, OpRemove, PatternSub, \
NavigatorOptimizer, TopoOptimizer, OpKeyOptimizer
NavigatorOptimizer, TopoOptimizer, OpKeyOptimizer, \
PureThenInplaceOptimizer
from toolbox import \
Bookkeeper, History, Validator, ReplaceValidate, NodeFinder, PrintListener
......
......@@ -631,8 +631,8 @@ class CLinker(link.Linker):
input_storage,
output_storage)
return thunk, \
[link.Filter(input, storage) for input, storage in zip(self.env.inputs, input_storage)], \
[link.Filter(output, storage, True) for output, storage in zip(self.env.outputs, output_storage)], \
[link.Container(input, storage) for input, storage in zip(self.env.inputs, input_storage)], \
[link.Container(output, storage, True) for output, storage in zip(self.env.outputs, output_storage)], \
error_storage
def make_thunk(self, input_storage = None, output_storage = None):
......@@ -881,8 +881,8 @@ class OpWiseCLinker(link.LocalLinker):
f = link.streamline(env, thunks, order, no_recycling = no_recycling, profiler = profiler)
return f, [link.Filter(input, storage) for input, storage in zip(env.inputs, input_storage)], \
[link.Filter(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \
return f, [link.Container(input, storage) for input, storage in zip(env.inputs, input_storage)], \
[link.Container(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \
thunks, order
......@@ -948,6 +948,7 @@ class DualLinker(link.Linker):
no_recycling = self.no_recycling
_f, i1, o1, thunks1, order1 = link.PerformLinker().accept(env, no_recycling = no_recycling).make_all(**kwargs)
kwargs.pop('input_storage', None)
_f, i2, o2, thunks2, order2 = OpWiseCLinker().accept(env, no_recycling = no_recycling).make_all(**kwargs)
def f():
......
......@@ -184,7 +184,7 @@ class Result(utils.object2):
else:
return str(self.owner.op) + "." + str(self.index)
else:
return "<?>::" + str(self.type)
return "<%s>" % str(self.type)
def __repr__(self):
return str(self)
def clone(self):
......@@ -422,8 +422,6 @@ def clone_get_equiv(i, o, copy_inputs_and_orphans = True):
else:
d[input] = input
for apply in io_toposort(i, o):
for input in apply.inputs:
if input not in d:
......@@ -438,6 +436,10 @@ def clone_get_equiv(i, o, copy_inputs_and_orphans = True):
for output, new_output in zip(apply.outputs, new_apply.outputs):
d[output] = new_output
for output in o:
if output not in d:
d[output] = output.clone()
return d
def general_toposort(r_out, deps, debug_print = False):
......
"""WRITEME"""
import utils
import graph
from type import Type
import sys, traceback
from copy import copy
......@@ -109,27 +110,32 @@ class Linker(object):
return execute
class Filter(object):
"""WRITEME"""
def __init__(self, r, storage, readonly = False, strict = False, trace = ()):
self.r = r
self.type = r.type
class Container(object):
def __init__(self, r, storage, readonly = False, strict = False, name = None):
#self.r = r
if isinstance(r, Type):
self.type = r
else:
self.type = r.type
self.name = name or r.name
self.storage = storage
self.readonly = readonly
self.strict = strict
def __get(self):
return self.storage[0]
def __set(self, value):
if self.readonly:
raise Exception("Cannot set readonly storage: %s" % self.name)
try:
if self.readonly:
raise Exception("Cannot set readonly storage.")
if self.strict:
self.storage[0] = self.type.filter(value, strict = True)
else:
self.storage[0] = self.type.filter(value)
except:
raise_with_op(self.r)
except Exception, e:
e.args = e.args + (self.name,)
raise
data = property(__get, __set)
value = property(__get, __set)
def __str__(self):
return "<" + str(self.storage[0]) + ">"
def __repr__(self):
......@@ -260,8 +266,8 @@ class PerformLinker(LocalLinker):
f = streamline(env, thunks, order, no_recycling = no_recycling, profiler = profiler)
return f, [Filter(input, storage) for input, storage in zip(env.inputs, input_storage)], \
[Filter(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \
return f, [Container(input, storage) for input, storage in zip(env.inputs, input_storage)], \
[Container(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \
thunks, order
......@@ -333,7 +339,9 @@ class WrapLinker(Linker):
def make_thunk(self, **kwargs):
no_recycling = self.no_recycling
make_all = [l.make_all(**kwargs) for l in self.linkers]
make_all = [self.linkers[0].make_all(**kwargs)]
kwargs.pop('input_storage', None)
make_all += [l.make_all(**kwargs) for l in self.linkers[1:]]
fns, input_lists, output_lists, thunk_lists, order_lists \
= zip(*make_all)
......
......@@ -12,6 +12,7 @@ import toolbox
import op
from copy import copy
from collections import deque
import destroyhandler as dh
class Optimizer:
......@@ -61,8 +62,7 @@ class FromFunctionOptimizer(Optimizer):
def __init__(self, fn):
self.apply = fn
def add_requirements(self, env):
"""WRITEME"""
env.extend(gof.toolbox.ReplaceValidate)
env.extend(toolbox.ReplaceValidate())
def optimizer(f):
"""WRITEME"""
......@@ -215,7 +215,7 @@ class FromFunctionLocalOptimizer(LocalOptimizer):
def __init__(self, fn):
self.transform = fn
def add_requirements(self, env):
env.extend(gof.toolbox.ReplaceValidate)
env.extend(toolbox.ReplaceValidate())
def local_optimizer(f):
"""WRITEME"""
......@@ -624,6 +624,21 @@ def check_chain(r, *chain):
############
### Misc ###
############
class PureThenInplaceOptimizer(Optimizer):
    """Run a non-destructive optimizer, then enable destroy handling on
    the env, then run an in-place optimizer.

    `pure` and `inplace` are both callables applied to an env.
    """
    def __init__(self, pure, inplace):
        self.pure = pure
        self.inplace = inplace
    def apply(self, env):
        self.pure(env)
        # Install the DestroyHandler before in-place substitutions so the
        # env can validate destructive replacements made by `inplace`.
        env.extend(dh.DestroyHandler())
        self.inplace(env)
......
......@@ -63,6 +63,9 @@ class CLinkerType(object):
"""
raise AbstractFunctionError()
def c_init(self, name, sub):
raise AbstractFunctionError()
def c_extract(self, name, sub):
"""Required: Return c code to extract a PyObject * instance.
......
......@@ -86,7 +86,7 @@ class Scalar(Type):
return str(self.dtype)
def __repr__(self):
return "Scalar{%s}" % self.dtype
return "Scalar(%s)" % self.dtype
def c_literal(self, data):
if 'complex' in self.dtype:
......@@ -252,16 +252,17 @@ def upcast_out(*types):
return Scalar(dtype = Scalar.upcast(*types)),
def same_out(type):
return type,
def transfer_type(i):
assert type(i) == int
def f(*types):
return types[i],
f.__name__ = "transfer_type_%i" % i
return f
def specific_out(*spec):
def f(*types):
return spec
return f
class transfer_type:
    """Output-type policy: the single output takes the type of input `i`.

    NOTE(review): written as a class rather than the earlier closure form,
    presumably so instances can be pickled/compared -- confirm intent.
    """
    def __init__(self, i):
        # Index of the input whose type is transferred to the output.
        assert type(i) == int
        self.i = i
    def __call__(self, *types):
        # Return a 1-tuple: the i-th input type becomes the output type.
        return types[self.i],
class specific_out:
    """Output-type policy: always return the fixed tuple of types given
    at construction, regardless of the input types."""
    def __init__(self, *spec):
        self.spec = spec
    def __call__(self, *types):
        return self.spec
def int_out(*types):
    # Output-type policy: the output is always int64, whatever the inputs.
    return int64,
def float_out(*types):
......@@ -283,7 +284,7 @@ class ScalarOp(Op):
self.name = name
if output_types_preference is not None:
if not callable(output_types_preference):
raise TypeError("Expected a callable for the 'output_types_preference' argument to %s." % self.__class__)
raise TypeError("Expected a callable for the 'output_types_preference' argument to %s. (got: %s)" % (self.__class__, output_types_preference))
self.output_types_preference = output_types_preference
def make_node(self, *inputs):
......
......@@ -23,7 +23,6 @@ from gof.python25 import partial
### set up the external interface
from elemwise import Elemwise, DimShuffle, CAReduce, Sum
import tensor_random as random
_constructor_list = []
......@@ -113,7 +112,7 @@ def value(x):
class Tensor(Type):
"""Symbolic `Type` representing a numpy.ndarray value."""
def __init__(self, dtype, broadcastable):
def __init__(self, dtype, broadcastable, name = None):
"""Initialize self.dtype and self.broadcastable.
:Parameters:
......@@ -126,11 +125,13 @@ class Tensor(Type):
must be 1. Secondly, the length of this list is the number of
dimensions that an associated value must have. See
:doc:`broadcasting` for an explanation of how this list is used.
- `name`: str
Optional name for this type.
"""
self.dtype = str(dtype)
self.broadcastable = tuple(broadcastable)
self.dtype_specs() # error checking is done there
self.name = name
def filter(self, data, strict = False):
"""Convert `data` to something which can be associated to a `TensorResult`.
......@@ -206,10 +207,21 @@ class Tensor(Type):
return TensorResult(self, name = name)
def __str__(self):
return "%s(%s)" % (str(self.dtype), str(self.broadcastable))
if self.name:
return self.name
else:
b = self.broadcastable
#bcast = str(self.broadcastable)
bcast = {(): 'scalar',
(False,): 'vector',
(False, True): 'col',
(True, False): 'row',
(False, False): 'matrix'}.get(b, "%iD" % len(b) if not any(b) else str(b))
return "Tensor(%s, %s)" % (str(self.dtype), bcast)
def __repr__(self):
return "Tensor{%s, %s}" % (str(self.dtype), str(self.broadcastable))
return str(self)
#"Tensor{%s, %s}" % (str(self.dtype), str(self.broadcastable))
def c_declare(self, name, sub):
"""Override `CLinkerOp.c_declare` """
......@@ -1305,11 +1317,12 @@ class MakeVector(Op):
def __init__(self, stype):
self.stype = stype
def make_node(self, *inputs):
inputs = map(as_tensor, inputs)
assert all(a.type == self.stype for a in inputs)
return Apply(self, inputs, [Tensor(broadcastable = (False,),
dtype = self.stype.dtype)()])
def perform(self, inputs, (out,)):
return numpy.asarray([i[0] for i in inputs])
def perform(self, node, inputs, (out,)):
out[0] = numpy.asarray(inputs)
def grad(self, inputs, (gout,)):
return [None]*len(inputs)
......@@ -1374,6 +1387,16 @@ class Concatenate(Op):
[slice(None)] * (n_dims - axis - 1)] \
for k in range(len(sizes_along_axis))]
def get_vector_length(v):
    """Return the statically-known length of symbolic vector `v`, or None.

    The length can be deduced in three cases: `v` is a 1-d Constant
    (length of its data), `v` is produced by MakeVector (number of
    stacked inputs), or `v` is the `shape` of a tensor (that tensor's
    ndim).  Otherwise None is returned.
    """
    if isinstance(v, gof.Constant) and v.type.ndim == 1:
        return len(v.data)
    owner = v.owner
    if owner:
        if isinstance(owner.op, MakeVector):
            return len(owner.inputs)
        if owner.op == shape:
            return owner.inputs[0].type.ndim
    return None
def concatenate(tensors, axis=0):
"""
Convenience function to concatenate `Tensor`s along the given axis.
......@@ -1395,6 +1418,7 @@ def concatenate(tensors, axis=0):
if not hasattr(concatenate, 'obj'):
concatenate.obj = Concatenate()
return concatenate.obj(axis, *tensors)
>>>>>>> /tmp/tensor.py~other.Lj6QeV
class VerticalStack(Op):
"""
......
......@@ -7,6 +7,7 @@ import tensor as T
import numpy as N
import operator
import itertools
import sys
# Utilities
......@@ -40,8 +41,7 @@ dot_to_gemm = gof.PatternSub((T.dot, 'a', 'b'),
allow_multiple_clients = False)
@gof.optimizer
def insert_inplace_optimizer(self, env):
def _insert_inplace_optimizer(env):
"""
Usage: inplace_optimizer.optimize(env)
......@@ -66,14 +66,16 @@ def insert_inplace_optimizer(self, env):
for candidate_input in candidate_inputs:
inplace_pattern = dict(baseline, **{candidate_output: candidate_input})
try:
new = Elemwise(op.scalar_op, inplace_pattern).make_node(op.inputs)
env.replace_all_validate(dict(zip(node.outputs, new.outputs)))
except:
new = Elemwise(op.scalar_op, inplace_pattern).make_node(*node.inputs)
env.replace_all_validate(zip(node.outputs, new.outputs))
except Exception, e:
continue
candidate_inputs.remove(candidate_input)
node = new
baseline = inplace_pattern
break
insert_inplace_optimizer = gof.optimizer(_insert_inplace_optimizer)
inplace_optimizer = gof.SeqOptimizer(out2in(gemm_pattern_1),
out2in(dot_to_gemm),
......
差异被折叠。
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论