提交 a2c41283 authored 作者: Olivier Breuleux's avatar Olivier Breuleux

Function serialization and many fixes for the tests

上级 0d8dc459
......@@ -9,7 +9,10 @@ from gof import \
Type, Generic, generic, \
object2, utils
from compile import FunctionMaker, function, OpFromGraph #, eval_outputs, fast_compute
from compile import \
Mode, \
predefined_modes, predefined_linkers, predefined_optimizers, \
FunctionMaker, function, OpFromGraph #, eval_outputs, fast_compute
import tensor
import tensor_random
......
差异被折叠。
......@@ -8,6 +8,10 @@ from sparse import _is_dense, _is_sparse, _is_dense_result, _is_sparse_result
from sparse import _mtypes, _mtype_to_str
import random
import gof
def eval_outputs(outputs):
    # Compile a zero-argument function computing `outputs` and run it,
    # returning the first computed value.
    fn = compile.function([], outputs)
    return fn()[0]
class T_transpose(unittest.TestCase):
def setUp(self):
......@@ -23,7 +27,7 @@ class T_transpose(unittest.TestCase):
self.failUnless(ta.type.dtype == 'float64', ta.type.dtype)
self.failUnless(ta.type.format == 'csr', ta.type.format)
vta = compile.eval_outputs([ta])
vta = eval_outputs([ta])
self.failUnless(vta.shape == (3,5))
def test_transpose_csr(self):
a = as_sparse(sparse.csr_matrix(sparse.speye(5,3)))
......@@ -34,7 +38,7 @@ class T_transpose(unittest.TestCase):
self.failUnless(ta.type.dtype == 'float64', ta.type.dtype)
self.failUnless(ta.type.format == 'csc', ta.type.format)
vta = compile.eval_outputs([ta])
vta = eval_outputs([ta])
self.failUnless(vta.shape == (3,5))
class T_Add(unittest.TestCase):
......@@ -60,7 +64,7 @@ class T_Add(unittest.TestCase):
self.failUnless(apb.type.format == aR.type.format, apb.type.format)
self.failUnless(apb.type.format == bR.type.format, apb.type.format)
val = compile.eval_outputs([apb])
val = eval_outputs([apb])
self.failUnless(val.shape == (3,2))
self.failUnless(numpy.all(val.todense() == (a + b).todense()))
self.failUnless(numpy.all(val.todense() == numpy.array([[1., 2], [3, 4], [5, 6]])))
......@@ -85,7 +89,7 @@ class T_Add(unittest.TestCase):
self.failUnless(apb.type.dtype == aR.type.dtype, apb.type.dtype)
self.failUnless(apb.type.dtype == bR.type.dtype, apb.type.dtype)
val = compile.eval_outputs([apb])
val = eval_outputs([apb])
self.failUnless(val.shape == (3, 2))
self.failUnless(numpy.all(val == (a + b)))
self.failUnless(numpy.all(val == numpy.array([[1., 2], [3, 4], [5, 6]])))
......@@ -110,7 +114,7 @@ class T_Add(unittest.TestCase):
self.failUnless(apb.type.dtype == aR.type.dtype, apb.type.dtype)
self.failUnless(apb.type.dtype == bR.type.dtype, apb.type.dtype)
val = compile.eval_outputs([apb])
val = eval_outputs([apb])
self.failUnless(val.shape == (3, 2))
self.failUnless(numpy.all(val == (a + b)))
self.failUnless(numpy.all(val == numpy.array([[1., 2], [3, 4], [5, 6]])))
......@@ -122,14 +126,14 @@ class T_conversion(unittest.TestCase):
def test0(self):
a = tensor.as_tensor(numpy.random.rand(5))
s = csc_from_dense(a)
val = compile.eval_outputs([s])
val = eval_outputs([s])
self.failUnless(str(val.dtype)=='float64')
self.failUnless(val.format == 'csc')
def test1(self):
a = tensor.as_tensor(numpy.random.rand(5))
s = csr_from_dense(a)
val = compile.eval_outputs([s])
val = eval_outputs([s])
self.failUnless(str(val.dtype)=='float64')
self.failUnless(val.format == 'csr')
......@@ -138,7 +142,7 @@ class T_conversion(unittest.TestCase):
s = t((2,5))
d = dense_from_sparse(s)
s[0,0] = 1.0
val = compile.eval_outputs([d])
val = eval_outputs([d])
self.failUnless(str(val.dtype)=='float64')
self.failUnless(numpy.all(val[0] == [1,0,0,0,0]))
......@@ -159,7 +163,7 @@ class _testCase_dot(unittest.TestCase):
zop = dot(x,xT)
self.failUnless(_is_sparse_result(zop))
z = compile.eval_outputs([zop])
z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
self.failUnless(z.shape == (500,500))
self.failUnless(type(z) is mtype)
......@@ -190,7 +194,7 @@ class _testCase_dot(unittest.TestCase):
zop = dot(x,y)
self.failUnless(_is_sparse_result(zop))
z = compile.eval_outputs([zop])
z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
self.failUnless(z.shape == (500,2))
self.failUnless(type(z) is mtype)
......@@ -227,7 +231,7 @@ class _testCase_dot(unittest.TestCase):
# zop = dot(y, x)
zop = transpose(dot(y, x))
self.failUnless(_is_sparse_result(zop))
z = compile.eval_outputs([zop])
z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
self.failUnless(z.shape == (500,2))
# self.failUnless(type(z) is mtype)
......
差异被折叠。
差异被折叠。
......@@ -7,6 +7,7 @@ import scalar
from scalar import Scalar
import gof
from gof.python25 import all
from copy import copy
# tensor depends on elemwise to provide definitions for several ops
......@@ -231,6 +232,15 @@ class Elemwise(Op):
else:
self.ufunc = None
def __getstate__(self):
    # Pickle support: drop `ufunc` from the state, since objects built by
    # numpy.frompyfunc cannot be serialized; __setstate__ rebuilds it.
    state = dict(self.__dict__)
    del state['ufunc']
    return state
def __setstate__(self, d):
    # Restore the pickled attributes, then rebuild the ufunc that
    # __getstate__ removed before pickling.
    self.__dict__.update(d)
    op = self.scalar_op
    self.ufunc = numpy.frompyfunc(op.impl, op.nin, op.nout)
def make_node(self, *inputs):
"""
If the inputs have different number of dimensions, their shape
......
......@@ -12,7 +12,7 @@ from graph import \
Apply, Result, Constant, Value, view_roots
from link import \
Filter, Linker, LocalLinker, PerformLinker, WrapLinker, Profiler
Container, Linker, LocalLinker, PerformLinker, WrapLinker, Profiler
from op import \
Op
......@@ -22,7 +22,8 @@ from opt import \
MergeOptimizer, MergeOptMerge, \
LocalOptimizer, local_optimizer, LocalOptGroup, LocalOpKeyOptGroup, \
OpSub, OpRemove, PatternSub, \
NavigatorOptimizer, TopoOptimizer, OpKeyOptimizer
NavigatorOptimizer, TopoOptimizer, OpKeyOptimizer, \
PureThenInplaceOptimizer
from toolbox import \
Bookkeeper, History, Validator, ReplaceValidate, NodeFinder, PrintListener
......
......@@ -624,8 +624,8 @@ class CLinker(link.Linker):
input_storage,
output_storage)
return thunk, \
[link.Filter(input, storage) for input, storage in zip(self.env.inputs, input_storage)], \
[link.Filter(output, storage, True) for output, storage in zip(self.env.outputs, output_storage)], \
[link.Container(input, storage) for input, storage in zip(self.env.inputs, input_storage)], \
[link.Container(output, storage, True) for output, storage in zip(self.env.outputs, output_storage)], \
error_storage
def make_thunk(self, input_storage = None, output_storage = None):
......@@ -873,8 +873,8 @@ class OpWiseCLinker(link.LocalLinker):
f = link.streamline(env, thunks, order, no_recycling = no_recycling, profiler = profiler)
return f, [link.Filter(input, storage) for input, storage in zip(env.inputs, input_storage)], \
[link.Filter(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \
return f, [link.Container(input, storage) for input, storage in zip(env.inputs, input_storage)], \
[link.Container(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \
thunks, order
......@@ -940,6 +940,7 @@ class DualLinker(link.Linker):
no_recycling = self.no_recycling
_f, i1, o1, thunks1, order1 = link.PerformLinker().accept(env, no_recycling = no_recycling).make_all(**kwargs)
kwargs.pop('input_storage', None)
_f, i2, o2, thunks2, order2 = OpWiseCLinker().accept(env, no_recycling = no_recycling).make_all(**kwargs)
def f():
......
......@@ -140,7 +140,7 @@ class Result(utils.object2):
else:
return str(self.owner.op) + "." + str(self.index)
else:
return "<?>::" + str(self.type)
return "<%s>" % str(self.type)
def __repr__(self):
return str(self)
def clone(self):
......
import utils
import graph
from type import Type
import sys, traceback
from copy import copy
......@@ -107,25 +108,30 @@ class Linker(object):
return execute
class Filter(object):
def __init__(self, r, storage, readonly = False, strict = False):
self.r = r
self.type = r.type
class Container(object):
def __init__(self, r, storage, readonly = False, strict = False, name = None):
#self.r = r
if isinstance(r, Type):
self.type = r
else:
self.type = r.type
self.name = name or r.name
self.storage = storage
self.readonly = readonly
self.strict = strict
def __get(self):
return self.storage[0]
def __set(self, value):
if self.readonly:
raise Exception("Cannot set readonly storage: %s" % self.name)
try:
if self.readonly:
raise Exception("Cannot set readonly storage.")
if self.strict:
self.storage[0] = self.type.filter(value, strict = True)
else:
self.storage[0] = self.type.filter(value)
except:
raise_with_op(self.r)
except Exception, e:
e.args = e.args + (self.name,)
raise
data = property(__get, __set)
value = property(__get, __set)
def __str__(self):
......@@ -256,8 +262,8 @@ class PerformLinker(LocalLinker):
f = streamline(env, thunks, order, no_recycling = no_recycling, profiler = profiler)
return f, [Filter(input, storage) for input, storage in zip(env.inputs, input_storage)], \
[Filter(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \
return f, [Container(input, storage) for input, storage in zip(env.inputs, input_storage)], \
[Container(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \
thunks, order
......@@ -329,7 +335,9 @@ class WrapLinker(Linker):
def make_thunk(self, **kwargs):
no_recycling = self.no_recycling
make_all = [l.make_all(**kwargs) for l in self.linkers]
make_all = [self.linkers[0].make_all(**kwargs)]
kwargs.pop('input_storage', None)
make_all += [l.make_all(**kwargs) for l in self.linkers[1:]]
fns, input_lists, output_lists, thunk_lists, order_lists \
= zip(*make_all)
......
......@@ -12,6 +12,7 @@ import toolbox
import op
from copy import copy
from collections import deque
import destroyhandler as dh
class Optimizer:
......@@ -60,7 +61,7 @@ class FromFunctionOptimizer(Optimizer):
def __init__(self, fn):
self.apply = fn
def add_requirements(self, env):
env.extend(gof.toolbox.ReplaceValidate)
env.extend(toolbox.ReplaceValidate())
def optimizer(f):
return FromFunctionOptimizer(f)
......@@ -208,7 +209,7 @@ class FromFunctionLocalOptimizer(LocalOptimizer):
def __init__(self, fn):
self.transform = fn
def add_requirements(self, env):
env.extend(gof.toolbox.ReplaceValidate)
env.extend(toolbox.ReplaceValidate())
def local_optimizer(f):
return FromFunctionLocalOptimizer(f)
......@@ -608,6 +609,21 @@ def check_chain(r, *chain):
############
### Misc ###
############
class PureThenInplaceOptimizer(Optimizer):
    """Compound optimizer: apply the `pure` optimizations first, then
    install destroy handling on the env and apply the `inplace` ones."""

    def __init__(self, pure, inplace):
        # Both arguments are callables that take an env.
        self.pure, self.inplace = pure, inplace

    def apply(self, env):
        self.pure(env)
        # Destroy handling must be active before inplace substitutions run.
        env.extend(dh.DestroyHandler())
        self.inplace(env)
......
......@@ -252,16 +252,17 @@ def upcast_out(*types):
return Scalar(dtype = Scalar.upcast(*types)),
def same_out(type):
    # Output-type callback: the single output has exactly the input's
    # type, returned as a 1-tuple like the other *_out callbacks.
    return (type,)
def transfer_type(i):
    # Factory for an output-type callback: the single output takes the
    # type of the input at position `i`.
    assert type(i) == int
    def f(*types):
        # 1-tuple, consistent with the other output-type callbacks.
        return types[i],
    f.__name__ = "transfer_type_%i" % i
    return f
def specific_out(*spec):
    # Factory for an output-type callback that always returns the fixed
    # tuple `spec`, ignoring the actual input types.
    def f(*types):
        return spec
    return f
class transfer_type:
    """Output-type callback: the single output takes the type of the
    input at position `i`.

    Class-based (rather than the previous closure) so that instances
    can be pickled.
    """
    def __init__(self, i):
        assert isinstance(i, int)
        self.i = i
    def __call__(self, *types):
        # Must return a 1-tuple: every output-type callback here
        # (upcast_out, same_out, specific_out, int_out) yields a
        # sequence of output types. The trailing comma was lost when
        # converting the closure to a class.
        return types[self.i],
class specific_out:
    """Output-type callback that returns a fixed tuple of types,
    ignoring the actual input types. Class-based so instances pickle."""
    def __init__(self, *spec):
        # Stored verbatim; emitted unchanged on every call.
        self.spec = spec
    def __call__(self, *_input_types):
        return self.spec
def int_out(*types):
    # Fixed single int64 output, whatever the input types are.
    return (int64,)
def float_out(*types):
......
......@@ -82,10 +82,11 @@ class Tensor(Type):
for L{broadcasting}, as described and implemented in Numpy.
"""
def __init__(self, dtype, broadcastable):
def __init__(self, dtype, broadcastable, name = None):
self.dtype = str(dtype)
self.broadcastable = tuple(broadcastable)
self.dtype_specs() # error checking is done there
self.name = name
def filter(self, data, strict = False):
_data = data
......@@ -141,10 +142,21 @@ class Tensor(Type):
return TensorResult(self, name = name)
def __str__(self):
return "%s(%s)" % (str(self.dtype), str(self.broadcastable))
if self.name:
return self.name
else:
b = self.broadcastable
#bcast = str(self.broadcastable)
bcast = {(): 'scalar',
(False,): 'vector',
(False, True): 'col',
(True, False): 'row',
(False, False): 'matrix'}.get(b, "%iD" % len(b) if not any(b) else str(b))
return "Tensor(%s, %s)" % (str(self.dtype), bcast)
def __repr__(self):
return "Tensor{%s, %s}" % (str(self.dtype), str(self.broadcastable))
return str(self)
#"Tensor{%s, %s}" % (str(self.dtype), str(self.broadcastable))
def c_declare(self, name, sub):
return """
......
......@@ -7,6 +7,7 @@ import tensor as T
import numpy as N
import operator
import itertools
import sys
# Utilities
......@@ -40,8 +41,7 @@ dot_to_gemm = gof.PatternSub((T.dot, 'a', 'b'),
allow_multiple_clients = False)
@gof.optimizer
def insert_inplace_optimizer(self, env):
def _insert_inplace_optimizer(env):
"""
Usage: inplace_optimizer.optimize(env)
......@@ -66,14 +66,16 @@ def insert_inplace_optimizer(self, env):
for candidate_input in candidate_inputs:
inplace_pattern = dict(baseline, **{candidate_output: candidate_input})
try:
new = Elemwise(op.scalar_op, inplace_pattern).make_node(op.inputs)
env.replace_all_validate(dict(zip(node.outputs, new.outputs)))
except:
new = Elemwise(op.scalar_op, inplace_pattern).make_node(*node.inputs)
env.replace_all_validate(zip(node.outputs, new.outputs))
except Exception, e:
continue
candidate_inputs.remove(candidate_input)
node = new
baseline = inplace_pattern
break
insert_inplace_optimizer = gof.optimizer(_insert_inplace_optimizer)
inplace_optimizer = gof.SeqOptimizer(out2in(gemm_pattern_1),
out2in(dot_to_gemm),
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论