Commit 5dd95de0 authored by bergstrj@iro.umontreal.ca

default optimizations for Function, misc bug fixes

......@@ -159,41 +159,42 @@ class T_transpose(unittest.TestCase):
def test0(self):
    """transpose of a 0-d tensor: shape preserved, input not aliased."""
    n = astensor(numpy.ones(()))
    t = transpose(n)
    # transpose() copies its input and transposes the copy in place,
    # so the output's owner op is TransposeInplace.
    self.failUnless(t.owner.__class__ is TransposeInplace)
    f = Function([n], [t])
    tval = f(n.data)
    self.failUnless(tval.shape == n.data.shape)
    # test aliasing: mutating the output must not touch the input
    tval += 55.0
    self.failUnless(n.data == 1.0)
def test1(self):
    """transpose of a 1-d tensor: shape preserved, input not aliased."""
    n = astensor(numpy.ones(5))
    t = transpose(n)
    # transpose() copies then transposes in place -> owner is TransposeInplace
    self.failUnless(t.owner.__class__ is TransposeInplace)
    f = Function([n], [t])
    tval = f(n.data)
    self.failUnless(tval.shape == n.data.shape)
    # test aliasing: mutating the output must not touch the input
    tval += 55.0
    self.failUnless(n.data[0] == 1.0)
def test2(self):
    """transpose of a 2-d tensor: axes swapped, input not aliased."""
    n = astensor(numpy.ones((5,3)))
    t = transpose(n)
    # transpose() copies then transposes in place -> owner is TransposeInplace
    self.failUnless(t.owner.__class__ is TransposeInplace)
    f = Function([n], [t])
    tval = f(n.data)
    self.failUnless(tval.shape == (3,5))
    # test aliasing: mutating the output must not touch the input
    tval += 55.0
    self.failUnless(n.data[0,0] == 1.0)
def test3(self):
    """Test transpose of tensor, inplace version"""
    n = astensor(numpy.ones((5,3,2)))
    # NOTE(review): the next two statements look like pre-commit diff residue
    # (old `transpose`/`Transpose` pair); the commit replaces them with the
    # transpose_inplace/TransposeInplace pair below — confirm against history.
    t = transpose(n)
    self.failUnless(t.owner.__class__ is Transpose)
    # Post-commit version: build the in-place transpose directly.
    t = transpose_inplace(n)
    self.failUnless(t.owner.__class__ is TransposeInplace)
    f = Function([n], [t])
    tval = f(n.data)
    # 3-d transpose reverses the axis order: (5,3,2) -> (2,3,5)
    self.failUnless(tval.shape == (2,3,5))
......@@ -404,6 +405,14 @@ class T_fill(unittest.TestCase):
# self.failUnless(o.inputs[1].dtype[0:3] == 'flo')
self.failUnless(o.outputs[0].broadcastable == (0,))
# self.failUnless(o.outputs[0].dtype[0:3] == 'flo')
self.failUnless(numpy.all(eval_outputs([t]) == [9,9,9]))
def test1(self):
    """ones_like on column/row slices; ones + outer(ones, ones) == all 2s."""
    x = astensor(numpy.ones((4,5)))
    l = ones_like(x[:,0:1])
    r = ones_like(x[0:1,:])
    xx = x + dot(l,r)
    # Bug fix: the old assertion was failUnless(numpy.mean(result == 2.0)),
    # which is truthy as soon as ANY element equals 2.0.  Assert that EVERY
    # element is 2.0, matching the numpy.all style used by test0.
    self.failUnless(numpy.all(eval_outputs([xx]) == 2.0))
class T_sum(unittest.TestCase):
def test_impl(self):
......
......@@ -100,7 +100,7 @@ class BaseTensor(ResultBase):
# Hash for constant folding
#
def hash(self):
    """Return a hashable key identifying this tensor for constant folding.

    The key includes the raw buffer contents when data is present, so two
    constants compare equal only if their bytes match.

    Note: the presence test must be `self.data is not None`, not truthiness:
    an empty or zero-valued ndarray is falsy, and bool() on a multi-element
    ndarray raises, so `if self.data:` misclassified real data as absent.
    """
    if self.data is not None:
        return (BaseTensor, self.dtype, self.broadcastable, self.data.data[:])
    else:
        return (BaseTensor, self.dtype, self.broadcastable, None)
......
def exec_opt(inputs, outputs, features=[]):
    """Build a Function over (inputs, outputs) using exec_opt.optimizer.

    Bug fix: the body referenced `intputs` (typo), which raised NameError on
    every call; it now forwards `inputs` as intended.
    """
    return Function(inputs, outputs, features, exec_opt.optimizer, gof.link.PerformLinker, False)
# Module-level default; callers may rebind this attribute to plug in an optimizer.
exec_opt.optimizer = None
def default_optimizer(env):
    """Default graph optimizer: find constants, then merge duplicate results.

    The optimizer instances live as attributes on this function (below) so a
    caller can swap either stage without redefining the function.
    """
    default_optimizer.const(env)
    default_optimizer.merge(env)
    # (removed a dead trailing `pass` statement)
# Shared optimizer instances used by default_optimizer above.
default_optimizer.merge = gof.opt.MergeOptimizer()
default_optimizer.const = gof.opt.ConstantFinder()
def _mark_indestructible(results):
for r in results:
r.indestructible = True
......@@ -38,7 +45,7 @@ class Function:
"""
def __init__(self, inputs, outputs,
features = [],
optimizer = None,
optimizer = default_optimizer,
linker_cls = gof.link.PerformLinker,
unpack_single = True,
except_unreachable_input = True,
......@@ -97,6 +104,7 @@ class Function:
def __call__(self, *args):
    """Invoke the compiled function on *args*; returns whatever self.fn produces."""
    return self.fn(*args)
def eval_outputs(outputs,
features = [],
optimizer = None,
......
import op, result, ext, link, env, features, toolbox, graph, cc
import op, result, ext, link, env, features, toolbox, graph, cc, opt
from op import *
from result import *
......@@ -9,6 +9,7 @@ from env import *
from features import *
from toolbox import *
from cc import *
from opt import *
......
......@@ -34,6 +34,9 @@ class Optimizer:
env.satisfy(self)
self.apply(env)
def __call__(self, env):
    """Make Optimizer instances callable; delegates directly to optimize(env)."""
    return self.optimize(env)
# Shared module-level Optimizer instance; per its docstring it does nothing,
# so it serves as a placeholder wherever an optimizer argument is required.
DummyOpt = Optimizer()
DummyOpt.__doc__ = "Does nothing."
......@@ -378,8 +381,8 @@ class MergeOptimizer(Optimizer):
"""
def apply(self, env):
cid = {}
inv_cid = {}
cid = {} #result -> result.hash() (for constants)
inv_cid = {} #hash -> result (for constants)
for i, r in enumerate(env.orphans().union(env.inputs)):
if getattr(r, 'constant', False) and hasattr(r, 'hash'):
ref = ('const', r.hash())
......
......@@ -59,9 +59,7 @@ class Tensor(BaseTensor):
def __rpow__(self,other): return pow(other,self)
#TRANSPOSE
# Diff residue resolved: the commit removed __get_T (which wrapped the result
# in tensor_copy) in favor of a plain transpose — transpose() now performs the
# copy itself, so an extra copy here would be redundant.
T = property(lambda self: transpose(self))
#SLICING
def __getitem__(self, item): return subtensor(self, item)
......@@ -357,7 +355,7 @@ tensor_copy = gof.op.constructor(TensorCopy)
# View Operations
##########################
class Transpose(_Op, Viewer):
class TransposeInplace(_Op, Viewer):
def view_map(self):
    """Declare that the output is a view of the first input (no storage copy)."""
    return {self.out: [self.inputs[0]]}
def propagate_broadcastable(self, x):
......@@ -367,7 +365,7 @@ class Transpose(_Op, Viewer):
def impl(self, x):
    """Python implementation: numpy's transpose (reversed-axes view) of x."""
    return x.transpose()
def grad(self, x, gz):
    """Gradient of transpose is transpose of the output gradient.

    Uses the copying transpose() (which copies then transposes in place), so
    the returned gradient never aliases the caller's gz storage; the old
    transpose_copy helper is no longer needed.
    """
    return transpose(gz)
def c_impl(self, x, z):
return """
......@@ -377,7 +375,9 @@ class Transpose(_Op, Viewer):
}
%(z)s = transposed;
"""
# Constructor for the raw in-place/view transpose op.
transpose_inplace = gof.op.constructor(TransposeInplace)
def transpose(x, **kwargs):
    """Copying transpose: copy x, then transpose the copy in place.

    This is the safe default — the result never shares storage with x.
    (The stale pre-commit binding of `transpose` to the raw view op was
    removed; it would have let callers mutate the input through the output.)
    """
    return transpose_inplace(tensor_copy(x), **kwargs)
class Subtensor(Op, Viewer):
nin = 2
......
Markdown formatting is supported
0%
You are about to add 0 people to this discussion. Proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment