Commit 32136eb7 authored by Olivier Breuleux

merge

@@ -27,6 +27,7 @@ __docformat__ = "restructuredtext en"
from gof import \
CLinker, OpWiseCLinker, DualLinker, Linker, LocalLinker, PerformLinker, Profiler, \
Container, \
InconsistencyError, Env, \
Apply, Result, Constant, Value, \
Op, \
@@ -35,7 +36,12 @@ from gof import \
Type, Generic, generic, \
object2, utils
-from compile import function, eval_outputs, fast_compute, OpFromGraph
+from compile import \
SymbolicInput, SymbolicInputKit, In, \
SymbolicOutput, Out, \
Mode, \
predefined_modes, predefined_linkers, predefined_optimizers, \
FunctionMaker, function, OpFromGraph #, eval_outputs, fast_compute
import tensor
import tensor_random
...
@@ -9,137 +9,151 @@ import tensor
PatternOptimizer = lambda p1, p2, ign=True: gof.OpKeyOptimizer(gof.PatternSub(p1, p2), ignore_newtrees=ign)
def checkfor(testcase, fn, E):
try:
fn()
except Exception, e:
if isinstance(e, E):
# we got the exception we wanted
return
else:
# we did not get the exception we wanted
raise
# fn worked, but it shouldn't have
testcase.fail()
def graph1(): # (x+y) * (x/z)
x, y, z = floats('xyz')
o = mul(add(x, y), div(x, z))
return [x,y,z], [o]
# def graph1(): # (x+y) * (x/z)
# x, y, z = floats('xyz')
# o = mul(add(x, y), div(x, z))
# return [x,y,z], [o]
class T_Function(unittest.TestCase):
# class T_Function(unittest.TestCase):
-def test_noopt(self):
-gi, go = graph1()
-p = function(gi, go, optimizer = None, linker = 'py')
-self.failUnless(p(1.0,3.0,4.0) == 1.0)
+# def test_noopt(self):
+# gi, go = graph1()
+# p = function(gi, go, optimizer = None, linker = 'py')
+# self.failUnless(p(1.0,3.0,4.0) == 1.0)
def test_opt(self):
opt = PatternOptimizer((div, '1', '2'), (div, '2', '1'))
gi, go = graph1()
p = function(gi,go, optimizer=opt.optimize, linker = 'py')
self.failUnless(p(1.,3.,4.) == 16.0)
def test_multiout(self):
def graph2():
x, y, z = floats('xyz')
o = mul(add(x, y), div(x, z))
return [x,y,z], [o, o.owner.inputs[1]]
opt = PatternOptimizer((div, '1', '2'), (div, '2', '1'))
gi, go = graph2()
p = function(gi,go, optimizer=opt.optimize)
a,b = p(1.,3.,4.)
self.failUnless(a == 16.0)
self.failUnless(b == 4.0)
def test_make_many_functions(self):
x, y, z = tensor.scalars('xyz')
e0, e1, e2 = x+y+z, x*y-z, z*z+x*x+y*y
f1 = function([x, y, z], [e0])
f2 = function([x, y, z], [e0])
f3 = function([x, y, z], [e1])
f4 = function([x, y, z], [e2])
f5 = function([e0], [e0 * e0])
ff = FunctionFactory([x, y, z], [e0])
f6 = ff.create()
f7 = ff.create()
f8 = ff.create()
f9 = ff.partial(1.0, 2.0)
assert f1(1.0, 2.0, 3.0) == 6.0
assert f2(1.0, 2.0, 3.0) == 6.0
assert f3(1.0, 2.0, 3.0) == -1.0
assert f4(1.0, 2.0, 3.0) == 14.0
assert f5(7.0) == 49.0
assert f6(1.0, 2.0, 3.0) == 6.0
assert f7(1.0, 2.0, 3.0) == 6.0
assert f8(1.0, 2.0, 3.0) == 6.0
assert f9(3.0) == 6.0
def test_no_inputs(self):
x, y, z = tensor.value(1.0), tensor.value(2.0), tensor.value(3.0)
e = x*x + y*y + z*z
assert function([], [e], linker = 'py')() == 14.0
assert function([], [e], linker = 'c')() == 14.0
assert function([], [e], linker = 'c|py')() == 14.0
assert function([], [e], linker = 'c&py')() == 14.0
assert eval_outputs([e]) == 14.0
assert fast_compute(e) == 14.0
def test_closure(self):
x, y, z = tensor.scalars('xyz')
v = tensor.value(numpy.zeros(()))
e = x + tensor._add_inplace(v, 1)
f = function([x], [e])
assert f(1.) == 2.
assert f(1.) == 3.
assert f(1.) == 4.
def test_borrow_true(self):
x, y, z = tensor.scalars('xyz')
e = x + y + z
f = function([x, y, z], [e], borrow_outputs = True)
res1 = f(1.0, 2.0, 3.0)
assert res1 == 6.0
res2 = f(1.0, 3.0, 5.0)
assert res1 is res2
assert res1 == 9.0
assert res2 == 9.0
def test_borrow_false(self):
x, y, z = tensor.scalars('xyz')
e = x + y + z
for linker in 'py c c|py c&py'.split():
f = function([x, y, z], [e], borrow_outputs = False, linker = linker)
res1 = f(1.0, 2.0, 3.0)
self.failUnless(res1 == 6.0, (res1, linker))
res2 = f(1.0, 3.0, 5.0)
self.failUnless(res1 is not res2, (res1, res2, linker))
self.failUnless(res1 == 6.0, (res1, linker))
self.failUnless(res2 == 9.0, (res2, linker))
def test_borrow_false_through_inplace(self):
x, y, z = tensor.scalars('xyz')
# if borrow_outputs is False, we must not reuse the temporary created for x+y
e = tensor._add_inplace(x + y, z)
for linker in 'py c c|py c&py'.split():
f = function([x, y, z], [e], borrow_outputs = False, linker = linker)
res1 = f(1.0, 2.0, 3.0)
self.failUnless(res1 == 6.0, (res1, linker))
res2 = f(1.0, 3.0, 5.0)
self.failUnless(res1 is not res2, (res1, res2, linker))
self.failUnless(res1 == 6.0, (res1, linker))
self.failUnless(res2 == 9.0, (res2, linker))
class T_fast_compute(unittest.TestCase):
-def test_straightforward(self):
-x, y, z = tensor.value(1.0), tensor.value(2.0), tensor.value(3.0)
-e = x*x + y*y + z*z
-assert fast_compute(e) == 14.0
-assert compile._fcache[(e, )]() == 14.0
+# def test_opt(self):
+# opt = PatternOptimizer((div, '1', '2'), (div, '2', '1'))
+# gi, go = graph1()
+# p = function(gi,go, optimizer=opt.optimize, linker = 'py')
+# self.failUnless(p(1.,3.,4.) == 16.0)
# def test_multiout(self):
# def graph2():
# x, y, z = floats('xyz')
# o = mul(add(x, y), div(x, z))
# return [x,y,z], [o, o.owner.inputs[1]]
# opt = PatternOptimizer((div, '1', '2'), (div, '2', '1'))
# gi, go = graph2()
# p = function(gi,go, optimizer=opt.optimize)
# a,b = p(1.,3.,4.)
# self.failUnless(a == 16.0)
# self.failUnless(b == 4.0)
# def test_make_many_functions(self):
# x, y, z = tensor.scalars('xyz')
# e0, e1, e2 = x+y+z, x*y-z, z*z+x*x+y*y
# f1 = function([x, y, z], [e0])
# f2 = function([x, y, z], [e0])
# f3 = function([x, y, z], [e1])
# f4 = function([x, y, z], [e2])
# f5 = function([e0], [e0 * e0])
# ff = FunctionFactory([x, y, z], [e0])
# f6 = ff.create()
# f7 = ff.create()
# f8 = ff.create()
# f9 = ff.partial(1.0, 2.0)
# assert f1(1.0, 2.0, 3.0) == 6.0
# assert f2(1.0, 2.0, 3.0) == 6.0
# assert f3(1.0, 2.0, 3.0) == -1.0
# assert f4(1.0, 2.0, 3.0) == 14.0
# assert f5(7.0) == 49.0
# assert f6(1.0, 2.0, 3.0) == 6.0
# assert f7(1.0, 2.0, 3.0) == 6.0
# assert f8(1.0, 2.0, 3.0) == 6.0
# assert f9(3.0) == 6.0
# def test_no_inputs(self):
# x, y, z = tensor.value(1.0), tensor.value(2.0), tensor.value(3.0)
# e = x*x + y*y + z*z
# assert function([], [e], linker = 'py')() == 14.0
# assert function([], [e], linker = 'c')() == 14.0
# assert function([], [e], linker = 'c|py')() == 14.0
# assert function([], [e], linker = 'c&py')() == 14.0
# assert eval_outputs([e]) == 14.0
# assert fast_compute(e) == 14.0
# def test_closure(self):
# x, y, z = tensor.scalars('xyz')
# v = tensor.value(numpy.zeros(()))
# e = x + tensor.add_inplace(v, 1)
# f = function([x], [e])
# assert f(1.) == 2.
# assert f(1.) == 3.
# assert f(1.) == 4.
# def test_borrow_true(self):
# x, y, z = tensor.scalars('xyz')
# e = x + y + z
# f = function([x, y, z], [e], borrow_outputs = True)
# res1 = f(1.0, 2.0, 3.0)
# assert res1 == 6.0
# res2 = f(1.0, 3.0, 5.0)
# assert res1 is res2
# assert res1 == 9.0
# assert res2 == 9.0
# def test_borrow_false(self):
# x, y, z = tensor.scalars('xyz')
# e = x + y + z
# for linker in 'py c c|py c&py'.split():
# f = function([x, y, z], [e], borrow_outputs = False, linker = linker)
# res1 = f(1.0, 2.0, 3.0)
# self.failUnless(res1 == 6.0, (res1, linker))
# res2 = f(1.0, 3.0, 5.0)
# self.failUnless(res1 is not res2, (res1, res2, linker))
# self.failUnless(res1 == 6.0, (res1, linker))
# self.failUnless(res2 == 9.0, (res2, linker))
# def test_borrow_false_through_inplace(self):
# x, y, z = tensor.scalars('xyz')
# # if borrow_outputs is False, we must not reuse the temporary created for x+y
# e = tensor.add_inplace(x + y, z)
# for linker in 'py c c|py c&py'.split():
# f = function([x, y, z], [e], borrow_outputs = False, linker = linker)
# res1 = f(1.0, 2.0, 3.0)
# self.failUnless(res1 == 6.0, (res1, linker))
# res2 = f(1.0, 3.0, 5.0)
# self.failUnless(res1 is not res2, (res1, res2, linker))
# self.failUnless(res1 == 6.0, (res1, linker))
# self.failUnless(res2 == 9.0, (res2, linker))
# class T_fast_compute(unittest.TestCase):
# def test_straightforward(self):
# x, y, z = tensor.value(1.0), tensor.value(2.0), tensor.value(3.0)
# e = x*x + y*y + z*z
# assert fast_compute(e) == 14.0
# assert compile._fcache[(e, )]() == 14.0
import tensor as T
import random
import numpy as N
class T_OpFromGraph(unittest.TestCase):
def test_straightforward(self):
x, y, z = T.matrices('xyz')
e = x + y * z
-op = OpFromGraph([x, y, z], [e], linker='c|py')
+op = OpFromGraph([x, y, z], [e], mode='FAST_RUN')
f = op(x, y, z) - op(y, z, x)
-fn = function([x, y, z], [f])
+fn = function([x, y, z], f)
xv, yv, zv = N.ones((2, 2)), N.ones((2, 2))*3, N.ones((2, 2))*5
assert numpy.all(8.0 == fn(xv, yv, zv))
assert numpy.all(8.0 == fn(xv, yv, zv))
@@ -147,9 +161,9 @@ class T_OpFromGraph(unittest.TestCase):
def test_size_changes(self):
x, y, z = T.matrices('xyz')
e = T.dot(x, y)
-op = OpFromGraph([x, y], [e], linker='c|py')
+op = OpFromGraph([x, y], [e], mode='FAST_RUN')
f = op(x, op(y, z))
-fn = function([x, y, z], [f])
+fn = function([x, y, z], f)
xv, yv, zv = N.ones((2, 3)), N.ones((3, 4))*3, N.ones((4, 5))*5
res = fn(xv, yv, zv)
assert res.shape == (2, 5)
@@ -161,20 +175,371 @@ class T_OpFromGraph(unittest.TestCase):
def test_grad(self):
x, y, z = T.matrices('xyz')
e = x + y * z
-op = OpFromGraph([x, y, z], [e], linker='c|py', grad_depth = 2)
+op = OpFromGraph([x, y, z], [e], mode='FAST_RUN', grad_depth = 2)
f = op(x, y, z)
f = f - T.grad(f, y)
-fn = function([x, y, z], [f])
+fn = function([x, y, z], f)
xv, yv, zv = N.ones((2, 2)), N.ones((2, 2))*3, N.ones((2, 2))*5
assert numpy.all(11.0 == fn(xv, yv, zv))
class T_function(unittest.TestCase):
def test_empty(self):
fn = function([], []) #ok
self.failUnless(fn() == [])
def test_missing_inputs(self):
MissingInputException = TypeError
def fn():
x,s = T.scalars('xs')
fn = function([], [x])
checkfor(self, fn, MissingInputException)
def fn():
x,s = T.scalars('xs')
fn = function([s], [x])
checkfor(self, fn, MissingInputException)
def fn():
x,s = T.scalars('xs')
fn = function([s], x)
checkfor(self, fn, MissingInputException)
def fn():
x,s = T.scalars('xs')
fn = function([s], Out(x))
checkfor(self, fn, MissingInputException)
def fn():
x,s = T.scalars('xs')
fn = function([In(x, update=s+x)], x)
checkfor(self, fn, MissingInputException)
def fn():
x,s = T.scalars('xs')
fn = function([In(x, update=mul(s,s)+x)], x)
checkfor(self, fn, MissingInputException)
def test_input_anon_singleton(self):
x,s = T.scalars('xs')
fn = function([s,x], [x+s])
self.failUnless(fn(2,3) == [5])
# no state
self.failUnless(fn(2,3) == [5])
def test_input_anon_unpack(self):
x,s = T.scalars('xs')
fn = function([s,x], x+s)
self.failUnless(fn(2,3) == 5)
def test_naming_rule0(self):
x,s = T.scalars('xs')
f = function([x,s], x/s)
self.failUnless(f(1,2) == 0.5)
self.failUnless(f(2,1) == 2.0)
self.failUnless(f(s=2,x=1) == 0.5)
self.failUnless(f(x=2,s=1) == 2.0)
self.failUnless(f(2, s=1) == 2.0)
checkfor(self, lambda :f(2, x=2.0), TypeError) #got multiple values for keyword argument 'x'
checkfor(self, lambda :f(x=1), TypeError) #takes exactly 2 non-keyword arguments (1 given)
checkfor(self, lambda :f(s=1), TypeError) #takes exactly 2 non-keyword arguments (0 given)
def test_naming_rule1(self):
a = T.scalar() # the a is for 'anonymous' (un-named).
x,s = T.scalars('xs')
f = function([a, s], a/s)
self.failUnless(f(1,2) == 0.5)
self.failUnless(f(2,1) == 2.0)
self.failUnless(f(2, s=1) == 2.0)
checkfor(self, lambda:f(q=2,s=1), TypeError) #got unexpected keyword argument 'q'
checkfor(self, lambda:f(a=2,s=1), TypeError) #got unexpected keyword argument 'a'
def test_naming_rule2(self):
a = T.scalar() # the a is for 'anonymous' (un-named).
x,s = T.scalars('xs')
#x's name is ignored because it is followed by anonymous parameter a.
f = function([x, a, s], a/s)
self.failUnless(f(9,1,2) == 0.5)
self.failUnless(f(9,2,1) == 2.0)
self.failUnless(f(9,2, s=1) == 2.0)
checkfor(self, lambda:f(x=9,a=2,s=1), TypeError) #got unexpected keyword argument 'x'
checkfor(self, lambda:f(5.0,x=9), TypeError) #got unexpected keyword argument 'x'
def test_naming_rule3(self):
a = T.scalar() # the a is for 'anonymous' (un-named).
x,s = T.scalars('xs')
#x's name is not ignored (as in test_naming_rule2) because a has a default value.
f = function([x, In(a, value=1.0), s], a/s+x)
self.failUnless(f(9,2,4) == 9.5) #can specify all args in order
self.failUnless(f(9,2,s=4) == 9.5) # can give s as kwarg
self.failUnless(f(9,s=4) == 9.25) # can give s as kwarg, get default a
self.failUnless(f(x=9,s=4) == 9.25) # can give s as kwarg, omit a, x as kw
checkfor(self, lambda:f(x=9,a=2,s=4), TypeError) #got unexpected keyword argument 'a'
checkfor(self, lambda:f(), TypeError) #takes exactly 3 non-keyword arguments (0 given)
checkfor(self, lambda:f(x=9), TypeError) #takes exactly 3 non-keyword arguments (1 given)
def test_naming_rule4(self):
a = T.scalar() # the a is for 'anonymous' (un-named).
x,s = T.scalars('xs')
f = function([x, In(a, value=1.0,name='a'), s], a/s+x)
self.failUnless(f(9,2,4) == 9.5) #can specify all args in order
self.failUnless(f(9,2,s=4) == 9.5) # can give s as kwarg
self.failUnless(f(9,s=4) == 9.25) # can give s as kwarg, get default a
self.failUnless(f(9,a=2,s=4) == 9.5) # can give s as kwarg, a as kwarg
self.failUnless(f(x=9,a=2, s=4) == 9.5) # can give all kwargs
self.failUnless(f(x=9,s=4) == 9.25) # can give all kwargs
checkfor(self, lambda:f(), TypeError) #takes exactly 3 non-keyword arguments (0 given)
checkfor(self, lambda:f(5.0,x=9), TypeError) #got multiple values for keyword argument 'x'
def test_state_access(self):
a = T.scalar() # the a is for 'anonymous' (un-named).
x,s = T.scalars('xs')
f = function([x, In(a, value=1.0,name='a'), In(s, value=0.0, update=s+a*x)], s+a*x)
self.failUnless(f[a] == 1.0)
self.failUnless(f[s] == 0.0)
self.failUnless(f(3.0) == 3.0)
self.failUnless(f(3.0,a=2.0) == 9.0) #3.0 + 2*3.0
self.failUnless(f[a] == 1.0) #state hasn't changed permanently, we just overrode it last line
self.failUnless(f[s] == 9.0)
f[a] = 5.0
self.failUnless(f[a] == 5.0)
self.failUnless(f(3.0) == 24.0) #9 + 3*5
self.failUnless(f[s] == 24.0)
def test_same_names(self):
a,x,s = T.scalars('xxx')
#implicit names would cause error. What do we do?
f = function([a, x, s], a+x+s)
self.failUnless(f(1,2,3) == 6)
checkfor(self, lambda:f(1,2,x=3), TypeError)
def test_weird_names(self):
a,x,s = T.scalars('xxx')
checkfor(self, lambda:function([In(a,name=[])],[]), TypeError)
def t():
f = function([In(a,name=set(['adsf',()]), value=1.0),
In(x,name=(), value=2.0),
In(s,name=T.scalar(), value=3.0)], a+x+s)
checkfor(self, t, TypeError)
def test_copy(self):
a = T.scalar() # the a is for 'anonymous' (un-named).
x,s = T.scalars('xs')
f = function([x, In(a, value=1.0,name='a'), In(s, value=0.0, update=s+a*x, mutable=True)], s+a*x)
g = copy(f)
#if they both return, assume that they return equivalent things.
self.failIf(g.container[x].storage is f.container[x].storage)
self.failIf(g.container[a].storage is f.container[a].storage)
self.failIf(g.container[s].storage is f.container[s].storage)
self.failIf(g.value[a] is not f.value[a]) # should not have been copied
self.failIf(g.value[s] is f.value[s]) # should have been copied because it is mutable.
self.failIf((g.value[s] != f.value[s]).any()) # its contents should be identical
self.failUnless(f(2, 1) == g(2)) #they should be in sync, default value should be copied.
self.failUnless(f(2, 1) == g(2)) #they should be in sync, default value should be copied.
f(1,2) # put them out of sync
self.failIf(f(1, 2) == g(1, 2)) #they should not be equal anymore.
def test_shared_state0(self):
a = T.scalar() # the a is for 'anonymous' (un-named).
x,s = T.scalars('xs')
f = function([x, In(a, value=1.0,name='a'), In(s, value=0.0, update=s+a*x, mutable=True)], s+a*x)
g = function([x, In(a, value=1.0,name='a'), In(s, value=f.container[s], update=s-a*x, mutable=True)], s+a*x)
f(1, 2)
self.failUnless(f[s] == 2)
self.failUnless(g[s] == 2)
g(1, 2)
self.failUnless(f[s] == 0)
self.failUnless(g[s] == 0)
# class T_function_examples(unittest.TestCase):
# def test_accumulator(self):
# """Test low-level interface with state."""
# x = T.scalar('x')
# s = T.scalar('s')
# fn, states = program_states(inputs = [x], outputs = [], states = [(s, 0, s+x)])
# sum = 0
# for inc in [1, 4, 5,23, -324]:
# sum += inc
# fn.run([inc], states)
# assert sum == states[0].value
# def test_misc0(self):
# fn_inc, states_inc = function_states(\
# inputs = [x], outputs = [], states = [(s, 0, s+x)])
# fn_inc2, states_inc2 = function_states(\
# inputs = [x], outputs = [], states = [(s, 0, s+x)])
# fn_inc_copy = copy.copy(fn_inc) #USE fn copy
# # run() is like __call__, but requires an explicit state argument
# fn_inc.run([5], states_inc) #run on own state object
# fn_inc2.run([3], states_inc) #run on compatible state object
# assert states_inc[0].value == 8
# states_inc_copy = copy.copy(states_inc) #USE state copy
# fn_inc_copy.run([2], states_inc_copy)
# assert states_inc[0].value == 10 #compatible
# fn_dec, states_dec = function_states(\
# inputs = [x], outputs = [], states = [((s, s-x), states_inc[0])])
# try:
# fn_inc.run([5], states_dec) # wrong kind of state for given program
# self.fail("fn accepted an invalid state argument")
# except SpecificException:
# raise NotImplementedError() #TODO
# except Exception:
# self.fail("fn accepted an invalid state argument")
# def test_perceptron(self):
# """Test high-level state interface."""
# mu0 = numpy.array([1.0,0.0])
# mu1 = numpy.array([0.0,0.1])
# si0 = numpy.ones_like(mu0) #unit variance
# si1 = numpy.ones_like(mu1) #unit variance
# #implicit internal state
# r_state = random.random_state()
# label = r_state.bernoulli(0.5)
# #implicit internal state for each DiagGaussian
# x = label * DiagGaussian(mu0, si0, state=r_state) \
# + (1 - label) * random.DiagGaussian(mu1, si1, state=r_state)
# w = T.tensor.dvector()
# b = T.tensor.dscalar()
# lr = 0.01
# decision = dot(x,w) + b > 0
# new_w = w + neq(label, decision) * lr * x
# new_b = b + neq(label, decision) * (label * (-lr) + (1-label)*lr)
# init_w = numpy.array([0.0, 0.0])
# init_b = 0.0
# io_stream = T.function([], [label, x], state={'seed':(r_state, 42)})
# perceptron_learn = T.function([x, label], [decision],
# state={
# 'w':((w, update_w), init_w),
# 'b':((b, update_b), init_b),
# 'lr':(lr, 0.01)})
# perceptron_use = T.function([x], [decision],
# state={
# 'w':(w, perceptron_learn.shared['w']),
# 'b':(b, perceptron_learn.shared['b'])})
# errs = 0
# for i in xrange(100):
# il, ix = io_stream()
# d0 = perceptron_use(ix)
# d1 = perceptron_learn(ix, il)
# assert d0 == d1
# errs += (d0 != d1)
# print d0
# print 'errs =', errs
# class T_dict_interface(unittest.TestCase):
# def test_keyword(self):
# x = T.scalar('x')
# y = T.scalar('y')
# s = T.scalar('s')
# fn = function(input_kw = {'a':x, 'b':y}, outputs = [], state = {'s':(s, 0, s+x/y)})
# try:
# fn(1, 1)
# self.fail("non-keyword call accepted!")
# except SpecificException:
# raise NotImplementedError()
# except Exception:
# self.fail("non-keyword call accepted!")
# try:
# fn(a=1)
# self.fail("incomplete call accepted!")
# except SpecificException:
# raise NotImplementedError()
# except Exception:
# self.fail("incomplete call accepted!")
# try:
# fn(a=1, b=1, c=1)
# self.fail("overcomplete call accepted!")
# except SpecificException:
# raise NotImplementedError()
# except Exception:
# self.fail("overcomplete call accepted!")
# def test_aliased_state(self):
# """Test keyword input and copy."""
# x = T.scalar('x')
# y = T.scalar('y')
# s = T.scalar('s')
# fn = function(input_kw = {'a':x, 'b':y}, outputs = [], state = {'s':(s, 0, s+x/y)})
# fn2 = fn.copy()
# fn3 = fn.copy()
# fn(a=2, b=5)
# fn2(a=5, b=2)
# fn3(b=2, a=5)
# assert fn.state['s'] == 2.0/5
# assert fn2.state['s'] == 5.0/2
# assert fn3.state['s'] == 5.0/2
# #fn and fn3 use the same sort of state, so this is OK.
# fn3.state = fn.state
# fn.state['s'] = 0
# fn(a=1, b=1) #increment the shared state
# assert fn3.state['s'] == 1
# fn3(a=-1, b=1) #decrement the shared state
# assert fn.state['s'] == 0
if __name__ == '__main__':
if 1:
unittest.main()
else:
-testcases = [T_dict_interface, T_state]
+testcases = []
testcases.append(T_function)
#<testsuite boilerplate>
testloader = unittest.TestLoader()
...
@@ -8,6 +8,10 @@ from sparse import _is_dense, _is_sparse, _is_dense_result, _is_sparse_result
from sparse import _mtypes, _mtype_to_str
import random
import gof
def eval_outputs(outputs):
return compile.function([], outputs)()[0]
class T_transpose(unittest.TestCase):
def setUp(self):
@@ -23,7 +27,7 @@ class T_transpose(unittest.TestCase):
self.failUnless(ta.type.dtype == 'float64', ta.type.dtype)
self.failUnless(ta.type.format == 'csr', ta.type.format)
-vta = compile.eval_outputs([ta])
+vta = eval_outputs([ta])
self.failUnless(vta.shape == (3,5))
def test_transpose_csr(self):
a = as_sparse(sparse.csr_matrix(sparse.speye(5,3)))
@@ -34,7 +38,7 @@ class T_transpose(unittest.TestCase):
self.failUnless(ta.type.dtype == 'float64', ta.type.dtype)
self.failUnless(ta.type.format == 'csc', ta.type.format)
-vta = compile.eval_outputs([ta])
+vta = eval_outputs([ta])
self.failUnless(vta.shape == (3,5))
class T_Add(unittest.TestCase):
@@ -60,7 +64,7 @@ class T_Add(unittest.TestCase):
self.failUnless(apb.type.format == aR.type.format, apb.type.format)
self.failUnless(apb.type.format == bR.type.format, apb.type.format)
-val = compile.eval_outputs([apb])
+val = eval_outputs([apb])
self.failUnless(val.shape == (3,2))
self.failUnless(numpy.all(val.todense() == (a + b).todense()))
self.failUnless(numpy.all(val.todense() == numpy.array([[1., 2], [3, 4], [5, 6]])))
@@ -85,7 +89,7 @@ class T_Add(unittest.TestCase):
self.failUnless(apb.type.dtype == aR.type.dtype, apb.type.dtype)
self.failUnless(apb.type.dtype == bR.type.dtype, apb.type.dtype)
-val = compile.eval_outputs([apb])
+val = eval_outputs([apb])
self.failUnless(val.shape == (3, 2))
self.failUnless(numpy.all(val == (a + b)))
self.failUnless(numpy.all(val == numpy.array([[1., 2], [3, 4], [5, 6]])))
@@ -110,7 +114,7 @@ class T_Add(unittest.TestCase):
self.failUnless(apb.type.dtype == aR.type.dtype, apb.type.dtype)
self.failUnless(apb.type.dtype == bR.type.dtype, apb.type.dtype)
-val = compile.eval_outputs([apb])
+val = eval_outputs([apb])
self.failUnless(val.shape == (3, 2))
self.failUnless(numpy.all(val == (a + b)))
self.failUnless(numpy.all(val == numpy.array([[1., 2], [3, 4], [5, 6]])))
@@ -122,14 +126,14 @@ class T_conversion(unittest.TestCase):
def test0(self):
a = tensor.as_tensor(numpy.random.rand(5))
s = csc_from_dense(a)
-val = compile.eval_outputs([s])
+val = eval_outputs([s])
self.failUnless(str(val.dtype)=='float64')
self.failUnless(val.format == 'csc')
def test1(self):
a = tensor.as_tensor(numpy.random.rand(5))
s = csr_from_dense(a)
-val = compile.eval_outputs([s])
+val = eval_outputs([s])
self.failUnless(str(val.dtype)=='float64')
self.failUnless(val.format == 'csr')
@@ -138,7 +142,7 @@ class T_conversion(unittest.TestCase):
s = t((2,5))
d = dense_from_sparse(s)
s[0,0] = 1.0
-val = compile.eval_outputs([d])
+val = eval_outputs([d])
self.failUnless(str(val.dtype)=='float64')
self.failUnless(numpy.all(val[0] == [1,0,0,0,0]))
@@ -159,7 +163,7 @@ class _testCase_dot(unittest.TestCase):
zop = dot(x,xT)
self.failUnless(_is_sparse_result(zop))
-z = compile.eval_outputs([zop])
+z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
self.failUnless(z.shape == (500,500))
self.failUnless(type(z) is mtype)
@@ -190,7 +194,7 @@ class _testCase_dot(unittest.TestCase):
zop = dot(x,y)
self.failUnless(_is_sparse_result(zop))
-z = compile.eval_outputs([zop])
+z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
self.failUnless(z.shape == (500,2))
self.failUnless(type(z) is mtype)
@@ -227,7 +231,7 @@ class _testCase_dot(unittest.TestCase):
# zop = dot(y, x)
zop = transpose(dot(y, x))
self.failUnless(_is_sparse_result(zop))
-z = compile.eval_outputs([zop])
+z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
self.failUnless(z.shape == (500,2))
# self.failUnless(type(z) is mtype)
...
@@ -6,7 +6,7 @@ import tensor # for hidden symbols
import unittest
from copy import copy
-from compile import function, FunctionFactory, eval_outputs
+import compile
import gradient
import gof, gof.graph
from gof.python25 import any
@@ -15,6 +15,21 @@ from gof.utils import AbstractFunctionError
from elemwise import DimShuffle
default_mode = compile.Mode(optimizer = None,
linker = 'c&py')
def function(inputs, outputs, mode = default_mode):
return compile.function(inputs, outputs, mode = mode, accept_inplace = True)
def eval_outputs(outputs, mode = default_mode):
results = function([], outputs, mode = mode)()
if len(results) == 1:
return results[0]
return results
def _numpy_checker(x, y):
"""
Checks if x.data and y.data have the same contents.
@@ -64,9 +79,8 @@ def make_tester(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
try:
f = function(inputrs, node.outputs,
-linker = 'c&py', ##lambda env, **kwargs: gof.DualLinker(env, checker = _numpy_checker, **kwargs),
+mode = default_mode, ##lambda env, **kwargs: gof.DualLinker(env, checker = _numpy_checker, **kwargs),
-unpack_single = False,
-optimizer = None)
+)
except:
type, exc_value, traceback = sys.exc_info()
err_msg = "Test %s::%s: Error occurred while trying to make a Function" \
@@ -124,9 +138,8 @@ def make_tester(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
try:
f = function(inputrs, node.outputs,
-linker = 'c&py', #lambda env, **kwargs: gof.DualLinker(env, checker = _numpy_checker, **kwargs),
+mode = default_mode, #lambda env, **kwargs: gof.DualLinker(env, checker = _numpy_checker, **kwargs),
-unpack_single = False,
-optimizer = None)
+)
except:
type, exc_value, traceback = sys.exc_info()
err_msg = "Test %s::%s: Error occurred while trying to make a Function" \
@@ -541,14 +554,14 @@ def verify_grad(testcase, op, pt, n_tests=1, rng=numpy.random, eps=0.0000001, to
# we could make loop over outputs making random projections R for each,
# but this doesn't handle the case where not all the outputs are
# differentiable... so I leave this as TODO for now -JB.
-o_fn = function(tensor_pt, o_outputs, linker=linker)
+o_fn = function(tensor_pt, o_outputs[0], mode=compile.Mode(optimizer = None, linker = linker))
o_fn_out = o_fn(*pt)
random_projection = rng.rand(*o_fn_out.shape)
t_r = as_tensor(random_projection)
#random projection of o onto t_r
cost = sum(t_r * o_outputs[0])
-cost_fn = function(tensor_pt, [cost], linker=linker)
+cost_fn = function(tensor_pt, cost, mode=compile.Mode(optimizer = None, linker = linker))
num_grad = gradient.numeric_grad(cost_fn, pt)
@@ -560,7 +573,7 @@ def verify_grad(testcase, op, pt, n_tests=1, rng=numpy.random, eps=0.0000001, to
for op in gof.graph.io_toposort(tensor_pt, symbolic_grad):
print op
-grad_fn = function(tensor_pt, symbolic_grad,linker=linker)
+grad_fn = function(tensor_pt, symbolic_grad, mode=compile.Mode(optimizer = None, linker = linker))
analytic_grad = grad_fn(*pt)
if not isinstance(analytic_grad, (list, tuple)):
@@ -595,24 +608,24 @@ def _approx_eq(a,b,eps=1.0e-9):
return True
_approx_eq.debug = 0
-def check_eq(self, node_in, node_out, arg_in, arg_out):
-fn = Function([node_in], [node_out])
-self.failUnless( numpy.all(fn(arg_in) == arg_out), (arg_in, arg_out))
-def check_eq2(self, inputs, output, args_in, arg_out):
-fn = Function(inputs, [output])
-val = fn(*args_in)
-self.failUnless( numpy.all(val == arg_out), (val, arg_out))
-def check_eq2_c(self, inputs, output, args_in, arg_out):
-fn = Function(inputs, [output], linker_cls = gof.CLinker)
-val = fn(*args_in)
-self.failUnless( numpy.all(val == arg_out), (val, arg_out))
-def check_eq2_both(self, inputs, output, args_in, arg_out):
-fn = Function(inputs, [output], linker_cls = lambda env: gof.DualLinker(env, _numpy_checker))
-val = fn(*args_in)
-self.failUnless( numpy.all(val == arg_out), (val, arg_out))
+# def check_eq(self, node_in, node_out, arg_in, arg_out):
+# fn = Function([node_in], node_out)
+# self.failUnless( numpy.all(fn(arg_in) == arg_out), (arg_in, arg_out))
+# def check_eq2(self, inputs, output, args_in, arg_out):
+# fn = Function(inputs, output)
+# val = fn(*args_in)
+# self.failUnless( numpy.all(val == arg_out), (val, arg_out))
+# def check_eq2_c(self, inputs, output, args_in, arg_out):
+# fn = Function(inputs, [output], linker_cls = gof.CLinker)
+# val = fn(*args_in)
+# self.failUnless( numpy.all(val == arg_out), (val, arg_out))
+# def check_eq2_both(self, inputs, output, args_in, arg_out):
+# fn = Function(inputs, [output], linker_cls = lambda env: gof.DualLinker(env, _numpy_checker))
+# val = fn(*args_in)
+# self.failUnless( numpy.all(val == arg_out), (val, arg_out))
class T_Shape(unittest.TestCase):
def test_basic0(self):
@@ -633,7 +646,7 @@ class T_Cast(unittest.TestCase):
[convert_to_int8, convert_to_int16, convert_to_int32, convert_to_int64,
convert_to_float32, convert_to_float64]):
y = converter(x)
-f = function([x], [y], strict = True, linker = 'c&py')
+f = function([compile.In(x, strict = True)], y, mode = default_mode)
a = numpy.arange(10, dtype = type1)
b = f(a)
self.failUnless(numpy.all(b == numpy.arange(10, dtype = type2)))
@@ -701,7 +714,7 @@ class T_transpose(unittest.TestCase):
n = as_tensor(numpy.ones(()))
t = transpose(n)
self.failUnless(t.owner.op == tensor._transpose_inplace)
-f = function([n], [t])
+f = function([n], t)
tval = f(n.data)
self.failUnless(tval.shape == n.data.shape)
@@ -713,7 +726,7 @@ class T_transpose(unittest.TestCase):
n = as_tensor(numpy.ones(5))
t = transpose(n)
self.failUnless(t.owner.op == tensor._transpose_inplace)
-f = function([n], [t])
+f = function([n], t)
tval = f(n.data)
self.failUnless(tval.shape == n.data.shape)
#test aliasing
@@ -724,7 +737,7 @@ class T_transpose(unittest.TestCase):
n = as_tensor(numpy.ones((5,3)))
t = transpose(n)
self.failUnless(t.owner.op == tensor._transpose_inplace)
-f = function([n], [t])
+f = function([n], t)
tval = f(n.data)
self.failUnless(tval.shape == (3,5))
#test aliasing
@@ -736,7 +749,7 @@ class T_transpose(unittest.TestCase):
n = as_tensor(numpy.ones((5,3,2)))
t = tensor._transpose_inplace(n)
self.failUnless(t.owner.op == tensor._transpose_inplace)
-f = function([n], [t])
+f = function([n], t)
tval = f(n.data)
self.failUnless(tval.shape == (2,3,5))
#test aliasing
@@ -960,7 +973,7 @@ class T_Stack(unittest.TestCase):
class _test_comparison(unittest.TestCase):
def test_gt(self):
x, y = fvector(), fvector()
-fn = function([x,y], [x > y])
+fn = function([x,y], x > y)
l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.])
v = fn(l, r)
@@ -968,7 +981,7 @@ class _test_comparison(unittest.TestCase):
def test_lt(self):
x, y = fvector(), fvector()
-fn = function([x,y], [x < y])
+fn = function([x,y], x < y)
l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.])
v = fn(l, r)
@@ -976,7 +989,7 @@ class _test_comparison(unittest.TestCase):
def test_le(self):
x, y = fvector(), fvector()
-fn = function([x,y], [x <= y])
+fn = function([x,y], x <= y)
l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.])
v = fn(l, r)
@@ -984,7 +997,7 @@ class _test_comparison(unittest.TestCase):
def test_ge(self):
x, y = fvector(), fvector()
-fn = function([x,y], [x >= y])
+fn = function([x,y], x >= y)
l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.])
v = fn(l, r)
@@ -992,7 +1005,7 @@ class _test_comparison(unittest.TestCase):
def test_eq(self):
x, y = fvector(), fvector()
-fn = function([x,y], [eq(x,y)])
+fn = function([x,y], eq(x,y))
l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.])
v = fn(l, r)
@@ -1000,7 +1013,7 @@ class _test_comparison(unittest.TestCase):
def test_neq(self):
x, y = fvector(), fvector()
-fn = function([x,y], [neq(x, y)])
+fn = function([x,y], neq(x, y))
l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.])
v = fn(l, r)
@@ -1009,7 +1022,7 @@ class _test_comparison(unittest.TestCase):
class _test_bitwise(unittest.TestCase):
def test_or(self):
x, y = bvector(), bvector()
-fn = function([x,y], [x|y])
+fn = function([x,y], x|y)
l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r)
@@ -1017,10 +1030,10 @@ class _test_bitwise(unittest.TestCase):
def test_xor(self):
x, y = bvector(), bvector()
-fn = function([x,y], [x^y])
+fn = function([x,y], x^y)
ix = x
ix ^= y
-gn = function([x,y], [ix])
+gn = function([x,y], ix)
l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r)
@@ -1031,7 +1044,7 @@ class _test_bitwise(unittest.TestCase):
def test_and(self):
x, y = bvector(), bvector()
-fn = function([x,y], [x&y])
+fn = function([x,y], x&y)
l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r)
@@ -1039,7 +1052,7 @@ class _test_bitwise(unittest.TestCase):
def test_inv(self):
x, y = bvector(), bvector()
-fn = function([x,y], [~x])
+fn = function([x,y], ~x)
l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r)
@@ -1058,7 +1071,7 @@ class T_add(unittest.TestCase):
("*", lambda x,y: x*y),
("/", lambda x,y: x/y))
for s, fn in tests:
-f = function([a,b], [fn(a, b)], linker = 'c')
+f = function([a,b], fn(a, b), mode = compile.Mode(optimizer = None, linker = 'c'))
self.failUnless(numpy.all(fn(a.data, b.data) == f(a.data, b.data)))
def test_grad_scalar_l(self):
@@ -1324,7 +1337,7 @@ class t_dot(unittest.TestCase):
def not_aligned(self, x, y):
z = dot(x,y)
try:
-tz = eval_outputs([z])
+tz = eval_outputs([z], mode = compile.Mode(optimizer = None, linker = 'py'))
except ValueError, e:
self.failUnless(e[0].split()[1:4] == ['are', 'not', 'aligned'], e)
return
@@ -1370,7 +1383,7 @@ class t_gemm(unittest.TestCase):
z_orig = z.copy()
tz,ta,tx,ty,tb = [as_tensor(p).type() for p in z,a,x,y,b]
-f = function([tz,ta,tx,ty,tb], [gemm(tz,ta,tx,ty,tb)], linker=l)
+f = function([tz,ta,tx,ty,tb], gemm(tz,ta,tx,ty,tb), mode=compile.Mode(optimizer = None, linker = l))
new_z = f(z,a,x,y,b)
z_after = self._gemm(z_orig, a, x, y, b)
@@ -1492,7 +1505,7 @@ class t_gemm(unittest.TestCase):
tz,ta,tx,ty,tb = [value(p) for p in z,a,x,y,b]
-f = function([tz,ta,tx,ty,tb], [gemm(tz,ta,tx,ty,tb)], linker=l)
+f = function([tz,ta,tx,ty,tb], gemm(tz,ta,tx,ty,tb), mode = compile.Mode(optimizer = None, linker=l))
f(z, a, x, y, b)
self.failUnless(_approx_eq(z_after, z), (z_orig, z_after, z))
f(z.T, a, y.T, x.T, b)
@@ -1741,8 +1754,8 @@ class T_op_cache(unittest.TestCase):
v = matrix()
v.name = 'v'
gv = fill(v/v, 1.0)/v - (fill(v/v, 1.0) * v) / (v*v)
-fn_py = function([v], [gv], linker = 'py')
+fn_py = function([v], gv, mode = compile.Mode(optimizer = None, linker = 'py'))
-fn_c_or_py = function([v], [gv], linker = 'c|py')
+fn_c_or_py = function([v], gv, compile.Mode(optimizer = None, linker = 'c|py'))
a = numpy.random.rand(5,2)
self.failUnless(numpy.all(fn_py(a) == fn_c_or_py(a)))
...
@@ -107,11 +107,11 @@ class _test_greedy_distribute(unittest.TestCase):
a, b, c, d, x, y, z = matrices('abcdxyz')
e = (a/z + b/x) * x * z
g = Env([a,b,c,d,x,y,z], [e])
-print pprint.pp.process(g.outputs[0])
+##print pprint.pp.process(g.outputs[0])
mul_canonizer.optimize(g)
gof.TopoOptimizer(gof.LocalOptGroup(local_fill_cut, local_fill_lift), order = 'out_to_in').optimize(g)
gof.TopoOptimizer(gof.LocalOptGroup(local_greedy_distributor), order = 'out_to_in').optimize(g)
-print pprint.pp.process(g.outputs[0])
+##print pprint.pp.process(g.outputs[0])
@@ -131,10 +131,10 @@ class _test_canonize(unittest.TestCase):
# e = x / y / x
e = (x / x) * (y / y)
g = Env([x, y, z, a, b, c, d], [e])
-print pprint.pp.process(g.outputs[0])
+##print pprint.pp.process(g.outputs[0])
mul_canonizer.optimize(g)
gof.TopoOptimizer(gof.LocalOptGroup(local_fill_cut, local_fill_lift), order = 'out_to_in').optimize(g)
-print pprint.pp.process(g.outputs[0])
+##print pprint.pp.process(g.outputs[0])
# def test_plusmin(self):
# x, y, z = inputs()
...
## TODO: REDO THESE TESTS
import unittest
from tensor_random import *
@@ -7,7 +9,7 @@ import compile
def Uniform(s, n):
return NumpyGenerator(s, n, numpy.random.RandomState.uniform)
-class T_Random(unittest.TestCase):
+class T_Random:#(unittest.TestCase):
def test0(self):
rng = Uniform(12345, 2)
...
"""Convenient driver of graph construction, optimization, and linking.""" """Convenient driver of graph construction, optimization, and linking."""
import numpy import numpy
import gof import gof
import sys import sys
from copy import copy from copy import copy
import tensor_opt
#TODO: put together some default optimizations (TRAC #67)
def exec_py_opt(inputs, outputs, features=[]):
"""Return an optimized graph running purely python implementations"""
return Function(intputs, outputs, features, exec_py_opt.optimizer, gof.link.PerformLinker(), False)
exec_py_opt.optimizer = None
def exec_opt(inputs, outputs, features=[]):
"""Return a fast implementation"""
return Function(intputs, outputs, features, exec_opt.optimizer, gof.link.PerformLinker(), False)
exec_opt.optimizer = None
class _DefaultOptimizer(object):
#const = gof.opt.ConstantFinder()
merge = gof.opt.MergeOptimizer()
def __call__(self, env):
#self.const(env)
self.merge(env)
default_optimizer = _DefaultOptimizer()
def _mark_indestructible(results):
for r in results:
r.tag.indestructible = True
# def linker_cls_python_and_c(env, **kwargs):
# """Use this as the linker_cls argument to Function.__init__ to compare
# python and C implementations"""
def check_equal(x, y):
"""
Returns True iff x[0] and y[0] are equal (checks the dtype and
shape if x and y are numpy.ndarray instances). Used internally.
"""
x, y = x[0], y[0]
if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
if x.dtype != y.dtype or x.shape != y.shape or numpy.any(abs(x - y) > 1e-10):
raise Exception("Output mismatch.", {'performlinker': x, 'clinker': y})
else:
if x != y:
raise Exception("Output mismatch.", {'performlinker': x, 'clinker': y})
# return gof.DualLinker(checker, **kwargs).accept(env)
def infer_reuse_pattern(env, outputs_to_disown):
"""
Given an env and a list of results, returns the list of all
results which may share the same underlying data storage as any of
the specified results. Used internally by function, FunctionMaker.
"""
do_not_reuse = list()
seen = set()
def walk(r):
@@ -64,32 +45,9 @@ def infer_reuse_pattern(env, outputs_to_disown):
walk(output)
return do_not_reuse
def cloned_env(inputs, outputs):
inputs, outputs = gof.graph.clone(inputs, outputs)
env = gof.env.Env(inputs, outputs)
return env
def std_env(inputs, outputs, disown_inputs = False,
use_destroy_handler = True):
inputs, outputs = gof.graph.clone(inputs, outputs)
_mark_indestructible(outputs)
env = gof.env.Env(inputs, outputs)
if use_destroy_handler:
env.extend(gof.DestroyHandler())
env.extend(gof.ReplaceValidate())
env.validate()
for input in inputs:
input.destroyed_by_user = use_destroy_handler and len(env.destroyers(input)) != 0
if not input.destroyed_by_user and not disown_inputs:
# prevent optimizations from destroying the inputs
input.tag.indestructible = True
return env
def std_opt(env):
pass
# If a string is passed as the linker argument in the constructor for
# Mode, it will be used as the key to retrieve the real linker in this
# dictionary
predefined_linkers = {
'py' : gof.PerformLinker(),
'c' : gof.CLinker(),
@@ -97,84 +55,779 @@ predefined_linkers = {
'c&py' : gof.DualLinker(checker = check_equal)
}
-class FunctionFactory:
-def __init__(self, inputs, outputs, linker = 'py', optimizer = std_opt, borrow_outputs = False, disown_inputs = False,
-use_destroy_handler = True):
-if len(inputs) != len(set(inputs)):
-print >>sys.stderr, "Warning: duplicate inputs"
-for r in list(inputs) + list(outputs):
-if not isinstance(r, gof.Result):
-raise TypeError("All inputs and outputs to FunctionFactory should be Result instances. Received:", type(r), r)
-env = std_env(inputs, outputs, disown_inputs = disown_inputs,
-use_destroy_handler = use_destroy_handler)
-if None is not optimizer:
-optimizer(env)
-env.validate()
+default_linker = 'c|py'
+# If a string is passed as the optimizer argument in the constructor
+# for Mode, it will be used as the key to retrieve the real optimizer
+# in this dictionary
+predefined_optimizers = {
+None : lambda env: None,
+'merge' : gof.MergeOptimizer(),
+'math' : gof.MergeOptMerge(
+gof.PureThenInplaceOptimizer(tensor_opt.math_optimizer,
+tensor_opt.inplace_optimizer))
+}
default_optimizer = 'merge'
class Mode(object):
"""
The Mode represents a way to optimize and then link a computation
graph.
* optimizer -> a structure of type Optimizer. An Optimizer may
simplify the math, put similar computations together, improve
numerical stability and various other improvements.
* linker -> a structure of type Linker. A Linker decides which
implementations to use (C or Python, for example) and how to
string them together to perform the computation.
See predefined_linkers, predefined_optimizers and also
predefined_modes.
"""
def __init__(self, linker = default_linker, optimizer = default_optimizer):
self.__setstate__((linker, optimizer))
def __getstate__(self):
return (self.provided_linker, self.provided_optimizer)
def __setstate__(self, (linker, optimizer)):
self.provided_linker = linker
self.provided_optimizer = optimizer
if isinstance(linker, str) or linker is None:
linker = predefined_linkers[linker]
self.linker = linker
if isinstance(optimizer, str) or optimizer is None:
optimizer = predefined_optimizers[optimizer]
self.optimizer = optimizer
def __str__(self):
return "Mode(linker = %s, optimizer = %s)" % (self.provided_linker, self.provided_optimizer)
# If a string is passed as the mode argument in function or
# FunctionMaker, the Mode will be taken from this dictionary using the
# string as the key
predefined_modes = {
'SANITY_CHECK' : Mode('c&py', 'math'),
'FAST_COMPILE' : Mode('py', 'merge'),
'FAST_RUN' : Mode('c|py', 'math'),
'EXPENSIVE_OPTIMIZATIONS' : Mode('c|py', 'math'),
}
default_mode = 'FAST_RUN'
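As a quick illustration of the Mode machinery above, a minimal sketch (the scalar graph is a hypothetical placeholder; only compile.function, compile.Mode and tensor.scalar, as exercised in the tests of this commit, are assumed):

    import compile, tensor

    x = tensor.scalar('x')

    # a predefined mode is named by its key in predefined_modes
    f1 = compile.function([x], [x * 2], mode = 'FAST_COMPILE')

    # a custom Mode combines any linker key with any optimizer key
    check_mode = compile.Mode(linker = 'c&py', optimizer = 'merge')
    f2 = compile.function([x], [x * 2], mode = check_mode)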
class SymbolicInput(object):
"""
Represents a symbolic input for use with function or FunctionMaker.
result: a Result instance.
This will be assigned a value before running the function,
not computed from its owner.
name: Any type. (If autoname=True, defaults to result.name).
If name is a valid Python identifier, this input can be set by kwarg, and its value
can be accessed by self.<name>.
update: Result instance (default: None)
value (see previous) will be replaced with this expression result after each function call.
If update is None, no update is performed after the call.
mutable: Bool (default: False if update is None, True if update is not None)
True: permit the compiled function to modify the python object being passed as the input
False: do not permit the compiled function to modify the python object being passed as the input.
strict: Bool (default: False)
True: means that the value you pass for this input must have exactly the right type
False: the value you pass for this input may be cast automatically to the proper type
autoname: Bool (default: True)
See the name option.
"""
def __init__(self, result, name=None, update=None, mutable=None, strict=False, autoname=True):
self.result = result
self.name = result.name if (autoname and name is None) else name
if self.name is not None and not isinstance(self.name, str):
raise TypeError("name must be a string! (got: %s)" % self.name)
self.update = update
self.mutable = mutable if (mutable is not None) else (update is not None)
self.strict = strict
def __str__(self):
if self.update:
return "In(%s -> %s)" % (self.result, self.update)
else:
return "In(%s)" % self.result
def __repr__(self):
return str(self)
class SymbolicInputKit(object):
"""
Represents a group ("kit") of SymbolicInputs. If fed into function or
FunctionMaker, only the inputs which are needed to compile the function
properly will be taken.
A SymbolicInputKit provides the distribute function in order to set or
initialize several inputs from a single value. Specialized Kits should
override it.
"""
def __init__(self, name):
if not isinstance(name, str):
raise TypeError('name must be a string (got: %s)' % name)
self.name = name
self.sinputs = []
self.results = []
def add_input(self, sinput):
"""
Add a SymbolicInput to this SymbolicInputKit. It will be given the
next available index.
"""
self.sinputs.append(sinput)
self.results.append(sinput.result)
def distribute(self, value, indices, containers):
"""
Given a list of indices corresponding to SymbolicInputs in this kit
as well as a corresponding list of containers, initialize all the
containers using the provided value.
"""
raise NotImplementedError
def complete(self, inputs):
"""
        Given inputs (a list of Result instances), checks through all
        the SymbolicInputs in the kit and returns a sorted list of
        indices and a list of their corresponding SymbolicInputs such
that each of them represents some result in the inputs list.
Not all the provided inputs will have a corresponding
SymbolicInput in the kit.
"""
ret = []
for input in inputs:
try:
i = self.results.index(input)
ret.append((i, self.sinputs[i]))
except ValueError:
pass
ret.sort()
return zip(*ret)
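# Illustrative behaviour of complete() (sketch, not from the original source):
# if the kit holds SymbolicInputs for results [a, b, c] and the graph's inputs
# are [c, a], complete returns the sorted index list [0, 2] zipped apart from
# the matching SymbolicInputs for a and c; b is simply skipped. Note that when
# nothing matches, zip(*[]) yields [] rather than a pair of empty lists, so
# callers must be prepared for that.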
class In(SymbolicInput):
"""
Represents a symbolic input for use with function or FunctionMaker.
result: a Result instance.
This will be assigned a value before running the function,
not computed from its owner.
    name: a string or None. (If autoname=True, defaults to result.name).
        If name is a valid Python identifier, this input can be set by keyword
        argument and its value can be accessed by self.<name>.
value: Any type.
The initial/default value for this input. If update is None, this input acts just like
an argument with a default value in Python. If update is not None, changes to this
value will "stick around", whether due to an update or a user's explicit action.
    update: Result instance (default: None)
        value (see previous) will be replaced with the result of this expression
        after each function call. If update is None, value is instead fed back
        into the storage after each call.
mutable: Bool (default: False if update is None, True if update is not None)
True: permit the compiled function to modify the python object being passed as the input
False: do not permit the compiled function to modify the python object being passed as the input.
    strict: Bool (default: False)
        True: the value you pass for this input must have exactly the right type
        False: the value you pass for this input may be cast automatically to the proper type
autoname: Bool (default: True)
See the name option.
"""
def __init__(self, result, name=None, value=None, update=None, mutable=None, strict=False, autoname=True):
super(In, self).__init__(result, name, update, mutable, strict, autoname)
self.value = value
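# Illustrative usage (hedged sketch, not from the original commit; x and state
# are assumed Results):
#
#   i = In(x, value = 1.0)                           # optional input defaulting to 1.0
#   s = In(state, value = 0.0, update = state + 1)   # persistent state, bumped each call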
class SymbolicOutput(object):
"""
Represents a symbolic output for use with function or FunctionMaker.
borrow: set this to True to indicate that a reference to
function's internal storage may be returned. A value
returned for this output might be clobbered by running
the function again, but the function might be faster.
"""
def __init__(self, result, borrow=False):
self.result = result
self.borrow = borrow
Out = SymbolicOutput
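# Illustrative usage (sketch): borrowing trades aliasing safety for speed.
#
#   o_safe = Out(e)                 # a fresh value is returned at each call
#   o_fast = Out(e, borrow = True)  # may alias internal storage; clobbered by the next call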
class Supervisor:
"""
Listener for Env events which makes sure that no operation overwrites the
contents of protected Results. The outputs of the Env are protected by default.
"""
def __init__(self, protected):
self.protected = list(protected)
def validate(self, env):
if not hasattr(env, 'destroyers'):
return True
for r in self.protected + list(env.outputs):
if env.destroyers(r):
raise gof.InconsistencyError("Trying to destroy a protected Result.")
def std_env(input_specs, output_specs, accept_inplace = False):
"""
Makes an Env corresponding to the input specs and the output
specs. Any SymbolicInput in the input_specs, if its update field
is not None, will add an output to the Env corresponding to that
update. The return value is the Env as well as a list of
SymbolicOutput instances corresponding to the updates.
If accept_inplace is False, the graph will be checked for inplace
operations and an exception will be raised if it has any. If
accept_inplace is True, a DestroyHandler will be added to the Env
if there are any inplace operations.
The returned Env is a clone of the graph between the provided
inputs and outputs.
"""
orig_inputs = [spec.result for spec in input_specs]
updates = [spec.update for spec in input_specs if spec.update]
orig_outputs = [spec.result for spec in output_specs] + updates
inputs, outputs = gof.graph.clone(orig_inputs, orig_outputs)
env = gof.env.Env(inputs, outputs)
for node in env.nodes:
if getattr(node.op, 'destroy_map', None):
if not accept_inplace:
raise TypeError("Graph must not contain inplace operations", node)
else:
env.extend(gof.DestroyHandler())
break
# We need to protect all immutable inputs from inplace operations.
env.extend(Supervisor(input for spec, input in zip(input_specs, inputs) if not spec.mutable))
return env, map(SymbolicOutput, updates)
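# Conceptual example (not in the original source): for
#   input_specs  = [In(state, value = 0.0, update = state + 1)]
#   output_specs = [Out(e)]
# std_env builds an Env whose outputs are [e, state + 1]; the extra output is
# what Function later pops off to write the update back into the input storage.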
class FunctionMaker(object):
@staticmethod
def wrap_in(input):
if isinstance(input, (SymbolicInput, SymbolicInputKit)):
return input
elif isinstance(input, gof.Result):
# r -> SymbolicInput(result=r)
return SymbolicInput(input)
elif isinstance(input, (list, tuple)):
# (r, u) -> SymbolicInput(result=r, update=u)
if len(input) == 2:
return SymbolicInput(input[0], update = input[1])
else:
raise TypeError("Expected two elements in the list or tuple.", input)
else:
raise TypeError("Unknown input type:", type(input), input)
@staticmethod
def expand_in(sinput, rinputs):
# For SymbolicInputKits, this extracts a list of SymbolicInput instances
# and corresponding indices such that these SymbolicInputs are representative
# of some of the Result instances in inputs.
# For SymbolicInput, this returns None as the list of indices and a list with
# just the SymbolicInput.
if isinstance(sinput, SymbolicInputKit):
return sinput.complete(rinputs)
elif isinstance(sinput, SymbolicInput):
return [None, [sinput]]
@staticmethod
def wrap_out(output):
if isinstance(output, SymbolicOutput):
return output
elif isinstance(output, gof.Result):
return SymbolicOutput(output)
else:
raise TypeError("Unknown output type:", type(output), output)
def __init__(self, inputs, outputs, mode = 'FAST_RUN', accept_inplace = False):
"""
Create a FunctionMaker for the specified inputs, outputs and mode.
@param inputs: a list of SymbolicInput instances
@param outputs: a list of SymbolicOutput instances
outputs may also be a single Result (not a list), in which
case the functions produced by FunctionMaker will return
their output value directly
@param mode: a Mode instance telling FunctionMaker how to optimize and link
@param accept_inplace: True iff it is acceptable to have inplace operations
in the graph from the inputs to the outputs
"""
# Handle the case where inputs and/or outputs is a single Result (not in a list)
unpack_single = False
if not isinstance(outputs, (list, tuple)):
unpack_single = True
outputs = [outputs]
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
# Wrap them in In or Out instances if needed.
inputs, outputs = map(self.wrap_in, inputs), map(self.wrap_out, outputs)
_inputs = gof.graph.inputs([o.result for o in outputs])
indices = [[input] + self.expand_in(input, _inputs) for input in inputs]
expanded_inputs = reduce(list.__add__, [list(z) for x, y, z in indices], [])
# make the env
env, additional_outputs = std_env(expanded_inputs, outputs, accept_inplace)
        self.env = env

        # Fetch the mode and then the optimizer and linker
        mode = predefined_modes.get(mode, mode)
        optimizer, linker = mode.optimizer, copy(mode.linker)
# optimize the env
optimizer(env)
# initialize the linker
        if not hasattr(linker, 'accept'):
            raise ValueError("'linker' parameter of FunctionMaker should be a Linker with an accept method " \
                             "or one of %s" % predefined_linkers.keys())
        no_borrow = [output for output, spec in zip(env.outputs, outputs + additional_outputs) if not spec.borrow]
        if not no_borrow:
            self.linker = linker.accept(env)
        else:
            self.linker = linker.accept(env, no_recycling = infer_reuse_pattern(env, no_borrow))
self.indices = indices
        self.inputs = inputs
        self.expanded_inputs = expanded_inputs
        self.outputs = outputs
        self.unpack_single = unpack_single
        self.mode = mode
        self.accept_inplace = accept_inplace

    def create(self, defaults = None, trustme = False):
        """
        Create a function.

        defaults -> a list matching the inputs list and providing default values
                    if the default for an input is None, then that input is a
                    required input. For an input with an update, the default
                    acts as initialization.
        trustme -> disables some exceptions, used internally
        """
if defaults is None:
defaults = [None]*len(self.inputs)
input_storage = [] # list of independent one-element lists, will be passed to the linker
_defaults = []
# The following loop is to fill in the input_storage and _defaults lists.
for (input, indices, subinputs), default in zip(self.indices, defaults):
__default = default
# If the default is a gof.Container, this means we want to share
# the same storage. This is done by appending default.storage
# to input_storage
if isinstance(default, gof.Container):
if indices is not None:
raise TypeError("Cannot take a Container instance as default for a SymbolicInputKit.")
input_storage.append(default.storage)
default = None
# If the input is a SymbolicInputKit, it represents more than
# one storage unit. The indices and subinputs lists represent which
# of the kit's inputs are active in this graph, so we make as many
# storage units as needed
elif isinstance(input, SymbolicInputKit):
input_storage += [[None] for i in indices]
# Normal case: one new, independent storage unit
else:
input_storage.append([None])
# Filling _defaults. Each entry is a tuple of three elements:
# (required, refeed, value)
# - required means that the user must provide a value when calling the function
# - refeed means that we want to put the default back in the storage after each function call
# - value is the value that will be put in the storage initially
# Even though a SymbolicInputKit represents more than one input,
# we still only have one entry for the defaults list.
if isinstance(input, SymbolicInputKit):
if default is None:
_defaults.append((True, True, None))
else:
_defaults.append((False, False, default))
elif input.update is not None:
# If the input has an update, then (logically) it is not required since
# it is just a parameter and of course we don't want to refeed the default
# back into the storage as it would defeat the point of updating it. We
# always do this policy.
if default is None:
if trustme or isinstance(__default, gof.Container):
_defaults.append((False, False, default))
else:
# This might catch some bugs early
raise ValueError("A default (initial) value is required for an input which can update itself.", input)
else:
_defaults.append((False, False, default))
else:
if default is None:
# No default, so this is a required input. Nothing to feed back, initial value is None.
_defaults.append((True, False, None))
else:
                    # Default value. It is not required, but we want to put it back into the storage
                    # every time, so it behaves like a default argument in most programming languages.
_defaults.append((False, True, default))
defaults = _defaults
# Get a function instance
_fn, _i, _o = self.linker.make_thunk(input_storage = input_storage)
fn = Function(_fn, _i, _o, self.indices, self.outputs, defaults, self.unpack_single, self)
        return fn

import copy_reg
import cPickle

def _pickle_FunctionMaker(fm):
    return (_constructor_FunctionMaker, (fm.inputs, fm.outputs, fm.mode, fm.accept_inplace))

def _constructor_FunctionMaker(*args):
    return FunctionMaker(*args)

copy_reg.pickle(FunctionMaker, _pickle_FunctionMaker)

def _pickle_slice(s):
    return (slice, (s.start, s.stop, s.step))

copy_reg.pickle(slice, _pickle_slice)

from functools import partial

DUPLICATE = ['DUPLICATE'] # unique id object used as a placeholder for duplicate entries

class Function(object):
    """
    Type of the functions returned by theano.function or theano.FunctionMaker.create.
    """

    def __init__(self, fn, input_storage, output_storage, indices, outputs, defaults, unpack_single, maker):
        """
        fn -> a function returned by some linker's make_thunk method
        input_storage -> list of Container instances used by fn to fetch the inputs
        output_storage -> list of Container instances used by fn to store the outputs in
        indices -> list of (SymbolicInput|SymbolicInputKit, indices, [SymbolicInput, ...]), one tuple for each input
        defaults -> list of (required (bool), refeed (bool), value), one tuple for each input
            required -> whether the user must provide this input or not
            refeed -> whether this input's contents must be reverted to value after each call or not
            value -> the initial or default value of the input
        unpack_single -> if the function has one output and unpack_single is True, return that output. Else,
            return [output].
        maker -> FunctionMaker instance used to make this Function (used for copy)
        """
self.fn = fn
self.input_storage = input_storage
self.output_storage = output_storage
self.indices = indices
containers = list(self.input_storage)
finder = {}
inv_finder = {}
        # 'input' is passed explicitly to avoid the late-binding closure bug:
        # all setters would otherwise share the last loop iteration's input.
        def distribute(input, indices, cs, value):
            input.distribute(value, indices, cs)
            for c in cs:
                c.provided += 1
def set(c, v):
c.data = v
setters = []
# Initialize the storage
for i, ((input, indices, sinputs), (required, refeed, value)) in enumerate(zip(self.indices, defaults)):
if indices is None: # this is true iff input is not a SymbolicInputKit
c = containers[0]
if input.strict:
c.strict = True
if value is not None:
# always initialize the storage
c.data = value
c.required = required
c.provided = 0 # this is a count of how many times the input has been provided (reinitialized to 0 on __call__)
# We set an entry in finder for:
# - the index of the input
# - the result instance the input is based on
# - the name of the input
# All entries map to the container or to DUPLICATE if an ambiguity is detected
finder[i] = c
finder[input.result] = c
finder[input.name] = c if input.name not in finder else DUPLICATE
# inv_finder maps the container to the input (useful for one error message)
inv_finder[c] = input
setters.append(partial(set, c))
containers[:1] = []
else:
# The input is a SymbolicInputKit, so we take as many containers as the Kit provides inputs
cs = containers[:len(indices)]
# distribute does the initialization of the containers
input.distribute(value, indices, cs)
                f = partial(distribute, input, indices, cs)
# Like before, we set a finder entry for the kit. Note that
# we are not mapping to a container but to a function which
# can reinitialize all the containers
finder[i] = f
finder[input] = f
finder[input.name] = f if input.name not in finder else DUPLICATE
setters.append(f)
# For each input in the kit and its corresponding container, we put an entry in finder.
# This allows the user to micro-manage elements of the kit if need be.
# All containers inherit the required field and have their own "provided" counter
for c, sin in zip(cs, sinputs):
finder[sin.result] = c
                    finder[sin.name] = c if sin.name not in finder else DUPLICATE
inv_finder[c] = input
c.required = required
c.provided = 0
containers[:len(indices)] = []
self.finder = finder
self.inv_finder = inv_finder
self.outputs = outputs
self.defaults = defaults
self.unpack_single = unpack_single
self.maker = maker
# this class is important in overriding the square-bracket notation:
# fn.value[x]
# self reference is available via the closure on the class
class ValueAttribute(object):
def __getitem__(self, item):
try:
s = finder[item]
except KeyError:
raise TypeError("Unknown input or state: %s" % item)
if s is DUPLICATE:
raise TypeError("Ambiguous name: %s - please check the names of the inputs of your function for duplicates." % item)
if isinstance(s, gof.Container):
return s.value
else:
raise NotImplementedError
def __setitem__(self, item, value):
try:
s = finder[item]
except KeyError:
raise TypeError("Unknown input or state: %s" % item)
if s is DUPLICATE:
raise TypeError("Ambiguous name: %s - please check the names of the inputs of your function for duplicates." % item)
if isinstance(s, gof.Container):
s.value = value
s.provided += 1
else:
s(value)
# this class is important in overriding the square-bracket notation:
# fn.container[x]
# self reference is available via the closure on the class
class ContainerAttribute(object):
def __getitem__(self, item):
return finder[item]
# You cannot set the container
self._value = ValueAttribute()
self._container = ContainerAttribute()
def __getitem__(self, item):
return self.value[item]
def __setitem__(self, item, value):
self.value[item] = value
def __copy__(self):
defaults = [default for _1, _2, default in self.defaults]
cpy = self.maker.create(defaults, trustme = True)
for (input,_1,_2), here, there in zip(self.indices, self.input_storage, cpy.input_storage):
if input.mutable and here is not None:
there.data = copy(here.data)
else:
there.data = here.data
return cpy
def __call__(self, *args, **kwargs):
# Reinitialize each container's 'provided' counter
for c in self.input_storage:
c.provided = 0
# Set positional arguments
for i, arg in enumerate(args):
self[i] = arg
# Set keyword arguments
for k, arg in kwargs.iteritems():
self[k] = arg
# Check if inputs are missing or if inputs were set more than once
for c in self.input_storage:
if c.required and not c.provided:
raise TypeError("Missing required input: %s" % self.inv_finder[c].result)
if c.provided > 1:
raise TypeError("Multiple values for input: %s" % self.inv_finder[c].result)
# Do the actual work
self.fn()
outputs = [x.data for x in self.output_storage]
# Update the inputs that have an update function
for input, storage in reversed(zip(self.maker.expanded_inputs, self.input_storage)):
if input.update:
storage.data = outputs.pop()
# Put default values back in the storage
for i, (required, refeed, value) in enumerate(self.defaults):
if refeed:
self[i] = value
if self.unpack_single and len(outputs) == 1:
return outputs[0]
else:
return outputs
    value = property(
        lambda self: self._value,
        None, # not settable
        doc="""Dictionary-like access to the current values of the inputs, keyed by position, name or Result.""")
    container = property(
        lambda self: self._container,
        None,
        doc="""Dictionary-like read-only access to the Containers backing the inputs, keyed like `value`.""")
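    # Illustrative access patterns (sketch; assumes an input named 'x'):
    #
    #   f['x']             # read the current value through the finder
    #   f['x'] = 5.0       # write it back into the shared container
    #   f.container['x']   # the underlying gof.Container itself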
def _pickle_Function(f):
ins = list(f.input_storage)
defaults = []
for (input, indices, inputs), (required, refeed, default) in zip(f.indices, f.defaults):
if isinstance(input, SymbolicInputKit):
defaults.append(default)
ins[:len(indices)] = []
else:
defaults.append(ins[0])
del ins[0]
return (_constructor_Function, (f.maker, defaults, [x.data for x in f.input_storage]))
def _constructor_Function(maker, defaults, data):
f = maker.create(defaults, trustme = True)
for container, x in zip(f.input_storage, data):
container.data = x
return f
copy_reg.pickle(Function, _pickle_Function)
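# Illustrative round-trip (hedged; assumes every stored input value is picklable):
#
#   import cPickle
#   s = cPickle.dumps(f)   # serializes via (maker, defaults, input data)
#   g = cPickle.loads(s)   # rebuilt through maker.create(defaults, trustme = True)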
def function(inputs, outputs, mode='FAST_RUN', accept_inplace = False):
"""
Return a function calculating the outputs from the inputs.
inputs -> list of SymbolicInput or In instances
outputs -> a SymbolicOutput or a list of SymbolicOutput or Out instances
The return value of the returned function will match the format of this
argument (either the value itself or a list of one or more return values)
mode -> a descriptive string or a Mode instance; descriptive strings can be one of:
* SANITY_CHECK
* FAST_COMPILE
* FAST_RUN (default)
        * EXPENSIVE_OPTIMIZATIONS
accept_inplace -> True iff the graph can contain inplace operations
prior to the optimization phase (default is False)
Every element of the input list will be upgraded to an In instance if necessary,
using the following rules:
    * a Result instance r will be upgraded like In(r)
    * a tuple (name, r) will be In(r, name=name)
    * a tuple (r, val) will be In(r, value=val, autoname=True)
    * a tuple ((r, up), val) will be In(r, value=val, update=up, autoname=True)
    * a tuple (name, r, val) will be In(r, name=name, value=val)
    * a tuple (name, (r, up), val) will be In(r, name=name, value=val, update=up, autoname=True)
Similarly, every element of the output list will be upgraded to an
Out instance if necessary:
* a Result instance r will be upgraded like Out(r)
"""
def wrap_in(input):
if isinstance(input, (SymbolicInput, SymbolicInputKit)):
return input
elif isinstance(input, gof.Result):
return In(input)
elif isinstance(input, (list, tuple)):
orig = input
if not input:
raise TypeError("Nonsensical input specification: %s" % input)
if isinstance(input[0], str):
name = input[0]
input = input[1:]
else:
name = None
if isinstance(input[0], (list, tuple)):
if len(input[0]) != 2 or len(input) != 2:
raise TypeError("Invalid input syntax: %s (check documentation or use an In instance)" % orig)
(result, update), value = input
elif isinstance(input[0], gof.Result):
if len(input) == 1:
result, update, value = input[0], None, None
elif len(input) == 2:
(result, value), update = input, None
else:
raise TypeError("Invalid input syntax: %s (check documentation or use an In instance)" % orig)
elif isinstance(input[0], (SymbolicInput, SymbolicInputKit)):
if len(input) == 1:
return input[0]
elif len(input) == 2:
input, value = input
if name is not None: input.name = name
input.value = value
return input
return In(result, name=name, value=value, update=update)
else:
raise TypeError("Unknown input type:", type(input), input)
def wrap_out(output):
if isinstance(output, SymbolicOutput):
return output
elif isinstance(output, gof.Result):
return SymbolicOutput(output)
else:
raise TypeError("Unknown output type: %s (%s)" % (type(output), output))
inputs = map(wrap_in, inputs)
outputs = map(wrap_out, outputs) if isinstance(outputs, (list, tuple)) else wrap_out(outputs)
fn = FunctionMaker(inputs, outputs, mode, accept_inplace = accept_inplace).create([getattr(input, 'value', None) for input in inputs])
return fn
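# Illustrative end-to-end usage of the upgrade rules above (hedged sketch,
# assuming tensor scalars; not part of the original commit):
#
#   x, y, s = tensor.scalars('xys')
#   f = function([x, (y, 2.0), ((s, s + x), 0.0)], x + y)
#   f(1.0)        # -> 3.0 (y falls back to its default; s accumulates x)
#   f(1.0, 5.0)   # -> 6.0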
...@@ -210,10 +863,6 @@ class OpFromGraph(gof.Op):
    """
    def __init__(self, inputs, outputs, grad_depth = 1, **kwargs):
        self.fn = function(inputs, outputs, **kwargs)
        self.inputs = inputs
        self.outputs = outputs
...@@ -252,263 +901,3 @@ class OpFromGraph(gof.Op):
        else:
            raise NotImplementedError
# class State:
# def __init__(self, init, next = None):
# self.init = init
# self.next = next
# class StateFunctionFactory(Function):
# def __init__(self, inputs, outputs, states, **kwargs):
# states_
# inputs = [state.init for state in states] + inputs
# outputs = [state.next for ]
# class Function:
# """
# An 'executable' compiled from a graph
# This class is meant to be used as a function: the idea is to use
# __call__(*args) and it will compute your graph's function on the args and
# return the value(s) corresponding to the output(s).
# @ivar fn: the return value of L{linker.make_function}(False)
# Additional Attributes if keep_locals == True
# inputs - inputs in the env
# outputs - outputs in the env
# features - features to add to the env
# linker_cls - the linker class
# linker - the linker allocated from env
# env - The env passed to the linker
# @note: B{Re: Memory ownership, aliasing, re-use:}
# That the objects returned by L{Function.__call__}(self, *args) are owned
# by self, and that in general these outputs might be overwritten (in-place)
# by subsequent calls to L{self.__call__}(*args). Why? This behaviour is
# necessary for inplace operations to work, and L{Function}'s linker might re-use
# memory from one execution to the next in order to make each execution faster.
# """
# def __init__(self, inputs, outputs,
# features = [],
# optimizer = default_optimizer,
# linker_cls = gof.link.PerformLinker,
# profiler = None,
# unpack_single = True,
# except_unreachable_input = True,
# keep_locals = True):
# """
# Copy the graph, optimize, and link it.
# @param inputs: a list of results to be this function's inputs
# @param outputs: a list of results to be this function's outputs
# @param features: features to add to the env
# @param optimizer: an optimizer to apply to the copied graph, before linking
# @param linker_cls: a callable that takes an env and returns a Linker
# @param profiler: a L{Profiler} for the produced function (only valid if the
# linker_cls's make_function takes a profiler argument)
# @param unpack_single: unpack return value lists of length 1. @see: L{Linker.make_function}
# @param keep_locals: add the local variables from __init__ to the class
# """
# _mark_indestructible(outputs)
# if len(inputs) != len(set(inputs)):
# raise Exception('duplicate inputs')
# if len(outputs) != len(set(outputs)):
# raise Exception('duplicate outputs')
# #evaluate the orphans, and put these values into the clone of the env
# orphans = list(gof.graph.results_and_orphans(inputs, outputs,
# except_unreachable_input=except_unreachable_input)[1])
# orphan_data = eval_outputs(orphans, unpack_single=False)
# #print 'orphans', orphans
# #print 'ops', gof.graph.ops(inputs, outputs)
# env = gof.env.Env(inputs, outputs)
# #print 'orphans in env', env.orphans()
# env, equiv = env.clone_get_equiv(clone_inputs=True)
# for feature in features:
# env.extend(feature(env))
# env.extend(gof.DestroyHandler(env))
# #print 'orphans after clone', env.orphans()
# for d, o in zip(orphan_data, [equiv[orphan] for orphan in orphans]):
# #print 'assigning orphan value', d
# #o.data = d
# new_o = gof.Constant(o.type, d)
# env.replace(o, new_o)
# assert new_o in env.orphans
# # optimize and link the cloned env
# if None is not optimizer:
# optimizer(env)
# linker = linker_cls(env)
# if keep_locals:# useful flag for debugging!
# self.__dict__.update(locals())
# if profiler is None:
# self.fn = linker.make_function(unpack_single=unpack_single)
# else:
# self.fn = linker.make_function(unpack_single=unpack_single,
# profiler=profiler)
# self.inputs = env.inputs
# self.outputs = env.outputs
# self.features = features
# self.optimizer = optimizer
# self.linker_cls = linker_cls
# self.profiler = profiler
# self.unpack_single = unpack_single
# self.except_unreachable_input = except_unreachable_input
# self.keep_locals = keep_locals
# def __call__(self, *args):
# return self.fn(*args)
# def eval_outputs(outputs,
# features = [],
# optimizer = None,
# linker_cls = gof.link.PerformLinker,
# unpack_single = True,
# keep_locals = True):
# if len(outputs) == 0:
# #print 'returning with no inputs'
# if unpack_single:
# return None
# else:
# return []
# inputs = gof.graph.inputs(outputs)
# if any(not isinstance(input, gof.Constant) for input in inputs):
# raise TypeError("Cannot evaluate outputs because some of the leaves are not Constant.", outputs)
# in_data = [i.data for i in inputs]
# #print 'in_data = ', in_data
# if len(inputs) != len(in_data):
# raise Exception('some input data is unknown')
# env = gof.env.Env(inputs, outputs)
# env.replace_all(dict([(i, i.type()) for i in inputs]))
# env = env.clone(clone_inputs=True)
# _mark_indestructible(env.outputs)
# if None is not optimizer:
# optimizer(env)
# linker = linker_cls(env)
# fn = linker.make_function(unpack_single=unpack_single)
# rval = fn(*in_data)
# return rval
# StateFunction([x, y], [e], (w, w + lr * bla()))
# class _Function:
# def __init__(self,
# inputs,
# outputs,
# optimizer,
# linker_type = 'py',
# unpack_single = True,
# except_unreachable_input = True,
# disposable_inputs = [],
# borrow_outputs = []):
# _mark_indestructible(outputs)
# if len(inputs) != len(set(inputs)):
# raise Exception('duplicate inputs')
# if len(outputs) != len(set(outputs)):
# raise Exception('duplicate outputs')
# orphans = list(gof.graph.results_and_orphans(inputs, outputs,
# except_unreachable_input=except_unreachable_input)[1])
# orphan_data = eval_outputs(orphans, unpack_single=False)
# env = gof.env.Env(inputs, outputs, features + [gof.EquivTool], consistency_check = True)
# env = env.clone(clone_inputs=True)
# for d, o in zip(orphan_data, [env.equiv(orphan) for orphan in orphans]):
# o.data = d
# # optimize and link the cloned env
# if None is not optimizer:
# optimizer(env)
# linker = linker_cls(env)
# if keep_locals:# useful flag for debugging!
# self.__dict__.update(locals())
# if profiler is None:
# self.fn = linker.make_function(inplace=True,
# unpack_single=unpack_single)
# else:
# self.fn = linker.make_function(inplace=True,
# unpack_single=unpack_single,
# profiler=profiler)
# self.inputs = env.inputs
# self.outputs = env.outputs
# self.features = features
# self.optimizer = optimizer
# self.linker_cls = linker_cls
# self.profiler = profiler
# self.unpack_single = unpack_single
# self.except_unreachable_input = except_unreachable_input
# self.keep_locals = keep_locals
# def __call__(self, *args):
# return self.fn(*args)
# def __copy__(self):
# return Function(self.inputs, self.outputs,
# features = self.features,
# optimizer = self.optimizer,
# linker_cls = self.linker_cls,
# profiler = self.profiler,
# unpack_single = self.unpack_single,
# except_unreachable_input = self.except_unreachable_input,
# keep_locals = self.keep_locals)
# class StateFunction:
# def __init__(self, inputs, outputs, *states):
# in_states, out_states = zip(*states)
# env =
...@@ -7,6 +7,7 @@ import scalar
from scalar import Scalar
import gof
from gof.python25 import all
from copy import copy

# tensor depends on elemwise to provide definitions for several ops
...@@ -231,6 +232,15 @@ class Elemwise(Op):
        else:
            self.ufunc = None
def __getstate__(self):
d = copy(self.__dict__)
d.pop('ufunc')
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.ufunc = numpy.frompyfunc(self.scalar_op.impl, self.scalar_op.nin, self.scalar_op.nout)
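    # The ufunc built by numpy.frompyfunc is not picklable, hence it is dropped
    # in __getstate__ and rebuilt from scalar_op on load. Illustrative check:
    #
    #   import cPickle
    #   op2 = cPickle.loads(cPickle.dumps(op))  # op2.ufunc is regenerated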
    def make_node(self, *inputs):
        """
        If the inputs have different numbers of dimensions, their shape
...
...@@ -12,7 +12,7 @@ from graph import \
    Apply, Result, Constant, Value, view_roots

from link import \
    Container, Linker, LocalLinker, PerformLinker, WrapLinker, Profiler

from op import \
    Op
...@@ -22,7 +22,8 @@ from opt import \
    MergeOptimizer, MergeOptMerge, \
    LocalOptimizer, local_optimizer, LocalOptGroup, LocalOpKeyOptGroup, \
    OpSub, OpRemove, PatternSub, \
    NavigatorOptimizer, TopoOptimizer, OpKeyOptimizer, \
    PureThenInplaceOptimizer

from toolbox import \
    Bookkeeper, History, Validator, ReplaceValidate, NodeFinder, PrintListener
...
...@@ -631,8 +631,8 @@ class CLinker(link.Linker):
                          input_storage,
                          output_storage)
        return thunk, \
            [link.Container(input, storage) for input, storage in zip(self.env.inputs, input_storage)], \
            [link.Container(output, storage, True) for output, storage in zip(self.env.outputs, output_storage)], \
            error_storage

    def make_thunk(self, input_storage = None, output_storage = None):
...@@ -881,8 +881,8 @@ class OpWiseCLinker(link.LocalLinker):
        f = link.streamline(env, thunks, order, no_recycling = no_recycling, profiler = profiler)
        return f, [link.Container(input, storage) for input, storage in zip(env.inputs, input_storage)], \
            [link.Container(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \
            thunks, order
...@@ -948,6 +948,7 @@ class DualLinker(link.Linker):
        no_recycling = self.no_recycling
        _f, i1, o1, thunks1, order1 = link.PerformLinker().accept(env, no_recycling = no_recycling).make_all(**kwargs)
        kwargs.pop('input_storage', None)
        _f, i2, o2, thunks2, order2 = OpWiseCLinker().accept(env, no_recycling = no_recycling).make_all(**kwargs)
        def f():
...
...@@ -184,7 +184,7 @@ class Result(utils.object2):
            else:
                return str(self.owner.op) + "." + str(self.index)
        else:
            return "<%s>" % str(self.type)
    def __repr__(self):
        return str(self)
    def clone(self):
...@@ -422,8 +422,6 @@ def clone_get_equiv(i, o, copy_inputs_and_orphans = True):
        else:
            d[input] = input
    for apply in io_toposort(i, o):
        for input in apply.inputs:
            if input not in d:
...@@ -438,6 +436,10 @@ def clone_get_equiv(i, o, copy_inputs_and_orphans = True):
        for output, new_output in zip(apply.outputs, new_apply.outputs):
            d[output] = new_output
    for output in o:
        if output not in d:
            d[output] = output.clone()
    return d
def general_toposort(r_out, deps, debug_print = False):
...
"""WRITEME""" """WRITEME"""
import utils import utils
import graph import graph
from type import Type
import sys, traceback import sys, traceback
from copy import copy from copy import copy
...@@ -109,27 +110,32 @@ class Linker(object): ...@@ -109,27 +110,32 @@ class Linker(object):
return execute return execute
class Filter(object): class Container(object):
"""WRITEME""" def __init__(self, r, storage, readonly = False, strict = False, name = None):
def __init__(self, r, storage, readonly = False, strict = False, trace = ()): #self.r = r
self.r = r if isinstance(r, Type):
self.type = r.type self.type = r
else:
self.type = r.type
self.name = name or r.name
self.storage = storage self.storage = storage
self.readonly = readonly self.readonly = readonly
self.strict = strict self.strict = strict
def __get(self): def __get(self):
return self.storage[0] return self.storage[0]
def __set(self, value): def __set(self, value):
if self.readonly:
raise Exception("Cannot set readonly storage: %s" % self.name)
try: try:
if self.readonly:
raise Exception("Cannot set readonly storage.")
if self.strict: if self.strict:
self.storage[0] = self.type.filter(value, strict = True) self.storage[0] = self.type.filter(value, strict = True)
else: else:
self.storage[0] = self.type.filter(value) self.storage[0] = self.type.filter(value)
except: except Exception, e:
raise_with_op(self.r) e.args = e.args + (self.name,)
raise
data = property(__get, __set) data = property(__get, __set)
value = property(__get, __set)
def __str__(self): def __str__(self):
return "<" + str(self.storage[0]) + ">" return "<" + str(self.storage[0]) + ">"
def __repr__(self): def __repr__(self):
...@@ -260,8 +266,8 @@ class PerformLinker(LocalLinker): ...@@ -260,8 +266,8 @@ class PerformLinker(LocalLinker):
f = streamline(env, thunks, order, no_recycling = no_recycling, profiler = profiler) f = streamline(env, thunks, order, no_recycling = no_recycling, profiler = profiler)
return f, [Filter(input, storage) for input, storage in zip(env.inputs, input_storage)], \ return f, [Container(input, storage) for input, storage in zip(env.inputs, input_storage)], \
[Filter(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \ [Container(output, storage, True) for output, storage in zip(env.outputs, output_storage)], \
thunks, order thunks, order
...@@ -333,7 +339,9 @@ class WrapLinker(Linker): ...@@ -333,7 +339,9 @@ class WrapLinker(Linker):
def make_thunk(self, **kwargs): def make_thunk(self, **kwargs):
no_recycling = self.no_recycling no_recycling = self.no_recycling
make_all = [l.make_all(**kwargs) for l in self.linkers] make_all = [self.linkers[0].make_all(**kwargs)]
kwargs.pop('input_storage', None)
make_all += [l.make_all(**kwargs) for l in self.linkers[1:]]
fns, input_lists, output_lists, thunk_lists, order_lists \ fns, input_lists, output_lists, thunk_lists, order_lists \
= zip(*make_all) = zip(*make_all)
......
...@@ -12,6 +12,7 @@ import toolbox
import op
from copy import copy
from collections import deque
import destroyhandler as dh

class Optimizer:
...@@ -61,8 +62,7 @@ class FromFunctionOptimizer(Optimizer):
    def __init__(self, fn):
        self.apply = fn
    def add_requirements(self, env):
        env.extend(toolbox.ReplaceValidate())

def optimizer(f):
    """WRITEME"""
...@@ -215,7 +215,7 @@ class FromFunctionLocalOptimizer(LocalOptimizer):
    def __init__(self, fn):
        self.transform = fn
    def add_requirements(self, env):
        env.extend(toolbox.ReplaceValidate())

def local_optimizer(f):
    """WRITEME"""
...@@ -624,6 +624,21 @@ def check_chain(r, *chain):
############
### Misc ###
############
class PureThenInplaceOptimizer(Optimizer):
def __init__(self, pure, inplace):
self.pure = pure
self.inplace = inplace
def apply(self, env):
self.pure(env)
env.extend(dh.DestroyHandler())
self.inplace(env)
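# Illustrative usage (sketch; the optimizer names are placeholders): run a pure
# rewrite phase first, then install the DestroyHandler so that only the second
# phase may introduce destructive (inplace) operations.
#
#   opt = PureThenInplaceOptimizer(pure_phase, inplace_phase)
#   opt.apply(env)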
...
...@@ -63,6 +63,9 @@ class CLinkerType(object):
        """
        raise AbstractFunctionError()

    def c_init(self, name, sub):
        raise AbstractFunctionError()

    def c_extract(self, name, sub):
        """Required: Return c code to extract a PyObject * instance.
...
...@@ -86,7 +86,7 @@ class Scalar(Type):
        return str(self.dtype)
    def __repr__(self):
        return "Scalar(%s)" % self.dtype
    def c_literal(self, data):
        if 'complex' in self.dtype:
...@@ -252,16 +252,17 @@ def upcast_out(*types):
    return Scalar(dtype = Scalar.upcast(*types)),
def same_out(type):
    return type,
class transfer_type:
    def __init__(self, i):
        assert type(i) == int
        self.i = i
    def __call__(self, *types):
        return types[self.i],
class specific_out:
    def __init__(self, *spec):
        self.spec = spec
    def __call__(self, *types):
        return self.spec
def int_out(*types):
    return int64,
def float_out(*types):
...@@ -283,7 +284,7 @@ class ScalarOp(Op):
        self.name = name
        if output_types_preference is not None:
            if not callable(output_types_preference):
                raise TypeError("Expected a callable for the 'output_types_preference' argument to %s. (got: %s)" % (self.__class__, output_types_preference))
        self.output_types_preference = output_types_preference
    def make_node(self, *inputs):
...
...@@ -23,7 +23,6 @@ from gof.python25 import partial
### set up the external interface
from elemwise import Elemwise, DimShuffle, CAReduce, Sum

_constructor_list = []
...@@ -113,7 +112,7 @@ def value(x):
class Tensor(Type):
    """Symbolic `Type` representing a numpy.ndarray value."""
    def __init__(self, dtype, broadcastable, name = None):
        """Initialize self.dtype and self.broadcastable.

        :Parameters:
...@@ -126,11 +125,13 @@ class Tensor(Type):
            must be 1. Secondly, the length of this list is the number of
            dimensions that an associated value must have. See
            :doc:`broadcasting` for an explanation of how this list is used.
         - `name`: str
            Optional name for this type.
        """
        self.dtype = str(dtype)
        self.broadcastable = tuple(broadcastable)
        self.dtype_specs() # error checking is done there
        self.name = name
    def filter(self, data, strict = False):
        """Convert `data` to something which can be associated to a `TensorResult`.
...@@ -206,10 +207,21 @@ class Tensor(Type):
        return TensorResult(self, name = name)
    def __str__(self):
        if self.name:
            return self.name
        else:
            b = self.broadcastable
            #bcast = str(self.broadcastable)
            bcast = {(): 'scalar',
                     (False,): 'vector',
                     (False, True): 'col',
                     (True, False): 'row',
                     (False, False): 'matrix'}.get(b, "%iD" % len(b) if not any(b) else str(b))
            return "Tensor(%s, %s)" % (str(self.dtype), bcast)
    def __repr__(self):
        return str(self)
        #"Tensor{%s, %s}" % (str(self.dtype), str(self.broadcastable))
    def c_declare(self, name, sub):
        """Override `CLinkerOp.c_declare` """
...@@ -1305,11 +1317,12 @@ class MakeVector(Op):
    def __init__(self, stype):
        self.stype = stype
    def make_node(self, *inputs):
        inputs = map(as_tensor, inputs)
        assert all(a.type == self.stype for a in inputs)
        return Apply(self, inputs, [Tensor(broadcastable = (False,),
                                           dtype = self.stype.dtype)()])
    def perform(self, node, inputs, (out,)):
        out[0] = numpy.asarray(inputs)
    def grad(self, inputs, (gout,)):
        return [None]*len(inputs)
...@@ -1374,6 +1387,16 @@ class Concatenate(Op):
        [slice(None)] * (n_dims - axis - 1)] \
        for k in range(len(sizes_along_axis))]
def get_vector_length(v):
if isinstance(v, gof.Constant) and v.type.ndim == 1:
return len(v.data)
elif v.owner and isinstance(v.owner.op, MakeVector):
return len(v.owner.inputs)
elif v.owner and v.owner.op == shape:
return v.owner.inputs[0].type.ndim
else:
return None
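# Illustrative cases (sketch): the length is statically known for 1-d constants
# (len of the data), for MakeVector results (number of inputs), and for shape(x)
# (x's fixed number of dimensions); anything else yields None.
#
#   get_vector_length(shape(x))   # -> x.type.ndim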
def concatenate(tensors, axis=0):
    """
    Convenience function to concatenate `Tensor`s along the given axis.
...@@ -1395,6 +1418,7 @@ def concatenate(tensors, axis=0):
    if not hasattr(concatenate, 'obj'):
        concatenate.obj = Concatenate()
    return concatenate.obj(axis, *tensors)
class VerticalStack(Op):
    """
...
...@@ -7,6 +7,7 @@ import tensor as T
import numpy as N
import operator
import itertools
import sys

# Utilities
...@@ -40,8 +41,7 @@ dot_to_gemm = gof.PatternSub((T.dot, 'a', 'b'),
    allow_multiple_clients = False)

def _insert_inplace_optimizer(env):
    """
    Usage: inplace_optimizer.optimize(env)
...@@ -66,14 +66,16 @@ def insert_inplace_optimizer(self, env):
            for candidate_input in candidate_inputs:
                inplace_pattern = dict(baseline, **{candidate_output: candidate_input})
                try:
                    new = Elemwise(op.scalar_op, inplace_pattern).make_node(*node.inputs)
                    env.replace_all_validate(zip(node.outputs, new.outputs))
                except Exception, e:
                    continue
                candidate_inputs.remove(candidate_input)
                node = new
                baseline = inplace_pattern
                break

insert_inplace_optimizer = gof.optimizer(_insert_inplace_optimizer)

inplace_optimizer = gof.SeqOptimizer(out2in(gemm_pattern_1),
                                     out2in(dot_to_gemm),
...
...@@ -4,146 +4,153 @@ import tensor
import numpy
import functools
from compile import SymbolicInputKit, SymbolicInput
from copy import copy

class RandomFunction(gof.Op):

    def __init__(self, fn, outtype, *args, **kwargs):
        """
        fn: a random function with the same signature as functions in numpy.random.RandomState
        outtype: the type of the output
        args: a list of default arguments for the function
        kwargs: if the 'inplace' key is there, its value will be used to determine if the op operates inplace or not
        """
        self.fn = fn
        self.outtype = outtype
        self.args = tuple(tensor.as_tensor(arg) for arg in args)
        self.inplace = kwargs.pop('inplace', False)
        if self.inplace:
            self.destroy_map = {0: [0]}

    def make_node(self, r, shape, *args):
        """
        in: r -> RandomState (gof.generic),
            shape -> lvector
            args -> the arguments expected by the numpy function
        out: r2 -> the new RandomState (gof.generic)
             out -> the random numbers we generated
        """
        args = map(tensor.as_tensor, args)
        shape = tensor.as_tensor(shape)
        assert shape.type == tensor.lvector
        assert len(args) <= len(self.args)
        args += (None,) * (len(self.args) - len(args))
        inputs = []
        for arg, default in zip(args, self.args):
            assert arg is None or default.type.dtype == arg.type.dtype
            input = default if arg is None else arg
            inputs.append(input)
        return gof.Apply(self,
                         [r, shape] + inputs,
                         [r.type(), self.outtype()])

    def perform(self, node, inputs, (rout, out)):
        r, shape, args = inputs[0], inputs[1], inputs[2:]
        assert self.outtype.ndim == len(shape)
        if not self.inplace:
            r = copy(r)
        rout[0] = r
        out[0] = self.fn(r, *(args + [shape]))

    def __eq__(self, other):
        return type(self) == type(other) \
            and self.fn == other.fn \
            and self.outtype == other.outtype \
            and self.args == other.args \
            and self.inplace == other.inplace

    def __hash__(self):
        return hash(self.fn) ^ hash(self.outtype) ^ hash(self.args) ^ hash(self.inplace)

def random_function(fn, dtype, *rfargs, **rfkwargs):
    """
    Returns a wrapper around RandomFunction which automatically infers the number
    of dimensions of the output from the given shape. If the shape cannot be inferred,
    the user can give an integer as first argument, which will be interpreted as the
    number of dimensions.
    The number of dimensions for the following shape arguments can be inferred:
    - shape(x)
    - make_lvector(x, y, z, ...)
    - constants
    """
    def f(ndim, *args, **kwargs):
        if isinstance(ndim, int):
            r, shape, args = args[0], args[1], args[2:]
        else:
            r, shape, args = ndim, args[0], args[1:]
            shape = tensor.as_tensor(shape)
            ndim = tensor.get_vector_length(shape)
            if ndim is None:
                raise ValueError('Cannot infer the number of dimensions from the shape argument.')
        # note: rf should probably be cached for future use
        rf = RandomFunction(fn, tensor.Tensor(dtype = dtype, broadcastable = (False,)*ndim), *rfargs, **rfkwargs)
        return rf(r, shape, *args, **kwargs)
    return f

RS = numpy.random.RandomState

# we need to provide defaults for all the functions in order to infer the argument types...
uniform = random_function(RS.uniform, 'float64', 0.0, 1.0)
binomial = random_function(RS.binomial, 'int64', 1, 0.5)
normal = random_function(RS.normal, 'float64', 0.0, 1.0)
random_integers = random_function(RS.random_integers, 'int64', 0, 1)

@gof.local_optimizer
def random_make_inplace(node):
    op = node.op
    if isinstance(op, RandomFunction) and not op.inplace:
        return RandomFunction(op.fn, op.outtype, *op.args, **dict(inplace=True)).make_node(*node.inputs).outputs

import sys
from functools import partial
from collections import deque

class RandomKit(SymbolicInputKit):

    def __init__(self, name, value = None):
        super(RandomKit, self).__init__(name)
        self.value = value

    def gen(self, op, *args, **kwargs):
        r = gof.generic()
        new_r, out = op(r, *args, **kwargs)
        self.add_input(SymbolicInput(r, update = new_r))
        out.rng = r
        out.auto = self
        return out

    def distribute(self, value, indices, containers):
        rg = partial(numpy.random.RandomState(value).randint, sys.maxint)
        elems = deque(zip(indices, containers))
        i = 0
        while elems:
            index, container = elems.popleft()
            while i <= index:
                curr = rg()
                i += 1
            rs = numpy.random.RandomState(int(curr))
            container.data = rs

    def binomial(self, *args, **kwargs):
        return self.gen(binomial, *args, **kwargs)

    def uniform(self, *args, **kwargs):
        return self.gen(uniform, *args, **kwargs)

    def normal(self, *args, **kwargs):
        return self.gen(normal, *args, **kwargs)

    def random_integers(self, *args, **kwargs):
        return self.gen(random_integers, *args, **kwargs)

rk = RandomKit('rk', 0xBAD5EED)
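# Illustrative usage of the kit above (hedged sketch, not in the original
# commit): each draw records a fresh generic RandomState input plus its update,
# and distribute() later seeds one numpy.random.RandomState per recorded draw
# from the kit's single seed value.
#
#   u = rk.uniform(tensor.shape(x))   # assumes x is a Result with known ndim
#   # passing rk among the inputs of function(...) wires up all recorded states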