提交 d68c9e07 authored 作者: Olivier Breuleux's avatar Olivier Breuleux

clever commit message

上级 928ad5c0
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
import gof import gof
import opt import opt
from copy import copy
#prog(inputs, outputs) #prog(inputs, outputs)
#single(*outputs) #single(*outputs)
...@@ -52,8 +53,8 @@ class prog(gof.Prog): ...@@ -52,8 +53,8 @@ class prog(gof.Prog):
def to_func(inputs, outputs): def to_func(inputs, outputs):
# print gof.Env(inputs, outputs).io_toposort() # print gof.Env(inputs, outputs).io_toposort()
## p = prog([copy(input) for input in inputs], gof.graph.clone(inputs, outputs))
p = prog(inputs, outputs) p = prog(inputs, outputs)
print p.env
def f(*args): def f(*args):
for input, value in zip(inputs, args): for input, value in zip(inputs, args):
p[input] = value p[input] = value
......
...@@ -3,6 +3,7 @@ import gof ...@@ -3,6 +3,7 @@ import gof
from gof import current_mode, set_mode, build_mode, eval_mode, build_eval_mode, pop_mode, UNCOMPUTED, UNDEFINED, PythonR from gof import current_mode, set_mode, build_mode, eval_mode, build_eval_mode, pop_mode, UNCOMPUTED, UNDEFINED, PythonR
import numpy import numpy
import weakref
from copy import copy as pycopy from copy import copy as pycopy
...@@ -51,6 +52,9 @@ def print_graph(*rs): ...@@ -51,6 +52,9 @@ def print_graph(*rs):
print as_string(*rs) print as_string(*rs)
literals_db = {}
literals_id_db = weakref.WeakValueDictionary()
def input(x): def input(x):
if isinstance(x, numpy.ndarray): if isinstance(x, numpy.ndarray):
return NumpyR(x) return NumpyR(x)
...@@ -72,37 +76,60 @@ def wrap(x): ...@@ -72,37 +76,60 @@ def wrap(x):
return wrap(x._obj) return wrap(x._obj)
else: else:
return literal(x) return literal(x)
def _hashable(x):
try:
x in {}
return True
except TypeError: # x is unhashable
return False
def _literal_hashable(x):
    """Return the cached constant node for the hashable value ``x``.

    On first use, wraps ``x`` with ``input`` and marks it constant, then
    caches it in ``literals_db`` keyed by value, so the same literal value
    is always represented by the same graph node.
    """
    try:
        # EAFP: single dict lookup instead of `in` followed by `[]`.
        return literals_db[x]
    except KeyError:
        r = input(x)
        r.constant = True  # mark as a literal so it can be treated as immutable
        literals_db[x] = r
        return r
# elif isinstance(x, numpy.ndarray): # elif isinstance(x, numpy.ndarray):
# return NumpyR(x) # ret = NumpyR(x, constant = True)
# elif isinstance(x, (int, float)): # elif isinstance(x, (int, float)):
# return NumpyR(numpy.array(x)) # ret = NumpyR(numpy.array(x), constant = True)
# elif isinstance(x, gof.Result):
# raise TypeError("%s is already a result." % x)
# else: # else:
# return PythonR(x) # return PythonR(x, constant = True)
def literal(x): # if hashable:
try: # literals_db[x] = ret
present = x in gof.literals_db
hashable = True
except TypeError: # x is unhashable
present = False
hashable = False
if present: # return ret
return gof.literals_db.get(x)
elif isinstance(x, numpy.ndarray):
ret = NumpyR(x, constant = True)
elif isinstance(x, (int, float)):
ret = NumpyR(numpy.array(x), constant = True)
elif isinstance(x, gof.Result):
raise TypeError("%s is already a result." % x)
else:
return PythonR(x, constant = True)
if hashable: def _literal_unhashable(x):
gof.literals_db[x] = ret idx = id(x)
if idx in literals_id_db:
return literals_id_db[idx]
else:
r = input(x)
r.constant = True
literals_id_db[idx] = r
return r
return ret
def literal(x):
    """Wrap ``x`` as a cached constant node.

    Hashable values are cached by value; unhashable ones by identity.
    """
    make = _literal_hashable if _hashable(x) else _literal_unhashable
    return make(x)
inplace = gof.Destroyer inplace = gof.Destroyer
...@@ -250,6 +277,22 @@ class proto_add_scalar(omega_op): ...@@ -250,6 +277,22 @@ class proto_add_scalar(omega_op):
class add_scalar(proto_add_scalar): class add_scalar(proto_add_scalar):
impl = tensor_scalar_op(numpy.ndarray.__add__) impl = tensor_scalar_op(numpy.ndarray.__add__)
# def c_impl(x, s, z):
# """
# if (*__z == NULL) {
# *__z = new ndarray
# }
# ndarray& z = **__z
# """
# return """
# z.resize_like(x);
# for (int i = 0; i < z.size(); i++) {
# z[i] = x[i] * s;
# }
# return z;
# """
class iadd_scalar(proto_add_scalar, inplace): class iadd_scalar(proto_add_scalar, inplace):
impl = tensor_scalar_op(numpy.ndarray.__iadd__) impl = tensor_scalar_op(numpy.ndarray.__iadd__)
......
...@@ -23,15 +23,15 @@ class Grad(object): ...@@ -23,15 +23,15 @@ class Grad(object):
self.add_output(key,val) self.add_output(key,val)
def __contains__(self, item): def __contains__(self, item):
return id(item) in self.map return item in self.map
def __getitem__(self, item): def __getitem__(self, item):
"""Map item to its id and retrieve it.""" """Map item to its id and retrieve it."""
return self.map[id(item)] return self.map[core.wrap(item)]
def __setitem__(self, item, val): def __setitem__(self, item, val):
"""Map item to its id and store internally.""" """Map item to its id and store internally."""
self.map[id(item)] = val self.map[item] = val
def add_output(self, r, dr): def add_output(self, r, dr):
self.add(r, dr) self.add(r, dr)
...@@ -134,6 +134,7 @@ def grad(cost, param=None, cost_grad = 1.0): ...@@ -134,6 +134,7 @@ def grad(cost, param=None, cost_grad = 1.0):
else: else:
return rval(param) return rval(param)
# #
# UNIT TEST # UNIT TEST
# #
...@@ -141,6 +142,7 @@ import unittest ...@@ -141,6 +142,7 @@ import unittest
import numpy import numpy
import compile import compile
class _testCase (unittest.TestCase): class _testCase (unittest.TestCase):
def setUp(self): def setUp(self):
numpy.random.seed(1) numpy.random.seed(1)
...@@ -194,6 +196,18 @@ class _testCase (unittest.TestCase): ...@@ -194,6 +196,18 @@ class _testCase (unittest.TestCase):
self.assertEqual(('2.67327580893', '0.000438649434819'), self.assertEqual(('2.67327580893', '0.000438649434819'),
self.matinv_compiled(3)) self.matinv_compiled(3))
def test_grad_wrt_ndarray_pointer(self):
    """
    Tests if it is possible to index the gradient by a pointer to a ndarray
    that is used as a node of the computation graph.
    """
    # ndarrays are unhashable, so literal caching falls back to an id()-keyed
    # table; the same array object must therefore map to the same graph node.
    a = numpy.ones((4, 4))
    b = numpy.ones((4, 4))
    c = numpy.ones((4, 4))
    expr = core.sum(core.dot(core.add(a, b), c))
    g = grad(expr)
    # Must not raise: Grad.__getitem__ wraps the key via core.wrap before
    # the lookup, so the raw ndarray resolves to its cached node.
    g[a]
def tearDown(self): def tearDown(self):
core.pop_mode() core.pop_mode()
......
# import gof
# gof.stealth.method_wrap(int, '__add__', [2, 1], )
# x = gof.stealth.wrap(3)
# y = gof.stealth.wrap(4)
# print x + y
import gof
import core
import numpy
import compile
import grad
# a = core.NumpyR(numpy.ones((3, 3)))
# b = core.NumpyR(numpy.ones((3, 3)))
# w = core.dot #core.wrapper(numpy.dot)
# core.start_build()
# r = a * (b * b)
# core.end_build()
# #r = w(a, w(b, b))
# print r
# print r.owner
# env = gof.Env([a, b], [r._obj])
# print env
# print r
# gof.ThunkLinker()(env)()
# print r
# core.start_build()
# a += b + c
# a = a + b
# a += a + core.transpose(b)
# core.end_build()
# # env = gof.Env(gof.graph.inputs([a]), [a])
# # print env
# # gof.ThunkLinker()(env)()
# # print a
# print gof.Env(gof.graph.inputs([a]), [a])
# prog = compile.single(a)
# print prog.env
# prog()
# print a
############################
# #core.build_mode()
# dim = core.wrap(())
# dim2 = core.wrap((2, 2))
# a = core.zeros(dim, dtype='int32') #(core.NumpyR(numpy.ones((3, 3))))
# b = core.ones(dim2, 'int32') #(core.NumpyR(numpy.ones((3, 3))))
# c = core.zeros(dim, dtype='int32')
# d = a + (b + b) + c + numpy.ones(())
# e = d + (b * c)
# #core.pop_mode()
# print e
# #print e
# #print gof.graph.ops([dim], [e])
# #1/0
# #print gof.Env([dim], [e])
# #f = compile.to_func([dim], [e])
# # f = compile.to_func([a, b, c], [e])
# # print f(1, 2, 3)
# # #print f((2,2))
############################
# a = core.ones((2, 2))
# b = core.ones((2, 2))
# def f():
# return (a + b) + (a + b)
# r = core.build(f)
# env = gof.Env([a, b], [r])
# print env
# gof.opt.MergeOptimizer().optimize(env)
# print env
# print compile.to_func([a, b], [r])(1, 2)
############################
# a = core.ones((2, 2))
# b = core.ones((2, 2))
# def f():
# return (a + b) + (a + b)
# r = core.build(f)
# g = grad.grad(r, a)
# core.print_graph(g)
# print [id(input) for input in g.owner.inputs]
# print gof.literals_db
# core.print_graph(r)
############################
def dataset_1hot(x, targ, n):
    """Yield the integers range(n), stepping ``x`` through 1-hot vectors.

    Works by side effect: on each iteration, ``x`` is zeroed and position
    ``(0, i % x.shape[1])`` is set to 1 (an infinite cycle over the columns),
    and ``targ[0]`` is set to that hot index.

    :param x: 2-d ndarray mutated in place to hold the 1-hot pattern.
    :param targ: size-1 ndarray receiving the hot column index.
    :param n: number of examples to generate.
    """
    assert targ.size == 1  # exactly one target label is written per step
    n_cols = x.shape[1]  # hoisted loop invariant
    for i in range(n):  # range, not xrange: identical iteration, Python-3 safe
        idx = i % n_cols
        x[:] = 0
        x[0, idx] = 1
        targ[0] = idx
        yield i
class sigmoid(core.omega_op):
    # Elementwise logistic op for the graph.  NOTE(review): impl/grad take no
    # `self` -- omega_op presumably harvests them as plain functions; confirm
    # against core.omega_op before refactoring.
    def impl(x):
        # logistic: 1 / (1 + e^-x), elementwise on ndarrays
        return 1.0 / (1.0 + numpy.exp(-x))
    def grad(x, gz):
        # d sigmoid/dx = sigmoid(x) * (1 - sigmoid(x)), chained with gz
        return gz * sigmoid(x) * (1 - sigmoid(x))
# Seed for reproducible initial weights, then create the input placeholder x
# and the weight node w while in build+eval mode so both become graph nodes.
numpy.random.seed(1)
core.build_eval_mode()
x = core.zeros((1, 10))
w = core.input(numpy.random.rand(10, 15))
core.pop_mode()
# x = numpy.zeros((1, 10))
# w = numpy.random.rand(10, 15)
#print x.data, w.data
# import inspect
# def omega_compile(f):
# args, varargs, kwargs, defaults = inspect.getargspec(f)
# assert not varargs
# assert not kwargs
# def ret(*args):
# outputs = core.build(f, *args)
# return compile.prog(args, outputs)
# return ret
# @omega_compile
def autoassociator(w, x):
    """One autoassociator training step on input ``x`` with weights ``w``.

    Encodes x through w, decodes through w.T, and descends the squared
    reconstruction error with a fixed step of 0.1.  Returns the updated
    weights and the reconstruction error node.
    """
    hidden = sigmoid(core.dot(x, w))
    reconstruction = sigmoid(core.dot(hidden, w.T))
    rec_error = core.sum(core.sqr(x - reconstruction))
    w -= 0.1 * grad.grad(rec_error, w)
    return w, rec_error
# Build the symbolic update graph once, then compile it to a callable that
# maps (w, x) data to (updated weights, reconstruction error).
w2, rec_error = core.build(autoassociator, w, x)
f = compile.to_func([w, x], [w2, rec_error])
#f = compile.single(w2, rec_error)
# dataset_1hot mutates x.data in place, so each call to f sees the next
# 1-hot example; error is reported every 1000 steps.
for i in dataset_1hot(x.data, numpy.ndarray((1, )), 10000):
#    w.up_to_date = True
#    x.up_to_date = True
    w2, rec_error = f(w.data, x.data)
    if not(i % 1000):
        print rec_error
print "done!"
print w.data
############################
# def fun():
# a = core.NumpyR(numpy.zeros(()) + 200)
# # b = numpy.ones(())
# # a = a * core.sqrt(core.isqr(a))
# a = a * core.isqr(a)
# return a
# f = core.build(fun)
# g = compile.to_func(gof.graph.inputs([f]), [f])
############################
# print core.ones((2, 2)) + 1
# print numpy.ones((2, 2)) ** numpy.ones((2, 2))
############################
# x = core.ones((2, 2))
# y = core.zeros((1, 1))
# #print "?", gof.graph.ops([], [x + y])
# # x + x
# # print "1", gof.eval_env#.ops()
# # y + y
# # print "2", gof.eval_env#.ops()
# # x + x
# # print "3", gof.eval_env#.ops()
# core.build_eval_mode()
# x = core.ones((2, 2))
# y = core.ones((2, 2)) * 2
# x += y.T
# # z = core.iadd(x, y)
# # core.iadd(x, y)
# print x
# core.pop_mode()
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论