提交 e3079e41，作者: Hengjean，提交者: Frederic

Began refactoring. Fixed bug.

上级 da877d34
......@@ -1067,24 +1067,12 @@ class FunctionMaker(object):
theano.config.compute_test_value = theano.config.compute_test_value_opt
gof.Op.add_stack_trace_on_call = False
def optimize_graph(fgraph):
'''
params
------
fgraph: the new graph to be optimized, optimized in-place.
{before_opt: after_opt, ....}
return
------
opt_time: timing
'''
from theano.gof.compilelock import get_lock, release_lock
import cPickle
import os.path
graph_db_file = os.path.join(theano.config.compiledir, 'optimized_graphs.pkl')
# the inputs, outputs, and size of the graph to be optimized
inputs_new = fgraph.inputs
outputs_new = fgraph.outputs
inputs_new = [inp.variable for inp in inputs]
outputs_new = [out.variable for out in outputs]
size_new = len(fgraph.apply_nodes)
need_optimize = False
get_lock()
......@@ -1112,14 +1100,14 @@ class FunctionMaker(object):
print 'graph_db is empty'
graph_db = {}
print 'loaded graph_db from %s, size=%d'%(graph_db_file,len(graph_db))
print 'loaded graph_db from %s, size=%d' % (graph_db_file, len(graph_db))
need_optimize = True
# the sole purpose of this loop is to set 'need_optimize'
for i, graph_old in enumerate(graph_db.keys()):
inputs_old = graph_old.inputs
outputs_old = graph_old.outputs
size_old = len(graph_old.apply_nodes)
print 'looping through graph_db %d/%d'%(i+1,len(graph_db))
print 'looping through graph_db %d/%d' % (i + 1, len(graph_db))
# Some heuristics to check is the same graphs have
# already been optimized before.
if len(inputs_new) != len(inputs_old):
......@@ -1140,32 +1128,16 @@ class FunctionMaker(object):
output_new, output_old in zip(outputs_new, outputs_old)):
print 'need to optimize, because outputs are of different types'
continue
elif not len(fgraph.apply_nodes) == len(graph_old.apply_nodes):
elif not size_old == size_new:
print 'need to optimize, because numbers of nodes in graph are different'
continue
else:
# when the both inputs are of the same size
givens = dict(zip(inputs_new, inputs_old))
'''
# strip .fgraph off the givens
i_new = [copy.deepcopy(input_new) for input_new in inputs_new]
i_old = [copy.deepcopy(input_old) for input_old in inputs_old]
for node in i_new:
node.fgraph = None
for node in i_old:
node.fgraph = None
givens = dict(zip(i_new, i_old))
'''
# each element indicates if one of the outputs has the same graph
flags = []
for output_new, output_old, i in zip(outputs_new, outputs_old, range(len(outputs_new))):
print 'loop through outputs node for both graphs'
f1 = output_new.owner.fgraph.clone()
f2 = output_old.owner.fgraph.clone()
# is_same_graph complains if fgraph is not None
t1 = f1.outputs[i]
t1 = output_new
t2 = f2.outputs[i]
def removeAllFgraph(remove):
......@@ -1185,10 +1157,14 @@ class FunctionMaker(object):
del o.fgraph
return remove
t1 = removeAllFgraph(t1)
t2 = removeAllFgraph(t2)
givens = dict(zip(gof.graph.ancestors([t1]),
gof.graph.ancestors([t2])))
givens = dict(zip(gof.graph.inputs([t1]),
gof.graph.inputs([t2])))
temp = dict(zip(gof.graph.inputs([t1]),
gof.graph.inputs([t2])))
for key, value in temp.iteritems():
if key.type != value.type:
del givens[key]
flag = is_same_graph(t1, t2, givens=givens)
flags.append(flag)
......@@ -1221,9 +1197,6 @@ class FunctionMaker(object):
fgraph = graph_db[key]
# release stuff
release_lock()
return opt_time
opt_time = optimize_graph(fgraph)
print 'opt took %s'%opt_time
if profile:
......
......@@ -694,7 +694,7 @@ class VM_Linker(link.LocalLinker):
if k.owner and k.clients:
ls = []
for cl in k.clients:
if cl[0] is not 'output':
if cl[0] != 'output':
ls += cl[0].outputs
dependencies[k] += ls
return dependencies
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论