提交 0d299695 authored 作者: sentient07's avatar sentient07

Cleanups

上级 d73bfb97
......@@ -200,7 +200,7 @@ class SeqOptimizer(Optimizer, list):
opts : List
The List of optimizers to be applied to a node
kw : Dict
Dictionary containing failure call back message
Dictionary containing failure callback. The only supported keyword is `failure_callback`.
"""
if len(opts) == 1 and isinstance(opts[0], (list, tuple)):
......@@ -1238,8 +1238,8 @@ def local_optimizer(tracks, inplace=False, requirements=()):
class LocalOptGroup(LocalOptimizer):
"""
Takes a list of LocalOptimizer and applies them to the nodes.
When apply_all_opts is set to True, it tries multiple optimization to a node.
A node is first optimized and the list of optimizers for the optimized node is applied
If apply_all_opts is False, it returns as soon as the first optimizer has been applied.
Otherwise, it starts again with the node returned by the previous optimizer.
Parameters
----------
......@@ -1291,35 +1291,30 @@ class LocalOptGroup(LocalOptimizer):
if len(self.opts) == 0:
return
def compute_opts(node):
    """Return the list of local optimizers that track this node.

    Gathers the optimizers registered for the node's op class, for the
    op instance itself, and under the wildcard (``None``) key of
    ``self.track_map``.
    """
    # Build a brand-new list with ``+`` instead of ``+=``.  The previous
    # version did ``opts = self.track_map.get(...)`` followed by
    # ``opts += ...``: since ``dict.get`` returns the stored list object
    # itself, ``+=`` extended it in place and polluted ``track_map``
    # with duplicate entries on every call.
    return (self.track_map.get(type(node.op), []) +
            self.track_map.get(node.op, []) +
            self.track_map.get(None, []))
def apply_mult_opts(opt_list, node, multiple_opts=False):
def apply_mult_opts(node, multiple_opts=False):
repl = False
opts = []
opts.append(self.track_map.get(type(node.op), []) + self.track_map.get(node.op, []) + self.track_map.get(None, []))
for opt in opt_list:
for opt in opts:
opt_start = time.time()
repl = opt.transform(node)
opt_finish = time.time()
self.time_opts[opt] = opt_start - opt_finish
self.process_count[opt] += 1
if not repl:
continue
else:
self.time_opts[opt] = opt_start - opt_finish
self.process_count[opt] += 1
if not multiple_opts or not repl[0].owner:
return repl
assert len(repl) == 1
# Ensuring not the input of graph
assert repl[0].owner
new_node = repl[0].owner
new_opts = compute_opts(new_node)
apply_mult_opts(new_opts, new_node, True)
apply_mult_opts(new_node, True)
return repl
node_start = time.time()
new_var = apply_mult_opts(compute_opts(node), node, self.apply_all_opts)
new_var = apply_mult_opts(node, self.apply_all_opts)
node_finish = time.time()
self.time_nodes[node] = node_finish - node_start
return new_var
......@@ -1344,7 +1339,7 @@ class LocalOptGroup(LocalOptimizer):
file=stream)
count_opt.sort()
for (t, count, o) in count_opt[::-1]:
print(blanc, ' %.3fs - %d - %d - %s' % (
print(blanc, ' %.3fs - %d - %s' % (
t, count, o), file=stream)
print(blanc, ' %.3fs - in %d optimization that were not used (display those with runtime greater than 0)' % (
not_used_time, len(not_used)), file=stream)
......@@ -1356,7 +1351,6 @@ class LocalOptGroup(LocalOptimizer):
print(file=stream)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
blanc = (' ' * level)
print("%s%s id=%i" % (
(' ' * level), self.__class__.__name__, id(self)), file=stream)
if depth != 0:
......@@ -1874,12 +1868,6 @@ class NavigatorOptimizer(Optimizer):
if u is not None:
fgraph.remove_feature(u)
@staticmethod
def print_profile(stream, prof, level=0):
    # NOTE(review): this method drops straight into the interactive
    # debugger instead of printing a profile -- it looks like leftover
    # debugging scaffolding, not a real implementation.  Confirm whether
    # it should be removed or replaced with an actual profile printer.
    import pdb
    pdb.set_trace()
def process_node(self, fgraph, node, lopt=None):
"""
This function will use `lopt` to `transform` the `node`. The
......@@ -2059,7 +2047,7 @@ class TopoOptimizer(NavigatorOptimizer):
def out2in(*local_opts, **kwargs):
"""
"""
Uses the TopoOptimizer from the output nodes to input nodes of the graph.
"""
name = (kwargs and kwargs.pop('name', None))
......
......@@ -322,7 +322,7 @@ class SequenceDB(DB):
def register(self, name, obj, position, *tags):
super(SequenceDB, self).register(name, obj, *tags)
if position == 'last':
self.__position__[name] = len(self.__position__)
self.position[name] = max(self.position.values())
else:
assert isinstance(position, (integer_types, float))
self.__position__[name] = position
......@@ -417,7 +417,7 @@ class LocalGroupDB(DB):
class TopoDB(DB):
"""
Generate a local optimizer of type TopoOptimizer.
Generate a global optimizer of type TopoOptimizer.
"""
......
......@@ -1730,7 +1730,6 @@ compile.optdb.register('local_elemwise_alloc',
1.52, 'fast_run')
@register_canonicalize("fast_compile")
@register_useless
@gof.local_optimizer([T.fill])
def local_useless_fill(node):
......@@ -2558,7 +2557,6 @@ def local_useless_slice(node):
return [out]
@register_useless
@register_canonicalize
@register_specialize
@gof.local_optimizer([Subtensor, AdvancedSubtensor1])
......@@ -3010,7 +3008,13 @@ def local_subtensor_merge(node):
@register_specialize
@gof.local_optimizer([Subtensor])
def local_subtensor_of_alloc(node):
"""alloc[x:y] -> alloc"""
"""
alloc(val)[x:y] -> alloc(val[...])
alloc(val)[x:y] -> alloc(val)
This can be seen as a lift, but it also reduces the amount of computation/memory needed.
"""
if not isinstance(node.op, Subtensor):
return False
u = node.inputs[0]
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论