提交 f13ddff7 authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #4614 from nouiz/test_timeout

Split tests to help work around Travis timeout. They are super fast here.
...@@ -88,6 +88,7 @@ def _atexit_print_fn(): ...@@ -88,6 +88,7 @@ def _atexit_print_fn():
merge = cum.optimizer_profile[0].merge_profile( merge = cum.optimizer_profile[0].merge_profile(
cum.optimizer_profile[1], cum.optimizer_profile[1],
ps.optimizer_profile[1]) ps.optimizer_profile[1])
assert len(merge) == len(cum.optimizer_profile[1])
cum.optimizer_profile = (cum.optimizer_profile[0], merge) cum.optimizer_profile = (cum.optimizer_profile[0], merge)
except Exception as e: except Exception as e:
print("Got an exception while merging profile") print("Got an exception while merging profile")
......
...@@ -315,17 +315,17 @@ class SeqOptimizer(Optimizer, list): ...@@ -315,17 +315,17 @@ class SeqOptimizer(Optimizer, list):
" time - (name, class, index, nodes before, nodes after) - validate time", " time - (name, class, index, nodes before, nodes after) - validate time",
file=stream) file=stream)
ll = [] ll = []
for opt in opts: for (opt, nb_n) in zip(opts, nb_nodes):
if hasattr(opt, "__name__"): if hasattr(opt, "__name__"):
name = opt.__name__ name = opt.__name__
else: else:
name = opt.name name = opt.name
idx = opts.index(opt) idx = opts.index(opt)
ll.append((name, opt.__class__.__name__, ll.append((name, opt.__class__.__name__,
idx)) idx) + nb_n)
lll = sorted(zip(prof, ll, nb_nodes), key=lambda a: a[0]) lll = sorted(zip(prof, ll), key=lambda a: a[0])
for (t, opt, nb_n) in lll[::-1]: for (t, opt) in lll[::-1]:
i = opt[2] i = opt[2]
if sub_validate_time: if sub_validate_time:
val_time = sub_validate_time[i + 1] - sub_validate_time[i] val_time = sub_validate_time[i + 1] - sub_validate_time[i]
...@@ -345,8 +345,8 @@ class SeqOptimizer(Optimizer, list): ...@@ -345,8 +345,8 @@ class SeqOptimizer(Optimizer, list):
Merge 2 profiles returned by this class' apply() function. Merge 2 profiles returned by this class' apply() function.
""" """
new_t = [] new_t = [] # the time for the optimization
new_l = [] new_l = [] # the optimization
new_sub_profile = [] new_sub_profile = []
# merge common(same object) opt # merge common(same object) opt
for l in set(prof1[0]).intersection(set(prof2[0])): for l in set(prof1[0]).intersection(set(prof2[0])):
...@@ -399,6 +399,12 @@ class SeqOptimizer(Optimizer, list): ...@@ -399,6 +399,12 @@ class SeqOptimizer(Optimizer, list):
new_sub_profile.append(p[6][idx]) new_sub_profile.append(p[6][idx])
new_opt = SeqOptimizer(*new_l) new_opt = SeqOptimizer(*new_l)
new_nb_nodes = []
for p1, p2 in zip(prof1[8], prof2[8]):
new_nb_nodes.append((p1[0] + p2[0], p1[1] + p2[1]))
new_nb_nodes.extend(prof1[8][len(new_nb_nodes):])
new_nb_nodes.extend(prof2[8][len(new_nb_nodes):])
new_callbacks_times = merge_dict(prof1[9], prof2[9]) new_callbacks_times = merge_dict(prof1[9], prof2[9])
# We need to assert based on the name as we merge also based on # We need to assert based on the name as we merge also based on
# the name. # the name.
...@@ -410,6 +416,7 @@ class SeqOptimizer(Optimizer, list): ...@@ -410,6 +416,7 @@ class SeqOptimizer(Optimizer, list):
return (new_opt, new_t, prof1[2] + prof2[2], return (new_opt, new_t, prof1[2] + prof2[2],
prof1[3] + prof2[3], prof1[3] + prof2[3],
-1, -1, new_sub_profile, [], -1, -1, new_sub_profile, [],
new_nb_nodes,
new_callbacks_times) new_callbacks_times)
...@@ -2313,10 +2320,18 @@ class EquilibriumOptimizer(NavigatorOptimizer): ...@@ -2313,10 +2320,18 @@ class EquilibriumOptimizer(NavigatorOptimizer):
"%f with the theano flag 'optdb.max_use_ratio'." % "%f with the theano flag 'optdb.max_use_ratio'." %
config.optdb.max_use_ratio) config.optdb.max_use_ratio)
fgraph.remove_feature(change_tracker) fgraph.remove_feature(change_tracker)
assert len(loop_process_count) == len(loop_timing)
assert len(loop_process_count) == len(global_opt_timing)
assert len(loop_process_count) == len(nb_nodes)
assert len(loop_process_count) == len(io_toposort_timing)
assert len(loop_process_count) == len(global_sub_profs)
assert len(loop_process_count) == len(final_sub_profs)
assert len(loop_process_count) == len(cleanup_sub_profs)
return (self, loop_timing, loop_process_count, return (self, loop_timing, loop_process_count,
(start_nb_nodes, end_nb_nodes, max_nb_nodes), (start_nb_nodes, end_nb_nodes, max_nb_nodes),
global_opt_timing, nb_nodes, time_opts, io_toposort_timing, global_opt_timing, nb_nodes, time_opts, io_toposort_timing,
node_created, global_sub_profs, final_sub_profs, cleanup_sub_profs) node_created, global_sub_profs, final_sub_profs,
cleanup_sub_profs)
def print_summary(self, stream=sys.stdout, level=0, depth=-1): def print_summary(self, stream=sys.stdout, level=0, depth=-1):
name = getattr(self, 'name', None) name = getattr(self, 'name', None)
......
...@@ -84,14 +84,13 @@ class test_sort(unittest.TestCase): ...@@ -84,14 +84,13 @@ class test_sort(unittest.TestCase):
data = np.random.rand(2, 3, 4).astype(theano.config.floatX) data = np.random.rand(2, 3, 4).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, None), [data]) utt.verify_grad(lambda x: sort(x, None), [data])
def test_grad_negative_axis(self): def test_grad_negative_axis_2d(self):
# test 2D
data = np.random.rand(2, 3).astype(theano.config.floatX) data = np.random.rand(2, 3).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, -1), [data]) utt.verify_grad(lambda x: sort(x, -1), [data])
data = np.random.rand(2, 3).astype(theano.config.floatX) data = np.random.rand(2, 3).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, -2), [data]) utt.verify_grad(lambda x: sort(x, -2), [data])
# test 3D def test_grad_negative_axis_3d(self):
data = np.random.rand(2, 3, 4).astype(theano.config.floatX) data = np.random.rand(2, 3, 4).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, -1), [data]) utt.verify_grad(lambda x: sort(x, -1), [data])
data = np.random.rand(2, 3, 4).astype(theano.config.floatX) data = np.random.rand(2, 3, 4).astype(theano.config.floatX)
...@@ -99,7 +98,7 @@ class test_sort(unittest.TestCase): ...@@ -99,7 +98,7 @@ class test_sort(unittest.TestCase):
data = np.random.rand(2, 3, 4).astype(theano.config.floatX) data = np.random.rand(2, 3, 4).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, -3), [data]) utt.verify_grad(lambda x: sort(x, -3), [data])
# test 4D def test_grad_negative_axis_4d(self):
data = np.random.rand(2, 3, 4, 2).astype(theano.config.floatX) data = np.random.rand(2, 3, 4, 2).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, -1), [data]) utt.verify_grad(lambda x: sort(x, -1), [data])
data = np.random.rand(2, 3, 4, 2).astype(theano.config.floatX) data = np.random.rand(2, 3, 4, 2).astype(theano.config.floatX)
...@@ -109,14 +108,13 @@ class test_sort(unittest.TestCase): ...@@ -109,14 +108,13 @@ class test_sort(unittest.TestCase):
data = np.random.rand(2, 3, 4, 2).astype(theano.config.floatX) data = np.random.rand(2, 3, 4, 2).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, -4), [data]) utt.verify_grad(lambda x: sort(x, -4), [data])
def test_grad_nonnegative_axis(self): def test_grad_nonnegative_axis_2d(self):
# test 2D
data = np.random.rand(2, 3).astype(theano.config.floatX) data = np.random.rand(2, 3).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, 0), [data]) utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.rand(2, 3).astype(theano.config.floatX) data = np.random.rand(2, 3).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, 1), [data]) utt.verify_grad(lambda x: sort(x, 1), [data])
# test 3D def test_grad_nonnegative_axis_3d(self):
data = np.random.rand(2, 3, 4).astype(theano.config.floatX) data = np.random.rand(2, 3, 4).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, 0), [data]) utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.rand(2, 3, 4).astype(theano.config.floatX) data = np.random.rand(2, 3, 4).astype(theano.config.floatX)
...@@ -124,7 +122,7 @@ class test_sort(unittest.TestCase): ...@@ -124,7 +122,7 @@ class test_sort(unittest.TestCase):
data = np.random.rand(2, 3, 4).astype(theano.config.floatX) data = np.random.rand(2, 3, 4).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, 2), [data]) utt.verify_grad(lambda x: sort(x, 2), [data])
# test 4D def test_grad_nonnegative_axis_4d(self):
data = np.random.rand(2, 3, 4, 2).astype(theano.config.floatX) data = np.random.rand(2, 3, 4, 2).astype(theano.config.floatX)
utt.verify_grad(lambda x: sort(x, 0), [data]) utt.verify_grad(lambda x: sort(x, 0), [data])
data = np.random.rand(2, 3, 4, 2).astype(theano.config.floatX) data = np.random.rand(2, 3, 4, 2).astype(theano.config.floatX)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论