def __init__(self):
    """Set up the policy's bookkeeping structures and self-register as a
    task listener.
    """
    super().__init__()
    self._pollctl = _PollController()

    # Task lookup keyed by test case
    self._task_index = {}

    # All currently active tasks; an ordered set preserves the order in
    # which tasks were added
    self._current_tasks = util.OrderedSet()

    # Tasks that have finished and await cleanup
    self._retired_tasks = []

    # Scheduler lookup per partition, including the `_rfm_local`
    # pseudo-partition
    self._schedulers = {'_rfm_local': self.local_scheduler}

    # Active tasks per partition
    self._partition_tasks = {'_rfm_local': util.OrderedSet()}

    # Job limit per partition
    local_limit = rt.runtime().get_option('systems/0/max_local_jobs')
    self._max_jobs = {'_rfm_local': local_limit}

    self._pipeline_statistics = rt.runtime().get_option(
        'systems/0/dump_pipeline_progress'
    )
    self.task_listeners.append(self)
def test_operators(self):
    """Rich comparisons and set algebra against builtin sets."""
    small = util.OrderedSet(range(10))
    full = util.OrderedSet(range(20))
    upper = util.OrderedSet(range(10, 20))

    # Equality holds in both directions against plain sets
    assert small == set(range(10))
    assert set(range(10)) == small
    assert small != full
    assert full != small

    # Subset/superset relations
    assert small < full
    assert small <= full
    assert small <= small
    assert full > small
    assert full >= small
    assert full >= full
    assert small.issubset(full)
    assert full.issuperset(small)

    # Set algebra operators and their named equivalents
    assert (small & full) == small
    assert (small & upper) == set()
    assert (small | upper) == full
    assert (full - small) == upper
    assert (upper - small) == upper
    assert (small ^ full) == upper
    assert small.isdisjoint(upper)
    assert not small.isdisjoint(full)
    assert small.symmetric_difference(full) == upper
def test_ordered_set_str(random_seed):
    """String form lists elements in insertion order, using braces."""
    elems = list(range(10))
    random.shuffle(elems)
    ordered = util.OrderedSet(elems)
    expected = str(elems).replace('[', '{').replace(']', '}')
    assert str(ordered) == expected

    # An empty set prints as `<ClassName>()`
    empty = util.OrderedSet()
    assert str(empty) == type(empty).__name__ + '()'
def _reverse_deps(graph):
    """Return the reverse adjacency list of ``graph``.

    Every node of ``graph`` appears as a key in the result, even if it has
    no incoming edges.
    """
    reversed_graph = {}
    for node, deps in graph.items():
        # Guarantee an entry for every node, even sink nodes
        reversed_graph.setdefault(node, util.OrderedSet({}))
        for dep in deps:
            if dep in reversed_graph:
                reversed_graph[dep] |= {node}
            else:
                reversed_graph[dep] = util.OrderedSet({node})

    return reversed_graph
def toposort(graph, is_subgraph=False):
    '''Return a list of the graph nodes topologically sorted.

    If ``is_subgraph`` is ``True``, graph will be treated as a subgraph,
    meaning that any dangling edges will be ignored.

    As a side effect, each test case in ``graph`` gets a ``level``
    attribute: 0 for cases with no dependencies, otherwise one more than
    the maximum level of its dependencies.
    '''
    test_deps = _reduce_deps(graph)
    visited = util.OrderedSet()

    # Level of each test name; computed bottom-up during the DFS
    levels = {}

    def retrieve(d, key, default):
        # Dictionary lookup that tolerates missing keys (dangling edges)
        # only when sorting a subgraph
        try:
            return d[key]
        except KeyError:
            if is_subgraph:
                return default
            else:
                raise

    def visit(node, path):
        # We assume an acyclic graph
        assert node not in path

        path.add(node)

        # Do a DFS visit of all the adjacent nodes
        adjacent = retrieve(test_deps, node, [])
        for u in adjacent:
            if u not in visited:
                visit(u, path)

        # A node's level is one more than the deepest of its dependencies
        if adjacent:
            levels[node] = max(levels[u] for u in adjacent) + 1
        else:
            levels[node] = 0

        # Backtrack: pop presumably removes the most recently added
        # element, consistent with `path` being used as a DFS stack
        path.pop()
        visited.add(node)

    for r in test_deps.keys():
        if r not in visited:
            visit(r, util.OrderedSet())

    # Index test cases by test name
    cases_by_name = {}
    for c in graph.keys():
        c.level = levels[c.check.unique_name]
        try:
            cases_by_name[c.check.unique_name].append(c)
        except KeyError:
            cases_by_name[c.check.unique_name] = [c]

    # Emit all cases of each test in topological order of the tests
    return list(
        itertools.chain(*(retrieve(cases_by_name, n, []) for n in visited)))
def test_ordered_set_union(random_seed):
    """Union concatenates the operands' insertion orders."""
    chunk_a = list(range(10))
    chunk_b = list(range(10, 20))
    chunk_c = list(range(20, 30))
    for chunk in (chunk_a, chunk_b, chunk_c):
        random.shuffle(chunk)

    set_a = util.OrderedSet(chunk_a)
    set_b = util.OrderedSet(chunk_b)
    set_c = util.OrderedSet(chunk_c)
    assert list(set_a.union(set_b, set_c)) == chunk_a + chunk_b + chunk_c
def test_difference(self):
    """Multi-operand difference matches builtin set semantics."""
    base = list(range(10, 40))
    drop_a = list(range(20, 40))
    drop_b = list(range(20, 30))
    for seq in (base, drop_a, drop_b):
        random.shuffle(seq)

    s_base = util.OrderedSet(base)
    s_a = util.OrderedSet(drop_a)
    s_b = util.OrderedSet(drop_b)
    assert s_base.difference(s_a, s_b) == set(range(10, 20))
def test_intersection(self):
    """Multi-operand intersection matches builtin set semantics."""
    outer = list(range(10, 40))
    mid = list(range(20, 40))
    inner = list(range(20, 30))
    for seq in (outer, mid, inner):
        random.shuffle(seq)

    s_outer = util.OrderedSet(outer)
    s_mid = util.OrderedSet(mid)
    s_inner = util.OrderedSet(inner)
    assert s_outer.intersection(s_mid, s_inner) == s_inner
def test_ordered_set_difference():
    """Difference keeps the element order of the left-hand operand."""
    lead = list(range(10, 40))
    rest = list(range(20, 40))
    inner = list(range(20, 30))
    for seq in (lead, rest, inner):
        random.shuffle(seq)

    s_lead = util.OrderedSet(lead)
    s_rest = util.OrderedSet(rest)
    s_inner = util.OrderedSet(inner)

    # The surviving elements must appear in `lead`'s order
    expected = [x for x in lead if 10 <= x < 20]
    assert list(s_lead.difference(s_rest, s_inner)) == expected
def test_ordered_set_intersection(random_seed):
    """Intersection keeps the element order of the left-hand operand."""
    lead = list(range(10, 40))
    mid = list(range(20, 40))
    inner = list(range(20, 30))
    for seq in (lead, mid, inner):
        random.shuffle(seq)

    s_lead = util.OrderedSet(lead)
    s_mid = util.OrderedSet(mid)
    s_inner = util.OrderedSet(inner)

    # The surviving elements must appear in `lead`'s order
    expected = [x for x in lead if 20 <= x < 30]
    assert list(s_lead.intersection(s_mid, s_inner)) == expected
def assert_topological_order(cases, graph):
    """Assert that ``cases`` is a valid topological ordering of ``graph``.

    Also checks that partitions and environments appear in one of the
    acceptable relative orders.
    """
    actual_order = []
    seen_tests = set()
    test_names = util.OrderedSet()
    for case in cases:
        check, part, env = case
        actual_order.append((check.name, part.fullname, env.name))
        test_names.add(check.name)
        seen_tests.add(check.name)

        # Every in-subgraph dependency must have appeared earlier;
        # dependencies pointing outside the subgraph are ignored
        for dep in graph[case]:
            if dep in cases:
                assert dep.check.name in seen_tests

    # Partitions and environments may come in any relative order, so build
    # every acceptable full ordering and check membership
    valid_orderings = []
    for parts in itertools.permutations(['sys0:p0', 'sys0:p1']):
        for envs in itertools.permutations(['e0', 'e1']):
            valid_orderings.append([
                (t, p, e)
                for t in test_names for p in parts for e in envs
            ])

    assert actual_order in valid_orderings
def test_ordered_set_construction(random_seed):
    """Duplicates are dropped and first-insertion order is kept."""
    elems = list(range(10))
    random.shuffle(elems)

    # Feeding the sequence twice must not create duplicates
    s = util.OrderedSet(elems + elems)
    assert len(s) == 10
    assert all(i in s for i in range(10))
    assert list(s) == elems
def _reduce_deps(graph):
    '''Reduce test case graph to a test-only graph.

    All cases of the same test collapse into a single node whose
    dependencies are the union of the dependencies of its cases.
    '''
    reduced = {}
    for case, deps in graph.items():
        name = case.check.unique_name
        dep_names = util.OrderedSet(d.check.unique_name for d in deps)
        if name in reduced:
            reduced[name] |= dep_names
        else:
            reduced[name] = dep_names

    return reduced
def update(self, hooks, *, denied_hooks=None):
    '''Update the hook registry with the hooks from another hook registry.

    The optional ``denied_hooks`` argument takes a set of disallowed hook
    names, preventing their inclusion into the current hook registry.
    '''
    blocked = denied_hooks or set()
    for phase, phase_hooks in hooks.items():
        registry = self.__hooks.setdefault(phase, util.OrderedSet())
        for hook in phase_hooks:
            if hook.__name__ not in blocked:
                registry.add(hook)
def test_ordered_set_operators():
    """Comparisons and set algebra on string-based ordered sets."""
    abc = util.OrderedSet('abc')
    abced = util.OrderedSet('abced')
    ed = util.OrderedSet('ed')

    # Equality against plain and ordered sets; two ordered sets with the
    # same elements but different order do not compare equal
    assert abc == set('abc')
    assert abc == util.OrderedSet('abc')
    assert set('abc') == abc
    assert util.OrderedSet('abc') == abc
    assert abc != abced
    assert abced != abc
    assert abc != util.OrderedSet('cab')

    # Subset/superset relations
    assert abc < abced
    assert abc <= abced
    assert abc <= abc
    assert abced > abc
    assert abced >= abc
    assert abced >= abced
    assert abc.issubset(abced)
    assert abced.issuperset(abc)

    # Set algebra operators and their named equivalents
    assert (abc & abced) == abc
    assert (abc & ed) == set()
    assert (abc | ed) == abced
    assert (abced - abc) == ed
    assert (ed - abc) == ed
    assert (abc ^ abced) == ed
    assert abc.isdisjoint(ed)
    assert not abc.isdisjoint(abced)
    assert abc.symmetric_difference(abced) == ed
def runcase(self, case):
    """Create and register a task for ``case``, tracked per partition."""
    super().runcase(case)
    check, partition, environ = case
    partname = partition.fullname
    self._schedulers[partname] = partition.scheduler

    # Initialize partition-based bookkeeping on first use
    if partname not in self._partition_tasks:
        self._partition_tasks[partname] = util.OrderedSet()

    self._max_jobs.setdefault(partname, partition.max_jobs)

    task = RegressionTask(case, self.task_listeners)
    self._task_index[case] = task
    self.stats.add_task(task)
    getlogger().debug2(f'Added {check.name} on {partition.fullname} '
                       f'using {environ.name}')
    self._current_tasks.add(task)
def prune_deps(graph, testcases, max_depth=None):
    '''Prune the graph so that it contains only the specified cases and
    their dependencies up to ``max_depth``.

    Graph is assumed to be a DAG.

    :arg graph: the full dependency graph as an adjacency list.
    :arg testcases: the test cases to start pruning from.
    :arg max_depth: prune dependencies deeper than this distance from any
        starting test case; ``None`` (or ``0``) means no limit.
    :returns: the pruned graph as an adjacency list.
    '''
    max_depth = max_depth or sys.maxsize
    pruned_graph = {}
    for tc in testcases:
        # Traverse from `tc` carrying each node's distance from `tc`.
        # The original implementation incremented a single counter once
        # per traversed *edge*, so `max_depth` effectively bounded the
        # number of edges processed (in arbitrary stack order) rather
        # than the actual dependency depth.
        unvisited = [(tc, 0)]
        while unvisited:
            node, depth = unvisited.pop()

            # Make sure every reached node appears in the result, even if
            # its own edges are cut off by the depth limit
            pruned_graph.setdefault(node, util.OrderedSet())
            if depth >= max_depth:
                continue

            for adj in graph[node]:
                pruned_graph[node].add(adj)
                if adj not in pruned_graph:
                    unvisited.append((adj, depth + 1))

    return pruned_graph
def test_toposort(self):
    """Topologically sort a graph with two connected components.

    Component 1: t1 -> t0, t2 -> t1, t3 -> {t1, t2}, t4 -> {t2, t3}
    Component 2: t6 -> t5, t7 -> t5, t8 -> t7
    """
    t0 = self.create_test('t0')
    t1 = self.create_test('t1')
    t2 = self.create_test('t2')
    t3 = self.create_test('t3')
    t4 = self.create_test('t4')
    t5 = self.create_test('t5')
    t6 = self.create_test('t6')
    t7 = self.create_test('t7')
    t8 = self.create_test('t8')
    t1.depends_on('t0')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t3.depends_on('t2')
    t4.depends_on('t2')
    t4.depends_on('t3')
    t6.depends_on('t5')
    t7.depends_on('t5')
    t8.depends_on('t7')
    all_tests = [t0, t1, t2, t3, t4, t5, t6, t7, t8]
    deps = dependency.build_deps(
        executors.generate_testcases(all_tests)
    )
    cases = dependency.toposort(deps)

    actual_order = []
    test_names = util.OrderedSet()
    seen_tests = set()
    for case in cases:
        check, part, env = case
        actual_order.append((check.name, part.fullname, env.name))
        test_names.add(check.name)
        seen_tests.add(check.name)

        # Every dependency of `case` must have appeared earlier
        for dep in deps[case]:
            assert dep.check.name in seen_tests

    # Partitions and environments may be ordered arbitrarily, so compare
    # against every acceptable full ordering
    valid_orderings = []
    for parts in itertools.permutations(['sys0:p0', 'sys0:p1']):
        for envs in itertools.permutations(['e0', 'e1']):
            valid_orderings.append([
                (t, p, e)
                for t in test_names for p in parts for e in envs
            ])

    assert actual_order in valid_orderings
def toposort(graph):
    # NOTES on implementation:
    #
    # 1. This function assumes a directed acyclic graph.
    # 2. The purpose of this function is to topologically sort the test
    #    cases, not only the tests. However, since we do not allow cycles
    #    between tests in any case (even if this could be classified a
    #    pseudo-dependency), we first do a topological sort of the tests
    #    and we subsequently sort the test cases by partition and by
    #    programming environment.
    # 3. To achieve this 3-step sorting with a single sort operations, we
    #    rank the test cases by associating them with an integer key based
    #    on the result of the topological sort of the tests and by
    #    choosing an arbitrary ordering of the partitions and the
    #    programming environment.
    test_deps = _reduce_deps(graph)
    rev_deps = _reverse_deps(test_deps)

    # We do a BFS traversal from each root
    visited = {}
    roots = set(t for t, deps in test_deps.items() if not deps)
    for r in roots:
        unvisited = util.OrderedSet([r])
        visited[r] = util.OrderedSet()
        while unvisited:
            # Next node is one whose all dependencies are already visited
            # FIXME: This makes sorting's complexity O(V^2)
            node = None
            for n in unvisited:
                if test_deps[n] <= visited[r]:
                    node = n
                    break

            # If node is None, graph has a cycle and this is a bug; this
            # function assumes acyclic graphs only
            assert node is not None
            unvisited.remove(node)

            # Enqueue the dependents of `node` (edges of the reversed
            # graph). NOTE(review): the membership test is against the
            # `visited` dict, i.e. against the set of *roots*, not against
            # the nodes visited from this root (`visited[r]`) — confirm
            # this filter is intended.
            adjacent = rev_deps[node]
            unvisited |= util.OrderedSet(n for n in adjacent
                                         if n not in visited)
            visited[r].add(node)

    # Combine all individual sequences into a single one
    ordered_tests = util.OrderedSet()
    for tests in visited.values():
        ordered_tests |= tests

    # Get all partitions and programming environments from test cases
    partitions = util.OrderedSet()
    environs = util.OrderedSet()
    for c in graph.keys():
        partitions.add(c.partition.fullname)
        environs.add(c.environ.name)

    # Rank test cases; we first need to calculate the base for the rank
    # number. The rank encodes (test order, partition order, environment
    # order) as a single base-`base` number, so a single sort yields the
    # desired 3-level ordering.
    base = max(len(partitions), len(environs)) + 1
    ranks = {}
    for i, test in enumerate(ordered_tests):
        for j, part in enumerate(partitions):
            for k, env in enumerate(environs):
                ranks[test, part, env] = i * base**2 + j * base + k

    return sorted(graph.keys(),
                  key=lambda x: ranks[x.check.name, x.partition.fullname,
                                      x.environ.name])
def test_ordered_set_reversed():
    """``reversed`` iterates in the inverse of insertion order."""
    elems = list(range(10))
    random.shuffle(elems)
    ordered = util.OrderedSet(elems)
    assert list(reversed(ordered)) == elems[::-1]
def build_deps(cases): """Build dependency graph from test cases. The graph is represented as an adjacency list in a Python dictionary holding test cases. The dependency information is also encoded inside each test cases. """ # Index cases for quick access cases_by_part = {} cases_revmap = {} for c in cases: cname = c.check.name pname = c.partition.fullname ename = c.environ.name cases_by_part.setdefault((cname, pname), []) cases_revmap.setdefault((cname, pname, ename), None) cases_by_part[cname, pname].append(c) cases_revmap[cname, pname, ename] = c def resolve_dep(target, from_map, *args): errmsg = 'could not resolve dependency: %s' % target try: ret = from_map[args] except KeyError: raise DependencyError(errmsg) else: if not ret: raise DependencyError(errmsg) return ret # NOTE on variable names # # c stands for check or case depending on the context # p stands for partition # e stands for environment # t stands for target graph = {} for c in cases: cname = c.check.name pname = c.partition.fullname ename = c.environ.name for dep in c.check.user_deps(): tname, how, subdeps = dep if how == rfm.DEPEND_FULLY: c.deps.extend(resolve_dep(c, cases_by_part, tname, pname)) elif how == rfm.DEPEND_BY_ENV: c.deps.append(resolve_dep(c, cases_revmap, tname, pname, ename)) elif how == rfm.DEPEND_EXACT: for env, tenvs in subdeps.items(): if env != ename: continue for te in tenvs: c.deps.append( resolve_dep(c, cases_revmap, tname, pname, te)) graph[c] = util.OrderedSet(c.deps) return graph
def update(self, hooks):
    """Merge the hooks of another registry into this one, per phase."""
    for phase, phase_hooks in hooks.items():
        registry = self.__hooks.setdefault(phase, util.OrderedSet())
        for hook in phase_hooks:
            registry.add(hook)
def build_deps(cases, default_cases=None):
    '''Build dependency graph from test cases.

    The graph is represented as an adjacency list in a Python dictionary
    holding test cases. The dependency information is also encoded inside
    each test case.

    Dependencies that cannot be resolved among ``cases`` are looked up in
    ``default_cases`` as a fallback.
    '''

    # Index cases for quick access

    def build_partition_index(cases):
        # Map (test, partition) -> list of cases
        if cases is None:
            return {}

        ret = {}
        for c in cases:
            cname, pname = c.check.name, c.partition.fullname
            ret.setdefault((cname, pname), [])
            ret[cname, pname].append(c)

        return ret

    def build_cases_index(cases):
        # Map (test, partition, environ) -> single case; the first case
        # encountered for a key wins (setdefault)
        if cases is None:
            return {}

        ret = {}
        for c in cases:
            cname = c.check.name
            pname = c.partition.fullname
            ename = c.environ.name
            ret.setdefault((cname, pname, ename), c)

        return ret

    def resolve_dep(target, from_map, fallback_map, *args):
        # Resolve in `from_map` first; fall back to `fallback_map`; a
        # missing or empty entry is an unresolvable dependency
        errmsg = 'could not resolve dependency: %s -> %s' % (target, args)
        try:
            ret = from_map[args]
        except KeyError:
            # try to resolve the dependency in the fallback map
            try:
                ret = fallback_map[args]
            except KeyError:
                raise DependencyError(errmsg) from None

        if not ret:
            raise DependencyError(errmsg)

        return ret

    cases_by_part = build_partition_index(cases)
    cases_revmap = build_cases_index(cases)
    default_cases_by_part = build_partition_index(default_cases)
    default_cases_revmap = build_cases_index(default_cases)

    # NOTE on variable names
    #
    # c stands for check or case depending on the context
    # p stands for partition
    # e stands for environment
    # t stands for target

    # We use an ordered dict here, because we need to keep the order of
    # partitions and environments
    graph = collections.OrderedDict()
    for c in cases:
        cname = c.check.name
        pname = c.partition.fullname
        ename = c.environ.name
        for dep in c.check.user_deps():
            tname, how, subdeps = dep
            if how == rfm.DEPEND_FULLY:
                # Depend on all cases of the target test in this partition
                c.deps.extend(
                    resolve_dep(c, cases_by_part, default_cases_by_part,
                                tname, pname))
            elif how == rfm.DEPEND_BY_ENV:
                # Depend on the target's case in the same environment
                c.deps.append(
                    resolve_dep(c, cases_revmap, default_cases_revmap,
                                tname, pname, ename))
            elif how == rfm.DEPEND_EXACT:
                # `subdeps` maps this test's environments to lists of
                # target environments to depend on
                for env, tenvs in subdeps.items():
                    if env != ename:
                        continue

                    for te in tenvs:
                        c.deps.append(
                            resolve_dep(c, cases_revmap,
                                        default_cases_revmap,
                                        tname, pname, te))

        graph[c] = util.OrderedSet(c.deps)

    # Calculate in-degree of each node
    for u, adjacent in graph.items():
        for v in adjacent:
            v.in_degree += 1

    return graph
def test_ordered_set_repr():
    """Both ``repr`` and ``str`` print elements in insertion order."""
    s = util.OrderedSet('abc')
    expected = "{'a', 'b', 'c'}"
    assert repr(s) == expected
    assert str(s) == expected
def test_ordered_set_construction_error():
    """Construction from a non-iterable or from multiple arguments fails."""
    for bad_args in [(2,), (1, 2, 3)]:
        with pytest.raises(TypeError):
            util.OrderedSet(*bad_args)
def __init__(self, hooks=None):
    # Registry of hooks, populated through `update()`.
    # NOTE(review): `update()` treats this attribute as a mapping (it
    # calls `setdefault()` and iterates `items()`), so initializing it as
    # a `util.OrderedSet` looks inconsistent — confirm that `OrderedSet`
    # supports the mapping interface used, or whether this should be a
    # plain dict.
    self.__hooks = util.OrderedSet()
    if hooks is not None:
        self.update(hooks)
def test_ordered_set_construction_empty():
    """An empty ordered set equals the empty builtin set, both ways."""
    empty = util.OrderedSet()
    assert empty == set()
    assert set() == empty
def test_construction_empty(self):
    """An empty ordered set compares equal to the empty builtin set."""
    empty = util.OrderedSet()
    # Equality must hold in both directions
    assert empty == set()
    assert set() == empty
def test_construction_error(self):
    """Invalid constructor arguments raise ``TypeError``."""
    for bad_args in [(2,), (1, 2, 3)]:
        with pytest.raises(TypeError):
            util.OrderedSet(*bad_args)
def build_deps(cases, default_cases=None):
    '''Build dependency graph from test cases.

    The graph is represented as an adjacency list in a Python dictionary
    holding test cases. The dependency information is also encoded inside
    each test case.

    Dependencies that cannot be resolved among ``cases`` are looked up in
    ``default_cases``; cases whose dependencies remain unresolved are
    skipped, along with every case that (transitively) depends on them.

    :returns: a tuple of the graph and the list of skipped cases.
    '''

    # Index cases for quick access

    def build_index(cases):
        # Map test name -> list of its cases
        if cases is None:
            return {}

        ret = {}
        for c in cases:
            cname = c.check.unique_name
            ret.setdefault(cname, [])
            ret[cname].append(c)

        return ret

    all_cases_map = build_index(cases)
    default_cases_map = build_index(default_cases)

    def resolve_dep(src, dst):
        # Resolve among the current cases first, then fall back to the
        # default cases; a missing or empty entry is unresolvable
        errmsg = f'could not resolve dependency: {src!r} -> {dst!r}'
        try:
            ret = all_cases_map[dst]
        except KeyError:
            # Try to resolve the dependency in the fallback map
            try:
                ret = default_cases_map[dst]
            except KeyError:
                raise DependencyError(errmsg) from None

        if not ret:
            raise DependencyError(errmsg)

        return ret

    # NOTE on variable names
    #
    # c stands for check or case depending on the context
    # p stands for partition
    # e stands for environment

    # We use an ordered dict here, because we need to keep the order of
    # partitions and environments
    graph = collections.OrderedDict()
    unresolved_cases = []
    for c in cases:
        psrc = c.partition.name
        esrc = c.environ.name
        try:
            for dep in c.check.user_deps():
                # `when` is a callable filtering the target cases by
                # (partition, environment) pairs
                tname, when = dep
                for d in resolve_dep(c, tname):
                    pdst = d.partition.name
                    edst = d.environ.name
                    if when((psrc, esrc), (pdst, edst)):
                        c.deps.append(d)
        except DependencyError as e:
            getlogger().warning(e)
            unresolved_cases.append(c)
            continue

        graph[c] = util.OrderedSet(c.deps)

    # Skip also all cases that depend on the unresolved ones.
    # NOTE(review): a case depending on multiple skipped cases may be
    # re-added to `skip_nodes` after it has been popped and therefore
    # appear more than once in `skipped_cases` — `graph.pop()` below is
    # idempotent, but verify the duplicates in the returned list are
    # acceptable to callers.
    skipped_cases = []
    skip_nodes = set(unresolved_cases)
    while skip_nodes:
        v = skip_nodes.pop()
        skipped_cases.append(v)
        for u, adj in graph.items():
            if v in adj:
                skip_nodes.add(u)

    # Prune graph
    for c in skipped_cases:
        # Cases originally discovered (unresolved_cases) are not in the
        # graph, but we loop over them here; therefore we use pop()
        graph.pop(c, None)

    # Calculate in-degree of each node
    for u, adjacent in graph.items():
        for v in adjacent:
            v.in_degree += 1

    msg = 'skipping all dependent test cases\n'
    for c in skipped_cases:
        msg += f' - {c}\n'

    if skipped_cases:
        getlogger().warning(msg)

    return graph, skipped_cases