Example #1
    def visit(self, control_path, is_complete_control_path):
        visit_result = super(EdgeCoverageGraphVisitor,
                             self).visit(control_path,
                                         is_complete_control_path)

        if visit_result == VisitResult.CONTINUE and is_complete_control_path:
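            # Walk the completed path in reverse, promoting edge labels:
            # UNVISITED -> VISITED on first coverage, then VISITED -> DONE
            # once every outgoing edge of an edge's destination node is DONE.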
            is_done = True
            for e in reversed(control_path):
                label = self.labels[e]
                if label == EdgeLabels.UNVISITED:
                    Statistics().num_covered_edges += 1
                    self.labels[e] = EdgeLabels.VISITED

                if is_done and label != EdgeLabels.DONE:
                    all_out_es_done = True
                    for oe in self.graph.get_neighbors(e.dst):
                        if self.labels[oe] != EdgeLabels.DONE:
                            all_out_es_done = False
                            break

                    if all_out_es_done:
                        for ie in self.graph.get_in_edges(e.dst):
                            if self.labels[ie] == EdgeLabels.VISITED:
                                Statistics().num_done += 1
                                self.labels[ie] = EdgeLabels.DONE
                    else:
                        is_done = False

            Statistics().dump()
            print(len(set(self.labels.keys())))
            visit_result = VisitResult.ABORT
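            # The triple-quoted blocks below are debugging and back-off
            # heuristics that are currently disabled.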
            """
            c = 0
            for k, v in self.labels.items():
                if v == EdgeLabels.UNVISITED:
                    print(k)
                    c += 1
                if c == 10:
                    break

            self.ccc = 0
            """
        """
        if visit_result == VisitResult.CONTINUE and not is_complete_control_path:
            path_has_new_edges = False
            for e in control_path:
                if self.labels[e] == EdgeLabels.UNVISITED:
                    path_has_new_edges = True
                    break

            if path_has_new_edges:
                self.ccc = 0

        if visit_result == VisitResult.BACKTRACK:
            self.ccc += 1
        if self.ccc == 100:
            visit_result = VisitResult.ABORT
        """

        return visit_result
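
The EdgeLabels values used above form a three-stage lifecycle. A plausible minimal definition (hypothetical; the actual project may define it differently):

from enum import Enum

class EdgeLabels(Enum):
    UNVISITED = 0  # never covered by a complete, satisfiable path
    VISITED = 1    # covered at least once
    DONE = 2       # covered, and every out-edge of its dst node is DONE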
Example #2
 def solve(self):
     start_time = time.time()
     Statistics().solver_time.start()
     solver_result = self.solver.check()
     Statistics().num_solver_calls += 1
     Statistics().solver_time.stop()
     logging.debug(
         "Checked %d paths, result=%s, %f seconds" %
         (len(self.paths_data), solver_result, time.time() - start_time))
     return solver_result
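
Throughout these examples, Statistics() is used as a process-wide singleton whose counters and timers persist across calls. A minimal sketch of that pattern, assuming a simple accumulating timer (hypothetical; not the project's actual implementation):

import time

class SolverTimer(object):
    """Accumulates wall-clock time across start()/stop() pairs."""

    def __init__(self):
        self.total = 0.0
        self._started = None

    def start(self):
        self._started = time.time()

    def stop(self):
        self.total += time.time() - self._started

class Statistics(object):
    _instance = None

    def __new__(cls):
        # Every Statistics() call returns the same shared instance.
        if cls._instance is None:
            cls._instance = super(Statistics, cls).__new__(cls)
            cls._instance.num_solver_calls = 0
            cls._instance.num_test_cases = 0
            cls._instance.solver_time = SolverTimer()
        return cls._instance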
Example #3
    def preprocess_edges(self, path, edges):
        if Config().get_random_tlubf():
            shuffle(edges)
            return edges

        custom_order = sorted(
            edges, key=lambda t: Statistics().stats_per_control_path_edge[t])
        return reversed(custom_order)
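
        # NOTE: everything below this point is unreachable dead code;
        # one of the returns above always exits the method first.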

        visited_es = []
        unvisited_es = []

        path_has_new_edges = False
        for e in path:
            if self.labels[e] == EdgeLabels.UNVISITED:
                path_has_new_edges = True
                break

        for e in edges:
            label = self.labels[e]
            if label == EdgeLabels.UNVISITED:
                unvisited_es.append(e)
            elif label == EdgeLabels.VISITED:
                visited_es.append(e)
            else:
                assert label == EdgeLabels.DONE
                if path_has_new_edges:
                    visited_es.append(e)

        # shuffle(visited_es)
        # shuffle(unvisited_es)
        return list(reversed(visited_es)) + list(reversed(unvisited_es))
Example #4
def generate_test_cases(input_file):
    top = P4_Top()
    top.load_json_file(input_file)

    top.build_graph()
    top.load_extern_backends()

    num_control_paths, num_control_path_nodes, num_control_path_edges = \
        top.in_graph.count_all_paths(top.in_pipeline.init_table_name)
    num_parser_path_edges = top.parser_graph.num_edges()
    Statistics().num_control_path_edges = (
        num_parser_path_edges + num_control_path_edges)

    graph_visitor = ParserGraphVisitor(top.hlir)
    parser_paths = list(
        top.parser_graph.visit_all_paths(
            top.hlir.parsers['parser'].init_state, 'sink', graph_visitor))

    max_path_len = max([len(p) for p in parser_paths])
    logging.info("Found %d parser paths, longest with length %d"
                 "" % (len(parser_paths), max_path_len))
    if Config().get_show_parser_paths():
        print_parser_paths(parser_paths)

    logging.info(
        "Counted %d paths, %d nodes, %d edges"
        " in parser + ingress control flow graph"
        "" % (len(parser_paths) * num_control_paths, num_control_path_nodes,
              num_parser_path_edges + num_control_path_edges))

    generator = TestCaseGenerator(input_file, top)
    return generator.generate_test_cases_for_parser_paths(parser_paths)
Example #5
    def generate_test_cases_for_parser_paths(self, parser_paths):
        Statistics().init()
        self.total_switch_time = 0.0
        self.parser_path_edge_count = defaultdict(int)

        if Config().get_round_robin_parser_paths():
            results = self.generate_test_cases_round_robin(parser_paths)
        else:
            results = self.generate_test_cases_linearly(parser_paths)

        if self.table_solver is not None:
            self.table_solver.flush()

        logging.info("Final statistics on use of control path edges:")
        Statistics().log_control_path_stats(
            Statistics().stats_per_control_path_edge,
            Statistics().num_control_path_edges)
        self.test_case_writer.cleanup()

        Statistics().dump()
        Statistics().cleanup()

        for result, count in Statistics().stats.items():
            print('{}: {}'.format(result, count))

        if Config().get_dump_test_case():
            str_items = []
            for (parser_path, control_path), v in results.items():
                str_items.append('{}: {}'.format(
                    path_tuple(parser_path, control_path), v))
            print('{{ {} }}'.format(', '.join(str_items)))

        return results
Example #6
    def solve_path(self):
        Statistics().solver_time.start()
        self.solver_result = self.solver.check()
        Statistics().num_solver_calls += 1
        Statistics().solver_time.stop()

        context = self.current_context()
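        # Classify the outcome: an unsat (or unknown) solver result means
        # no packet can exercise this path; otherwise report uninitialized
        # reads or invalid header writes flagged in the current context.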
        if self.solver_result != sat:
            result = TestPathResult.NO_PACKET_FOUND
        elif context.uninitialized_reads:
            result = TestPathResult.UNINITIALIZED_READ
        elif context.invalid_header_writes:
            result = TestPathResult.INVALID_HEADER_WRITE
        else:
            result = TestPathResult.SUCCESS

        self.result_history[-2].append(result)
        return result
Example #7
    def visit(self):
        while (Statistics().num_covered_edges <
               Statistics().num_control_path_edges):
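            # Build one root-to-sink path; preprocess_edges() orders the
            # candidate edges and choose_edge() picks the next step.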
            path = []
            next_node = self.start
            while True:
                edges = self.graph.get_neighbors(next_node)
                if not edges:
                    break

                edges = self.preprocess_edges(path, edges)
                edge = self.choose_edge(edges)
                path.append(edge)
                next_node = edge.dst

            for e in path:
                if self.path_count[e] == 0:
                    Statistics().num_covered_edges += 1
                self.path_count[e] += 1
            self.visitor.visit(path)
Example #8
    def process_path_solution(self, path_solution):
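        # If a deferred table solver is in use, just queue the path;
        # its flush() solves the accumulated paths and writes the test
        # cases later.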
        if self.table_solver is not None:
            self.table_solver.add_path(path_solution)
            return

        pre_sim_time = time.time()
        test_case, packet_list = self.generate_test_case_for_path(
            path_solution)
        test_case["time_sec_simulate_packet"] = time.time() - pre_sim_time

        self.test_case_writer.write(test_case, packet_list)
        Statistics().num_test_cases += 1
Example #9
    def flush(self):
        logging.info("Flushing %d paths" % (len(self.paths_data), ))
        if not self.paths_data:
            # If no paths have been added then nothing to do
            return

        self.add_final_constraints()

        # Any paths that were added and not removed should already have
        # been found satisfiable; if not, this assertion fails.
        assert self.solve() == z3.sat
        model = self.solver.model()

        max_test_cases = Config().get_num_test_cases()
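        # Emit one test case per queued path from the single combined
        # model, stopping at the configured limit.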
        for path_id, path_data in self.paths_data:
            if Statistics().num_test_cases == max_test_cases:
                break
            # TODO: Consider moving solvers to a yield model
            test_case, payloads = self.build_test_case(model, path_id,
                                                       path_data)
            self.test_case_writer.write(test_case, payloads)
            Statistics().num_test_cases += 1

        self.reset()
Example #10
 def count_parser_path_edges(self, parser_path):
     for e in parser_path:
         if self.parser_path_edge_count[e] == 0:
             Statistics().num_covered_edges += 1
         self.parser_path_edge_count[e] += 1
Example #11
def enough_test_cases():
    max_test_cases = Config().get_num_test_cases()
    if max_test_cases is None or max_test_cases <= 0:
        return False
    return Statistics().num_test_cases >= max_test_cases
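
A minimal sketch of how enough_test_cases() might gate a generation loop (hypothetical driver code; it assumes only the Config and Statistics singletons used elsewhere in these examples):

def generate_all(paths, process_path):
    # Stop early once the configured number of test cases is reached;
    # with no limit configured, enough_test_cases() always returns False.
    for path in paths:
        if enough_test_cases():
            break
        process_path(path)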
Example #12
def process_json_file(input_file, debug=False, generate_graphs=False):
    top = P4_Top(debug)
    top.build_from_json(input_file)

    # Get the parser graph
    hlir = P4_HLIR(debug, top.json_obj)
    parser_graph = hlir.get_parser_graph()
    # parser_sources, parser_sinks = parser_graph.get_sources_and_sinks()
    # logging.debug("parser_graph has %d sources %s, %d sinks %s"
    #               "" % (len(parser_sources), parser_sources, len(parser_sinks),
    #                     parser_sinks))

    assert 'ingress' in hlir.pipelines
    in_pipeline = hlir.pipelines['ingress']
    graph, source_info_to_node_name = in_pipeline.generate_CFG()
    logging.debug(graph)
    graph_sources, graph_sinks = graph.get_sources_and_sinks()
    logging.debug(
        "graph has %d sources %s, %d sinks %s"
        "" %
        (len(graph_sources), graph_sources, len(graph_sinks), graph_sinks))

    # Graphviz visualization
    if generate_graphs:
        graph_lcas = {}
        #tmp_time = time.time()
        #for v in graph.get_nodes():
        #    graph_lcas[v] = graph.lowest_common_ancestor(v)
        #lca_comp_time = time.time() - tmp_time
        #logging.info("%.3f sec to compute lowest common ancestors for ingress",
        #             lca_comp_time)
        generate_graphviz_graph(in_pipeline, graph, lcas=graph_lcas)
        eg_pipeline = hlir.pipelines['egress']
        eg_graph, eg_source_info_to_node_name = eg_pipeline.generate_CFG()
        generate_graphviz_graph(eg_pipeline, eg_graph)
        return

    Statistics().init()
    # XXX: move
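    # Edge labels start out UNVISITED; the coverage visitors promote them
    # to VISITED and DONE as paths are covered.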
    labels = defaultdict(lambda: EdgeLabels.UNVISITED)
    translator = Translator(input_file, hlir, in_pipeline)
    results = OrderedDict()
    # TBD: Make this filename specifiable via command line option
    test_case_writer = TestCaseWriter('test-cases.json', 'test.pcap')

    num_control_paths, num_control_path_nodes, num_control_path_edges = \
        graph.count_all_paths(in_pipeline.init_table_name)
    num_parser_path_edges = parser_graph.num_edges()
    Statistics().num_control_path_edges = (
        num_parser_path_edges + num_control_path_edges)

    if Config().get_try_least_used_branches_first():
        p_visitor = TLUBFParserVisitor(graph, labels, translator,
                                       source_info_to_node_name, results,
                                       test_case_writer, in_pipeline)
        lup = LeastUsedPaths(hlir, parser_graph,
                             hlir.parsers['parser'].init_state, p_visitor)
        lup.visit()
        exit(0)

    graph_visitor = ParserGraphVisitor(hlir)
    parser_graph.visit_all_paths(hlir.parsers['parser'].init_state, 'sink',
                                 graph_visitor)
    parser_paths = graph_visitor.all_paths

    num_parser_paths = len(parser_paths)
    num_parser_path_nodes = 0
    #num_parser_paths, num_parser_path_nodes, num_parser_path_edges = \
    #    parser_graph.count_all_paths('start')
    # print('\n'.join([str(p) for p in parser_paths]))

    max_path_len = max([len(p) for p in parser_paths])
    logging.info("Found %d parser paths, longest with length %d"
                 "" % (len(parser_paths), max_path_len))
    if Config().get_show_parser_paths():
        parser_paths_with_len = collections.defaultdict(list)
        for p in parser_paths:
            parser_paths_with_len[len(p)].append(p)
        for plen in sorted(parser_paths_with_len.keys()):
            logging.info("%6d parser paths with len %2d"
                         "" % (len(parser_paths_with_len[plen]), plen))
        for plen in sorted(parser_paths_with_len.keys()):
            logging.info("Contents of %6d parser paths with len %2d:"
                         "" % (len(parser_paths_with_len[plen]), plen))
            i = 0
            for p in parser_paths_with_len[plen]:
                i += 1
                logging.info("Path %d of %d with len %d:"
                             "" % (i, len(parser_paths_with_len[plen]), plen))
                print(p)

    logging.info("Counted %d paths, %d nodes, %d edges"
                 " in parser + ingress control flow graph"
                 "" % (len(parser_paths) * num_control_paths,
                       num_parser_path_nodes + num_control_path_nodes,
                       num_parser_path_edges + num_control_path_edges))

    # The only reason first_time is a list is so we can mutate the
    # global value inside of a sub-method.
    first_time = [True]
    parser_path_num = 0

    # XXX: move
    path_count = defaultdict(int)

    for parser_path in parser_paths:
        for e in parser_path:
            if path_count[e] == 0:
                Statistics().num_covered_edges += 1
            path_count[e] += 1
        parser_path_num += 1
        logging.info("Analyzing parser_path %d of %d: %s"
                     "" % (parser_path_num, len(parser_paths), parser_path))
        if not translator.generate_parser_constraints(parser_path):
            logging.info("Could not find any packet to satisfy parser path: %s"
                         "" % (parser_path))
            # Skip unsatisfiable parser paths
            continue

        graph_visitor = None
        if Config().get_try_least_used_branches_first():
            graph_visitor = EdgeCoverageGraphVisitor(graph, labels, translator,
                                                     parser_path,
                                                     source_info_to_node_name,
                                                     results, test_case_writer)
        else:
            graph_visitor = PathCoverageGraphVisitor(translator, parser_path,
                                                     source_info_to_node_name,
                                                     results, test_case_writer)

        graph.visit_all_paths(in_pipeline.init_table_name, None, graph_visitor)

        # Check whether we have generated enough test cases (None or 0
        # means no limit).
        max_test_cases = Config().get_num_test_cases()
        if max_test_cases and Statistics().num_test_cases >= max_test_cases:
            break

    logging.info("Final statistics on use of control path edges:")
    Statistics().log_control_path_stats(
        Statistics().stats_per_control_path_edge,
        Statistics().num_control_path_edges)
    test_case_writer.cleanup()
    translator.cleanup()

    Statistics().dump()
    Statistics().cleanup()

    for result, count in Statistics().stats.items():
        print('{}: {}'.format(result, count))

    if Config().get_dump_test_case():
        str_items = []
        for k, v in results.items():
            str_items.append('{}: {}'.format(k, v))
        print('{{ {} }}'.format(', '.join(str_items)))

    return results
Example #13
    def record_stats(self, control_path, is_complete_control_path, result):
        if result == TestPathResult.SUCCESS and is_complete_control_path:
            Statistics().avg_full_path_len.record(
                len(self.parser_path + control_path))
            for e in control_path:
                if Statistics().stats_per_control_path_edge[e] == 0:
                    Statistics().num_covered_edges += 1
                Statistics().stats_per_control_path_edge[e] += 1
        if result == TestPathResult.NO_PACKET_FOUND:
            Statistics().avg_unsat_path_len.record(
                len(self.parser_path + control_path))
            Statistics().count_unsat_paths.inc()

        if Config().get_record_statistics():
            Statistics().record(result, is_complete_control_path,
                                self.path_solver)

        if record_path_result(result, is_complete_control_path):
            path = (tuple(self.parser_path), tuple(control_path))
            if path in self.results and self.results[path] != result:
                logging.error("result_path %s with result %s"
                              " is already recorded in results"
                              " while trying to record different result %s"
                              "" % (path, self.results[path], result))
                # assert False
            self.results[path] = result
            if result == TestPathResult.SUCCESS and is_complete_control_path:
                now = time.time()
                self.success_path_count += 1
                # Use real time to avoid printing these details
                # too often in the output log.
                stats = Statistics()
                if (now - stats.last_time_printed_stats_per_control_path_edge
                        >= 30):
                    stats.log_control_path_stats(
                        stats.stats_per_control_path_edge,
                        stats.num_control_path_edges)
                    stats.last_time_printed_stats_per_control_path_edge = now
            Statistics().stats[result] += 1
Example #14
    def visit(self, control_path, is_complete_control_path):
        self.path_count += 1
        self.translator.push()
        expected_path, result, test_case, packet_lst = \
            self.translator.generate_constraints(
                self.parser_path, control_path,
                self.source_info_to_node_name, self.path_count,
                is_complete_control_path)

        if result == TestPathResult.SUCCESS and is_complete_control_path:
            Statistics().avg_full_path_len.record(
                len(self.parser_path + control_path))
        if result == TestPathResult.NO_PACKET_FOUND:
            Statistics().avg_unsat_path_len.record(
                len(self.parser_path + control_path))
            Statistics().count_unsat_paths.inc()

        if Config().get_record_statistics():
            Statistics().record(result, is_complete_control_path, self.translator)

        record_result = (is_complete_control_path
                         or (result != TestPathResult.SUCCESS))
        if record_result:
            # Doing file writing here enables getting at least
            # some test case output data for p4pktgen runs that
            # the user kills before it completes, e.g. because it
            # takes too long to complete.
            self.test_case_writer.write(test_case, packet_lst)
            result_path = ([n.src for n in self.parser_path] + ['sink'] +
                           [(n.src, n) for n in control_path])
            result_path_tuple = tuple(expected_path)
            if (result_path_tuple in self.results
                    and self.results[result_path_tuple] != result):
                logging.error("result_path %s with result %s"
                              " is already recorded in results"
                              " while trying to record different result %s"
                              "" % (result_path,
                                    self.results[result_path_tuple], result))
                assert False
            self.results[result_path_tuple] = result
            if result == TestPathResult.SUCCESS and is_complete_control_path:
                for x in control_path:
                    Statistics().stats_per_control_path_edge[x] += 1
                now = time.time()
                # Use real time to avoid printing these details
                # too often in the output log.
                stats = Statistics()
                if (now - stats.last_time_printed_stats_per_control_path_edge
                        >= 30):
                    stats.log_control_path_stats(
                        stats.stats_per_control_path_edge,
                        stats.num_control_path_edges)
                    stats.last_time_printed_stats_per_control_path_edge = now
            Statistics().stats[result] += 1
            self.stats_per_traversal[result] += 1

        tmp_num = Config().get_max_paths_per_parser_path()
        if (tmp_num and
                self.stats_per_traversal[TestPathResult.SUCCESS] >= tmp_num):
            logging.info("Already found %d packets for parser path %d of %d."
                         "  Backing off so we can get to next parser path ASAP"
                         "" % (self.stats_per_traversal[TestPathResult.SUCCESS],
                               parser_path_num, len(parser_paths)))
            go_deeper = False
        else:
            go_deeper = (result == TestPathResult.SUCCESS)
        return go_deeper
Example #15
    def generate_parser_constraints(self, parser_path):
        parser_constraints_gen_timer = Timer('parser_constraints_gen')
        parser_constraints_gen_timer.start()

        if Config().get_incremental():
            self.solver.pop()
            self.solver.push()

        self.sym_packet = Packet()
        self.init_context()
        constraints = []

        # XXX: make this work for multiple parsers
        parser = self.hlir.parsers['parser']
        pos = BitVecVal(0, 32)
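        # 'pos' is the symbolic bit offset into the packet; each parser
        # op advances it as fields are extracted.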
        logging.info('path = {}'.format(', '.join(
            str(n) for n in parser_path)))
        for path_transition in parser_path:
            assert isinstance(path_transition,
                              (ParserTransition, ParserCompositeTransition,
                               ParserErrorTransition))

            node = path_transition.src
            next_node = path_transition.dst
            logging.debug('{}\tpos = {}'.format(path_transition, pos))
            new_pos = pos
            parse_state = parser.parse_states[node]
            context = self.current_context()
            fail = ''

            for op_idx, parser_op in enumerate(parse_state.parser_ops):
                oob = self.translator.parser_op_oob(context, parser_op)
                if isinstance(path_transition, ParserErrorTransition) \
                        and op_idx == path_transition.op_idx \
                        and path_transition.next_state == 'sink':
                    fail = path_transition.error_str

                    if not oob and fail == 'StackOutOfBounds':
                        # We're on a path where the current parser op over-/
                        # underflows the stack, but in fact that didn't happen,
                        # so the path is unsatisfiable.
                        return False

                if oob and fail != 'StackOutOfBounds':
                    # This parser op over-/underflows, and we're not on a path
                    # that handles that error condition, so the path is
                    # unsatisfiable.
                    return False

                new_pos = self.translator.parser_op_to_smt(
                    context, self.sym_packet, parser_op, fail, pos, new_pos,
                    constraints)

                if fail:
                    break

            if next_node == P4_HLIR.PACKET_TOO_SHORT:
                # Packet needs to be at least one byte too short
                self.sym_packet.set_max_length(simplify(new_pos - 8))
                break

            if fail:
                assert path_transition.next_state == 'sink'
                break

            underflow = any(context.get_stack_parsed_count(f.header_name) == 0
                            for f in parse_state.stack_field_key_elems())
            if isinstance(path_transition, ParserErrorTransition):
                assert path_transition.op_idx is None
                assert path_transition.error_str == 'StackOutOfBounds'
                assert path_transition.next_state == 'sink'
                if not underflow:
                    # On an error path but no underflow: unsatisfiable.
                    return False
                # Otherwise, the complete path is satisfiable.
                fail = path_transition.error_str
                break
            elif underflow:
                # Underflow but not an error path: unsatisfiable.
                return False
            else:
                sym_transition_key = []
                for transition_key_elem in parse_state.transition_key:
                    if isinstance(transition_key_elem, TypeValueField):
                        sym_transition_key.append(
                            self.current_context().get_header_field(
                                transition_key_elem.header_name,
                                transition_key_elem.header_field))
                    elif isinstance(transition_key_elem, TypeValueStackField):
                        sym_transition_key.append(
                            self.current_context().get_last_header_field(
                                transition_key_elem.header_name,
                                transition_key_elem.header_field,
                                self.hlir.get_header_stack(
                                    transition_key_elem.header_name).size))
                    else:
                        raise Exception(
                            'Transition key type not supported: {}'.format(
                                transition_key_elem.__class__))

                # XXX: is this check really necessary?
                if len(sym_transition_key) > 0:
                    if isinstance(path_transition, ParserCompositeTransition):
                        new_constraints = self.parser_composite_transition_constraints(
                            path_transition, parse_state.transitions, sym_transition_key)
                    else:
                        new_constraints = self.parser_transition_constraints(
                            path_transition, parse_state.transitions, sym_transition_key)
                    constraints.extend(new_constraints)

                logging.debug(sym_transition_key)
                pos = simplify(new_pos)

        # XXX: workaround
        context = self.current_context()
        context.set_field_value('meta_meta', 'packet_len',
                                self.sym_packet.packet_size_var)
        if fail:
            context.set_field_value('standard_metadata', 'parser_error',
                                    self.error_bitvec(fail))
        constraints.extend(self.sym_packet.get_packet_constraints())
        self.solver.add(And(constraints))
        self.constraints[0] = constraints

        parser_constraints_gen_timer.stop()
        logging.info('Generate parser constraints: %.3f sec' %
                     (parser_constraints_gen_timer.get_time()))

        Statistics().solver_time.start()
        result = self.solver.check()
        Statistics().num_solver_calls += 1
        Statistics().solver_time.stop()

        if not Config().get_incremental():
            self.solver.reset()

        return result == sat
Example #16
def process_json_file(input_file, debug=False, generate_graphs=False):
    top = P4_Top(debug)
    top.build_from_json(input_file)

    # Get the parser graph
    hlir = P4_HLIR(debug, top.json_obj)
    parser_graph = hlir.get_parser_graph()
    parser_sources, parser_sinks = parser_graph.get_sources_and_sinks()
    logging.debug("parser_graph has %d sources %s, %d sinks %s"
                  "" % (len(parser_sources), parser_sources, len(parser_sinks),
                        parser_sinks))

    assert 'ingress' in hlir.pipelines
    in_pipeline = hlir.pipelines['ingress']
    graph, source_info_to_node_name = in_pipeline.generate_CFG()
    logging.debug(graph)
    graph_sources, graph_sinks = graph.get_sources_and_sinks()
    logging.debug("graph has %d sources %s, %d sinks %s"
                  "" % (len(graph_sources), graph_sources, len(graph_sinks),
                        graph_sinks))
    tmp_time = time.time()
    graph_lcas = {}
    for v in graph.get_nodes():
        graph_lcas[v] = graph.lowest_common_ancestor(v)
    lca_comp_time = time.time() - tmp_time
    logging.info("%.3f sec to compute lowest common ancestors for ingress",
                 lca_comp_time)

    # Graphviz visualization
    if generate_graphs:
        generate_graphviz_graph(in_pipeline, graph, lcas=graph_lcas)
        eg_pipeline = hlir.pipelines['egress']
        eg_graph, eg_source_info_to_node_name = eg_pipeline.generate_CFG()
        generate_graphviz_graph(eg_pipeline, eg_graph)
        return

    graph_visitor = AllPathsGraphVisitor()
    parser_graph.visit_all_paths(hlir.parsers['parser'].init_state, 'sink',
                                 graph_visitor)
    parser_paths = graph_visitor.all_paths

    # paths = [[n[0] for n in path] + ['sink'] for path in paths]
    max_path_len = max([len(p) for p in parser_paths])
    logging.info("Found %d parser paths, longest with length %d"
                 "" % (len(parser_paths), max_path_len))

    num_control_paths, num_control_path_nodes, num_control_path_edges = \
        graph.count_all_paths(in_pipeline.init_table_name)
    logging.info("Counted %d paths, %d nodes, %d edges"
                 " in ingress control flow graph"
                 "" % (num_control_paths, num_control_path_nodes,
                       num_control_path_edges))

    Statistics().init()
    Statistics().num_control_path_edges = num_control_path_edges

    results = OrderedDict()
    translator = Translator(input_file, hlir, in_pipeline)
    # TBD: Make this filename specifiable via command line option
    test_case_writer = TestCaseWriter('test-cases.json', 'test.pcap')
    # The only reason first_time is a list is so we can mutate the
    # global value inside of a sub-method.
    first_time = [True]
    parser_path_num = 0
    for parser_path in parser_paths:
        parser_path_num += 1
        logging.info("Analyzing parser_path %d of %d: %s"
                     "" % (parser_path_num, len(parser_paths), parser_path))
        translator.generate_parser_constraints(parser_path)

        def order_neighbors_by_least_used(node, neighbors):
            edge_stats = Statistics().stats_per_control_path_edge
            custom_order = sorted(
                neighbors, key=lambda t: edge_stats[(node, t)])
            if Config().get_debug():
                logging.debug("Edges out of node %s"
                              " ordered from least used to most:", node)
                for n in custom_order:
                    edge = (node, n)
                    logging.debug("    %d %s"
                                  "" % (edge_stats[edge], edge))
            return custom_order

        if Config().get_try_least_used_branches_first():
            order_cb_fn = order_neighbors_by_least_used
        else:
            # Use the default order built into visit_all_paths()
            order_cb_fn = None

        graph_visitor = PathCoverageGraphVisitor(translator, parser_path,
                                                 source_info_to_node_name,
                                                 results, test_case_writer)
        graph.visit_all_paths(in_pipeline.init_table_name, None, graph_visitor)

    logging.info("Final statistics on use of control path edges:")
    Statistics().log_control_path_stats(
        Statistics().stats_per_control_path_edge, num_control_path_edges)
    test_case_writer.cleanup()
    translator.cleanup()
    Statistics().cleanup()

    for result, count in Statistics().stats.items():
        print('{}: {}'.format(result, count))

    if Config().get_dump_test_case():
        str_items = []
        for k, v in results.items():
            str_items.append('{}: {}'.format(k, v))
        print('{{ {} }}'.format(', '.join(str_items)))

    return results