def main(args: Namespace):
    """Validate the instrumentation budget and the trace file, then run the
    static and hybrid analyses from the call-graph root."""
    the_program = programs.IO.read(args.program)
    root = the_program.call_graph.get_root()

    if args.budget:
        # A budget must allow at least two instrumentation points per subprogram...
        if args.budget < 2 * len(the_program):
            error_message('Each subprogram requires at least two instrumentation points; '
                          'the given program thus needs at least {}.'.format(len(the_program) * 2))
        # ...and cannot exceed the total number of CFG vertices across subprograms.
        maximum_budget = sum([subprogram.cfg.number_of_vertices() for subprogram in the_program])
        if args.budget > maximum_budget:
            error_message('The maximum number of allowed instrumentation points is {}.'.format(maximum_budget))

    # The first line of the trace file must carry the program's magic number;
    # otherwise the traces belong to a different program.
    with open(args.traces, 'r') as traces_file:
        first_line = traces_file.readline().strip()
        if first_line != the_program.magic:
            error_message('Traces are not generated by the given program.')

    verbose_message('Root is {}'.format(root.name))
    search = graphs.DepthFirstSearch(the_program.call_graph, root)
    static_analysis(the_program, root, search)
    hybrid_analysis(the_program, root, search, args.traces, args.policy, args.budget)
def main(args: Namespace):
    """Generate traces for a program until coverage is (mostly) achieved.

    Repeatedly generates a trace from the call-graph root — generate_trace is
    expected to discard vertices from 'uncovered' as they are hit — until
    fewer than 10% of all CFG vertices remain uncovered or args.runs traces
    have been produced.  Traces are then written to 'traces.<stem>.txt',
    headed by the program's magic number.
    """
    program = programs.IO.read(args.program)
    root = program.call_graph.get_root()
    verbose_message('Root is {}'.format(root.name))
    # Every CFG vertex of every subprogram starts out uncovered.
    uncovered = {vertex for subprogram in program for vertex in subprogram.cfg}
    total = len(uncovered)
    count = 1
    traces = []
    # Guard 'total' first: the original condition divided by it unconditionally
    # and raised ZeroDivisionError on a program with no vertices.
    while total and len(uncovered) / total > 0.1 and count <= args.runs:
        print(count, len(uncovered) / total)
        traces.append(generate_trace(program, root, uncovered))
        count += 1
    stem, _ = splitext(program.filename)
    with open('traces.{}.txt'.format(stem), 'w') as traces_file:
        traces_file.write('{}\n\n'.format(program.magic))
        # Each trace is a sequence of (x, y) pairs; traces are blank-line separated.
        for trace in traces:
            for x, y in trace:
                traces_file.write('{} {}\n'.format(x, y))
            traces_file.write('\n')
def intra_procedural_analysis(prog, policy, reinstrument, traces):
    """Generate and compare candidate instrumentation point graphs (IPGs).

    For every CFG in prog, build its program point graph (PPG) and create
    2 * reinstrument candidate IPGs under the given policy: the first batch is
    reduced with structural filters (unique predecessor/successor, then any
    predecessor and successor), the second with an unconditional filter.
    Finally print per-subprogram summary statistics (baseline size, min/max
    vertex and edge counts over the candidates).

    NOTE(review): the 'traces' parameter is currently unused — confirm intent.
    """
    generated_ipgs = {cfg.name: [] for cfg in prog}
    # Remember each subprogram's PPG so the summary loop can rebuild the
    # baseline IPG for the *right* subprogram.  Previously it reused the 'ppg'
    # variable leaked from the loop above, so every summary printed the
    # baseline of the final subprogram only.
    ppgs = {}
    for cfg in prog:
        messages.verbose_message('Analysing subprogram {}'.format(cfg.name))
        dot.visualise_control_flow_graph(prog, cfg)
        ppg = graphs.ProgramPointGraph(cfg)
        ppgs[cfg.name] = ppg
        dot.visualise_flow_graph(prog, ppg, '.ppg')
        for i in range(reinstrument):
            ipg = graphs.InstrumentationPointGraph.create_from_policy(ppg, policy)
            ipg.name = '{}.{}'.format(ipg.name, len(generated_ipgs[cfg.name]))
            generated_ipgs[cfg.name].append(ipg)
            # Progressively weaker structural reduction conditions.
            filters = [lambda v: ipg.predecessors(v) and ipg.successors(v) and
                                 len(ipg.predecessors(v)) == 1 and len(ipg.successors(v)) == 1,
                       lambda v: ipg.predecessors(v) and ipg.successors(v)]
            ipg.reduce(filters)
            #dot.visualise_instrumentation_point_graph(prog, ipg)
        for i in range(reinstrument):
            ipg = graphs.InstrumentationPointGraph.create_from_policy(ppg, policy)
            ipg.name = '{}.{}'.format(ipg.name, len(generated_ipgs[cfg.name]))
            generated_ipgs[cfg.name].append(ipg)
            # Unconditional filter: every vertex is a reduction candidate.
            filters = [lambda v: True]
            ipg.reduce(filters)
            # dot.visualise_instrumentation_point_graph(prog, ipg)
    for name, ipgs in generated_ipgs.items():
        # Two stable sorts: edge count is the primary key, vertex count breaks ties.
        ipgs.sort(key=lambda ipg: ipg.number_of_vertices())
        ipgs.sort(key=lambda ipg: ipg.number_of_edges())
        print("===========================> {} candidates: {}".format(name, len(ipgs)))
        # Baseline: an unreduced IPG built from this subprogram's own PPG.
        basic_ipg = graphs.InstrumentationPointGraph.create_from_policy(ppgs[name], policy)
        print('n={} m={}'.format(basic_ipg.number_of_vertices(), basic_ipg.number_of_edges()))
        smallest_instrumentation = min(ipgs, key=lambda ipg: ipg.number_of_vertices())
        print('min(|V|): {} n={} m={}'.format(smallest_instrumentation.name,
                                              smallest_instrumentation.number_of_vertices(),
                                              smallest_instrumentation.number_of_edges()))
        largest_instrumentation = max(ipgs, key=lambda ipg: ipg.number_of_vertices())
        print('max(|V|): {} n={} m={}'.format(largest_instrumentation.name,
                                              largest_instrumentation.number_of_vertices(),
                                              largest_instrumentation.number_of_edges()))
        smallest_footprint = min(ipgs, key=lambda ipg: ipg.number_of_edges())
        print('min(|E|): {} n={} m={}'.format(smallest_footprint.name,
                                              smallest_footprint.number_of_vertices(),
                                              smallest_footprint.number_of_edges()))
        largest_footprint = max(ipgs, key=lambda ipg: ipg.number_of_edges())
        print('max(|E|): {} n={} m={}'.format(largest_footprint.name,
                                              largest_footprint.number_of_vertices(),
                                              largest_footprint.number_of_edges()))
def main(**kwargs):
    """For every subprogram, repeatedly compare the PPG-based WCET ILP against
    the super-block-graph-based ILP, folding super-block-graph construction
    time into the latter's construction time."""
    current_program = program.IO.read(kwargs['filename'])
    current_program.cleanup()
    with database.Database(kwargs['database']) as db:
        for subprogram in current_program:
            subprogram.cfg.dotify()
            ppg = graph.ProgramPointGraph.create_from_control_flow_graph(subprogram.cfg)
            ppg.dotify()
            for _ in range(kwargs['repeat']):
                lnt = graph.LoopNests(ppg)
                lnt.dotify()
                ilp_for_ppg = calculations.create_ilp_for_program_point_graph(ppg, lnt, db)
                ilp_for_ppg.solve('{}{}.ppg.ilp'.format(current_program.basename(), ppg.name))
                # Time the super block graph construction so it can be charged
                # to the super ILP's construction time.
                before = timeit.default_timer()
                sbg = graph.SuperBlockGraph(lnt)
                sbg.dotify()
                after = timeit.default_timer()
                ilp_for_super = calculations.create_ilp_for_super_block_graph(sbg, lnt, db)
                ilp_for_super.solve('{}{}.super.ilp'.format(current_program.basename(), ppg.name))
                ilp_for_super.add_to_construction_time(after - before)
                verdict = "FAILED " * 10 if ilp_for_ppg.wcet != ilp_for_super.wcet else "PASSED"
                messages.verbose_message('>>>>>', ppg.name, verdict)
                messages.verbose_message(ilp_for_ppg)
                messages.verbose_message(ilp_for_super, new_lines=2)
def main(program_filename: str, database_filename: str, repeat: int,
         subprogram_names: typing.List[str], fold_optimisation: bool,
         dominator_optimisation: bool):
    """Verify and benchmark the super-block-graph WCET ILP against the PPG ILP.

    For each analysable subprogram: solve both ILPs once and compare WCETs
    (recording mismatches in 'failures'), then re-solve each 'repeat' times to
    average construction and solve times and report speed-up/slow-down factors.

    NOTE(review): 'failures' is populated but never reported after the loop —
    confirm whether a summary message was intended.
    """
    the_program = program.IO.read(program_filename)
    the_program.cleanup()
    failures = set()
    with database.Database(database_filename) as db:
        messages.verbose_message("Using database '{}'".format(database_filename))
        db.load_into_memory()
        # An empty subprogram_names list selects every subprogram.
        analysable_subprograms = [subprogram for subprogram in the_program
                                  if not subprogram_names or
                                  (subprogram_names and subprogram.name in subprogram_names)]
        for subprogram in analysable_subprograms:
            subprogram.cfg.dotify()
            ppg = graphs.ProgramPointGraph.create_from_control_flow_graph(subprogram.cfg)
            ppg.dotify()
            lnt = graphs.LoopNests(ppg)
            lnt.dotify()
            # First pass: correctness check — both formulations must agree on the WCET.
            ilp_for_ppg = calculations.create_ilp_for_program_point_graph(ppg, lnt, db)
            ilp_for_ppg.solve('{}.{}.ppg.ilp'.format(the_program.basename(), ppg.name))
            ilp_for_super = calculations.create_ilp_for_super_block_graph(ppg, lnt, db,
                                                                          fold_optimisation,
                                                                          dominator_optimisation)
            ilp_for_super.solve('{}.{}.super.ilp'.format(the_program.basename(), ppg.name))
            messages.verbose_message('>>>>>', ppg.name)
            if ilp_for_ppg.wcet != ilp_for_super.wcet:
                messages.verbose_message('FAILED')
                messages.verbose_message(ilp_for_ppg)
                messages.verbose_message(ilp_for_super, new_lines=2)
                failures.add(ppg.name)
            else:
                messages.verbose_message('PASSED')
            # Second pass: timing — rebuild and re-solve both ILPs 'repeat'
            # times to average out noise.
            ppg_construction_times = []
            ppg_solve_times = []
            super_construction_times = []
            super_solve_times = []
            for i in range(0, repeat):
                ilp_for_ppg = calculations.create_ilp_for_program_point_graph(ppg, lnt, db)
                ilp_for_ppg.solve('{}.{}.ppg.ilp'.format(the_program.basename(), ppg.name))
                ppg_construction_times.append(ilp_for_ppg.construction_time)
                ppg_solve_times.append(ilp_for_ppg.solve_time)
                ilp_for_super = calculations.create_ilp_for_super_block_graph(ppg, lnt, db,
                                                                              fold_optimisation,
                                                                              dominator_optimisation)
                ilp_for_super.solve('{}.{}.super.ilp'.format(the_program.basename(), ppg.name))
                super_construction_times.append(ilp_for_super.construction_time)
                super_solve_times.append(ilp_for_super.solve_time)
            # Per-subprogram averages over the 'repeat' trials.
            ppg_solve_time = sum(ppg_solve_times)/repeat
            ppg_construction_time = sum(ppg_construction_times)/repeat
            super_solve_time = sum(super_solve_times)/repeat
            super_construction_time = sum(super_construction_times)/repeat
            # factor > 1 means the super-block formulation solved faster.
            factor = ppg_solve_time/super_solve_time
            if factor > 1:
                solve_message = '{:.2}X speed up'.format(factor)
            else:
                solve_message = '{:.2}X slow down'.format(1/factor)
            # Same comparison, but including construction time.
            factor = (ppg_construction_time+ppg_solve_time)/(super_solve_time+super_construction_time)
            if factor > 1:
                total_message = '{:.2}X speed up'.format(factor)
            else:
                total_message = '{:.2}X slow down'.format(1 / factor)
            messages.verbose_message('solve={}'.format(solve_message))
            messages.verbose_message('total={}'.format(total_message))
            messages.verbose_message('solve={:.5f} '
                                     'construction={:.5f} '
                                     'total={:.5f} '
                                     'variables={} '
                                     'constraints={} '
                                     '[PPG]'.format(ppg_solve_time,
                                                    ppg_construction_time,
                                                    ppg_solve_time + ppg_construction_time,
                                                    ilp_for_ppg.number_of_variables(),
                                                    ilp_for_ppg.number_of_constraints()))
            messages.verbose_message('solve={:.5f} '
                                     'construction={:.5f} '
                                     'total={:.5f} '
                                     'variables={} '
                                     'constraints={} '
                                     '[SUPER]'.format(super_solve_time,
                                                      super_construction_time,
                                                      super_solve_time + super_construction_time,
                                                      ilp_for_super.number_of_variables(),
                                                      ilp_for_super.number_of_constraints()))
def intra_procedural_analysis(prog, policy, reinstrument, traces):
    """Generate and compare candidate instrumentation point graphs (IPGs).

    For every CFG in prog, build its program point graph (PPG) and create
    2 * reinstrument candidate IPGs under the given policy: the first batch is
    reduced with structural filters (unique predecessor/successor, then any
    predecessor and successor), the second with an unconditional filter.
    Finally print per-subprogram summary statistics (baseline size, min/max
    vertex and edge counts over the candidates).

    NOTE(review): the 'traces' parameter is currently unused — confirm intent.
    """
    generated_ipgs = {cfg.name: [] for cfg in prog}
    # Remember each subprogram's PPG so the summary loop can rebuild the
    # baseline IPG for the *right* subprogram.  Previously it reused the 'ppg'
    # variable leaked from the loop above, so every summary printed the
    # baseline of the final subprogram only.
    ppgs = {}
    for cfg in prog:
        messages.verbose_message('Analysing subprogram {}'.format(cfg.name))
        dot.visualise_control_flow_graph(prog, cfg)
        ppg = graph.ProgramPointGraph(cfg)
        ppgs[cfg.name] = ppg
        dot.visualise_flow_graph(prog, ppg, '.ppg')
        for i in range(reinstrument):
            ipg = graph.InstrumentationPointGraph.create_from_policy(ppg, policy)
            ipg.name = '{}.{}'.format(ipg.name, len(generated_ipgs[cfg.name]))
            generated_ipgs[cfg.name].append(ipg)
            # Progressively weaker structural reduction conditions.
            filters = [
                lambda v: ipg.predecessors(v) and ipg.successors(v) and
                          len(ipg.predecessors(v)) == 1 and len(ipg.successors(v)) == 1,
                lambda v: ipg.predecessors(v) and ipg.successors(v)
            ]
            ipg.reduce(filters)
            #dot.visualise_instrumentation_point_graph(prog, ipg)
        for i in range(reinstrument):
            ipg = graph.InstrumentationPointGraph.create_from_policy(ppg, policy)
            ipg.name = '{}.{}'.format(ipg.name, len(generated_ipgs[cfg.name]))
            generated_ipgs[cfg.name].append(ipg)
            # Unconditional filter: every vertex is a reduction candidate.
            filters = [lambda v: True]
            ipg.reduce(filters)
            # dot.visualise_instrumentation_point_graph(prog, ipg)
    for name, ipgs in generated_ipgs.items():
        # Two stable sorts: edge count is the primary key, vertex count breaks ties.
        ipgs.sort(key=lambda ipg: ipg.number_of_vertices())
        ipgs.sort(key=lambda ipg: ipg.number_of_edges())
        print("===========================> {} candidates: {}".format(name, len(ipgs)))
        # Baseline: an unreduced IPG built from this subprogram's own PPG.
        basic_ipg = graph.InstrumentationPointGraph.create_from_policy(ppgs[name], policy)
        print('n={} m={}'.format(basic_ipg.number_of_vertices(),
                                 basic_ipg.number_of_edges()))
        smallest_instrumentation = min(ipgs, key=lambda ipg: ipg.number_of_vertices())
        print('min(|V|): {} n={} m={}'.format(smallest_instrumentation.name,
                                              smallest_instrumentation.number_of_vertices(),
                                              smallest_instrumentation.number_of_edges()))
        largest_instrumentation = max(ipgs, key=lambda ipg: ipg.number_of_vertices())
        print('max(|V|): {} n={} m={}'.format(largest_instrumentation.name,
                                              largest_instrumentation.number_of_vertices(),
                                              largest_instrumentation.number_of_edges()))
        smallest_footprint = min(ipgs, key=lambda ipg: ipg.number_of_edges())
        print('min(|E|): {} n={} m={}'.format(smallest_footprint.name,
                                              smallest_footprint.number_of_vertices(),
                                              smallest_footprint.number_of_edges()))
        largest_footprint = max(ipgs, key=lambda ipg: ipg.number_of_edges())
        print('max(|E|): {} n={} m={}'.format(largest_footprint.name,
                                              largest_footprint.number_of_vertices(),
                                              largest_footprint.number_of_edges()))
def main(program_filename: str, repeat: int, subprogram_names: typing.List[str],
         verify: bool):
    """Benchmark three dominator-tree algorithms (Betts, Lengauer-Tarjan,
    Cooper) on each analysable subprogram's CFG and report a ranking summary.

    Each algorithm runs 'repeat' times on the CFG with its edges reshuffled per
    trial; when 'verify' is set, the three resulting trees are cross-checked
    pairwise via do_verification.

    NOTE(review): 'speedups' and 'slowdowns' are initialised but never used —
    possibly leftover from an earlier version.
    """
    the_program = programs.IO.read(program_filename)
    the_program.cleanup()
    messages.verbose_message("Verification is {}".format("ON" if verify else "OFF"))
    # An empty subprogram_names list selects every subprogram.
    analysable_subprograms = [
        subprogram for subprogram in the_program if not subprogram_names or (
            subprogram_names and subprogram.name in subprogram_names)
    ]
    results = []
    speedups = []
    slowdowns = []
    for subprogram in analysable_subprograms:
        messages.verbose_message("Analysing", subprogram.name)
        subprogram.cfg.dotify()
        betts_times = []
        tarjan_times = []
        cooper_times = []
        for i in range(0, repeat):
            # Shuffle edges each trial so no algorithm benefits from a fixed
            # edge ordering.
            subprogram.cfg.shuffle_edges()
            start = time.time()
            betts_tree = graphs.Betts(subprogram.cfg)
            betts_times.append(time.time() - start)
            start = time.time()
            tarjan_tree = graphs.LengauerTarjan(subprogram.cfg, subprogram.cfg.entry)
            tarjan_times.append(time.time() - start)
            start = time.time()
            cooper_tree = graphs.Cooper(subprogram.cfg)
            cooper_times.append(time.time() - start)
            if verify:
                # Pairwise cross-checks of the three dominator trees.
                messages.debug_message("Betts versus Tarjan")
                do_verification(subprogram.cfg, betts_tree, tarjan_tree)
                messages.debug_message("Cooper versus Tarjan")
                do_verification(subprogram.cfg, cooper_tree, tarjan_tree)
                messages.debug_message("Betts versus Cooper")
                do_verification(subprogram.cfg, betts_tree, cooper_tree)
        # Average per-algorithm runtimes, ranked fastest first.
        betts = Time(Algorithms.Betts, sum(betts_times) / repeat)
        cooper = Time(Algorithms.Cooper, sum(cooper_times) / repeat)
        tarjan = Time(Algorithms.Tarjan, sum(tarjan_times) / repeat)
        times = [betts, cooper, tarjan]
        times.sort(key=lambda t: t.time)
        results.append(times)
        messages.verbose_message(
            '{} vertices={} edges={} branches={} merges={}'.format(
                subprogram.name, subprogram.cfg.number_of_vertices(),
                subprogram.cfg.number_of_edges(),
                subprogram.cfg.number_of_branches(),
                subprogram.cfg.number_of_merges()))
        messages.verbose_message('{}:: {:.4f}'.format(times[0].name.value,
                                                      times[0].time))
        # The ratio reported is slower/fastest, i.e. how many times faster the
        # winner is than this entry.
        messages.verbose_message('{}:: {:.4f} {:.1f}X faster'.format(
            times[1].name.value, times[1].time, times[1].time / times[0].time))
        messages.verbose_message('{}:: {:.4f} {:.1f}X faster'.format(
            times[2].name.value, times[2].time, times[2].time / times[0].time))
    # Summary: how often each algorithm placed first, second and third.
    messages.verbose_message("======> Summary")
    for name in Algorithms:
        first = [r for r in results if r[0].name == name]
        if first:
            messages.verbose_message('{} came first {} times'.format(
                name.value, len(first)))
    for name in Algorithms:
        second = [r for r in results if r[1].name == name]
        if second:
            messages.verbose_message('{} came second {} times'.format(
                name.value, len(second)))
    for name in Algorithms:
        third = [r for r in results if r[2].name == name]
        if third:
            messages.verbose_message('{} came third {} times'.format(
                name.value, len(third)))
def main(**kwargs):
    """Check, per selected subprogram, that the IPG-based WCET ILP agrees with
    the PPG-based ILP, reporting solve times on success and details on failure."""
    messages.verbose_message("Reading program from '{}'".format(kwargs['filename']))
    the_program = program.IO.read(kwargs['filename'])
    the_program.cleanup()
    failures = set()
    chosen = kwargs['subprograms']
    with database.Database(kwargs['database']) as db:
        messages.verbose_message("Using database '{}'".format(kwargs['database']))
        for subprogram in the_program:
            # Skip subprograms not selected on the command line; an empty
            # selection means analyse everything.
            if chosen and subprogram.name not in chosen:
                continue
            subprogram.cfg.dotify()
            ppg = graphs.ProgramPointGraph.create_from_control_flow_graph(subprogram.cfg)
            ppg.dotify()
            lnt = graphs.LoopNests(ppg)
            lnt.dotify()
            ilp_for_ppg = calculations.create_ilp_for_program_point_graph(ppg, lnt, db)
            ilp_for_ppg.solve('{}.{}.ppg.ilp'.format(the_program.basename(), ppg.name))
            ipg = graphs.InstrumentationPointGraph.create(ppg, lnt, db)
            ipg.dotify()
            ilp_for_ipg = calculations.create_ilp_for_instrumentation_point_graph(ipg, lnt, db)
            ilp_for_ipg.solve('{}.{}.ipg.ilp'.format(the_program.basename(), ipg.name))
            if ilp_for_ppg.wcet == ilp_for_ipg.wcet:
                messages.verbose_message('>>>>> {} passed {:.5f} {:.5f}'.format(
                    ppg.name, ilp_for_ppg.solve_time, ilp_for_ipg.solve_time))
            else:
                messages.verbose_message('>>>>>', ppg.name, 'FAILED' * 2)
                failures.add(ppg.name)
                messages.verbose_message(ilp_for_ppg)
                messages.verbose_message(ilp_for_ipg, new_lines=2)
    if failures:
        messages.verbose_message('The following subprograms failed: {}'.format(', '.join(failures)))
def main(**kwargs):
    """Cross-check the PPG-based and IPG-based WCET calculations for each
    selected subprogram, collecting the names of any that disagree."""
    messages.verbose_message("Reading program from '{}'".format(kwargs['filename']))
    the_program = program.IO.read(kwargs['filename'])
    the_program.cleanup()
    failures = set()
    selection = kwargs['subprograms']
    with database.Database(kwargs['database']) as db:
        messages.verbose_message("Using database '{}'".format(kwargs['database']))
        for subprogram in the_program:
            # An empty selection means every subprogram is analysed.
            if selection and subprogram.name not in selection:
                continue
            subprogram.cfg.dotify()
            ppg = graph.ProgramPointGraph.create_from_control_flow_graph(subprogram.cfg)
            ppg.dotify()
            lnt = graph.LoopNests(ppg)
            lnt.dotify()
            ilp_for_ppg = calculations.create_ilp_for_program_point_graph(ppg, lnt, db)
            ilp_for_ppg.solve('{}.{}.ppg.ilp'.format(the_program.basename(), ppg.name))
            ipg = graph.InstrumentationPointGraph.create(ppg, lnt, db)
            ipg.dotify()
            ilp_for_ipg = calculations.create_ilp_for_instrumentation_point_graph(ipg, lnt, db)
            ilp_for_ipg.solve('{}.{}.ipg.ilp'.format(the_program.basename(), ipg.name))
            # A WCET mismatch between the two formulations is a failure.
            if ilp_for_ppg.wcet != ilp_for_ipg.wcet:
                messages.verbose_message('>>>>>', ppg.name, 'FAILED')
                failures.add(ppg.name)
                messages.verbose_message(ilp_for_ppg)
                messages.verbose_message(ilp_for_ipg, new_lines=2)
    if failures:
        messages.verbose_message('The following subprograms failed: {}'.format(', '.join(failures)))
def main(program_filename: str, database_filename: str, repeat: int,
         subprogram_names: typing.List[str], fold_optimisation: bool,
         dominator_optimisation: bool):
    """Verify and benchmark the super-block-graph WCET ILP against the PPG ILP.

    For each analysable subprogram: solve both ILPs once and compare WCETs,
    then re-solve each 'repeat' times to average construction and solve times
    and report per-subprogram and overall speed-up/slow-down factors.

    NOTE(review): 'failures' is populated but never reported after the loop —
    confirm whether a summary message was intended.
    """
    the_program = programs.IO.read(program_filename)
    the_program.cleanup()
    failures = set()
    with database.Database(database_filename) as db:
        messages.verbose_message(
            "Using database '{}'".format(database_filename))
        db.load_into_memory()
        # An empty subprogram_names list selects every subprogram.
        analysable_subprograms = [
            subprogram for subprogram in the_program if not subprogram_names or (
                subprogram_names and subprogram.name in subprogram_names)
        ]
        # Solve times accumulated across all subprograms for the overall summary.
        all_ppg_solve_times = []
        all_super_solve_times = []
        for subprogram in analysable_subprograms:
            subprogram.cfg.dotify()
            ppg = graphs.ProgramPointGraph.create_from_control_flow_graph(
                subprogram.cfg)
            ppg.dotify()
            lnt = graphs.LoopNests(ppg)
            lnt.dotify()
            # First pass: correctness check — both formulations must agree.
            ilp_for_ppg = calculations.create_ilp_for_program_point_graph(
                ppg, lnt, db)
            ilp_for_ppg.solve('{}.{}.ppg.ilp'.format(the_program.basename(),
                                                     ppg.name))
            ilp_for_super = calculations.create_ilp_for_super_block_graph(
                ppg, lnt, db, fold_optimisation, dominator_optimisation)
            ilp_for_super.solve('{}.{}.super.ilp'.format(
                the_program.basename(), ppg.name))
            messages.verbose_message('>>>>>', ppg.name)
            if ilp_for_ppg.wcet != ilp_for_super.wcet:
                messages.verbose_message('FAILED')
                messages.verbose_message(ilp_for_ppg)
                messages.verbose_message(ilp_for_super, new_lines=2)
                failures.add(ppg.name)
            else:
                messages.verbose_message('PASSED')
            # Second pass: timing — rebuild and re-solve both ILPs 'repeat'
            # times to average out noise.
            ppg_construction_times = []
            ppg_solve_times = []
            super_construction_times = []
            super_solve_times = []
            for i in range(0, repeat):
                ilp_for_ppg = calculations.create_ilp_for_program_point_graph(
                    ppg, lnt, db)
                ilp_for_ppg.solve('{}.{}.ppg.ilp'.format(
                    the_program.basename(), ppg.name))
                ppg_construction_times.append(
                    ilp_for_ppg.construction_time)
                ppg_solve_times.append(ilp_for_ppg.solve_time)
                ilp_for_super = calculations.create_ilp_for_super_block_graph(
                    ppg, lnt, db, fold_optimisation, dominator_optimisation)
                # Note the trial index 'i' is part of the super ILP filename here.
                ilp_for_super.solve('{}.{}.{}.super.ilp'.format(
                    the_program.basename(), i, ppg.name))
                super_construction_times.append(
                    ilp_for_super.construction_time)
                super_solve_times.append(ilp_for_super.solve_time)
            # Per-subprogram averages over the 'repeat' trials.
            ppg_solve_time = sum(ppg_solve_times) / repeat
            ppg_construction_time = sum(ppg_construction_times) / repeat
            super_solve_time = sum(super_solve_times) / repeat
            super_construction_time = sum(
                super_construction_times) / repeat
            all_ppg_solve_times.extend(ppg_solve_times)
            all_super_solve_times.extend(super_solve_times)
            # factor > 1 means the super-block formulation solved faster.
            factor = ppg_solve_time / super_solve_time
            if factor > 1:
                solve_message = '{:.2}X speed up'.format(factor)
            else:
                solve_message = '{:.2}X slow down'.format(1 / factor)
            # Same comparison, including construction time.
            factor = (ppg_construction_time + ppg_solve_time) / (
                super_solve_time + super_construction_time)
            if factor > 1:
                total_message = '{:.2}X speed up'.format(factor)
            else:
                total_message = '{:.2}X slow down'.format(1 / factor)
            messages.verbose_message('solve={}'.format(solve_message))
            messages.verbose_message('total={}'.format(total_message))
            messages.verbose_message(
                'solve={:.5f} '
                'construction={:.5f} '
                'total={:.5f} '
                'variables={} '
                'constraints={} '
                '[PPG]'.format(ppg_solve_time, ppg_construction_time,
                               ppg_solve_time + ppg_construction_time,
                               ilp_for_ppg.number_of_variables(),
                               ilp_for_ppg.number_of_constraints()))
            messages.verbose_message(
                'solve={:.5f} '
                'construction={:.5f} '
                'total={:.5f} '
                'variables={} '
                'constraints={} '
                '[SUPER]'.format(
                    super_solve_time, super_construction_time,
                    super_solve_time + super_construction_time,
                    ilp_for_super.number_of_variables(),
                    ilp_for_super.number_of_constraints()))
        # Overall summary across every subprogram and trial.
        total_trials = repeat * len(analysable_subprograms)
        factor = sum(all_ppg_solve_times) / sum(all_super_solve_times)
        if factor > 1:
            solve_message = '{:.2}X speed up'.format(factor)
        else:
            solve_message = '{:.2}X slow down'.format(1 / factor)
        messages.verbose_message('Average solve [PPG]={:.5f}'.format(
            sum(all_ppg_solve_times) / total_trials))
        messages.verbose_message('Average solve [SUPER]={:.5f}'.format(
            sum(all_super_solve_times) /
            total_trials))
        messages.verbose_message('solve={}'.format(solve_message))