def measure_compilation_time(build_dir):
    # Create the sheet
    print('************ Measuring the compilation times **********')
    sheet = pyexcel.Sheet()
    rownames = [benchmark for benchmark, _ in support.benchmarks_gen()]
    sheet.column += rownames

    # Time how long it takes to build the benchmark using the timeit module, and the commands in the make.out
    # file present in the build directory.
    def build_time(benchmark_dir):
        os.chdir(benchmark_dir)
        runs = 30
        return timeit.timeit(
            'subprocess.check_call(["bash", "make.out"], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)',
            number=runs,
            setup='import subprocess') / runs

    # Fill in the sheet
    times = []
    for benchmark, _ in support.benchmarks_gen():
        print('************ ' + benchmark + ' **********')
        benchmark_dir = os.path.join(build_dir, benchmark)
        times.append(build_time(benchmark_dir))
    sheet.column += times

    # Create the report book and write it out
    report = pyexcel.Book(sheets={'Compilation times': sheet})
    report.save_as(os.path.join(config.reports_dir, 'compilation_times.ods'))
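
# Aside (not from the original listing): timeit also accepts a callable, which
# avoids embedding the subprocess call in a statement string. A minimal,
# hypothetical sketch of the same measurement:
import subprocess
import timeit

def average_build_time(script='make.out', runs=30):
    # Time `bash <script>` and return the mean over `runs` executions
    run = lambda: subprocess.check_call(['bash', script],
                                        stdout=subprocess.DEVNULL,
                                        stderr=subprocess.DEVNULL)
    return timeit.timeit(run, number=runs) / runs
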
def main():
    # Create the sheet
    print('************ Creating report on binary sizes **********')
    sheet = pyexcel.Sheet()
    rownames = [benchmark for benchmark, _ in support.benchmarks_gen()]
    sheet.column += (['Binary default'] + rownames +
                     ['Binary diversified'] + rownames)

    # Get all the sizes of the default binaries
    sizes = ['']  # Empty cell to line up with the 'Binary default' header
    for (benchmark, name) in support.benchmarks_gen():
        binary = os.path.join(support.create_path_for_seeds(config.build_dir),
                              benchmark, name)
        if os.path.exists(binary):
            sizes.append(get_binary_stripped_size(binary))
        else:
            sizes.append('FAIL')

    # Add the default sizes twice; the AVG and MAX headers label the cells that
    # will be filled in for the diversified binaries later on
    sheet.column += sizes + ['AVG'] + [''] * len(rownames)
    sheet.column += sizes + ['MAX'] + [''] * len(rownames)

    for seeds in support.all_seeds_gen():
        # Get all the sizes of the diversified binaries
        sizes = [''] * (len(rownames) + 2)  # Padding so the sizes line up with the 'Binary diversified' rows
        for (benchmark, name) in support.benchmarks_gen():
            binary = os.path.join(
                support.create_path_for_seeds(config.build_dir, *seeds),
                benchmark, name)
            if os.path.exists(binary):
                sizes.append(get_binary_stripped_size(binary))
            else:
                sizes.append('FAIL')

        sheet.column += sizes

    # Fill in the average and the max. Both header rows start with 'Binary',
    # so the filter skips them and only the benchmark rows are processed.
    for row in (row for row in sheet.rows()
                if not row[0].startswith('Binary')):
        sizes = [elem for elem in row[3:] if isinstance(elem, int)]
        if sizes:
            row[1] = sum(sizes) // len(sizes)
            row[2] = max(sizes)

    # Create the report book and write it out
    report = pyexcel.Book(sheets={'Sizes': sheet})
    report.save_as(os.path.join(config.reports_dir, 'binary_sizes.ods'))
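
# get_binary_stripped_size is not defined in this listing. A plausible sketch
# (hypothetical, not the project's actual implementation; assumes the `strip`
# tool is on the PATH): strip a temporary copy and report its size, so the
# original binary stays untouched.
import os
import shutil
import subprocess
import tempfile

def get_binary_stripped_size(binary):
    with tempfile.TemporaryDirectory() as tmp:
        stripped = os.path.join(tmp, 'stripped')
        shutil.copy2(binary, stripped)
        subprocess.check_call(['strip', stripped])
        return os.stat(stripped).st_size
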
def main(encrypt=True):
    # Inject the delta data for every binary
    print('************ Injecting delta data **********')
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for seed_tuple in support.all_seeds_gen():
            for subset in support.subsets_gen(seed_tuple, False):
                for (benchmark, name) in support.benchmarks_gen():
                    executor.submit(inject_data, benchmark, name, subset,
                                    encrypt)
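
# Note (not in the original): exceptions raised inside tasks submitted to a
# ProcessPoolExecutor only surface once their Future is inspected; the loop
# above never checks its futures. A sketch of a variant that logs failures:
import concurrent.futures
import logging

def run_all(executor, calls):
    # `calls` is an iterable of (fn, args) tuples; a hypothetical helper
    futures = [executor.submit(fn, *args) for fn, args in calls]
    for future in concurrent.futures.as_completed(futures):
        try:
            future.result()
        except Exception:
            logging.getLogger().exception('Task failed')
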
def main():
    # Create the report
    print('************ Creating report on delta data sizes **********')
    sheets = {}
    for subset in support.subsets_gen(seed.get_types(), False):
        # Create the sheet for this subset and put it in the dictionary
        # Create the sheet name out of the type names of the seeds in the subset
        name = ','.join(t.__name__ for t in subset)
        sheet = pyexcel.Sheet(name=name)
        sheets[name] = sheet

        # Create the first few columns. The first is for the benchmarks, the
        # second is the average, and the third is the max (to be filled in later).
        rownames = [benchmark for benchmark, _ in support.benchmarks_gen()]
        sheet.column += ['Delta data'] + rownames
        sheet.column += ['AVG'] + [''] * len(rownames)
        sheet.column += ['MAX'] + [''] * len(rownames)
        for seed_tuple in support.seeds_gen(*subset):
            # Empty cell
            sizes = ['']

            # Get all the sizes of the patches
            for benchmark, _ in support.benchmarks_gen():
                dd = os.path.join(
                    support.create_path_for_seeds(config.patches_dir,
                                                  *seed_tuple), benchmark,
                    'delta_data')
                if os.path.exists(dd):
                    sizes.append(os.stat(dd).st_size)
                else:
                    sizes.append('FAIL')

            sheet.column += sizes

        # Fill in the average and the max
        for row in list(sheet.rows())[1:]:
            sizes = [elem for elem in row[3:] if isinstance(elem, int)]
            if sizes:
                row[1] = sum(sizes) // len(sizes)
                row[2] = max(sizes)

    # Create the report book and write it out
    report = pyexcel.Book(sheets=sheets)
    report.save_as(os.path.join(config.reports_dir, 'delta_data_sizes.ods'))
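
# For illustration (hypothetical seed classes): the sheet name of a subset is
# simply the comma-joined type names of its seed classes.
class _ExampleSeedA: pass
class _ExampleSeedB: pass
assert ','.join(t.__name__ for t in (_ExampleSeedA, _ExampleSeedB)) == \
    '_ExampleSeedA,_ExampleSeedB'
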
def main():
    # Start with destroying the previous directory structure, if it exists
    shutil.rmtree(config.patches_dir, ignore_errors=True)

    # Create the patches by submitting tasks to the executor
    print('************ Creating patches **********')
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for seed_tuple in support.all_seeds_gen():
            for subset in support.subsets_gen(seed_tuple, False):
                for (benchmark, _) in support.benchmarks_gen():
                    executor.submit(create_patch, benchmark, subset)
def main():
    # Create the sheet
    print('************ Creating report on opportunity log sizes **********')
    sheet = pyexcel.Sheet()
    rownames = [benchmark for benchmark, _ in support.benchmarks_gen()]
    sheet.column += ['Opportunity log'] + rownames

    # Get all the sizes of the opportunity logs
    sizes = ['']  # Empty cell to line up with the 'Opportunity log' header
    for (benchmark, _) in support.benchmarks_gen():
        data_dir = os.path.join(support.create_path_for_seeds(config.data_dir),
                                benchmark)
        sizes.append(get_opportunity_log_size(data_dir))

    sheet.column += sizes

    # Create the report book and write it out
    report = pyexcel.Book(sheets={'Sizes': sheet})
    report.save_as(
        os.path.join(config.reports_dir, 'opportunity_log_sizes.ods'))
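
# get_opportunity_log_size is not shown above. A hypothetical sketch
# (consistent with how opportunity logs are handled elsewhere in this listing;
# `seed` is the project's own module): sum the sizes of every opportunity log
# declared by the seed types.
import os

def get_opportunity_log_size(data_dir):
    total = 0
    for log in [s.opportunity_log for s in seed.get_types() if s.opportunity_log]:
        path = os.path.join(data_dir, log)
        if os.path.exists(path):
            total += os.stat(path).st_size
    return total
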
def measure_benchmark_time(build_dir, build_dir_opt):
    # Create the sheet
    print('************ Measuring the benchmark timing **********')
    sheet = pyexcel.Sheet()
    rownames = [benchmark for benchmark, _ in support.benchmarks_gen()]
    sheet.column += (['Without Clang OPT'] + rownames +
                     ['With Clang OPT'] + rownames)
    sheet.column += (['AVG'] + [''] * len(rownames) +
                     ['AVG'] + [''] * len(rownames))
    sheet.column += (['MAX'] + [''] * len(rownames) +
                     ['MAX'] + [''] * len(rownames))
    nr_of_measurements = 10
    for _ in range(nr_of_measurements):
        sheet.column += [''] * (2 + 2 * len(rownames))

    for bi, (_, name) in enumerate(support.benchmarks_gen()):
        print('************ Benchmark ' + name + ' **********')

        # Run a few warm-up executions first to prime the cache and environment
        time_benchmark(build_dir, name)
        time_benchmark(build_dir_opt, name)

        # Do the actual measurements, alternating between the version with and without optimization
        for i in range(nr_of_measurements):
            col = i + 3  # Columns 0-2 hold the names, AVG, and MAX
            sheet[1 + bi, col] = time_benchmark(build_dir, name)
            sheet[2 + len(rownames) + bi,
                  col] = time_benchmark(build_dir_opt, name)

    # Fill in the average and the max. Both 'Without Clang OPT' and
    # 'With Clang OPT' start with 'With', so the filter skips both header rows.
    for row in (row for row in sheet.rows() if not row[0].startswith('With')):
        times = [elem for elem in row[3:] if isinstance(elem, float)]
        if times:
            row[1] = sum(times) / len(times)
            row[2] = max(times)

    # Create the report book and write it out
    report = pyexcel.Book(sheets={'Execution time': sheet})
    report.save_as(os.path.join(config.reports_dir, 'benchmark_times.ods'))
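
# time_benchmark is not defined in this listing, and the real run harness for
# the benchmarks is unknown. A heavily simplified, hypothetical stand-in that
# times a single run of a benchmark binary (layout and arguments are assumptions):
import os
import subprocess
import timeit

def time_benchmark(build_dir, name):
    binary = os.path.join(build_dir, name)
    run = lambda: subprocess.check_call([binary],
                                        stdout=subprocess.DEVNULL,
                                        stderr=subprocess.DEVNULL)
    return timeit.timeit(run, number=1)
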
def extract_data(subset, pickle_symfiles=False):
    # Unpack all the seeds we need and create the relative path. Do this before
    # the try block so the exception handler below can always refer to relpath.
    relpath = support.relpath_for_seeds(*subset)
    try:

        print('************ Extracting for path ' + relpath + ' **********')
        for (benchmark, name) in support.benchmarks_gen():
            # Create the output directory for this benchmark
            build_dir = os.path.join(config.build_dir, relpath, benchmark)
            data_dir = os.path.join(config.data_dir, relpath, benchmark)
            os.makedirs(data_dir)
            os.symlink(build_dir, os.path.join(data_dir, 'build'))

            with open(os.path.join(data_dir, 'symfile'), 'w') as f_sym:
                # Extract the actual symfile using dump_syms. This tool creates a LOT of warnings so we redirect stderr to /dev/null
                subprocess.check_call([config.dump_syms, os.path.join(build_dir, name)], stdout=f_sym, stderr=subprocess.DEVNULL)

            # If we're dealing with the base we have to do some more stuff
            if not subset:
                # For every protection, copy over the opportunity log, if any
                for opportunity_log in [s.opportunity_log for s in seed.get_types() if s.opportunity_log]:
                    shutil.copy2(os.path.join(support.create_path_for_seeds(config.build_dir), benchmark, opportunity_log), data_dir)

                # Copy over the linker map
                shutil.copy2(os.path.join(build_dir, name + '.map'), os.path.join(data_dir, 'map'))

                # Extract the section alignment information
                linker.gather_section_alignment(os.path.join(build_dir, name + '.map'), os.path.join(data_dir, 'sections'))

                if pickle_symfiles:
                    # Get the symfile
                    symfile_path = os.path.join(data_dir, 'symfile')
                    symfile = SymFile().read_f(symfile_path)

                    # Pickle it
                    with open(os.path.join(data_dir, 'pickled_symfile'), 'wb') as f_pickle:
                        pickle.dump(symfile, f_pickle)

        if not subset:
            data_dir = os.path.join(config.data_dir, relpath)
            # For every protection, copy over the opportunity logs for the extra build archives/objects (which are the same for every benchmark), if any.
            # Also copy over the build_prefix (in symlink form).
            for a in os.listdir(support.create_path_for_seeds(config.extra_build_dir)):
                a_path = os.path.join(support.create_path_for_seeds(config.extra_build_dir), a)
                os.symlink(os.readlink(os.path.join(a_path, 'build')), os.path.join(data_dir, 'build.' + a))
                for opportunity_log in [s.opportunity_log for s in seed.get_types() if s.opportunity_log]:
                    shutil.copy2(os.path.join(a_path, opportunity_log), os.path.join(data_dir, opportunity_log + '.' + a))

    except Exception:
        logging.getLogger().exception('Data extraction failed for ' + relpath)
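
# Usage sketch (not in the original): the pickled symfile written above can be
# loaded back without re-parsing the text symfile. SymFile comes from the
# surrounding project.
import os
import pickle

def load_pickled_symfile(data_dir):
    with open(os.path.join(data_dir, 'pickled_symfile'), 'rb') as f:
        return pickle.load(f)
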
def measure_crash_report_time(build_dir):
    # Create the sheet
    print(
        '************ Measuring the time it takes to create stack traces from a minidump **********'
    )
    sheet = pyexcel.Sheet()
    rownames = [benchmark for benchmark, _ in support.benchmarks_gen()]
    sheet.column += rownames

    # Generate the times and fill in the sheet
    run_dir = os.path.join(config.tmp_dir, 'measurements')
    shutil.rmtree(run_dir, ignore_errors=True)
    os.mkdir(run_dir)
    times = []
    for (benchmark, name) in support.benchmarks_gen():
        print('************ ' + benchmark + ' **********')
        times.append(time_report_creation(benchmark, name, build_dir, run_dir))
    sheet.column += times

    # Create the report book and write it out
    report = pyexcel.Book(sheets={'Crash report creation times': sheet})
    report.save_as(
        os.path.join(config.reports_dir, 'crash_report_creation_times.ods'))
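
# time_report_creation is not shown. Judging by the message printed above, it
# times stack-trace generation from a minidump; a hypothetical sketch using
# Breakpad's minidump_stackwalk tool (all paths here are assumptions):
import subprocess
import timeit

def time_stackwalk(minidump_path, symbols_dir):
    run = lambda: subprocess.check_call(
        ['minidump_stackwalk', minidump_path, symbols_dir],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL)
    return timeit.timeit(run, number=1)
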
def main():
    # Create the sheet
    print('************ Creating report on patch timing **********')
    sheet = pyexcel.Sheet()
    rownames = [benchmark for benchmark, _ in support.benchmarks_gen()]
    sheet.column += (['Patch creation'] + rownames +
                     ['Patch application'] + rownames)
    sheet.column += [''] * (len(rownames) + 1) + ['AVG'] + [''] * len(rownames)
    sheet.column += [''] * (len(rownames) + 1) + ['MAX'] + [''] * len(rownames)

    for seeds in support.all_seeds_gen():
        # Empty cell
        times = ['']

        for (benchmark, name) in support.benchmarks_gen():
            # Determine all paths
            base_data = os.path.join(
                support.create_path_for_seeds(config.data_dir), benchmark)
            div_symfile_path = os.path.join(
                support.create_path_for_seeds(config.data_dir, *seeds),
                benchmark, 'symfile')

            # Time patch creation
            def create_patch_once():
                patch.patch(base_data,
                            seeds,
                            div_symfile_path=div_symfile_path,
                            output_dir=config.tmp_dir)

            times.append(timeit.timeit(create_patch_once, number=1))
            shutil.copyfile(os.path.join(config.tmp_dir, 'patch'),
                            os.path.join(config.tmp_dir, 'patch.' + name))

        # Empty cell
        times.append('')

        for (benchmark, name) in support.benchmarks_gen():
            base_data = os.path.join(
                support.create_path_for_seeds(config.data_dir), benchmark)

            # Time patch application
            def apply_patch_once():
                patch.patch(base_data,
                            seeds,
                            patch_path=os.path.join(config.tmp_dir,
                                                    'patch.' + name),
                            output_dir=config.tmp_dir)

            times.append(timeit.timeit(apply_patch_once, number=1))

        sheet.column += times

    # Fill in the average and the max. Both header rows start with 'Patch',
    # so the filter skips them.
    for row in (row for row in sheet.rows() if not row[0].startswith('Patch')):
        times = [elem for elem in row[3:] if isinstance(elem, float)]
        if times:
            row[1] = sum(times) / len(times)
            row[2] = max(times)

    # Create the report book and write it out
    report = pyexcel.Book(sheets={'Timing': sheet})
    report.save_as(os.path.join(config.reports_dir, 'patch_timing.ods'))
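
# Note (not from the original): the timing closures above capture loop
# variables such as `name`. Because timeit.timeit invokes each closure before
# the loop advances, Python's late binding of closures is harmless here; it
# only bites when calls are deferred past the loop, e.g.:
fns = [lambda: i for i in range(3)]
assert [f() for f in fns] == [2, 2, 2]  # every closure sees the final i
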
def main():
    # Parsing the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-a',
                        '--arguments',
                        nargs=argparse.REMAINDER,
                        default=[],
                        help='Extra compiler arguments to be used.')
    args = parser.parse_args()

    build_dir = os.path.join(config.tmp_dir, 'spec_measurements')
    spec_config_name = 'measuring'

    # Get all the sizes of the default binaries
    sheets = {}
    print('************ Building default binaries... **********')
    default_compile_options = build_binaries.get_default_compile_options(
        False) + args.arguments
    build_binaries.build_spec(build_dir, ' '.join(default_compile_options),
                              spec_config_name)
    for (benchmark, name) in support.benchmarks_gen():
        sheet = pyexcel.Sheet(name=benchmark)
        sheets[benchmark] = sheet
        mapfile = os.path.join(build_dir, benchmark, name + '.map')
        functions = get_functions(mapfile)

        # Add the names and original sizes columns
        names = [''] + [func_name for (func_name, _) in functions]
        sheet.column += names
        sizes = ['Original'] + [size for _, size in functions]
        sheet.column += sizes

    # Then compile for diversified binaries (stackpadding only)
    for padding in range(config.default_padding, config.max_padding + 8, 8):
        print('************ Building stackpadded binary with SP ' +
              str(padding) + '... **********')
        # Add compile options for this amount of padding
        compile_options = default_compile_options + seed.SPSeed.compile_options_for_padding(
            padding)
        build_binaries.build_spec(build_dir, ' '.join(compile_options),
                                  spec_config_name)
        for (benchmark, name) in support.benchmarks_gen():
            sheet = sheets[benchmark]
            mapfile = os.path.join(build_dir, benchmark, name + '.map')
            functions = get_functions(mapfile)

            # Add the sizes and size increases columns
            sizes = ['Pad ' + str(padding)] + [size for _, size in functions]
            sheet.column += sizes
            # Increase relative to the original (unpadded) sizes
            increases = ['Increase ' + str(padding)]
            for size1, size2 in list(zip(sheet.column[1], sizes))[1:]:  # skip the header cell
                increases.append(size2 - size1)
            sheet.column += increases
            # Increase relative to the first padded build (the base padding)
            increases = ['Increase2base ' + str(padding)]
            for size1, size2 in list(zip(sheet.column[2], sizes))[1:]:  # skip the header cell
                increases.append(size2 - size1)
            sheet.column += increases

    # Write each benchmark's sheet out as its own CSV report
    for (benchmark, _) in support.benchmarks_gen():
        sheet = sheets[benchmark]
        sheet.save_as(os.path.join(config.reports_dir, benchmark + '_sp.csv'))
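
# get_functions is not defined in this listing. A hypothetical sketch that
# extracts (name, size) pairs for function sections from a GNU ld map file,
# assuming -ffunction-sections style '.text.<name>' entries:
import re

def get_functions(mapfile):
    pattern = re.compile(r'^\s*\.text\.(\S+)\s+0x[0-9a-fA-F]+\s+0x([0-9a-fA-F]+)')
    functions = []
    with open(mapfile) as f:
        for line in f:
            match = pattern.match(line)
            if match:
                functions.append((match.group(1), int(match.group(2), 16)))
    return functions
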
def main():
    # We prepare by copying the binaries to the location where they'll be protected
    print(
        '************ Preparing for link-time protections by copying binaries **********'
    )
    for link_protections in support.link_subsets_gen(seed.get_types(), False):
        for build_protections in support.build_subsets_gen(seed.get_types()):
            for build_seeds, link_seeds in zip(
                    support.seeds_gen(*build_protections),
                    support.seeds_gen(*link_protections)):
                support.copy_spec_tree(
                    support.create_path_for_seeds(config.build_dir,
                                                  *build_seeds),
                    support.create_path_for_seeds(config.build_dir,
                                                  *build_seeds, *link_seeds))

    for (benchmark, name) in support.benchmarks_gen():
        # Get all the sections from all the objects in the build directory
        print('************************* ' + benchmark +
              ' **********************')
        print(
            '************************* Gathering sections **********************'
        )
        linkermap = Map(
            os.path.join(support.create_path_for_seeds(config.build_dir),
                         benchmark, name + '.map'))

        # Get all the pre_sections and make linker rules out of them, so that they can't change order (which, apparently, they otherwise can).
        # Use a '*' (even though it is unnecessary) so Diablo's map parser will recognize the rule as a pattern.
        # Remove the name of the encompassing archive from an object (if present), since Diablo can't handle it otherwise.
        pre_sections = [[
            support.get_objname(section.obj) + '*(' + section.name + ')'
        ] for section in linkermap.pre_sections]

        # We want to create a list of all linker rules that can be altered. Ideally we would simply take all sections currently in the
        # linkermap (that we want to change) and convert them into a rule that matches only that section from its specific object.
        # Unfortunately the linker is a bit fickle in its handling of weak symbols. If N sections (coming from N different objects)
        # exist that define the same symbol, the linker will select only one (from one object) to place in the binary and discard
        # the rest. The problem is that during the second, protected link (using the linker script we generate here) the
        # linker won't necessarily select a weak symbol section from the same object as it did in the first link. The custom rule
        # (which includes the name of the object the section came from in the first link) won't match and, for example, the section would
        # be put after the sections that ARE shuffled. To avoid this, we also keep all discarded sections, and then create N rules for the
        # section (one for each object). These rules stay together during the protections, thus guaranteeing the right location of the section.
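        # For example (hypothetical names): if both foo.o and bar.o provide the
        # weak section .text.dup, the rule list for that section becomes
        #     ['foo.o*(.text.dup)', 'bar.o*(.text.dup)']
        # so all candidate definitions travel together through the protections.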
        main_sections = []
        for section in linkermap.main_sections:
            rule_list = []

            # Create the linker rules and insert them in the list.
            # Use a '*' (even though it is unnecessary) so Diablo's map parser will recognize the rule as a pattern.
            # Remove the name of the encompassing archive from an object (if present), since Diablo can't handle it otherwise.
            suffix = '*(' + section.name + ')'
            rule_list.append(support.get_objname(section.obj) + suffix)
            for discarded in linkermap.discarded_sections:
                if discarded.name == section.name:
                    rule_list.append(
                        support.get_objname(discarded.obj) + suffix)

            # Add the rule list to the list of lists
            main_sections.append(rule_list)

        # Perform the actual link-time protections by creating a new linker script (in which sections can change order) and relinking
        for link_protections in support.link_subsets_gen(
                seed.get_types(), False):
            # First create a new linker script for every combination of link protections
            for link_seeds in support.seeds_gen(*link_protections):
                print('************ Protecting binary at link level for ' +
                      ' '.join([repr(s)
                                for s in link_seeds]) + ' ... **********')

                # Copy the lists so that every iteration starts from the original rules
                protected_pre_sections = list(pre_sections)
                protected_main_sections = list(main_sections)
                for s in link_seeds:
                    protected_pre_sections, protected_main_sections = s.diversify_link(
                        protected_pre_sections, protected_main_sections)

                # Create diversified link script
                linker.create_linker_script(
                    protected_pre_sections + protected_main_sections,
                    os.path.join(
                        support.create_path_for_seeds(config.build_dir,
                                                      *link_seeds), benchmark,
                        'link.xc'))

            for build_protections in support.build_subsets_gen(
                    seed.get_types()):
                for build_seeds, link_seeds in zip(
                        support.seeds_gen(*build_protections),
                        support.seeds_gen(*link_protections)):
                    # Get the link command, then adapt it to use our new linker script
                    directory = os.path.join(
                        support.create_path_for_seeds(config.build_dir,
                                                      *build_seeds,
                                                      *link_seeds), benchmark)
                    with open(os.path.join(directory, 'make.out'), 'r') as f:
                        cmd = list(f)[-1].rstrip()  # The link command is the last line

                    new_script = os.path.join(
                        support.create_path_for_seeds(config.build_dir,
                                                      *link_seeds), benchmark,
                        'link.xc')
                    cmd = cmd.replace(config.link_script, new_script)

                    # Execute the link command with our diversified linker script
                    subprocess.check_call(shlex.split(cmd), cwd=directory)
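
# For illustration (not from the original): shlex.split tokenizes the stored
# link command the way a POSIX shell would, so it can be passed to
# subprocess.check_call without shell=True.
import shlex
assert shlex.split('gcc -o bench main.o -Wl,-T,link.xc') == \
    ['gcc', '-o', 'bench', 'main.o', '-Wl,-T,link.xc']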