def execute(argv):
    """Evaluate the standard -O<level> flags for each benchmark.

    For every benchmark listed in FLAGS.benchmarks_filename, evaluate the
    goal value obtained by 'opt' and 'llvm' (reported as 'clang') at each
    optimization level in FLAGS.levels, and store a per-benchmark YAML
    report under FLAGS.results_directory/<suite>/<benchmark>.yaml.
    """
    del argv  # unused; required by the app.run() entry-point signature

    FLAGS = flags.FLAGS

    # The benchmarks to process.
    benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
    if not benchmarks:
        # NOTE(review): assumes absl logging, whose fatal() aborts the
        # process; with stdlib logging this would fall through — confirm.
        logging.fatal('There are no benchmarks to process')

    # Create the results directory (idempotent).
    os.makedirs(FLAGS.results_directory, exist_ok=True)

    # Process each benchmark.
    for benchmark in tqdm(benchmarks, desc='Processing'):
        # Benchmark names have the form '<suite>.<benchmark>'.
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index + 1:]

        bench_in_dir = os.path.join(FLAGS.benchmarks_directory, bench_dir,
                                    bench_name)

        # Skip entries whose source directory is missing.
        if not os.path.isdir(bench_in_dir):
            continue

        bench_out_dir = os.path.join(FLAGS.results_directory, bench_dir)

        # Create the results directory for the suite (idempotent).
        os.makedirs(bench_out_dir, exist_ok=True)

        filename = '{}/{}.yaml'.format(bench_out_dir, bench_name)
        # Optionally skip benchmarks that already have a report.
        if FLAGS.verify_report and os.path.isfile(filename):
            continue

        results = {}
        for compiler in ['opt', 'llvm']:
            for level in FLAGS.levels:
                # Evaluate the goal value of '-<level>' under this compiler.
                goal_value = Engine.evaluate(
                    Goals.prepare_goals(FLAGS.goals,
                                        FLAGS.weights), '-{}'.format(level),
                    compiler, bench_in_dir, FLAGS.working_set, FLAGS.times,
                    FLAGS.tool, FLAGS.verify_output)
                # The 'llvm' compiler is reported under its driver, 'clang'.
                compiler_name = 'clang' if compiler == 'llvm' else 'opt'
                if compiler_name not in results:
                    results[compiler_name] = {}
                results[compiler_name][level] = {
                    'goal': goal_value,
                    'seq': ['-{}'.format(level)]
                }

        IO.dump_yaml(results, filename)
def execute(argv):
    """Generate random sequences for each benchmark.

    A Random object produces candidate pass sequences for every benchmark
    listed in FLAGS.benchmarks_filename; the resulting goal values are
    stored in FLAGS.results_directory/<suite>/<benchmark>.yaml.
    """
    del argv  # unused; required by the app.run() entry-point signature

    FLAGS = flags.FLAGS

    # The benchmarks to process.
    benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.error('There are no benchmarks to process')
        sys.exit(1)

    # Verify the benchmark directory exists before doing any work.
    if not os.path.isdir(FLAGS.benchmarks_directory):
        logging.error('Benchmarks directory {} does not exist.'.format(
            FLAGS.benchmarks_directory))
        sys.exit(1)

    # Create the results directory (idempotent).
    os.makedirs(FLAGS.results_directory, exist_ok=True)

    # Initialize a Random object (the sequence generator / evaluator).
    rnd = Random(FLAGS.nof_sequences, FLAGS.minimum, FLAGS.maximum,
                 FLAGS.factor, FLAGS.ssa, FLAGS.shuffle, FLAGS.update,
                 FLAGS.repetition, FLAGS.original, FLAGS.passes_filename,
                 Goals.prepare_goals(FLAGS.goals, FLAGS.weights), 'opt',
                 FLAGS.benchmarks_directory, FLAGS.working_set, FLAGS.times,
                 FLAGS.tool, FLAGS.verify_output)

    # Process each benchmark.
    for benchmark in tqdm(benchmarks, desc='Processing'):
        # Benchmark names have the form '<suite>.<benchmark>'.
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index + 1:]

        bench_dir = os.path.join(FLAGS.results_directory, bench_dir)

        # Create the results directory for the suite (idempotent).
        os.makedirs(bench_dir, exist_ok=True)

        filename = '{}/{}.yaml'.format(bench_dir, bench_name)
        # Optionally skip benchmarks that already have a report.
        if FLAGS.verify_report and os.path.isfile(filename):
            continue

        rnd.run(benchmark)
        if rnd.results:
            IO.dump_yaml(rnd.results, filename, FLAGS.report_only_the_best)
# Example #3
def execute(argv):
    """Generate the report.

    Aggregate per-strategy YAML results named '<benchmark>_<strategy>.yaml'
    from FLAGS.data_directory into one YAML report plus a CSV table with
    one row per benchmark and one column per strategy.
    """
    del argv  # unused; required by the app.run() entry-point signature

    FLAGS = flags.FLAGS

    results_files = glob.glob('{}/*.yaml'.format(FLAGS.data_directory))

    report = {}
    strategies = []
    for results in tqdm(results_files, desc='Processing'):
        # File names look like '<benchmark>_<strategy>.yaml'; split on the
        # LAST underscore so benchmark names may themselves contain '_'.
        index = results.rfind('_')
        benchmark = results[:index]
        benchmark = benchmark.replace('{}/'.format(FLAGS.data_directory), '')
        strategy = results[index + 1:]
        strategy = strategy.replace('.yaml', '')
        if strategy not in strategies:
            strategies.append(strategy)

        if benchmark not in report:
            report[benchmark] = {}

        # The report value is the sum of the goal over all sequences.
        data = IO.load_yaml(results)
        values = [str_data['goal'] for _, str_data in data.items()]
        report[benchmark][strategy] = sum(values)

    # Create the results directory (idempotent).
    os.makedirs(FLAGS.results_directory, exist_ok=True)

    filename = '{}/{}'.format(FLAGS.results_directory, FLAGS.report_filename)
    IO.dump_yaml(report, filename)

    strategies.sort()
    csv_filename = filename.replace('.yaml', '.csv')

    # newline='' is required by the csv module (avoids blank lines on
    # Windows and keeps embedded newlines intact).
    with open(csv_filename, 'w', newline='') as f:
        w = csv.DictWriter(f, ['bench'] + strategies)
        w.writeheader()
        for bench_name, data in report.items():
            # Build a fresh row per benchmark; reusing a single dict across
            # iterations leaked a previous benchmark's value into rows that
            # lack a given strategy (bug in the original code).
            row = {'bench': bench_name}
            row.update(data)
            w.writerow(row)
def find_sequences(test_benchmark, training_benchmaks, distance_directory,
                   training_data_directory, nof_sequences):
    """Get N sequences from the most similar benchmark.

    Parameters
    ----------
    test_benchmark : str
        '<suite>.<benchmark>' name of the test benchmark.
    training_benchmaks : list
        '<suite>.<benchmark>' names of the training benchmarks.
    distance_directory : str
        Root directory of the pre-computed distance files.
    training_data_directory : str
        Root directory of the training results (sequences + goal values).
    nof_sequences : int
        Number of sequences to extract; when < 1, ALL ranked sequences
        are returned (original loop semantics, preserved here).

    Returns
    -------
    tuple
        (closer_suite_name, best): the suite of the closest training
        benchmark and a dict with its best `nof_sequences` sequences.
    """
    # Test suite and benchmark: names have the form '<suite>.<benchmark>'.
    index = test_benchmark.find('.')
    test_suite_name = test_benchmark[:index]
    test_bench_name = test_benchmark[index + 1:]

    # Find the distinct training suites.
    training_suites = []
    for training_benchmark in training_benchmaks:
        index = training_benchmark.find('.')
        training_suite_name = training_benchmark[:index]
        if training_suite_name not in training_suites:
            training_suites.append(training_suite_name)

    # Collect (distance, suite, benchmark) triples for every training
    # benchmark of every suite.
    closer = []
    for training_suite in training_suites:
        d_directory = os.path.join(distance_directory, test_suite_name,
                                   training_suite)
        filename = '{}/{}.yaml'.format(d_directory, test_bench_name)
        distance_data = IO.load_yaml(filename)
        closer += [(distance, training_suite, training_bench)
                   for training_bench, distance in distance_data.items()]

    # min() is O(n) and selects the same smallest tuple the original
    # sort-then-take-first approach did.
    _, closer_suite_name, closer_bench_name = min(closer)

    # Load the closest benchmark's training data.
    d_directory = os.path.join(training_data_directory, closer_suite_name)
    filename = '{}/{}.yaml'.format(d_directory, closer_bench_name)
    training_data = IO.load_yaml_or_fail(filename)

    # Rank sequences by goal value (ascending: smaller goal is better).
    rank = [(seq_data['goal'], seq_key)
            for seq_key, seq_data in training_data.items()]
    rank.sort()

    # Extract the first N sequences (all of them when nof_sequences < 1).
    best = {}
    for i, (_, seq_key) in enumerate(rank):
        best[seq_key] = training_data[seq_key].copy()
        if i + 1 == nof_sequences:
            break
    return closer_suite_name, best
# Example #5
def execute(argv):
    """Create a small sequence.

    For each benchmark, load its training sequences, pick the best one,
    and run a BenchmarkReduction with it.
    """
    del argv  # unused; required by the app.run() entry-point signature

    FLAGS = flags.FLAGS

    # The benchmarks to process.
    benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
    if not benchmarks:
        # Consistent with the other commands: log and exit explicitly
        # (stdlib logging.fatal only logs; it does not terminate).
        logging.error('There are no benchmarks to process')
        sys.exit(1)

    # Verify the required directories exist before doing any work.
    if not os.path.isdir(FLAGS.benchmarks_directory):
        logging.error('Benchmarks directory {} does not exist.'.format(
            FLAGS.benchmarks_directory))
        sys.exit(1)

    if not os.path.isdir(FLAGS.training_directory):
        logging.error('Training directory {} does not exist.'.format(
            FLAGS.training_directory))
        sys.exit(1)

    # Create the results directory (idempotent).
    os.makedirs(FLAGS.results_directory, exist_ok=True)

    # Initialize a BenchmarkReduction object.
    bred = BenchmarkReduction(FLAGS.baseline, FLAGS.benchmarks_directory,
                              FLAGS.results_directory)

    # Reduce each benchmark with its best training sequence.
    for benchmark in tqdm(benchmarks, desc='Processing'):
        # Benchmark names have the form '<suite>.<benchmark>'.
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index + 1:]

        bench_dir = os.path.join(FLAGS.training_directory, bench_dir)

        sequences = IO.load_yaml_or_fail('{}/{}.yaml'.format(
            bench_dir, bench_name))
        # get_the_best returns a single-entry dict; unwrap its pass list.
        sequence = Sequence.get_the_best(sequences)
        for _, seq_data in sequence.items():
            sequence = seq_data['seq']

        bred.run(benchmark, sequence)
# Example #6
def execute(argv):
    """Generate the report.

    Aggregate per-benchmark YAML results keyed by compiler and level from
    FLAGS.data_directory into a CSV with one row per benchmark and one
    '<compiler>_<level>' column per strategy.
    """
    del argv  # unused; required by the app.run() entry-point signature

    FLAGS = flags.FLAGS

    results_files = glob.glob('{}/*.yaml'.format(FLAGS.data_directory))

    report = {}
    strategies = []
    for results in tqdm(results_files, desc='Processing'):
        # Derive the benchmark name from the file path.
        benchmark = results.replace('{}/'.format(FLAGS.data_directory), '')
        benchmark = benchmark.replace('.yaml', '')

        if benchmark not in report:
            report[benchmark] = {}

        data = IO.load_yaml(results)
        for compiler_name, compiler_data in data.items():
            for level_name, level_data in compiler_data.items():
                # A strategy is a (compiler, level) pair.
                strategy = '{}_{}'.format(compiler_name, level_name)
                if strategy not in strategies:
                    strategies.append(strategy)
                report[benchmark][strategy] = level_data['goal']

    # Create the results directory (idempotent).
    os.makedirs(FLAGS.results_directory, exist_ok=True)

    csv_filename = '{}/{}'.format(FLAGS.results_directory,
                                  FLAGS.report_filename)
    # newline='' is required by the csv module (avoids blank lines on
    # Windows and keeps embedded newlines intact).
    with open(csv_filename, 'w', newline='') as f:
        w = csv.DictWriter(f, ['bench'] + strategies)
        w.writeheader()
        for bench_name, bench_data in report.items():
            # Build a fresh row per benchmark; reusing a single dict across
            # iterations leaked a previous benchmark's value into rows that
            # lack a given strategy (bug in the original code).
            row = {'bench': bench_name}
            row.update(bench_data)
            w.writerow(row)
# Example #7
def execute(argv):
    """Generate a CSV from YAML reports.

    Aggregate per-benchmark YAML results keyed by sequence name from
    FLAGS.data_directory into a CSV with one row per benchmark and one
    column per sequence.
    """
    del argv  # unused; required by the app.run() entry-point signature

    FLAGS = flags.FLAGS

    results_files = glob.glob('{}/*.yaml'.format(FLAGS.data_directory))

    report = {}
    strategies = []
    for results in tqdm(results_files, desc='Report'):
        # Derive the benchmark name from the file path.
        benchmark = results.replace('{}/'.format(FLAGS.data_directory), '')
        benchmark = benchmark.replace('.yaml', '')

        if benchmark not in report:
            report[benchmark] = {}

        data = IO.load_yaml(results)
        for seq_name, seq_data in data.items():
            if seq_name not in strategies:
                strategies.append(seq_name)
            report[benchmark][seq_name] = seq_data['goal']

    # Create the results directory (idempotent).
    os.makedirs(FLAGS.results_directory, exist_ok=True)

    csv_filename = '{}/{}'.format(FLAGS.results_directory,
                                  FLAGS.report_filename)

    # newline='' is required by the csv module (avoids blank lines on
    # Windows and keeps embedded newlines intact).
    with open(csv_filename, 'w', newline='') as f:
        w = csv.DictWriter(f, ['bench'] + strategies)
        w.writeheader()
        for bench_name, data in report.items():
            # Build a fresh row per benchmark; reusing a single dict across
            # iterations leaked a previous benchmark's value into rows that
            # lack a given sequence (bug in the original code).
            row = {'bench': bench_name}
            row.update(data)
            w.writerow(row)
# Example #8
def execute(argv):
    """Create a small sequence.

    For each benchmark, take its best N training sequences, reduce each
    with a SequenceReduction, and store the (original and/or reduced)
    sequences and goal values in a per-benchmark YAML report.
    """
    del argv  # unused; required by the app.run() entry-point signature

    FLAGS = flags.FLAGS

    # Verify the required directories exist before doing any work.
    if not os.path.isdir(FLAGS.benchmarks_directory):
        logging.error('Benchmarks directory {} does not exist.'.format(
            FLAGS.benchmarks_directory))
        sys.exit(1)

    if not os.path.isdir(FLAGS.training_directory):
        logging.error('Training directory {} does not exist.'.format(
            FLAGS.training_directory))
        sys.exit(1)

    # Create the results directory (idempotent).
    os.makedirs(FLAGS.results_directory, exist_ok=True)

    # The benchmarks: either an explicit list, or every YAML file found
    # in the training directory.
    if FLAGS.benchmarks_filename:
        benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
        if not benchmarks:
            # BUG FIX: the original called logging.exit(), which does not
            # exist and raised AttributeError instead of reporting the error.
            logging.error('There are no benchmarks to process')
            sys.exit(1)
    else:
        benchmarks = glob.glob('{}/*.yaml'.format(FLAGS.training_directory))
        benchmarks = [
            b.replace('{}/'.format(FLAGS.training_directory),
                      '').replace('.yaml', '') for b in benchmarks
        ]

    # Initialize a SequenceReduction object.
    seqred = SequenceReduction(Goals.prepare_goals(FLAGS.goals,
                                                   FLAGS.weights), 'opt',
                               FLAGS.benchmarks_directory, FLAGS.working_set,
                               FLAGS.times, FLAGS.tool, FLAGS.verify_output)

    # Reduce each benchmark's best sequences.
    for benchmark in tqdm(benchmarks, desc='Processing'):
        # Benchmark names have the form '<suite>.<benchmark>'.
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index + 1:]

        bench_dir_result = os.path.join(FLAGS.results_directory, bench_dir)
        filename_result = '{}/{}.yaml'.format(bench_dir_result, bench_name)

        # Create the results directory for the suite (idempotent).
        os.makedirs(bench_dir_result, exist_ok=True)

        # Optionally skip benchmarks that already have a report.
        if FLAGS.verify_report and os.path.isfile(filename_result):
            continue

        bench_dir_training = os.path.join(FLAGS.training_directory, bench_dir)
        filename_training = '{}/{}.yaml'.format(bench_dir_training, bench_name)

        sequences = IO.load_yaml(filename_training)
        sequences = Sequence.get_the_best(sequences, FLAGS.nof_sequences)

        results = {}
        counter = 0
        for _, data in sequences.items():
            seqred.run(data['seq'], benchmark)

            # seqred.results[0] holds the original sequence,
            # seqred.results[1] the reduced (small) one.
            if not FLAGS.report_only_the_small:
                results[counter] = {
                    'seq': seqred.results[0]['seq'],
                    'goal': seqred.results[0]['goal']
                }
                counter += 1

            results[counter] = {
                'seq': seqred.results[1]['seq'],
                'goal': seqred.results[1]['goal']
            }
            counter += 1

        IO.dump_yaml(results, filename_result)
# Example #9
def execute(argv):
    """Generate genetic sequences for each benchmark.

    A simple genetic algorithm (SGA) searches pass sequences per
    benchmark; results go to FLAGS.results_directory/<suite>/<bench>.yaml.
    """
    del argv  # unused; required by the app.run() entry-point signature

    FLAGS = flags.FLAGS

    # The benchmarks to process.
    benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.error('There are no benchmarks to process')
        sys.exit(1)

    # Verify the benchmark directory exists before doing any work.
    if not os.path.isdir(FLAGS.benchmarks_directory):
        logging.error('Benchmarks directory {} does not exist.'.format(
            FLAGS.benchmarks_directory))
        sys.exit(1)

    # Create the results directory (idempotent).
    os.makedirs(FLAGS.results_directory, exist_ok=True)

    # Initialize a SGA object (GA hyper-parameters come from the flags).
    sga = SGA(FLAGS.generations,
              FLAGS.population,
              FLAGS.cr,
              FLAGS.m,
              FLAGS.param_m,
              FLAGS.param_s,
              FLAGS.crossover,
              FLAGS.mutation,
              FLAGS.selection,
              FLAGS.seed,
              FLAGS.dimension,
              FLAGS.passes_filename,
              Goals.prepare_goals(FLAGS.goals, FLAGS.weights),
              'opt',
              FLAGS.benchmarks_directory,
              FLAGS.working_set,
              FLAGS.times,
              FLAGS.tool,
              FLAGS.verify_output)

    # Process each benchmark.
    for benchmark in tqdm(benchmarks, desc='Processing'):
        # Benchmark names have the form '<suite>.<benchmark>'.
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index + 1:]

        bench_dir = os.path.join(FLAGS.results_directory,
                                 bench_dir)

        # Create the results directory for the suite (idempotent).
        os.makedirs(bench_dir, exist_ok=True)

        filename = '{}/{}.yaml'.format(bench_dir, bench_name)
        # Optionally skip benchmarks that already have a report.
        if FLAGS.verify_report and os.path.isfile(filename):
            continue

        sga.run(benchmark)

        if sga.results:
            IO.dump_yaml(sga.results,
                         filename,
                         FLAGS.report_only_the_best)
def execute(argv):
    """Generate the report.

    Match '<benchmark>_<strategy>.yaml' result files from
    FLAGS.from_directory against the corresponding files in
    FLAGS.to_directory, then write a YAML report, a CSV table, and a
    '.stat' file with per-strategy goal-value statistics.
    """
    del argv  # unused; required by the app.run() entry-point signature

    FLAGS = flags.FLAGS

    from_files = glob.glob('{}/*.yaml'.format(FLAGS.from_directory))

    report = {}
    strategies = []
    statistics = {}
    for from_file in tqdm(from_files, desc='Processing'):
        # File names look like '<benchmark>_<strategy>.yaml'; split on the
        # LAST underscore so benchmark names may themselves contain '_'.
        index = from_file.rfind('_')
        benchmark = from_file[:index]
        benchmark = benchmark.replace('{}/'.format(FLAGS.from_directory), '')
        strategy = from_file[index + 1:]
        strategy = strategy.replace('.yaml', '')
        if strategy not in strategies:
            strategies.append(strategy)
            statistics[strategy] = []

        # Best sequence (and its key) according to the 'from' data.
        data = IO.load_yaml(from_file)
        the_best = Sequence.get_the_best(data)
        the_best_key = list(the_best.keys()).pop()

        # Find the matching 'to' files for this benchmark/strategy.
        filename = '{}*{}.yaml'.format(benchmark, strategy)
        to_files = glob.glob('{}/{}'.format(FLAGS.to_directory, filename))
        for to_file in to_files:
            benchmark = to_file.replace('{}/'.format(FLAGS.to_directory), '')
            benchmark = benchmark.replace('_{}.yaml'.format(strategy), '')

            if benchmark not in report:
                report[benchmark] = {}

            # Record the best sequence, and the 'to' goal value for stats.
            data = IO.load_yaml(to_file)
            report[benchmark][strategy] = the_best.copy()
            statistics[strategy].append(data[the_best_key]['goal'])

    # Create the results directory (idempotent).
    os.makedirs(FLAGS.results_directory, exist_ok=True)

    # YAML Report
    filename = '{}/{}'.format(FLAGS.results_directory, FLAGS.report_filename)
    IO.dump_yaml(report, filename)

    # CSV Report
    strategies.sort()
    filename = filename.replace('.yaml', '.csv')

    # newline='' is required by the csv module (avoids blank lines on
    # Windows and keeps embedded newlines intact).
    with open(filename, 'w', newline='') as f:
        w = csv.DictWriter(f, ['bench'] + strategies)
        w.writeheader()
        for bench_name, data in report.items():
            # Build a fresh row per benchmark; reusing a single dict across
            # iterations leaked a previous benchmark's value into rows that
            # lack a given strategy (bug in the original code).
            row = {'bench': bench_name}
            row.update(data)
            w.writerow(row)

    # Statistics: summary of the 'to' goal values per strategy.
    filename = filename.replace('.csv', '.stat')
    stats = {}
    for strategy, values in statistics.items():
        stats[strategy] = {
            'min': float(np.min(values)),
            'max': float(np.max(values)),
            'mean': float(np.mean(values)),
            'median': float(np.median(values)),
            '1quantile': float(np.quantile(values, 0.25)),
            '2quantile': float(np.quantile(values, 0.50)),
            '3quantile': float(np.quantile(values, 0.75))
        }

    IO.dump_yaml(stats, filename)
# Example #11
    def run(self, training_benchmarks, compiler, baseline, k):
        """
        Best-k

        Fast and effective orchestration of compiler optimizations
        for automatic performance tuning
        Z. Pan and R. Eigenmann
        International Symposium on Code Generation and Optimization
        2006
        10.1109/CGO.2006.38

        Builds, for each candidate sequence, the list of training
        benchmarks it improves over the baseline, then greedily picks the
        k sequences that cover the most still-uncovered benchmarks.
        Results are stored in self.__results (sequence key -> pass list)
        and self.__covering (sequence key -> number of programs covered).

        Argument
        --------
        training_benchmarks : list

        compiler : str

        baseline : str

        k : int
            Number of sequences
        """
        # Create the dictionary:
        #   dictionary: seq key -> [(benchmark, improvement %), ...]
        #   best_sequences: seq key -> the pass list of that sequence
        dictionary = {}
        best_sequences = {}
        for training_benchmark in training_benchmarks:
            # Benchmark names have the form '<suite>.<benchmark>'.
            index = training_benchmark.find('.')
            bench_dir = training_benchmark[:index]
            bench_name = training_benchmark[index + 1:]

            training_dir = os.path.join(self.__flags.training_directory,
                                        bench_dir)
            baseline_dir = os.path.join(self.__flags.baseline_directory,
                                        bench_dir)

            training_sequences = IO.load_yaml('{}/{}.yaml'.format(
                training_dir, bench_name))

            # Skip benchmarks without training data.
            if not training_sequences:
                continue

            # Goal value achieved by the baseline (e.g. -O<level>) for
            # this benchmark; used as the reference for improvements.
            baseline_goal_value = IO.load_yaml_or_fail('{}/{}.yaml'.format(
                baseline_dir, bench_name))
            baseline_goal_value = baseline_goal_value[compiler][baseline][
                'goal']

            # For each sequence
            for seq in training_sequences.keys():
                if seq not in dictionary.keys():
                    dictionary[seq] = []
                    best_sequences[seq] = training_sequences[seq]['seq']

                goal_value = training_sequences[seq]['goal']

                # Store the fitness: only sequences that beat the baseline
                # (smaller goal value) count as covering this benchmark.
                if goal_value < baseline_goal_value:
                    improvement = ((baseline_goal_value - goal_value) /
                                   baseline_goal_value) * 100
                    dictionary[seq].append((training_benchmark, improvement))

        # Find the best dictionary entries (greedy set cover, k rounds).
        if dictionary:
            bestk = []
            self.__covering = {}
            for _ in range(k):

                # Programs still covered by at least one sequence; if none
                # remain, every benchmark is already accounted for.
                progs = []
                for _, data in dictionary.items():
                    progs += [p for p, _ in data if p not in progs]
                if len(progs) == 0:
                    break

                # Pick the sequence covering the most programs this round.
                key = self.__get_maximum(dictionary)
                dictionary_entry = dictionary[key].copy()
                self.__covering[key] = len(dictionary_entry)

                bestk.append(key)

                # Remove the newly covered programs from every entry.
                # NOTE(review): this loop rebinds `key` (shadowing the
                # selected sequence's key); harmless here since `key` is
                # not used afterwards, but fragile.
                for key, data in dictionary.items():
                    for program, improvement in dictionary_entry:
                        index = self.__program_in_dictionary(program, data)
                        if index > -1:
                            del dictionary[key][index]

            # Store the best k sequences ('x' holds the pass list).
            self.__results = {}
            for best in bestk:
                self.__results[best] = {'x': best_sequences[best]}