def execute(argv):
    """Generate random sequences for each benchmark"""

    del argv

    FLAGS = flags.FLAGS

    # The benchmarks
    benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.fatal('There are no benchmarks to process')

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Process each benchmark
    for benchmark in tqdm(benchmarks, desc='Processing'):
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index + 1:]

        bench_in_dir = os.path.join(FLAGS.benchmarks_directory, bench_dir,
                                    bench_name)

        if not os.path.isdir(bench_in_dir):
            continue

        bench_out_dir = os.path.join(FLAGS.results_directory, bench_dir)

        # Create the results directory for the suite
        try:
            os.makedirs(bench_out_dir)
        except FileExistsError:
            pass

        filename = '{}/{}.yaml'.format(bench_out_dir, bench_name)
        if FLAGS.verify_report and os.path.isfile(filename):
            continue

        results = {}
        for compiler in ['opt', 'llvm']:
            for level in FLAGS.levels:
                goal_value = Engine.evaluate(
                    Goals.prepare_goals(FLAGS.goals,
                                        FLAGS.weights), '-{}'.format(level),
                    compiler, bench_in_dir, FLAGS.working_set, FLAGS.times,
                    FLAGS.tool, FLAGS.verify_output)
                compiler_name = 'clang' if compiler == 'llvm' else 'opt'
                if compiler_name not in results:
                    results[compiler_name] = {}
                results[compiler_name][level] = {
                    'goal': goal_value,
                    'seq': ['-{}'.format(level)]
                }

        IO.dump_yaml(results, filename)
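
The execute(argv) entry points in this listing follow the absl.app convention and read all configuration from absl flags. Below is a minimal sketch of how the flags accessed above might be declared and how the script could be wired up; the flag names come from the code, but the types and defaults are illustrative assumptions.

from absl import app, flags

# Types and defaults are assumptions; only the names are taken from the code above.
flags.DEFINE_string('benchmarks_filename', None, 'YAML file listing the benchmarks (suite.benchmark).')
flags.DEFINE_string('benchmarks_directory', None, 'Directory containing the benchmark sources.')
flags.DEFINE_string('results_directory', None, 'Directory that receives the YAML reports.')
flags.DEFINE_list('levels', ['O0', 'O1', 'O2', 'O3', 'Os', 'Oz'], 'Optimization levels to evaluate.')
flags.DEFINE_list('goals', ['binary_size'], 'Goals to optimize.')
flags.DEFINE_list('weights', ['1.0'], 'Weight of each goal.')
flags.DEFINE_integer('working_set', 0, 'Benchmark working set (input) to use.')
flags.DEFINE_integer('times', 3, 'Number of times to run each benchmark.')
flags.DEFINE_string('tool', None, 'Measurement tool.')
flags.DEFINE_boolean('verify_output', False, 'Verify the benchmark output.')
flags.DEFINE_boolean('verify_report', True, 'Skip benchmarks that already have a report.')

if __name__ == '__main__':
    app.run(execute)
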
    def run(self, benchmark):
        """Random algorithm.

        Argument
        --------
        benchmark: str
        """

        self.__results = {}
        counter = 0
        stop = False

        while True:
            # Create N sequences
            sequences = Sequence.create_random_sequences(
                self.__flags.nof_sequences, self.__flags.minimum,
                self.__flags.maximum, self.__flags.factor, self.__flags.ssa,
                self.__flags.shuffle, self.__flags.update,
                self.__flags.repetition, self.__flags.original,
                self.__flags.passes_filename)

            # For each sequence
            for _, data in sequences.items():
                sequence = data['seq']
                # Calculate the fitness
                if Sequence.exist(sequence, self.__results):
                    continue

                index = benchmark.find('.')

                goal_value = Engine.evaluate(
                    self.__flags.goals, Sequence.name_pass_to_string(sequence),
                    self.__flags.compiler,
                    os.path.join(self.__flags.benchmarks_directory,
                                 benchmark[:index], benchmark[index + 1:]),
                    self.__flags.working_set, self.__flags.times,
                    self.__flags.tool, self.__flags.verify_output)

                if goal_value == float('inf'):
                    continue

                # Store the results
                self.__results[counter] = {'seq': sequence, 'goal': goal_value}

                counter += 1
                if counter == self.__flags.nof_sequences:
                    stop = True
                    break

            if stop:
                break
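
Both the input and the output of the loop above are plain dictionaries; the accesses data['seq'], ['goal'] and the integer counter imply roughly the following shapes (pass names and goal values are purely illustrative):

# Assumed shape of Sequence.create_random_sequences(...): index -> {'seq': [passes]}
sequences = {
    0: {'seq': ['-mem2reg', '-gvn', '-simplifycfg']},
    1: {'seq': ['-sroa', '-instcombine']},
}

# Shape of self.__results after the loop: counter -> {'seq': ..., 'goal': ...}
results = {
    0: {'seq': ['-mem2reg', '-gvn', '-simplifycfg'], 'goal': 1842.0},
    1: {'seq': ['-sroa', '-instcombine'], 'goal': 1903.0},
}
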
Example #3
    def fitness(self, sequence):
        """Calculate and return the fitness."""
        sequence = Sequence.fix_index(list(sequence))
        sequence = Sequence.sanitize(sequence)
        sequence = Sequence.index_pass_to_list(sequence, self.passes_dict)
        goal_value = Engine.evaluate(self.goal,
                                     Sequence.name_pass_to_string(sequence),
                                     self.compiler,
                                     self.benchmark_directory,
                                     self.working_set,
                                     self.times,
                                     self.tool,
                                     self.verify_output)
        return [goal_value]
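
Returning the goal inside a single-element list is the convention used by single-objective GA frameworks such as DEAP; the sketch below shows how a fitness method like this one could be registered. DEAP is an assumption here, and 'problem' is a hypothetical instance of the class that defines fitness() and passes_dict.

import random
from deap import base, creator, tools

# Single objective, minimized; the weights tuple matches the one-element
# list returned by fitness().
creator.create('FitnessMin', base.Fitness, weights=(-1.0,))
creator.create('Individual', list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register('evaluate', problem.fitness)  # 'problem' is hypothetical
toolbox.register('attr_pass', random.randrange, 0, len(problem.passes_dict))
toolbox.register('individual', tools.initRepeat, creator.Individual,
                 toolbox.attr_pass, n=30)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
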
def execute(argv):
    """Evaluate N sequences"""

    del argv

    FLAGS = flags.FLAGS

    # The benchmarks
    benchmarks = IO.load_yaml_or_fail(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.error('There are no benchmarks to process')
        sys.exit(1)

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Process each benchmark
    for benchmark in tqdm(benchmarks, desc='Processing'):
        # The benchmark
        index = benchmark.find('.')
        suite = benchmark[:index]
        bench_name = benchmark[index + 1:]

        # Create the results directory for the suite
        results_dir = os.path.join(FLAGS.results_directory, suite)

        try:
            os.makedirs(results_dir)
        except FileExistsError:
            pass

        # Verify report
        if FLAGS.suffix:
            output_filename = '{}/{}_{}.yaml'.format(results_dir, bench_name,
                                                     FLAGS.suffix)
        else:
            output_filename = '{}/{}.yaml'.format(results_dir, bench_name)

        if FLAGS.verify_report and os.path.isfile(output_filename):
            continue

        # Benchmark directory
        bench_dir = os.path.join(FLAGS.benchmarks_directory, suite, bench_name)

        if not os.path.isdir(bench_dir):
            logging.error('Benchmark {} does not exist.'.format(benchmark))
            sys.exit(1)

        # The training data
        training_dir = os.path.join(FLAGS.training_directory, suite)
        filename = '{}/{}.yaml'.format(training_dir, bench_name)

        sequences = IO.load_yaml_or_fail(filename)
        if not sequences:
            logging.error('There are no sequences to process')
            sys.exit(1)

        best_sequence = Sequence.get_the_best(sequences)

        # Verify if the best sequence is better than the baseline
        baseline_dir = os.path.join(FLAGS.baseline_directory, suite)
        filename = '{}/{}.yaml'.format(baseline_dir, bench_name)
        baseline_data = IO.load_yaml_or_fail(filename)
        if not baseline_data:
            logging.error('There are no baseline data')
            sys.exit(1)

        baseline_goal = baseline_data[FLAGS.compiler][FLAGS.baseline]['goal']
        # best_sequence is expected to hold a single entry.
        for _, data in best_sequence.items():
            best_sequence_goal = data['goal']

        if best_sequence_goal >= baseline_goal:
            continue

        sequences = split_sequence(best_sequence)

        # Process the sequences
        results = {}
        for key, data in sequences.items():
            goal_value = Engine.evaluate(
                Goals.prepare_goals(FLAGS.goals, FLAGS.weights),
                Sequence.name_pass_to_string(data['seq']), 'opt', bench_dir,
                FLAGS.working_set, FLAGS.times, FLAGS.tool,
                FLAGS.verify_output)
            results[key] = {'seq': data['seq'], 'goal': goal_value}

        # Store the results
        IO.dump_yaml(results, output_filename)
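
The baseline report indexed as baseline_data[FLAGS.compiler][FLAGS.baseline]['goal'] has the structure written by the first execute() in this listing; loaded into Python it looks roughly like this (the numbers are illustrative):

baseline_data = {
    'opt': {
        'O0': {'goal': 2510.0, 'seq': ['-O0']},
        'Oz': {'goal': 1764.0, 'seq': ['-Oz']},
    },
    'clang': {
        'Oz': {'goal': 1780.0, 'seq': ['-Oz']},
    },
}

# With FLAGS.compiler == 'opt' and FLAGS.baseline == 'Oz':
baseline_goal = baseline_data['opt']['Oz']['goal']
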
Example #5
    def run(self,
            benchmark,
            sequence):
        """This method invokes C-Reduce to create a small benchmark.

        Arguments
        ---------
        benchmark : str

        sequence : list

        Return
        ------
        ret : bool
            True if the reduction succeeded, otherwise False.
        """
        index = benchmark.find('.')
        bench_in_dir = os.path.join(self.__flags.benchmarks_directory,
                                    benchmark[:index],
                                    benchmark[index+1:])

        if not os.path.isdir(bench_in_dir):
            lg.error('The directory {} does not exist.'.format(bench_in_dir))
            sys.exit(1)

        # 1. Calculate the difference (gain)

        # 1.1. Baseline's goal value
        baseline_goal_value = Engine.evaluate(self.__flags.goals,
                                              self.__flags.baseline,
                                              self.__flags.compiler,
                                              bench_in_dir)

        # 1.2. Sequence's goal value
        sequence_goal_value = Engine.evaluate(self.__flags.goals,
                                              Sequence.name_pass_to_string(
                                                  sequence
                                              ),
                                              self.__flags.compiler,
                                              bench_in_dir)

        diff = int(baseline_goal_value - sequence_goal_value)
        if diff <= 0:
            lg.warning(
                'It is not possible to reduce the code ({}).'.format(diff))
            return False

        # 2. Prepare the benchmark
        bench_out_dir = os.path.join(self.__flags.output_directory,
                                     benchmark[:index],
                                     benchmark[index+1:])
        try:
            os.makedirs(bench_out_dir)
        except FileExistsError:
            pass

        cmdline = 'cp {0}/*.c {1}'.format(bench_in_dir,
                                          bench_out_dir)

        try:
            subprocess.run(cmdline,
                           shell=True,
                           check=True,
                           capture_output=False)
        except subprocess.CalledProcessError:
            lg.fatal('Failed to copy the benchmark sources.')

        # 3. Create the C-Reduce script
        filename = '{}/test.sh'.format(bench_out_dir)

        with open(filename, 'w') as fout:
            fout.write('#!/bin/bash\n')
            fout.write('DIFF={}\n'.format(diff))
            fout.write('LIB=libMilepostStaticFeatures.so\n')
            fout.write('PASSES="{}"\n'.format(
                Sequence.name_pass_to_string(sequence)))
            fout.write('clang -Xclang -disable-O0-optnone '
                       '-S -w -c -emit-llvm *.c\n')
            fout.write('llvm-link *.ll -S -o creduce.ll\n')
            fout.write('opt --disable-output -load $LIB {} '
                       '--msf creduce.ll 2> msf.txt\n'.format(
                           self.__flags.baseline))
            fout.write('size_Oz=`grep f25 msf.txt | '
                       'awk \'{total += $NF} END { print total }\'`\n')
            fout.write('rm -f msf.txt\n')
            fout.write('opt --disable-output -load $LIB '
                       '$PASSES --msf creduce.ll 2> msf.txt\n')
            fout.write('size_PASSES=`grep f25 msf.txt | '
                       'awk \'{total += $NF} END { print total }\'`\n')
            fout.write('rm -f msf.txt\n')
            fout.write('diff=$(($size_Oz-$size_PASSES))\n')
            fout.write('if [[ $diff -eq $DIFF ]]; then\n')
            fout.write('   exit 0\n')
            fout.write('else\n')
            fout.write('   exit 1\n')
            fout.write('fi\n')

        cmdline = 'chmod +x {}/test.sh'.format(bench_out_dir)

        try:
            subprocess.run(cmdline,
                           shell=True,
                           check=True,
                           capture_output=False)
        except subprocess.CalledProcessError:
            lg.fatal('Failed to make the C-Reduce script executable.')

        # 4. Invoke C-Reduce
        prefix = '{}/'.format(bench_out_dir)
        sources = glob.glob('{}*.c'.format(prefix))
        # Pass every source file (relative name) to C-Reduce.
        sources = ' '.join(source.replace(prefix, '') for source in sources)

        cmdline = 'cd {0} ; creduce ./test.sh {1} > creduce.log 2>&1'.format(
            bench_out_dir, sources)

        try:
            subprocess.run(cmdline,
                           shell=True,
                           check=True,
                           capture_output=False)
        except subprocess.CalledProcessError:
            lg.fatal('C-Reduce invocation failed.')

        # 5. Create the new benchmark directory
        cmdline = 'cp {0}/Makefile.* {0}/compile.sh {1}'.format(bench_in_dir,
                                                                bench_out_dir)

        try:
            subprocess.run(cmdline,
                           shell=True,
                           check=True,
                           capture_output=False)
        except subprocess.CalledProcessError:
            lg.fatal('Failed to copy the build files into the reduced benchmark directory.')

        return True
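
The reducer only dereferences a handful of flag attributes (goals, baseline, compiler, benchmarks_directory, output_directory), so it can be exercised outside absl with a stub object. Everything in this sketch is an assumption: CReduceReducer is a hypothetical name for the class that owns run(), and the paths, benchmark name and pass list are placeholders.

from types import SimpleNamespace

flags_stub = SimpleNamespace(
    goals=['binary_size'],
    baseline='Oz',
    compiler='opt',
    benchmarks_directory='/path/to/benchmarks',
    output_directory='/path/to/reduced',
)

reducer = CReduceReducer(flags_stub)  # hypothetical class name
ok = reducer.run('MiBench.telecomm-CRC32',
                 ['-mem2reg', '-gvn', '-simplifycfg'])
print('reduced' if ok else 'nothing to reduce')
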
Example #6
    def run(self, sequence, benchmark):
        """Sequence Reduction algorithm.

        Suresh Purini and Lakshya Jain.
        Finding Good Optimization Sequences Covering Program Space.
        TACO.
        2013

        Arguments
        ---------
        sequence : list

        benchmark : str
        """
        # Calculate the initial value of the goal.
        index = benchmark.find('.')

        bench_dir = os.path.join(self.__flags.benchmarks_directory,
                                 benchmark[:index], benchmark[index + 1:])

        goal_value = Engine.evaluate(self.__flags.goals,
                                     Sequence.name_pass_to_string(sequence),
                                     self.__flags.compiler, bench_dir,
                                     self.__flags.working_set,
                                     self.__flags.times, self.__flags.tool,
                                     self.__flags.verify_output)

        # Store the initial value of the goal.
        self.__results[0] = {'seq': sequence, 'goal': goal_value}

        # Sequence Reduction algorithm
        lst_best_sequence = sequence.copy()
        best_goal_value = goal_value
        change = True

        while change:

            change = False
            bestseqlen = len(lst_best_sequence)

            for i in range(bestseqlen):

                vector = [1] * bestseqlen
                vector[i] = 0

                lst_new_sequence = Sequence.remove_passes(
                    lst_best_sequence, vector)

                goal_value = Engine.evaluate(
                    self.__flags.goals,
                    Sequence.name_pass_to_string(lst_new_sequence),
                    self.__flags.compiler, bench_dir, self.__flags.working_set,
                    self.__flags.times, self.__flags.tool,
                    self.__flags.verify_output)

                if goal_value <= best_goal_value:
                    best_goal_value = goal_value
                    lst_best_sequence = lst_new_sequence[:]
                    change = True
                    break

        goal_value = Engine.evaluate(
            self.__flags.goals,
            Sequence.name_pass_to_string(lst_best_sequence),
            self.__flags.compiler, bench_dir, self.__flags.working_set,
            self.__flags.times, self.__flags.tool, self.__flags.verify_output)

        # Store the final value of the goal.
        self.__results[1] = {'seq': lst_best_sequence, 'goal': goal_value}
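
Sequence.remove_passes is not shown in this listing; from the call above it presumably keeps the passes whose mask entry is 1 and drops the single position marked 0. A plain-Python illustration of that assumed behavior:

def remove_passes_sketch(sequence, vector):
    """Keep sequence[i] only where vector[i] == 1 (assumed semantics)."""
    return [pass_ for pass_, keep in zip(sequence, vector) if keep]

seq = ['-mem2reg', '-gvn', '-instcombine', '-simplifycfg']
print(remove_passes_sketch(seq, [1, 0, 1, 1]))
# ['-mem2reg', '-instcombine', '-simplifycfg']
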
def execute(argv):
    """Evaluate N sequences"""

    del argv

    FLAGS = flags.FLAGS

    # The benchmarks
    benchmarks = IO.load_yaml_or_fail(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.error('There are no benchmarks to process')
        sys.exit(1)

    # The sequences
    sequences = IO.load_yaml_or_fail(FLAGS.sequences_filename)
    if not sequences:
        logging.error('There are no sequences to process')
        sys.exit(1)

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Process each benchmark
    for benchmark in tqdm(benchmarks, desc='Processing'):
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index+1:]

        bench_in_dir = os.path.join(FLAGS.benchmarks_directory,
                                    bench_dir,
                                    bench_name)

        if not os.path.isdir(bench_in_dir):
            continue

        bench_out_dir = os.path.join(FLAGS.results_directory,
                                     bench_dir)

        # Create the results directory for the suite
        try:
            os.makedirs(bench_out_dir)
        except FileExistsError:
            pass

        # Verify report
        if FLAGS.suffix:
            filename = '{}/{}_{}.yaml'.format(
                bench_out_dir,
                bench_name,
                FLAGS.suffix
            )
        else:
            filename = '{}/{}.yaml'.format(bench_out_dir, bench_name)
        if FLAGS.verify_report and os.path.isfile(filename):
            continue

        # Process the sequences
        results = {}
        for key, data in sequences.items():
            goal_value = Engine.evaluate(
                Goals.prepare_goals(FLAGS.goals, FLAGS.weights),
                Sequence.name_pass_to_string(data['seq']),
                'opt',
                bench_in_dir,
                FLAGS.working_set,
                FLAGS.times,
                FLAGS.tool,
                FLAGS.verify_output
            )
            results[key] = {'seq': data['seq'], 'goal': goal_value}

        # Store the results
        IO.dump_yaml(results, filename)
def execute(argv):
    """Generate random sequences for each benchmark"""

    FLAGS = flags.FLAGS

    results_directory = FLAGS.results_directory

    # Test benchmarks
    test_benchmarks = IO.load_yaml_or_fail(FLAGS.test_benchs_filename)
    if not test_benchmarks:
        logging.fatal('There are no test benchmarks to process')

    # Training benchmarks
    training_benchmarks = IO.load_yaml_or_fail(FLAGS.training_benchs_filename)
    if not training_benchmarks:
        logging.fatal('There are no training benchmarks to process')

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Extract the representation for test programs
    print(bold('1. EXTRACTING THE REPRESENTATION'))
    FLAGS.results_directory = os.path.join(results_directory, 'representation')
    FLAGS.benchmarks_filename = FLAGS.test_benchs_filename
    representation.execute(argv)

    # Distance: test --> training
    print(bold('2. MEASURING THE DISTANCE'))
    distance_results_directory = os.path.join(results_directory, 'distance')
    FLAGS.results_directory = distance_results_directory
    FLAGS.test_representation_directory = os.path.join(results_directory,
                                                       'representation')
    distance.execute(argv)

    # Process test benchmarks
    print(bold('3. PROCESSING THE BENCHMARKS'))
    for nof_sequences in tqdm(FLAGS.nof_sequences, desc='Processing'):
        for test_benchmark in test_benchmarks:
            index = test_benchmark.find('.')
            suite_name = test_benchmark[:index]
            bench_name = test_benchmark[index + 1:]

            # Find the best N sequences
            training_suite, sequences = find_sequences(
                test_benchmark, training_benchmarks,
                distance_results_directory, FLAGS.training_data_directory,
                int(nof_sequences))

            # Goal_name
            if len(FLAGS.goals) > 1:
                goal_name = '_'.join(FLAGS.goals)
            else:
                goal_name = FLAGS.goals[0]

            # Create the results directory for the suite
            results_dir = os.path.join(results_directory,
                                       'predictive_compilation',
                                       training_suite, goal_name)
            try:
                os.makedirs(results_dir)
            except FileExistsError:
                pass

            filename = '{}/{}_j{}.yaml'.format(results_dir, bench_name,
                                               nof_sequences)

            if FLAGS.verify_report and os.path.isfile(filename):
                continue

            results = {}

            for key, data in sequences.items():
                goal_value = Engine.evaluate(
                    Goals.prepare_goals(FLAGS.goals, FLAGS.weights),
                    Sequence.name_pass_to_string(data['seq']), 'opt',
                    os.path.join(FLAGS.benchmarks_directory, suite_name,
                                 bench_name), FLAGS.working_set, FLAGS.times,
                    FLAGS.tool, FLAGS.verify_output)
                results[key] = {'seq': data['seq'], 'goal': goal_value}

            IO.dump_yaml(results, filename)
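
find_sequences is defined elsewhere in this module; its call site shows that it receives the test benchmark, the training benchmarks, the distance results and the training data, and returns the training suite together with the N best sequences taken from the most similar training benchmark. A hedged sketch of that assumed behavior (the distance-file layout and the helper name are assumptions):

def find_sequences_sketch(test_benchmark, training_benchmarks,
                          distance_directory, training_data_directory,
                          nof_sequences):
    """Assumed behavior: pick the closest training benchmark and return
    its nof_sequences best (lowest goal value) sequences."""
    suite, name = test_benchmark.split('.', 1)

    # Assumed layout: one YAML per test benchmark mapping each training
    # benchmark to its distance.
    distances = IO.load_yaml_or_fail(
        os.path.join(distance_directory, suite, '{}.yaml'.format(name)))

    closest = min((b for b in training_benchmarks if b in distances),
                  key=lambda b: distances[b])

    tr_suite, tr_name = closest.split('.', 1)
    training_data = IO.load_yaml_or_fail(
        os.path.join(training_data_directory, tr_suite,
                     '{}.yaml'.format(tr_name)))

    best = sorted(training_data.items(), key=lambda item: item[1]['goal'])
    return tr_suite, dict(best[:nof_sequences])
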