Code Example #1
def execute(argv):
    """Generate random sequences for each benchmark"""

    del argv

    FLAGS = flags.FLAGS

    # The benchmarks
    benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.fatal('There are no benchmarks to process')

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Process each benchmark
    for benchmark in tqdm(benchmarks, desc='Processing'):
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index + 1:]

        bench_in_dir = os.path.join(FLAGS.benchmarks_directory, bench_dir,
                                    bench_name)

        if not os.path.isdir(bench_in_dir):
            continue

        bench_out_dir = os.path.join(FLAGS.results_directory, bench_dir)

        # Create the results directory for the suite
        try:
            os.makedirs(bench_out_dir)
        except FileExistsError:
            pass

        filename = '{}/{}.yaml'.format(bench_out_dir, bench_name)
        if FLAGS.verify_report and os.path.isfile(filename):
            continue

        results = {}
        for compiler in ['opt', 'llvm']:
            for level in FLAGS.levels:
                goal_value = Engine.evaluate(
                    Goals.prepare_goals(FLAGS.goals,
                                        FLAGS.weights), '-{}'.format(level),
                    compiler, bench_in_dir, FLAGS.working_set, FLAGS.times,
                    FLAGS.tool, FLAGS.verify_output)
                compiler_name = 'clang' if compiler == 'llvm' else 'opt'
                if compiler_name not in results:
                    results[compiler_name] = {}
                results[compiler_name][level] = {
                    'goal': goal_value,
                    'seq': ['-{}'.format(level)]
                }

        IO.dump_yaml(results, filename)
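
The execute functions in these examples follow the absl-py entry-point convention: argv is discarded and the configuration comes from flags.FLAGS. As a point of reference, the sketch below shows how such an entry point is typically wired up. Only a subset of the flags is declared; the names mirror the ones referenced above, while the defaults are illustrative assumptions rather than the project's actual definitions.

from absl import app, flags

# Illustrative flag declarations; defaults are assumptions, not the
# project's real values.
flags.DEFINE_string('benchmarks_filename', None, 'YAML file listing the benchmarks.')
flags.DEFINE_string('benchmarks_directory', None, 'Directory containing the benchmarks.')
flags.DEFINE_string('results_directory', './results', 'Where to write the YAML reports.')
flags.DEFINE_list('levels', ['O0', 'O1', 'O2', 'O3', 'Os', 'Oz'], 'Optimization levels.')
flags.DEFINE_boolean('verify_report', True, 'Skip benchmarks that already have a report.')

if __name__ == '__main__':
    app.run(execute)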
Code Example #2
def execute(argv):
    """Generate random sequences for each benchmark"""

    del argv

    FLAGS = flags.FLAGS

    # The benchmarks
    benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.error('There are no benchmarks to process')
        sys.exit(1)

    # Verify benchmark directory
    if not os.path.isdir(FLAGS.benchmarks_directory):
        logging.error('Benchmarks directory {} does not exist.'.format(
            FLAGS.benchmarks_directory))
        sys.exit(1)

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Initialize a Random object
    rnd = Random(FLAGS.nof_sequences, FLAGS.minimum, FLAGS.maximum,
                 FLAGS.factor, FLAGS.ssa, FLAGS.shuffle, FLAGS.update,
                 FLAGS.repetition, FLAGS.original, FLAGS.passes_filename,
                 Goals.prepare_goals(FLAGS.goals, FLAGS.weights), 'opt',
                 FLAGS.benchmarks_directory, FLAGS.working_set, FLAGS.times,
                 FLAGS.tool, FLAGS.verify_output)

    # Process each benchmark
    for benchmark in tqdm(benchmarks, desc='Processing'):
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index + 1:]

        bench_dir = os.path.join(FLAGS.results_directory, bench_dir)

        # Create the results directory for the suite
        try:
            os.makedirs(bench_dir)
        except FileExistsError:
            pass

        filename = '{}/{}.yaml'.format(bench_dir, bench_name)
        if FLAGS.verify_report and os.path.isfile(filename):
            continue

        rnd.run(benchmark)
        if rnd.results:
            IO.dump_yaml(rnd.results, filename, FLAGS.report_only_the_best)
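
Two small patterns recur in every example: the try/except FileExistsError around os.makedirs, and the manual benchmark.find('.') slicing that splits a 'suite.benchmark' identifier. Both have compact standard-library equivalents; the helper below is only a sketch, and prepare_output_dir is a hypothetical name, not part of the project.

import os


def prepare_output_dir(results_directory, benchmark):
    """Split a 'suite.benchmark' identifier and create the per-suite directory."""
    suite, bench_name = benchmark.split('.', 1)    # same result as the find('.') slicing
    out_dir = os.path.join(results_directory, suite)
    os.makedirs(out_dir, exist_ok=True)            # same effect as try/except FileExistsError
    return out_dir, bench_name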
Code Example #3
def execute(argv):
    """Create a small sequence"""

    del argv

    FLAGS = flags.FLAGS

    # Verify directories
    if not os.path.isdir(FLAGS.benchmarks_directory):
        logging.error('Benchmarks directory {} does not exist.'.format(
            FLAGS.benchmarks_directory))
        sys.exit(1)

    if not os.path.isdir(FLAGS.training_directory):
        logging.error('Training directory {} does not exist.'.format(
            FLAGS.training_directory))
        sys.exit(1)

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # The benchmarks
    if FLAGS.benchmarks_filename:
        benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
        if not benchmarks:
        logging.error('There are no benchmarks to process')
            sys.exit(1)
    else:
        benchmarks = glob.glob('{}/*.yaml'.format(FLAGS.training_directory))
        benchmarks = [
            b.replace('{}/'.format(FLAGS.training_directory),
                      '').replace('.yaml', '') for b in benchmarks
        ]

    # Initialize a SequenceReduction object
    seqred = SequenceReduction(Goals.prepare_goals(FLAGS.goals,
                                                   FLAGS.weights), 'opt',
                               FLAGS.benchmarks_directory, FLAGS.working_set,
                               FLAGS.times, FLAGS.tool, FLAGS.verify_output)

    # Reduce
    for benchmark in tqdm(benchmarks, desc='Processing'):
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index + 1:]

        bench_dir_result = os.path.join(FLAGS.results_directory, bench_dir)
        filename_result = '{}/{}.yaml'.format(bench_dir_result, bench_name)

        # Create the results directory for the suite
        try:
            os.makedirs(bench_dir_result)
        except FileExistsError:
            pass

        if FLAGS.verify_report and os.path.isfile(filename_result):
            continue

        bench_dir_training = os.path.join(FLAGS.training_directory, bench_dir)
        filename_training = '{}/{}.yaml'.format(bench_dir_training, bench_name)

        sequences = IO.load_yaml(filename_training)
        sequences = Sequence.get_the_best(sequences, FLAGS.nof_sequences)

        results = {}
        counter = 0
        for _, data in sequences.items():
            seqred.run(data['seq'], benchmark)

            if not FLAGS.report_only_the_small:
                results[counter] = {
                    'seq': seqred.results[0]['seq'],
                    'goal': seqred.results[0]['goal']
                }
                counter += 1

            results[counter] = {
                'seq': seqred.results[1]['seq'],
                'goal': seqred.results[1]['goal']
            }
            counter += 1

        IO.dump_yaml(results, filename_result)
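
Each report written by IO.dump_yaml above has the same shape: an integer key mapping to {'seq': [...], 'goal': value}, where a lower goal value is better (this is what the baseline comparison in Example #5 relies on). Below is a minimal sketch of inspecting such a report with plain PyYAML, independent of the project's IO helper; 'report.yaml' is a placeholder path.

import yaml

# Assumes the report layout used above: {index: {'seq': [...], 'goal': float}}.
with open('report.yaml') as fp:
    report = yaml.safe_load(fp)

best_key = min(report, key=lambda key: report[key]['goal'])  # lower goal is better
print(report[best_key]['seq'], report[best_key]['goal'])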
Code Example #4
def execute(argv):
    """Generate genetic sequences for each benchmark"""

    del argv

    FLAGS = flags.FLAGS

    # The benchmarks
    benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.error('There are no benchmarks to process')
        sys.exit(1)

    # Verify benchmark directory
    if not os.path.isdir(FLAGS.benchmarks_directory):
        logging.error('Benchmarks directory {} does not exist.'.format(
            FLAGS.benchmarks_directory))
        sys.exit(1)

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Initialize a SGA object
    sga = SGA(FLAGS.generations,
              FLAGS.population,
              FLAGS.cr,
              FLAGS.m,
              FLAGS.param_m,
              FLAGS.param_s,
              FLAGS.crossover,
              FLAGS.mutation,
              FLAGS.selection,
              FLAGS.seed,
              FLAGS.dimension,
              FLAGS.passes_filename,
              Goals.prepare_goals(FLAGS.goals, FLAGS.weights),
              'opt',
              FLAGS.benchmarks_directory,
              FLAGS.working_set,
              FLAGS.times,
              FLAGS.tool,
              FLAGS.verify_output)

    # Process each benchmark
    for benchmark in tqdm(benchmarks, desc='Processing'):
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index+1:]

        bench_dir = os.path.join(FLAGS.results_directory,
                                 bench_dir)

        # Create the results directory for the suite
        try:
            os.makedirs(bench_dir)
        except FileExistsError:
            pass

        filename = '{}/{}.yaml'.format(bench_dir, bench_name)
        if FLAGS.verify_report and os.path.isfile(filename):
            continue

        sga.run(benchmark)

        if sga.results:
            IO.dump_yaml(sga.results,
                         filename,
                         FLAGS.report_only_the_best)
Code Example #5
def execute(argv):
    """Evaluate N sequences"""

    del argv

    FLAGS = flags.FLAGS

    # The benchmarks
    benchmarks = IO.load_yaml_or_fail(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.error('There are no benchmarks to process')
        sys.exit(1)

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Process each benchmark
    for benchmark in tqdm(benchmarks, desc='Processing'):
        # The benchmark
        index = benchmark.find('.')
        suite = benchmark[:index]
        bench_name = benchmark[index + 1:]

        # Create the results directory for the suite
        results_dir = os.path.join(FLAGS.results_directory, suite)

        try:
            os.makedirs(results_dir)
        except FileExistsError:
            pass

        # Verify report
        if FLAGS.suffix:
            output_filename = '{}/{}_{}.yaml'.format(results_dir, bench_name,
                                                     FLAGS.suffix)
        else:
            output_filename = '{}/{}.yaml'.format(results_dir, bench_name)

        if FLAGS.verify_report and os.path.isfile(output_filename):
            continue

        # Benchmark directory
        bench_dir = os.path.join(FLAGS.benchmarks_directory, suite, bench_name)

        if not os.path.isdir(bench_dir):
            logging.error('Benchmark {} does not exist.'.format(benchmark))
            sys.exit(1)

        # The training data
        training_dir = os.path.join(FLAGS.training_directory, suite)
        filename = '{}/{}.yaml'.format(training_dir, bench_name)

        sequences = IO.load_yaml_or_fail(filename)
        if not sequences:
            logging.error('There are no sequences to process')
            sys.exit(1)

        best_sequence = Sequence.get_the_best(sequences)

        # Verify if the best sequence is better than the baseline
        baseline_dir = os.path.join(FLAGS.baseline_directory, suite)
        filename = '{}/{}.yaml'.format(baseline_dir, bench_name)
        baseline_data = IO.load_yaml_or_fail(filename)
        if not baseline_data:
            logging.error('There are no baseline data')
            sys.exit(1)

        baseline_goal = baseline_data[FLAGS.compiler][FLAGS.baseline]['goal']
        # Extract the goal of the best sequence (best_sequence is expected
        # to hold a single entry).
        for _, data in best_sequence.items():
            best_sequence_goal = data['goal']

        if not (best_sequence_goal < baseline_goal):
            continue

        sequences = split_sequence(best_sequence)

        # Process the sequences
        results = {}
        for key, data in sequences.items():
            goal_value = Engine.evaluate(
                Goals.prepare_goals(FLAGS.goals, FLAGS.weights),
                Sequence.name_pass_to_string(data['seq']), 'opt', bench_dir,
                FLAGS.working_set, FLAGS.times, FLAGS.tool,
                FLAGS.verify_output)
            results[key] = {'seq': data['seq'], 'goal': goal_value}

        # Store the results
        IO.dump_yaml(results, output_filename)
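
The baseline report read above, baseline_data[compiler][level]['goal'], matches the layout produced by Example #1. For orientation, here is a hedged illustration of that layout as a Python literal; the levels and numbers are invented.

# Invented values; the structure mirrors what Example #1 writes:
# results[compiler_name][level] = {'goal': ..., 'seq': ['-<level>']}.
baseline_data = {
    'opt': {
        'O3': {'goal': 10234.0, 'seq': ['-O3']},
        'Oz': {'goal': 9120.0, 'seq': ['-Oz']},
    },
    'clang': {
        'O3': {'goal': 9876.0, 'seq': ['-O3']},
    },
}
baseline_goal = baseline_data['opt']['O3']['goal']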
Code Example #6
    def evaluate(goals,
                 sequence,
                 compiler,
                 benchmark_directory,
                 working_set=0,
                 times=1,
                 tool='perf',
                 verify_output=False,
                 warmup_cache=False,
                 runtime=0):
        """This method compiles the benchmark and
           returns the value of the goal.

        Arguments
        ----------
        goals : dict
            {goal: weight}

        sequence : str
            The sequence to compile the benchmark.

        compiler : str
            The compiler to use.

        benchmark_directory : str
            The directory of the benchmark.

        working_set : int
            The dataset that the benchmark will execute.

        times : int
            Execution times

        tool : str
            Execution tool

        verify_output : bool
            Verify the status of the execution

        warmup_cache : bool
            Whether to warm up the cache (forwarded to Engine.execute).

        runtime : int
            Execution timeout (forwarded to Engine.execute).

        Returns
        -------
        goal : float
            The value of the goal.
        """
        # Compile the benchmark N times if compile_time is the goal.
        times_ = times if times > 2 else 3

        Engine.compile(benchmark_directory, compiler, sequence, working_set,
                       times_ if Goals.has_compile_time(goals) else 1)

        # Run the benchmark only when a dynamic goal or output verification
        # requires it; default to a successful status otherwise.
        out_ok = True
        if verify_output and Goals.only_compile_time_goal(goals):
            out_ok = Engine.execute(benchmark_directory, working_set, 1, tool,
                                    verify_output, warmup_cache, runtime)
        elif Goals.has_dynamic_goal(goals):
            out_ok = Engine.execute(benchmark_directory, working_set, times_,
                                    tool, verify_output, warmup_cache, runtime)

        if verify_output and not out_ok:
            Engine.cleanup(benchmark_directory, compiler)
            return float('inf')

        goal_value = 0.0
        for goal, weight in goals.items():
            # Extract the value of the goal.
            if goal == 'llvm_instructions':
                goal_value += Goals.llvm_instructions(
                    benchmark_directory) * weight
            elif goal == 'binary_size':
                goal_value += Goals.binary_size(benchmark_directory) * weight
            elif goal == 'compile_time':
                goal_value += Goals.compile_time(benchmark_directory) * weight
            elif goal == 'runtime':
                goal_value += Goals.runtime(benchmark_directory, tool) * weight
            elif goal == 'cycles':
                goal_value += Goals.cycles(benchmark_directory) * weight
            elif goal == 'instructions':
                goal_value += Goals.instructions(benchmark_directory) * weight

        Engine.cleanup(benchmark_directory, compiler)

        return goal_value
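
For reference, the earlier examples call this method positionally with the prepared goals, a flag string, the compiler, and the benchmark directory. The call below mirrors those call sites; the goal name, weight, and path are placeholders rather than values taken from the project.

# Hypothetical call, mirroring the call sites in the examples above.
goals = Goals.prepare_goals(['binary_size'], [1.0])  # placeholder goal and weight
goal_value = Engine.evaluate(goals,
                             '-O2',                  # sequence, as in Example #1
                             'opt',
                             '/path/to/benchmarks/MiBench/susan',  # placeholder
                             working_set=0,
                             times=3,
                             tool='perf',
                             verify_output=True)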
Code Example #7
def execute(argv):
    """Evaluate N sequences"""

    del argv

    FLAGS = flags.FLAGS

    # The benchmarks
    benchmarks = IO.load_yaml_or_fail(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.error('There are no benchmarks to process')
        sys.exit(1)

    # The sequences
    sequences = IO.load_yaml_or_fail(FLAGS.sequences_filename)
    if not sequences:
        logging.error('There are no sequences to process')
        sys.exit(1)

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Process each benchmark
    for benchmark in tqdm(benchmarks, desc='Processing'):
        index = benchmark.find('.')
        bench_dir = benchmark[:index]
        bench_name = benchmark[index+1:]

        bench_in_dir = os.path.join(FLAGS.benchmarks_directory,
                                    bench_dir,
                                    bench_name)

        if not os.path.isdir(bench_in_dir):
            continue

        bench_out_dir = os.path.join(FLAGS.results_directory,
                                     bench_dir)

        # Create the results directory for the suite
        try:
            os.makedirs(bench_out_dir)
        except FileExistsError:
            pass

        # Verify report
        if FLAGS.suffix:
            filename = '{}/{}_{}.yaml'.format(
                bench_out_dir,
                bench_name,
                FLAGS.suffix
            )
        else:
            filename = '{}/{}.yaml'.format(bench_out_dir, bench_name)
        if FLAGS.verify_report and os.path.isfile(filename):
            continue

        # Process the sequences
        results = {}
        for key, data in sequences.items():
            goal_value = Engine.evaluate(
                Goals.prepare_goals(FLAGS.goals, FLAGS.weights),
                Sequence.name_pass_to_string(data['seq']),
                'opt',
                bench_in_dir,
                FLAGS.working_set,
                FLAGS.times,
                FLAGS.tool,
                FLAGS.verify_output
            )
            results[key] = {'seq': data['seq'], 'goal': goal_value}

        # Store the results
        IO.dump_yaml(results, filename)
Code Example #8
def execute(argv):
    """Generate random sequences for each benchmark"""

    FLAGS = flags.FLAGS

    results_directory = FLAGS.results_directory

    # Test benchmarks
    test_benchmarks = IO.load_yaml_or_fail(FLAGS.test_benchs_filename)
    if not test_benchmarks:
        logging.fatal('There are no test benchmarks to process')

    # Training benchmarks
    training_benchmarks = IO.load_yaml_or_fail(FLAGS.training_benchs_filename)
    if not training_benchmarks:
        logging.fatal('There are no training benchmarks to process')

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Extract the representation for test programs
    print(bold('1. EXTRACTING THE REPRESENTATION'))
    FLAGS.results_directory = os.path.join(results_directory, 'representation')
    FLAGS.benchmarks_filename = FLAGS.test_benchs_filename
    representation.execute(argv)

    # Distance: test --> training
    print(bold('2. MEASURING THE DISTANCE'))
    distance_results_directory = os.path.join(results_directory, 'distance')
    FLAGS.results_directory = distance_results_directory
    FLAGS.test_representation_directory = os.path.join(results_directory,
                                                       'representation')
    distance.execute(argv)

    # Process test benchmarks
    print(bold('3. PROCESSING THE BENCHMARKS'))
    for nof_sequences in tqdm(FLAGS.nof_sequences, desc='Processing'):
        for test_benchmark in test_benchmarks:
            index = test_benchmark.find('.')
            suite_name = test_benchmark[:index]
            bench_name = test_benchmark[index + 1:]

            # Find the best N sequences
            training_suite, sequences = find_sequences(
                test_benchmark, training_benchmarks,
                distance_results_directory, FLAGS.training_data_directory,
                int(nof_sequences))

            # Goal_name
            if len(FLAGS.goals) > 1:
                goal_name = '_'.join(FLAGS.goals)
            else:
                goal_name = FLAGS.goals[0]

            # Create the results directory for the suite
            results_dir = os.path.join(results_directory,
                                       'predictive_compilation',
                                       training_suite, goal_name)
            try:
                os.makedirs(results_dir)
            except FileExistsError:
                pass

            filename = '{}/{}_j{}.yaml'.format(results_dir, bench_name,
                                               nof_sequences)

            if FLAGS.verify_report and os.path.isfile(filename):
                continue

            results = {}

            for key, data in sequences.items():
                goal_value = Engine.evaluate(
                    Goals.prepare_goals(FLAGS.goals, FLAGS.weights),
                    Sequence.name_pass_to_string(data['seq']), 'opt',
                    os.path.join(FLAGS.benchmarks_directory, suite_name,
                                 bench_name), FLAGS.working_set, FLAGS.times,
                    FLAGS.tool, FLAGS.verify_output)
                results[key] = {'seq': data['seq'], 'goal': goal_value}

            IO.dump_yaml(results, filename)