Example #1
import argparse
import os

from popsicle.ml_utils.df_utils import get_df_meta
from popsicle.utils import check_config
# intermediate_value is a project-local helper whose module is not shown in
# this snippet.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("n_values", type=int, help="Number of values to generate")
    parser.add_argument('-u', '--unroll', action='store_true', help='Generate parameters for unroll speedup prediction')
    args = parser.parse_args()
    n_values = args.n_values
    unroll = args.unroll

    var_name = 'LORE_PROC_CLANG_PATH' if unroll else 'LORE_PROC_PATH'
    check_config([var_name])
    proc_dir = os.path.abspath(os.environ[var_name])

    dirs = os.listdir(proc_dir)
    n_dirs = len(dirs)

    parsed = 0
    failed = 0

    df_meta = get_df_meta()

    for i, file_name in enumerate(dirs):
        if not os.path.isdir(os.path.join(proc_dir, file_name)):
            failed += 1
            continue

        print('[' + str(i + 1) + '/' + str(n_dirs) + '] Generating params for ' + file_name)

        try:
            with open(os.path.join(proc_dir, file_name, file_name + '_params.txt'), 'w') as fout, \
                    open(os.path.join(proc_dir, file_name, file_name + '_params_names.txt'), 'r') as fin_names, \
                    open(os.path.join(proc_dir, file_name, file_name + '_max_param.txt'), 'r') as fin_max:

                max_param = int(fin_max.read())
                param_names = fin_names.read().strip().split(',')
                try:
                    loop_depth = df_meta.loc[file_name, 'loop_depth']
                except KeyError:
                    print('\tCannot generate params - unknown loop depth')
                    failed += 1
                    continue

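                # Each line of *_params.txt holds one '-D NAME=value' define
                # per parameter (e.g. '-D PARAM_N=512 -D PARAM_M=512'; the
                # values here are illustrative); intermediate_value picks the
                # k-th point of a sweep bounded by max_param.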
                if param_names and param_names[0]:
                    for k in range(1, n_values + 1):
                        defines = ['-D ' + p + '=' + str(intermediate_value(k, n_values, loop_depth, max_param))
                                   for p in param_names]
                        fout.write(' '.join(defines) + '\n')
                else:
                    fout.write('\n')

                parsed += 1

        except FileNotFoundError:
            failed += 1
            print('\tFile (...)_params_names.txt or (...)_max_param.txt is missing.')

    print('========')
    print(str(parsed) + ' parsed, ' + str(failed) + ' skipped')
Example #2
import argparse
import os

import pandas as pd

from popsicle.utils import check_config
# split_code and CodeTransformer are project-local helpers whose modules are
# not shown in this snippet.


def main():
    check_config(['LORE_ORIG_PATH', 'LORE_PROC_PATH'])

    argparser = argparse.ArgumentParser()
    argparser.add_argument('-v',
                           '--verbose',
                           action='store_true',
                           help='Verbose')
    argparser.add_argument('-u',
                           '--unroll',
                           action='store_true',
                           help='If enabled prepare code for loop unrolling')
    args = argparser.parse_args()
    verbose = args.verbose
    unroll = args.unroll
    orig_path = os.path.abspath(os.environ['LORE_ORIG_PATH'])
    proc_path = os.path.abspath(os.environ['LORE_PROC_PATH'])

    df_meta = pd.DataFrame(columns=('alg', 'max_arr_dim', 'loop_depth'))
    print(df_meta)

    if not os.path.isdir(proc_path):
        os.makedirs(proc_path)

    dirs = os.listdir(orig_path)
    n_dirs = len(dirs)
    parsed = 0
    failed = 0

    for i, file_name in enumerate(dirs):
        try:
            if not file_name.endswith(".c"):
                continue

            print('[' + str(i + 1) + '/' + str(n_dirs) +
                  '] Parsing ' + file_name)

            file_path = os.path.join(orig_path, file_name)
            file_name = file_name[:-2]  # strip the '.c' extension
            out_dir = os.path.join(proc_path, file_name)

            with open(file_path, 'r') as fin:
                code = fin.read()
                includes, code = split_code(code)

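                # The options below suggest the transformation performed:
                # instrument the kernel with PAPI, rename the entry point to
                # 'loop', generate mallocs for arrays, and either rename loop
                # bounds to parameters (training mode) or inject
                # '#pragma unroll' (unroll mode).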
                ct = CodeTransformer(includes=includes,
                                     code=code,
                                     papi_scope='pragma',
                                     verbose=verbose,
                                     main_name='loop',
                                     modifiers_to_remove=['extern'],
                                     gen_mallocs=True,
                                     rename_bounds=(not unroll),
                                     add_pragma_unroll=unroll)

                code = ct.transform()

                if not os.path.isdir(out_dir):
                    os.makedirs(out_dir)

                with open(os.path.join(out_dir, file_name + '.c'),
                          'w') as fout:
                    fout.write(code)

                with open(os.path.join(out_dir, file_name + '_max_param.txt'),
                          'w') as fout:
                    fout.write(str(ct.max_param))

                with open(
                        os.path.join(out_dir, file_name + '_params_names.txt'),
                        'w') as fout:
                    fout.write(','.join(
                        ['PARAM_' + b.upper() for b in ct.pp.bounds]))

                # DataFrame.append was removed in pandas 2.0; pd.concat is
                # the modern equivalent.
                df_meta = pd.concat([df_meta, pd.DataFrame(
                    [[file_name, ct.max_arr_dim, ct.loop_depth]],
                    columns=df_meta.columns)], ignore_index=True)

                parsed += 1

        except Exception as e:
            failed += 1
            print('\t', e)

    df_meta.to_csv(os.path.join(proc_path, 'metadata.csv'),
                   index_label=False,
                   index=False)

    print('========')
    print(str(parsed) + ' parsed, ' + str(failed) + ' skipped')
Example #3
from __future__ import print_function
import argparse
import os
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23
from popsicle.ml_utils.file_loader import FileLoader
from popsicle.utils import check_config

check_config(['OUT_DIR', 'MODELS_DIR'])

out_dir = os.path.abspath(os.environ['OUT_DIR'])
model_dir = os.path.abspath(os.environ['MODELS_DIR'])


def load_models():
    scaler = joblib.load(os.path.join(model_dir, 'scaler.pkl'))
    dim_reducer = joblib.load(os.path.join(model_dir, 'dim_reducer.pkl'))
    regr = joblib.load(os.path.join(model_dir, 'regr.pkl'))
    return scaler, dim_reducer, regr
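
# The three .pkl files are assumed to have been produced by an earlier
# training step with joblib.dump, e.g.:
#   joblib.dump(scaler, os.path.join(model_dir, 'scaler.pkl'))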


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', action='append', required=True,
                        help='<Required> input files in CSV format (names without extensions). You can provide multiple'
                             ' files (-i file1 -i file2...).')
    args = parser.parse_args()
    files = args.input

    scaler, dim_reducer, regr = load_models()
    fl = FileLoader(files, mode='p', scaler=scaler)
Example #4
import os
from typing import Tuple, List
from random import shuffle
import pandas as pd
from popsicle.ml_utils.data_set import DataSet
from popsicle.utils import check_config

check_config(['LORE_PROC_PATH'])
proc_dir = os.path.abspath(os.environ['LORE_PROC_PATH'])


def df_aggregate(df: pd.DataFrame) -> pd.DataFrame:
    """
    Of all measurements of the same program with the same parameters, take the minimum.
    The minimum should always be the closest approximation of the program's true
    characteristics, since it includes the least measurement overhead.
    """
    return df.groupby(['alg', 'run']).min()
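
# A toy illustration (hypothetical data): repeated measurements of the same
# (alg, run) pair collapse to their column-wise minimum:
#
# >>> df = pd.DataFrame({'alg': ['a', 'a'], 'run': [1, 1],
# ...                    'time': [120, 100], 'PAPI_TOT_INS': [55, 50]})
# >>> df_aggregate(df)
#          time  PAPI_TOT_INS
# alg run
# a   1     100            50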


def df_get_index_col(df: pd.DataFrame, col: str) -> List:
    """
    Finds the index level with the given name in the DataFrame and returns its values.
    """
    col_level = df.index.names.index(col)
    return df.index.get_level_values(col_level)
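
# E.g. for a frame indexed by ('alg', 'run'), df_get_index_col(df, 'run')
# returns the 'run' value of every row.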


def df_scale_by_tot_ins(df: pd.DataFrame) -> pd.DataFrame:
    """
    Divides all PAPI counter outputs by PAPI_TOT_INS in order to normalise them.
    """
Example #5
import argparse
import pandas as pd
# noinspection PyCompatibility
import urllib.parse
# noinspection PyCompatibility
import urllib.request
import os
from popsicle.utils import check_config

check_config(['LORE_ORIG_PATH'])

lore_url = 'https://vectorization.computer/AJAX/get_src.php'
out_dir = os.path.abspath(os.environ["LORE_ORIG_PATH"])


def main():
    """
    The input file can be obtained from the LORE repository online by running the query:
        SELECT id, application, benchmark, file, line, function, version FROM loops
    at https://vectorization.computer/query.html

    (valid as of Aug 2018)
    """

    parser = argparse.ArgumentParser()
    parser.add_argument("input_file",
                        help="Path to CSV file obtained from LORE query.")
    args = parser.parse_args()
    input_file = args.input_file

    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
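
    # A hedged sketch of the (truncated) download loop: fetch each loop's
    # source from lore_url and store it under out_dir. The POST parameter
    # name ('id') and the output file name are assumptions, not the
    # documented LORE API.
    df = pd.read_csv(input_file)
    for _, row in df.iterrows():
        query = urllib.parse.urlencode({'id': row['id']}).encode()
        with urllib.request.urlopen(lore_url, data=query) as response:
            code = response.read().decode()
        with open(os.path.join(out_dir, str(row['id']) + '.c'), 'w') as fout:
            fout.write(code)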
Example #6
import os
import pandas as pd
from typing import List
from popsicle.ml_utils.data import Data
from popsicle.ml_utils.df_utils import df_aggregate, df_sort_cols, df_scale_by_tot_ins, get_df_meta
from popsicle.utils import check_config

check_config(['OUT_DIR'])
out_dir = os.path.abspath(os.environ['OUT_DIR'])
min_time = 100


class FileLoader:
    def __init__(self, files, mode='gcc', dim=None, scaler=None):
        if mode in ('predict', 'p') and scaler is None:
            raise ValueError('Scaler must be provided to make predictions')

        self.data = None

        self.files = []
        for file in files:
            if file.endswith('.csv'):
                base = os.path.basename(file)
                print('Warning: ' + base + ' interpreted as ' + base[:-4] +
                      ' (file names should be provided without extensions).')
                self.files.append(file[:-4])
            else:
                self.files.append(file)

        self.dim = dim
        self.scaler = scaler
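
# Hypothetical usage: a prediction run ('p') must reuse a scaler fitted
# during training, while other modes may fit their own:
#   FileLoader(['run1', 'run2'], mode='gcc')
#   FileLoader(['new_run'], mode='p', scaler=saved_scaler)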
Example #7
import os
import re
from popsicle.utils import check_config

check_config(['PAPI_UTILS_PATH'])
papi_utils_path = os.path.abspath(os.environ['PAPI_UTILS_PATH'])


class CodeTransformerStr:
    def __init__(self, includes, code):
        self.includes = includes
        self.code = code

    def add_includes(self, other_includes=None):
        """
        Adds all necessary #include instructions to the code.
        """
        if other_includes is None:
            other_includes = []

        self.includes += '\n'
        self.includes += '#include <papi.h>\n'
        self.includes += '#include <time.h>\n'

        for path in other_includes:
            self.includes += '#include <' + path + '>\n'

        self.includes += '#include "' + os.path.abspath(
            os.path.join(papi_utils_path, 'papi_utils.h')) + '"\n'
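
        # For example (hypothetical call), add_includes(['omp.h']) appends:
        #   #include <papi.h>
        #   #include <time.h>
        #   #include <omp.h>
        #   #include "/abs/path/to/papi_utils/papi_utils.h"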

    def add_max_macro(self):
        """
        Adds a MAX macro definition to the code (body not shown in this
        snippet).
        """