Example 1
def get_next_memory_windows(trace_file, cache_type, cache_size, n_early_stop,
                            seq_start, parameters, args_global):
    # use an 80% warmup; unimodal behavior is checked manually
    n_warmup = int(0.8 * n_early_stop)
    # per-cache-size parameters override the other parameters
    df = database.load_reports(
        trace_file=trace_file,
        cache_type=cache_type,
        cache_size=str(cache_size),
        seq_start=str(seq_start),
        n_early_stop=str(n_early_stop),
        n_warmup=n_warmup,
        version=parameters['version'],  # use version as a strong key
        dburi=parameters["dburi"],
        dbcollection=parameters["dbcollection"],
    )
    if len(df) == 0:
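        # no prior reports yet: seed the search with evenly spaced candidate
        # windows up to 40% of the evaluated trace length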
        next_windows = np.linspace(1,
                                   int(0.4 * n_early_stop),
                                   args_global['n_beam'] + 1,
                                   dtype=int)[1:]
    else:
        # the memory window is capped at 40% of the evaluated trace length
        parameter_space = ParameterSpace(
            [ContinuousParameter('x', 1, int(0.4 * n_early_stop))])
        bo = GPBayesianOptimization(
            variables_list=parameter_space.parameters,
            X=df['memory_window'].values.reshape(-1, 1),
            Y=df['byte_miss_ratio'].values.reshape(-1, 1),
            batch_size=args_global['n_beam'])
        next_windows = bo.suggest_new_locations().reshape(-1).astype(int)
    return next_windows
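
For reference, here is a minimal, self-contained sketch of the emukit batch-suggestion pattern used above. The import paths are the assumed emukit ones, and the observations are made-up toy values rather than data from the original project; only the calls already present in get_next_memory_windows are used.

import numpy as np
from emukit.core import ContinuousParameter, ParameterSpace
from emukit.examples.gp_bayesian_optimization.single_objective_bayesian_optimization import \
    GPBayesianOptimization

# hypothetical (memory_window, byte_miss_ratio) observations
observed_windows = np.array([[1000.0], [50000.0], [200000.0]])
observed_miss_ratios = np.array([[0.42], [0.35], [0.39]])

# search space: windows between 1 and an illustrative upper bound
space = ParameterSpace([ContinuousParameter('x', 1, 400000)])
bo = GPBayesianOptimization(variables_list=space.parameters,
                            X=observed_windows,
                            Y=observed_miss_ratios,
                            batch_size=4)
# a batch of 4 candidate windows to try next, as in get_next_memory_windows
next_windows = bo.suggest_new_locations().reshape(-1).astype(int)
print(next_windows)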
Example 2
def get_tasks_per_cache_size(trace_file, cache_type, cache_size, parameters,
                             args_global):
    n_req = parameters['n_req']
    n_validation = int(n_req * args_global['ratio_validation'])
    n_warmup = int(0.8 * n_validation)
    # TODO: ideally all parameters should be matched here; as is, multiple values
    #  for the same LRB parameter are not supported
    df = database.load_reports(
        trace_file=trace_file,
        cache_type=cache_type,
        # cache_size is omitted on purpose: smaller cache sizes may also be checked
        n_early_stop=str(n_validation),
        n_warmup=n_warmup,
        lrb_version=parameters['lrb_version'],  # use lrb_version as a strong key
        dburi=parameters["dburi"],
        dbcollection=parameters["dbcollection"],
    )
    tasks = get_validation_tasks_per_cache_size(trace_file, cache_type,
                                                cache_size, parameters,
                                                args_global, df)
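    # validation tasks, if any, take priority; otherwise dispatch to an
    # evaluation run or to window fitting, depending on check_need_fitting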
    if len(tasks) != 0:
        return tasks
    need_fitting = check_need_fitting(cache_size, parameters, args_global, df)
    if not need_fitting:
        return get_evaluation_task(trace_file, cache_type, cache_size,
                                   parameters, args_global, df)
    return get_fitting_task(trace_file, cache_type, cache_size, parameters,
                            args_global, df)
Example 3
def _get_evaluation_task(trace_file, cache_type, cache_size, memory_window, parameters):
    n_early_stop = parameters['n_early_stop']
    # TODO: ideally all parameters should be matched here; as is, multiple values
    #  for the same LRB parameter are not supported
    df = database.load_reports(
        trace_file=trace_file,
        cache_type=cache_type,
        cache_size=str(cache_size),
        n_early_stop=str(n_early_stop),
        n_warmup=0,
        memory_window=str(memory_window),
        version=parameters['version'],  # use version as a strong key
        dburi=parameters["dburi"],
        dbcollection=parameters["dbcollection"],
    )
    if len(df) != 0:
        # a matching report already exists, so there is nothing to schedule
        return []
    else:
        task = _get_task(trace_file, cache_type, cache_size, parameters, n_early_stop, memory_window)
        return [task]