Example #1
def compute_EI_from_posteriors(configurations,
                               param_space,
                               objective_weights,
                               objective_limits,
                               threshold,
                               iteration_number,
                               model_weight,
                               regression_models,
                               classification_model,
                               model_type,
                               good_prior_normalization_limits,
                               posterior_floor=10**-8,
                               posterior_normalization_limits=None,
                               debug=False):
    """
    Compute EI acquisition function for a list of configurations based on the priors provided by the user and the BO model.
    :param configurations: list of configurations to compute EI.
    :param param_space: Space object for the optimization problem
    :param objective_weights: objective weights for multi-objective optimization. Not implemented yet.
    :param objective_limits: objective limits for multi-objective optimization. Not implemented yet.
    :param threshold: threshold that separates configurations into good or bad for the model.
    :param iteration_number: current optimization iteration.
    :param model_weight: weight hyperparameter given to the model during posterior computation.
    :param regression_models: regression models to compute the probability of a configuration being good according to BO's model.
    :param classification_model: classification model to compute the probability of feasibility.
    :param model_type: type of the regression model, either GP or RF for now.
    :param good_prior_normalization_limits: lower and upper limits to normalize the prior. Will be updated if any value exceeds the limits.
    :param posterior_floor: lower limit for posterior computation. Used when normalizing the priors and in the probability of feasibility.
    :param posterior_normalization_limits: lower and upper limits used to normalize the log posterior ratios. Updated in place if any value exceeds the limits.
    :param debug: whether to run in debug mode.
    """
    user_prior_t0 = datetime.datetime.now()
    prior_good = compute_probability_from_prior(configurations, param_space,
                                                objective_weights)

    # if the prior is not normalized, normalize it here
    if good_prior_normalization_limits is not None:
        good_prior_normalization_limits[0] = min(
            good_prior_normalization_limits[0], min(prior_good))
        good_prior_normalization_limits[1] = max(
            good_prior_normalization_limits[1], max(prior_good))

        # The limits are equal when all prior values are the same; in that case, set the prior to 1 everywhere
        if good_prior_normalization_limits[0] == good_prior_normalization_limits[1]:
            prior_good = [1] * len(prior_good)
        else:
            # Min-max scale the prior into [posterior_floor, 1]
            prior_good = [
                posterior_floor + ((1 - posterior_floor) * (x - good_prior_normalization_limits[0]))
                / (good_prior_normalization_limits[1] - good_prior_normalization_limits[0])
                for x in prior_good
            ]
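            # For example (hypothetical numbers): with limits [0.2, 0.8] and
            # posterior_floor = 1e-8, a raw prior value of 0.5 maps to
            # 1e-8 + (1 - 1e-8) * (0.5 - 0.2) / (0.8 - 0.2) ≈ 0.5.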

    prior_good = np.array(prior_good, dtype=np.float64)
    prior_bad = np.array(1 - prior_good, dtype=np.float64)

    prior_bad[prior_bad < posterior_floor] = posterior_floor

    sys.stdout.write_to_logfile(
        ("EI: user prior time %10.4f sec\n" %
         ((datetime.datetime.now() - user_prior_t0).total_seconds())))

    model_t0 = datetime.datetime.now()
    # prediction methods require a matrix instead of a list of dictionaries
    bufferx = dict_list_to_matrix(configurations)
    number_of_predictions = len(bufferx)

    model_means, model_stds = models.compute_model_mean_and_uncertainty(
        bufferx, regression_models, model_type, param_space, var=False)

    # If classification model is trained, there are feasibility constraints
    if classification_model is not None:
        classification_prediction_results = models.model_probabilities(
            bufferx, classification_model, param_space)
        feasible_parameter = param_space.get_feasible_parameter()[0]
        # The predictor returns probabilities for both classes (feasible and infeasible); find the index of the feasible class
        true_value_index = classification_model[feasible_parameter].classes_.tolist().index(True)
        feasibility_indicator = classification_prediction_results[
            feasible_parameter][:, true_value_index]
        feasibility_indicator[feasibility_indicator == 0] = posterior_floor
        feasibility_indicator = np.log(feasibility_indicator)

        # Map the log-probabilities from [log(posterior_floor), 0] to [posterior_floor, 1]
        feasibility_indicator = [posterior_floor + ((1 - posterior_floor) * (x - np.log(posterior_floor))) / (np.log(1) - np.log(posterior_floor))
                                 for x in feasibility_indicator]
        feasibility_indicator = np.array(feasibility_indicator)
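        # For example (hypothetical numbers): with posterior_floor = 1e-8, a
        # feasibility probability of 1.0 maps to 1.0, a probability of 1e-8
        # maps to 1e-8, and a probability of 1e-4 maps to roughly 0.5, since
        # log(1e-4) lies halfway between log(1e-8) and log(1).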

    else:
        # if the classification model is not trained, all points are feasible
        feasibility_indicator = [1] * number_of_predictions

    model_good = compute_probability_from_model(model_means,
                                                model_stds,
                                                param_space,
                                                objective_weights,
                                                threshold,
                                                compute_bad=False)
    model_good = np.array(model_good, dtype=np.float64)

    model_bad = compute_probability_from_model(model_means,
                                               model_stds,
                                               param_space,
                                               objective_weights,
                                               threshold,
                                               compute_bad=True)
    sys.stdout.write_to_logfile(
        ("EI: model time %10.4f sec\n" %
         ((datetime.datetime.now() - model_t0).total_seconds())))
    posterior_t0 = datetime.datetime.now()

    with np.errstate(divide='ignore'):
        log_posterior_good = np.log(prior_good) + (
            iteration_number / model_weight) * np.log(model_good)
        log_posterior_bad = np.log(
            prior_bad) + (iteration_number / model_weight) * np.log(model_bad)

    good_bad_ratios = log_posterior_good - log_posterior_bad

    # If posterior normalization limits are provided, normalize good_bad_ratios to [0, 1]
    if posterior_normalization_limits is not None:
        tmp_gbr = copy.deepcopy(good_bad_ratios)
        tmp_gbr = np.array(tmp_gbr)

        # Do not consider -inf and +inf when computing the limits
        tmp_gbr[tmp_gbr == float("-inf")] = float("inf")
        posterior_normalization_limits[0] = min(
            posterior_normalization_limits[0], min(tmp_gbr))
        tmp_gbr[tmp_gbr == float("inf")] = float("-inf")
        posterior_normalization_limits[1] = max(
            posterior_normalization_limits[1], max(tmp_gbr))

        # The limits are equal when all values are the same; in that case, set the ratio to 1 everywhere
        if posterior_normalization_limits[0] == posterior_normalization_limits[1]:
            good_bad_ratios = [1] * len(good_bad_ratios)
        else:
            new_gbr = []
            for x in good_bad_ratios:
                new_x = posterior_floor + (
                    (1 - posterior_floor) *
                    (x - posterior_normalization_limits[0])) / (
                        posterior_normalization_limits[1] -
                        posterior_normalization_limits[0])
                new_gbr.append(new_x)
            good_bad_ratios = new_gbr
        good_bad_ratios = np.array(good_bad_ratios)

    good_bad_ratios = good_bad_ratios + feasibility_indicator
    good_bad_ratios = -1 * good_bad_ratios  # negate: the local search minimizes, but we want to maximize the ratio
    good_bad_ratios = list(good_bad_ratios)

    sys.stdout.write_to_logfile(
        ("EI: posterior time %10.4f sec\n" %
         ((datetime.datetime.now() - posterior_t0).total_seconds())))
    sys.stdout.write_to_logfile(
        ("EI: total time %10.4f sec\n" %
         ((datetime.datetime.now() - user_prior_t0).total_seconds())))

    # local search expects the optimized function to return the values and a feasibility indicator
    return good_bad_ratios, feasibility_indicator
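
A minimal standalone sketch of the log-space pseudo-posterior computed above; the arrays and values are toy illustrations, not part of BOPrO's API. The model's influence grows with iteration_number / model_weight, and the ratio is negated by the function above because the local search minimizes:

import numpy as np

# Toy probabilities of being "good" for three configurations (hypothetical values).
prior_good = np.array([0.7, 0.2, 0.5])
model_good = np.array([0.6, 0.9, 0.5])
prior_bad = 1 - prior_good
model_bad = 1 - model_good

iteration_number, model_weight = 4, 10.0

with np.errstate(divide='ignore'):
    log_posterior_good = np.log(prior_good) + (iteration_number / model_weight) * np.log(model_good)
    log_posterior_bad = np.log(prior_bad) + (iteration_number / model_weight) * np.log(model_bad)

# A larger ratio marks a more promising configuration; compute_EI_from_posteriors
# negates this quantity before returning it.
good_bad_ratio = log_posterior_good - log_posterior_bad
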
def ucb(bufferx,
        objective_weights,
        regression_models,
        param_space,
        scalarization_method,
        objective_limits,
        iteration_number,
        model_type,
        classification_model=None,
        number_of_cpus=0):
    """
    Multi-objective ucb acquisition function as detailed in https://arxiv.org/abs/1805.12168.
    The mean and variance of the predictions are computed as defined by Hutter et al.: https://arxiv.org/pdf/1211.0906.pdf
    :param bufferx: a list of tuples containing the points to predict and scalarize.
    :param objective_weights: a list containing the weights for each objective.
    :param regression_models: the surrogate models used to evaluate points.
    :param param_space: a space object containing the search space.
    :param scalarization_method: a string indicating which scalarization method to use.
    :param objective_limits: a dictionary with estimated minimum and maximum values for each objective.
    :param iteration_number: an integer for the current iteration number, used to compute beta.
    :param model_type: the type of the regression model, either GP or RF.
    :param classification_model: the surrogate model used to evaluate feasibility constraints.
    :param number_of_cpus: an integer for the number of cpus to be used in parallel.
    :return: a list of scalarized values for each point in bufferx and the updated objective limits.
    """
    beta = np.sqrt(0.125 * np.log(2 * iteration_number + 1))
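    # For example, beta is about 0.37 at iteration 1 and about 0.81 at
    # iteration 100, so exploration grows slowly, on the order of sqrt(log t).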
    augmentation_constant = 0.05
    number_of_predictions = len(bufferx)
    tmp_objective_limits = copy.deepcopy(objective_limits)

    prediction_means, prediction_variances = models.compute_model_mean_and_uncertainty(
        bufferx, regression_models, model_type, param_space, var=True)

    if classification_model is not None:
        classification_prediction_results = models.model_probabilities(
            bufferx, classification_model, param_space)
        feasible_parameter = param_space.get_feasible_parameter()[0]
        true_value_index = classification_model[
            feasible_parameter].classes_.tolist().index(True)
        feasibility_indicator = classification_prediction_results[
            feasible_parameter][:, true_value_index]
    else:
        # if no classification model is used, all points are feasible
        feasibility_indicator = [1] * number_of_predictions

    # Compute scalarization
    if (scalarization_method == "linear"):
        scalarized_predictions = np.zeros(number_of_predictions)
        beta_factor = 0
        for objective in regression_models:
            scalarized_predictions += objective_weights[objective] * prediction_means[objective]
            beta_factor += objective_weights[objective] * prediction_variances[objective]
        scalarized_predictions -= beta * np.sqrt(beta_factor)
        scalarized_predictions = scalarized_predictions * feasibility_indicator
    # The paper does not propose this; we apply its methodology to the original Tchebyshev scalarization to obtain the approach below.
    # Important: since this was not proposed in the paper, the proofs and bounds given for modified_tchebyshev may not hold here.
    elif (scalarization_method == "tchebyshev"):
        scalarized_predictions = np.zeros(number_of_predictions)
        total_values = np.zeros(number_of_predictions)
        for objective in regression_models:
            scalarized_values = objective_weights[objective] * np.absolute(
                prediction_means[objective] -
                beta * np.sqrt(prediction_variances[objective]))
            total_values += scalarized_values
            scalarized_predictions = np.maximum(scalarized_values,
                                                scalarized_predictions)
        scalarized_predictions += augmentation_constant * total_values
        scalarized_predictions = scalarized_predictions * feasibility_indicator
    elif (scalarization_method == "modified_tchebyshev"):
        scalarized_predictions = np.full((number_of_predictions), float("inf"))
        reciprocated_weights = reciprocate_weights(objective_weights)
        for objective in regression_models:
            scalarized_value = reciprocated_weights[objective] * (
                prediction_means[objective] -
                beta * np.sqrt(prediction_variances[objective]))
            scalarized_predictions = np.minimum(scalarized_value,
                                                scalarized_predictions)
        scalarized_predictions = scalarized_predictions * feasibility_indicator
        scalarized_predictions = -scalarized_predictions  # We will minimize later, but we want to maximize instead, so we invert the sign
    else:
        print("Error: unrecognized scalarization method:",
              scalarization_method)
        raise SystemExit

    return scalarized_predictions, tmp_objective_limits
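
For a single objective with weight 1 and no feasibility model, the linear branch above reduces to a lower confidence bound, mean - beta * std, which is then minimized downstream. A minimal sketch under those assumptions (toy values, hypothetical names):

import numpy as np

iteration_number = 7
beta = np.sqrt(0.125 * np.log(2 * iteration_number + 1))  # same beta schedule as ucb()

# Toy model predictions for five candidate points (hypothetical values).
means = np.array([0.3, 0.5, 0.1, 0.8, 0.4])
variances = np.array([0.04, 0.01, 0.09, 0.02, 0.05])

lcb = means - beta * np.sqrt(variances)
best_index = np.argmin(lcb)  # the scalarized value is minimized downstream
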
Example #3
def main(config, black_box_function=None, output_file=""):
    """
    Run design-space exploration using bayesian optimization.
    :param config: dictionary containing all the configuration parameters of this optimization.
    :param output_file: a name for the file used to save the dse results.
    """
    start_time = datetime.datetime.now()
    run_directory = config["run_directory"]
    bopro_mode = config["bopro_mode"]["mode"]

    # Start logging
    log_file = deal_with_relative_and_absolute_path(run_directory, config["log_file"])
    sys.stdout.change_log_file(log_file)
    if (bopro_mode == 'client-server'):
        sys.stdout.switch_log_only_on_file(True)

    # Log the json configuration for this optimization
    sys.stdout.write_to_logfile(str(config) + "\n")

    # Create parameter space object and unpack hyperparameters from json
    param_space = space.Space(config)
    application_name = config["application_name"]
    optimization_metrics = config["optimization_objectives"]
    optimization_iterations = config["optimization_iterations"]
    evaluations_per_optimization_iteration = config["evaluations_per_optimization_iteration"]
    batch_mode = evaluations_per_optimization_iteration > 1
    number_of_cpus = config["number_of_cpus"]
    print_importances = config["print_parameter_importance"]
    epsilon_greedy_threshold = config["epsilon_greedy_threshold"]
    acquisition_function = config["acquisition_function"]
    weight_sampling = config["weight_sampling"]
    scalarization_method = config["scalarization_method"]
    scalarization_key = config["scalarization_key"]
    doe_type = config["design_of_experiment"]["doe_type"]
    number_of_doe_samples = config["design_of_experiment"]["number_of_samples"]
    model_type = config["models"]["model"]
    optimization_method = config["optimization_method"]
    time_budget = config["time_budget"]
    input_params = param_space.get_input_parameters()
    number_of_objectives = len(optimization_metrics)
    objective_limits = {}
    data_array = {}
    fast_addressing_of_data_array = {}
    objective_bounds = None
    exhaustive_search_data_array = None
    normalize_objectives = False
    debug = False
    enable_feasible_predictor = False  # overridden below if "feasible_output" is in the config

    if "feasible_output" in config:
        feasible_output = config["feasible_output"]
        feasible_output_name = feasible_output["name"]
        enable_feasible_predictor = feasible_output["enable_feasible_predictor"]
        enable_feasible_predictor_grid_search_on_recall_and_precision = feasible_output["enable_feasible_predictor_grid_search_on_recall_and_precision"]
        feasible_predictor_grid_search_validation_file = feasible_output["feasible_predictor_grid_search_validation_file"]
        feasible_parameter = param_space.get_feasible_parameter()
        number_of_trees = config["models"]["number_of_trees"]

    if (weight_sampling == "bounding_box"):
        objective_bounds = {}
        user_bounds = config["bounding_box_limits"]
        if (len(user_bounds) == 2):
            if (user_bounds[0] > user_bounds[1]):
                user_bounds[0], user_bounds[1] = user_bounds[1], user_bounds[0]
            for objective in optimization_metrics:
                objective_bounds[objective] = user_bounds
                objective_limits[objective] = user_bounds
        elif (len(user_bounds) == number_of_objectives*2):
            idx = 0
            for objective in optimization_metrics:
                objective_bounds[objective] = user_bounds[idx:idx+2]
                if (objective_bounds[objective][0] > objective_bounds[objective][1]):
                    objective_bounds[objective][0], objective_bounds[objective][1] = objective_bounds[objective][1], objective_bounds[objective][0]
                objective_limits[objective] = objective_bounds[objective]
                idx += 2
        else:
            print("Wrong number of bounding boxes, expected 2 or", 2*number_of_objectives, "got", len(user_bounds))
            raise SystemExit
    else:
        for objective in optimization_metrics:
            objective_limits[objective] = [float("inf"), float("-inf")]

    if output_file == "":
        output_data_file = config["output_data_file"]
        if output_data_file == "output_samples.csv":
            output_data_file = application_name + "_" + output_data_file
    else:
        output_data_file = output_file

    exhaustive_search_fast_addressing_of_data_array = None
    if bopro_mode == 'exhaustive':
        exhaustive_file = config["bopro_mode"]["exhaustive_search_file"]
        exhaustive_search_data_array, exhaustive_search_fast_addressing_of_data_array = param_space.load_data_file(exhaustive_file, debug=False, number_of_cpus=number_of_cpus)

    # Check if some parameters are correctly defined
    if bopro_mode == "default":
        if black_box_function is None:
            print("Error: the black box function must be provided")
            raise SystemExit
        if not callable(black_box_function):
            print("Error: the black box function parameter is not callable")
            raise SystemExit

    if (model_type == "gaussian_process") and (acquisition_function == "TS"):
        print("Error: The TS acquisition function with Gaussian Process models is still under implementation")
        print("Using EI acquisition function instead")
        config["acquisition_function"] = "EI"

    if number_of_cpus > 1:
        print("Warning: BOPrO supports only sequential execution for now. Running on a single cpu.")
        number_of_cpus = 1

    # If priors are present, use prior-guided optimization
    user_priors = False
    for input_param in config["input_parameters"]:
        if config["input_parameters"][input_param]["prior"] != "uniform":
            if number_of_objectives == 1:
                user_priors = True
            else:
                print("Warning: prior optimization does not work with multiple objectives yet, priors will be uniform")
                config["input_parameters"][input_param]["prior"] = "uniform"

    if user_priors:
        bo_method = prior_guided_optimization
    else:
        bo_method = random_scalarizations
        normalize_objectives = True

    ### Resume previous optimization, if any
    beginning_of_time = param_space.current_milli_time()
    absolute_configuration_index = 0
    doe_t0 = datetime.datetime.now()
    if config["resume_optimization"] == True:
        resume_data_file = config["resume_optimization_data"]

        if not resume_data_file.endswith('.csv'):
            print("Error: resume data file must be a CSV")
            raise SystemExit
        if resume_data_file == "output_samples.csv":
            resume_data_file = application_name + "_" + resume_data_file

        data_array, fast_addressing_of_data_array = param_space.load_data_file(resume_data_file, debug=False, number_of_cpus=number_of_cpus)
        absolute_configuration_index = len(data_array[list(data_array.keys())[0]]) # get the number of points evaluated in the previous run
        beginning_of_time = beginning_of_time - data_array[param_space.get_timestamp_parameter()[0]][-1] # Set the timestamp back to match the previous run
        print("Resumed optimization, number of samples = %d ......." % absolute_configuration_index)

    ### DoE phase
    if absolute_configuration_index < number_of_doe_samples:
        configurations = []
        default_configuration = param_space.get_default_or_random_configuration()
        str_data = param_space.get_unique_hash_string_from_values(default_configuration)
        if str_data not in fast_addressing_of_data_array:
            fast_addressing_of_data_array[str_data] = absolute_configuration_index
            configurations.append(default_configuration)
            absolute_configuration_index += 1

        doe_configurations = []
        if absolute_configuration_index < number_of_doe_samples:
            doe_configurations = param_space.get_doe_sample_configurations(
                                                                        fast_addressing_of_data_array,
                                                                        number_of_doe_samples-absolute_configuration_index,
                                                                        doe_type)
        configurations += doe_configurations
        print("Design of experiment phase, number of new doe samples = %d ......." % len(configurations))

        doe_data_array = param_space.run_configurations(
                                                        bopro_mode,
                                                        configurations,
                                                        beginning_of_time,
                                                        black_box_function,
                                                        exhaustive_search_data_array,
                                                        exhaustive_search_fast_addressing_of_data_array,
                                                        run_directory,
                                                        batch_mode=batch_mode)
        data_array = concatenate_data_dictionaries(
                                                data_array,
                                                doe_data_array,
                                                param_space.input_output_and_timestamp_parameter_names)
        absolute_configuration_index = number_of_doe_samples
        iteration_number = 1
    else:
        iteration_number = absolute_configuration_index - number_of_doe_samples + 1

    # If we have feasibility constraints, we must ensure we have at least one feasible and one infeasible sample before starting optimization
    # If this is not true, continue design of experiment until the condition is met
    if enable_feasible_predictor:
        while are_all_elements_equal(data_array[feasible_parameter[0]]) and optimization_iterations > 0:
            print("Warning: all points are either valid or invalid, random sampling more configurations.")
            print("Number of doe samples so far:", absolute_configuration_index)
            configurations = param_space.get_doe_sample_configurations(fast_addressing_of_data_array, 1, "random sampling")
            new_data_array = param_space.run_configurations(
                                                            bopro_mode,
                                                            configurations,
                                                            beginning_of_time,
                                                            black_box_function,
                                                            exhaustive_search_data_array,
                                                            exhaustive_search_fast_addressing_of_data_array,
                                                            run_directory,
                                                            batch_mode=batch_mode)
            data_array = concatenate_data_dictionaries(
                                                        new_data_array,
                                                        data_array,
                                                        param_space.input_output_and_timestamp_parameter_names)
            absolute_configuration_index += 1
            optimization_iterations -= 1

    # Create output file with explored configurations from resumed run and DoE
    with open(deal_with_relative_and_absolute_path(run_directory, output_data_file), 'w') as f:
        w = csv.writer(f)
        w.writerow(param_space.get_input_output_and_timestamp_parameters())
        tmp_list = [param_space.convert_types_to_string(j, data_array) for j in param_space.get_input_output_and_timestamp_parameters()]
        tmp_list = list(zip(*tmp_list))
        for i in range(len(data_array[optimization_metrics[0]])):
            w.writerow(tmp_list[i])

    for objective in optimization_metrics:
        lower_bound = min(objective_limits[objective][0], min(data_array[objective]))
        upper_bound = max(objective_limits[objective][1], max(data_array[objective]))
        objective_limits[objective] = [lower_bound, upper_bound]
    print("\nEnd of doe/resume phase, the number of evaluated configurations is: %d\n" %absolute_configuration_index)
    sys.stdout.write_to_logfile(("End of DoE - Time %10.4f sec\n" % ((datetime.datetime.now() - doe_t0).total_seconds())))
    if doe_type == "grid_search" and optimization_iterations > 0:
        print("Warning: DoE is grid search, setting number of optimization iterations to 0")
        optimization_iterations = 0

    ### Main optimization loop
    bo_t0 = datetime.datetime.now()
    run_time = (datetime.datetime.now() - start_time).total_seconds() / 60
    # run_time / time_budget < 1 if budget > elapsed time or budget == -1
    if time_budget > 0:
        print('Starting optimization phase, limited to run for', time_budget, 'minutes')
    elif time_budget == 0:
        print('Time budget cannot be zero. To run without a time limit, set time_budget = -1')
        sys.exit()

    configurations = []
    evaluation_budget = optimization_iterations * evaluations_per_optimization_iteration
    iteration_number = 0
    evaluation_count = 0
    while evaluation_count < evaluation_budget and run_time / time_budget < 1:
        if evaluation_count % evaluations_per_optimization_iteration == 0:
            iteration_number += 1
            print("Starting optimization iteration", iteration_number)
            iteration_t0 = datetime.datetime.now()

        model_t0 = datetime.datetime.now()
        regression_models,_,_ = models.generate_mono_output_regression_models(
                                                                            data_array,
                                                                            param_space,
                                                                            input_params,
                                                                            optimization_metrics,
                                                                            1.00,
                                                                            config,
                                                                            model_type=model_type,
                                                                            number_of_cpus=number_of_cpus,
                                                                            print_importances=print_importances,
                                                                            normalize_objectives=normalize_objectives,
                                                                            objective_limits=objective_limits)

        classification_model = None
        if enable_feasible_predictor:
            classification_model,_,_ = models.generate_classification_model(application_name,
                                                                            param_space,
                                                                            data_array,
                                                                            input_params,
                                                                            feasible_parameter,
                                                                            1.00,
                                                                            config,
                                                                            debug,
                                                                            number_of_cpus=number_of_cpus,
                                                                            data_array_exhaustive=exhaustive_search_data_array,
                                                                            enable_feasible_predictor_grid_search_on_recall_and_precision=enable_feasible_predictor_grid_search_on_recall_and_precision,
                                                                            feasible_predictor_grid_search_validation_file=feasible_predictor_grid_search_validation_file,
                                                                            print_importances=print_importances)
        model_t1 = datetime.datetime.now()
        sys.stdout.write_to_logfile(("Model fitting time %10.4f sec\n" % ((model_t1 - model_t0).total_seconds())))
        if (weight_sampling == "bounding_box"):
            objective_weights = sample_weight_bbox(optimization_metrics, objective_bounds, objective_limits, 1)[0]
        elif (weight_sampling == "flat"):
            objective_weights = sample_weight_flat(optimization_metrics, 1)[0]
        else:
            print("Error: unrecognized option:", weight_sampling)
            raise SystemExit

        data_array_scalarization, _ = compute_data_array_scalarization(
                                                                    data_array,
                                                                    objective_weights,
                                                                    objective_limits,
                                                                    scalarization_method)
        data_array[scalarization_key] = data_array_scalarization.tolist()

        epsilon = random.uniform(0,1)
        local_search_t0 = datetime.datetime.now()
        if epsilon > epsilon_greedy_threshold:
            best_configuration = bo_method(
                                        config,
                                        data_array,
                                        param_space,
                                        fast_addressing_of_data_array,
                                        regression_models,
                                        iteration_number,
                                        objective_weights,
                                        objective_limits,
                                        classification_model)

        else:
            sys.stdout.write_to_logfile(str(epsilon) + " < " + str(epsilon_greedy_threshold) + " random sampling a configuration to run\n")
            tmp_fast_addressing_of_data_array = copy.deepcopy(fast_addressing_of_data_array)
            best_configuration = param_space.random_sample_configurations_without_repetitions(tmp_fast_addressing_of_data_array, 1, use_priors=False)[0]
        local_search_t1 = datetime.datetime.now()
        sys.stdout.write_to_logfile(("Local search time %10.4f sec\n" % ((local_search_t1 - local_search_t0).total_seconds())))

        configurations.append(best_configuration)

        # When we have selected "evaluations_per_optimization_iteration" configurations, evaluate the batch
        if evaluation_count % evaluations_per_optimization_iteration == (evaluations_per_optimization_iteration - 1):
            black_box_function_t0 = datetime.datetime.now()
            new_data_array = param_space.run_configurations(
                                                            bopro_mode,
                                                            configurations,
                                                            beginning_of_time,
                                                            black_box_function,
                                                            exhaustive_search_data_array,
                                                            exhaustive_search_fast_addressing_of_data_array,
                                                            run_directory,
                                                            batch_mode=batch_mode)
            black_box_function_t1 = datetime.datetime.now()
            sys.stdout.write_to_logfile(("Black box function time %10.4f sec\n" % ((black_box_function_t1 - black_box_function_t0).total_seconds())))

            # If running batch BO, we will have some liars in fast_addressing_of_data, update them with the true value
            for configuration_idx in range(len(new_data_array[list(new_data_array.keys())[0]])):
                configuration = get_single_configuration(new_data_array, configuration_idx)
                str_data = param_space.get_unique_hash_string_from_values(configuration)
                if str_data in fast_addressing_of_data_array:
                    absolute_index = fast_addressing_of_data_array[str_data]
                    for header in configuration:
                        data_array[header][absolute_index] = configuration[header]
                else:
                    fast_addressing_of_data_array[str_data] = absolute_configuration_index
                    absolute_configuration_index += 1
                    for header in configuration:
                        data_array[header].append(configuration[header])

            # and save results
            with open(deal_with_relative_and_absolute_path(run_directory, output_data_file), 'a') as f:
                w = csv.writer(f)
                tmp_list = [param_space.convert_types_to_string(j, new_data_array) for j in list(param_space.get_input_output_and_timestamp_parameters())]
                tmp_list = list(zip(*tmp_list))
                for i in range(len(new_data_array[optimization_metrics[0]])):
                    w.writerow(tmp_list[i])
            configurations = []
        else:
            # If we have not selected all points in the batch yet, add the model prediction as a 'liar'
            for header in best_configuration:
                data_array[header].append(best_configuration[header])

            bufferx = [tuple(best_configuration.values())]
            prediction_means, _ = models.compute_model_mean_and_uncertainty(bufferx, regression_models, model_type, param_space)
            for objective in prediction_means:
                data_array[objective].append(prediction_means[objective][0])

            if classification_model is not None:
                classification_prediction_results = models.model_probabilities(bufferx,classification_model,param_space)
                true_value_index = classification_model[feasible_parameter[0]].classes_.tolist().index(True)
                feasibility_indicator = classification_prediction_results[feasible_parameter[0]][:,true_value_index]
                data_array[feasible_output_name].append(feasibility_indicator[0] >= 0.5)

            data_array[param_space.get_timestamp_parameter()[0]].append(absolute_configuration_index)
            str_data = param_space.get_unique_hash_string_from_values(best_configuration)
            fast_addressing_of_data_array[str_data] = absolute_configuration_index
            absolute_configuration_index += 1


        for objective in optimization_metrics:
            lower_bound = min(objective_limits[objective][0], min(data_array[objective]))
            upper_bound = max(objective_limits[objective][1], max(data_array[objective]))
            objective_limits[objective] = [lower_bound, upper_bound]

        evaluation_count += 1
        run_time = (datetime.datetime.now() - start_time).total_seconds() / 60

        sys.stdout.write_to_logfile(("Total iteration time %10.4f sec\n" % ((datetime.datetime.now() - iteration_t0).total_seconds())))
    sys.stdout.write_to_logfile(("End of BO phase - Time %10.4f sec\n" % ((datetime.datetime.now() - bo_t0).total_seconds())))

    print("End of Bayesian Optimization")
    sys.stdout.write_to_logfile(("Total script time %10.2f sec\n" % ((datetime.datetime.now() - start_time).total_seconds())))
def EI(bufferx,
       data_array,
       objective_weights,
       regression_models,
       param_space,
       scalarization_method,
       objective_limits,
       iteration_number,
       model_type,
       classification_model=None,
       number_of_cpus=0):
    """
    Compute a multi-objective EI acquisition function on bufferx.
    The mean and variance of the predictions are computed as defined by Hutter et al.: https://arxiv.org/pdf/1211.0906.pdf
    :param bufferx: a list of tuples containing the points to predict and scalarize.
    :param data_array: a dictionary containing the previously run points and their function values.
    :param objective_weights: a list containing the weights for each objective.
    :param regression_models: the surrogate models used to evaluate points.
    :param param_space: a space object containing the search space.
    :param scalarization_method: a string indicating which scalarization method to use.
    :param objective_limits: a dictionary with estimated minimum and maximum values for each objective.
    :param iteration_number: an integer for the current iteration number.
    :param model_type: the type of the regression model, either GP or RF.
    :param classification_model: the surrogate model used to evaluate feasibility constraints.
    :param number_of_cpus: an integer for the number of cpus to be used in parallel.
    :return: a list of scalarized values for each point in bufferx and the updated objective limits.
    """
    augmentation_constant = 0.05
    number_of_predictions = len(bufferx)
    tmp_objective_limits = copy.deepcopy(objective_limits)

    prediction_means, prediction_variances = models.compute_model_mean_and_uncertainty(
        bufferx, regression_models, model_type, param_space, var=True)

    if classification_model is not None:
        classification_prediction_results = models.model_probabilities(
            bufferx, classification_model, param_space)
        feasible_parameter = param_space.get_feasible_parameter()[0]
        true_value_index = classification_model[
            feasible_parameter].classes_.tolist().index(True)
        feasibility_indicator = classification_prediction_results[
            feasible_parameter][:, true_value_index]
    else:
        # if no classification model is used, all points are feasible
        feasibility_indicator = [1] * number_of_predictions

    data_array_scalarization, tmp_objective_limits = compute_data_array_scalarization(
        data_array, objective_weights, tmp_objective_limits,
        scalarization_method)

    if (scalarization_method == "linear"):
        scalarized_predictions = np.zeros(number_of_predictions)
        scalarized_value = 0
        for objective in regression_models:
            f_min = 1 - (min(data_array[objective]) - tmp_objective_limits[objective][0])\
                            /(tmp_objective_limits[objective][1] - tmp_objective_limits[objective][0])
            x_std = np.sqrt(prediction_variances[objective])
            x_mean = 1 - prediction_means[objective]
            v = (x_mean - f_min) / x_std
            objective_ei = (x_mean - f_min) * stats.norm.cdf(v) + x_std * stats.norm.pdf(v)
            scalarized_predictions += objective_ei * objective_weights[objective]
        scalarized_predictions = -1 * scalarized_predictions * feasibility_indicator  # negate: the caller minimizes, but EI is maximized
    # The paper does not propose this; we apply its methodology to the original Tchebyshev scalarization to obtain the approach below.
    # Important: since this was not proposed in the paper, the proofs and bounds given for modified_tchebyshev may not hold here.
    elif (scalarization_method == "tchebyshev"):
        scalarized_predictions = np.zeros(number_of_predictions)
        total_value = np.zeros(number_of_predictions)
        for objective in regression_models:
            f_min = 1 - (min(data_array[objective]) - tmp_objective_limits[objective][0])\
                            /(tmp_objective_limits[objective][1] - tmp_objective_limits[objective][0])
            x_std = np.sqrt(prediction_variances[objective])
            x_mean = 1 - prediction_means[objective]
            v = (x_mean - f_min) / x_std
            scalarized_value = objective_weights[objective] * (
                (1 - prediction_means[objective] - f_min) * stats.norm.cdf(v) +
                x_std * stats.norm.pdf(v))
            scalarized_predictions = np.maximum(scalarized_value,
                                                scalarized_predictions)
            total_value += scalarized_value
        scalarized_predictions = -1 * (
            scalarized_predictions +
            total_value * augmentation_constant) * feasibility_indicator
    elif (scalarization_method == "modified_tchebyshev"):
        scalarized_predictions = np.full((number_of_predictions), float("inf"))
        reciprocated_weights = reciprocate_weights(objective_weights)
        for objective in regression_models:
            f_min = 1 - (min(data_array[objective]) - tmp_objective_limits[objective][0])\
                            /(tmp_objective_limits[objective][1] - tmp_objective_limits[objective][0])
            x_std = np.sqrt(prediction_variances[objective])
            x_mean = 1 - prediction_means[objective]
            v = (x_mean - f_min) / x_std
            scalarized_value = reciprocated_weights[objective] * (
                (x_mean - f_min) * stats.norm.cdf(v) +
                x_std * stats.norm.pdf(v))
            scalarized_predictions = np.minimum(scalarized_value,
                                                scalarized_predictions)
        scalarized_predictions = scalarized_predictions * feasibility_indicator
    else:
        print("Error: unrecognized scalarization method:",
              scalarization_method)
        raise SystemExit

    return scalarized_predictions, tmp_objective_limits
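
Each per-objective term above is the standard closed-form expected improvement, EI = (mu - f_min) * Phi(v) + sigma * phi(v) with v = (mu - f_min) / sigma, applied to 1 minus the normalized predictions so that larger is better. A minimal single-objective sketch with toy values:

import numpy as np
from scipy import stats

# Toy predictions for four points (hypothetical values), already mapped to the
# "larger is better" scale used above.
x_mean = np.array([0.6, 0.4, 0.9, 0.5])
x_std = np.array([0.2, 0.1, 0.3, 0.05])
f_min = 0.55  # best transformed value observed so far

v = (x_mean - f_min) / x_std
expected_improvement = (x_mean - f_min) * stats.norm.cdf(v) + x_std * stats.norm.pdf(v)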