Code example #1
# --- Model (Multi-output GP)
n_attributes = m
model = multi_outputGP(output_dim=n_attributes,
                       exact_feval=[True] * m,
                       fixed_hyps=False)

# --- Initial design
initial_design = GPyOpt.experiment_design.initial_design(
    'random', space, 2 * (d + 1))

# --- Parameter distribution (a single parameter value with probability 1)
parameter_support = np.ones((1, ))
parameter_dist = np.ones((1, ))
parameter_distribution = ParameterDistribution(continuous=False,
                                               support=parameter_support,
                                               prob_dist=parameter_dist)


# --- Utility function
def U_func(parameter, y):
    aux = -np.exp(y)
    return np.sum(aux, axis=0)


def dU_func(parameter, y):
    return -np.exp(y)


U = Utility(func=U_func,
            dfunc=dU_func,
            parameter_dist=parameter_distribution)
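As a quick sanity check, not part of the original snippet, the negative-exponential utility above can be compared against a finite-difference estimate of its gradient; `y_toy` and `eps` below are names local to this sketch.

import numpy as np

# Toy objective values: rows are attributes (U_func sums over axis=0).
y_toy = np.array([[0.1, -0.5, 1.2],
                  [0.3,  0.0, -0.7]])

u = U_func(None, y_toy)      # utility per point: -sum_j exp(y_j)
g = dU_func(None, y_toy)     # elementwise gradient: -exp(y_j)

# Finite-difference check of one entry.
eps = 1e-6
y_pert = y_toy.copy()
y_pert[0, 0] += eps
fd = (U_func(None, y_pert)[0] - u[0]) / eps
assert abs(fd - g[0, 0]) < 1e-4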
Code example #2
def HOLE_function_caller_test(rep):

    penalty = 0
    noise = 1e-6
    alpha = 1.95
    np.random.seed(rep)

    folder = "RESULTS"
    subfolder = "HOLE_ParEGO_utilityDM_Lin_utilityAlg_Tche"
    cwd = os.getcwd()
    path = cwd + "/" + folder + "/" + subfolder

    # func2 = dropwave()
    POL_func = HOLE(sd=np.sqrt(noise))
    ref_point = POL_func.ref_point

    # --- Attributes
    # the two objectives of the HOLE test problem
    f = MultiObjective([POL_func.f1, POL_func.f2])
    # c = MultiObjective([POL_func.c1, POL_func.c2])

    # --- Space
    # define the space of input variables
    space = GPyOpt.Design_space(
        space=[{'name': 'var_1', 'type': 'continuous', 'domain': (-1.0, 1.0)},
               {'name': 'var_2', 'type': 'continuous', 'domain': (-1.0, 1.0)}])

    n_f = 1
    n_c = 0
    input_d = 2
    m = 2


    model_f = multi_outputGP(output_dim=n_f, noise_var=[noise] * n_f,
                             exact_feval=[True] * n_f)
    # model_c = multi_outputGP(output_dim=n_c, noise_var=[1e-7]*n_c, exact_feval=[True]*n_c)

    # --- Acquisition optimizer
    # optimizer for the inner acquisition function
    acq_opt = GPyOpt.optimization.AcquisitionOptimizer(
        optimizer='lbfgs', inner_optimizer='Nelder_Mead', space=space,
        model=model_f, model_c=None)


    # --- Initial design
    #initial design
    initial_design = GPyOpt.experiment_design.initial_design('latin', space, 2*(input_d+1))

    # --- Utility function
    def prior_sample_generator(n_samples=1, seed=None):
        if seed is None:
            samples = np.random.dirichlet(np.ones((m,)), n_samples)
        else:
            random_state = np.random.RandomState(seed)
            samples = random_state.dirichlet(np.ones((m,)), n_samples)
        return samples

    def prior_density(x):
        assert x.shape[1] == m, "wrong dimension"
        output = np.zeros(x.shape[0])
        for i in range(len(output)):
            output[i] = dirichlet.pdf(x=x[i], alpha=np.ones((m,)))
        return output.reshape(-1)
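    # Note: rows returned by prior_sample_generator lie on the probability
    # simplex, so prior_density evaluates a Dirichlet(1, ..., 1) density at
    # each sampled weight vector, which is constant over the simplex.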

    def U_func(parameter, y):
        # Chebyshev-style scalarisation: weight each objective, keep the max.
        w = parameter
        scaled_vectors = np.multiply(w, y)
        utility = np.max(scaled_vectors, axis=1)
        utility = np.atleast_2d(utility)
        return utility.T

    def dU_func(parameter, y):
        # Gradient is not implemented for this scalarisation.
        raise NotImplementedError

    ##### Utility
    n_samples = 1
    # Support used to marginalise the utility parameters inside the
    # acquisition optimisation, i.e. E_theta[EI(x)].
    support = prior_sample_generator(n_samples=n_samples)
    # Initial density over the support, normalised to a probability vector.
    prob_dist = prior_density(support)
    prob_dist /= np.sum(prob_dist)
    parameter_distribution = ParameterDistribution(
        continuous=True,
        support=support,
        prob_dist=prob_dist,
        sample_generator=prior_sample_generator)


    U = Utility(func=U_func,
                dfunc=dU_func,
                parameter_dist=parameter_distribution,
                linear=True)


    # acquisition = HVI(model=model_f, model_c=model_c, alpha=alpha, space=space, optimizer=acq_opt)
    acquisition = ParEGO(model=model_f, model_c=None, alpha=alpha, space=space,
                         NSGA_based=False, optimizer=acq_opt, utility=U,
                         true_func=f)

    last_step_acquisition = Last_Step(model_f=model_f, model_c=None,
                                      true_f=f, true_c=None, n_f=m, n_c=n_c,
                                      B=1, acquisition_optimiser=acq_opt,
                                      acquisition_f=acquisition, seed=rep,
                                      prior_gen=prior_sample_generator,
                                      space=space, path=path)

    evaluator = GPyOpt.core.evaluators.Sequential(acquisition)
    bo = BO(model_f, None, space, f, None, acquisition, evaluator,
            initial_design, ref_point=ref_point)



    # print("Finished Initialization")
    X, Y, C, Opportunity_cost = bo.run_optimization(
        max_iter=100, rep=rep, last_step_evaluator=last_step_acquisition,
        path=path, verbosity=False)
    print("Code Ended")

    print("X",X,"Y",Y, "C", C)
Code example #3
n_a = 2
model = multi_outputGP(output_dim=n_a, noise_var=noise_var)

# --- Acquisition optimizer
acq_opt = GPyOpt.optimization.AcquisitionOptimizer(optimizer='sgd',
                                                   space=space)

# --- Initial design
initial_design = GPyOpt.experiment_design.initial_design('random', space, 30)

# --- Parameter distribution
l = 1
support = [[0.5, 0.5]]
prob_dist = [1 / l] * l
parameter_distribution = ParameterDistribution(support=support,
                                               prob_dist=prob_dist)


# --- Utility function
def U_func(parameter, y):
    return np.dot(parameter, y)


def dU_func(parameter, y):
    return parameter


U = Utility(func=U_func,
            dfunc=dU_func,
            parameter_dist=parameter_distribution,
            linear=True)
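A minimal illustration, not in the original, of how a discrete parameter distribution like the one above marginalises the linear utility; `y_toy` is a made-up objective vector, while `support`, `prob_dist`, and `U_func` are the objects defined just above.

import numpy as np

y_toy = np.array([1.0, 3.0])   # toy objective values

# Expected utility E_theta[U(theta, y)] over the discrete support above.
expected_u = sum(p * U_func(np.array(theta), y_toy)
                 for theta, p in zip(support, prob_dist))
print(expected_u)              # 0.5 * 1.0 + 0.5 * 3.0 = 2.0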
Code example #4
def SRN_function_caller_test(rep):

    penalty = 0
    noise = 1e-4
    alpha = 1.95
    np.random.seed(rep)
    folder = "RESULTS"
    subfolder = "SRN_KG"
    cwd = os.getcwd()
    path = cwd + "/" + folder + "/" + subfolder

    # func2 = dropwave()
    SRN_func = SRN(sd=np.sqrt(noise))
    ref_point = SRN_func.ref_point

    # --- Attributes
    # the two objectives and two constraints of the SRN test problem
    f = MultiObjective([SRN_func.f1, SRN_func.f2])
    c = MultiObjective([SRN_func.c1, SRN_func.c2])

    # --- Space
    # define the space of input variables
    space = GPyOpt.Design_space(
        space=[{
            'name': 'var_1',
            'type': 'continuous',
            'domain': (-20, 20)
        }, {
            'name': 'var_2',
            'type': 'continuous',
            'domain': (-20, 20)
        }]
    )

    n_f = 2
    n_c = 2
    input_d = 2
    m = n_f

    model_f = multi_outputGP(output_dim=n_f,
                             noise_var=[noise] * n_f,
                             exact_feval=[True] * n_f)
    model_c = multi_outputGP(output_dim=n_c,
                             noise_var=[1e-21] * n_c,
                             exact_feval=[True] * n_c)

    # --- Acquisition optimizer
    # optimizer for inner acquisition function
    acq_opt = GPyOpt.optimization.AcquisitionOptimizer(
        optimizer='lbfgs',
        space=space,
        model=model_f,
        model_c=model_c,
        NSGA_based=False,
        analytical_gradient_prediction=True)

    # --- Initial design
    # initial design
    initial_design = GPyOpt.experiment_design.initial_design(
        'latin', space, 40)  # 2*(input_d+1))

    # --- Utility function
    def prior_sample_generator(n_samples=1, seed=None):
        if seed is None:
            samples = np.random.dirichlet(np.ones((m, )), n_samples)
        else:
            random_state = np.random.RandomState(seed)
            samples = random_state.dirichlet(np.ones((m, )), n_samples)
        return samples

    def prior_density(x):
        assert x.shape[1] == m, "wrong dimension"
        output = np.zeros(x.shape[0])
        for i in range(len(output)):
            output[i] = dirichlet.pdf(x=x[i], alpha=np.ones((m, )))
        return output.reshape(-1)

    def U_func(parameter, y):
        return np.dot(parameter, y)

    def dU_func(parameter, y):
        return parameter

    ##### Utility
    n_samples = 5
    # Support used to marginalise the utility parameters inside the
    # acquisition optimisation, i.e. E_theta[EI(x)].
    support = prior_sample_generator(n_samples=n_samples)
    # Initial density over the support, normalised to a probability vector.
    prob_dist = prior_density(support)
    prob_dist /= np.sum(prob_dist)
    parameter_distribution = ParameterDistribution(
        continuous=True, sample_generator=prior_sample_generator)

    U = Utility(func=U_func,
                dfunc=dU_func,
                parameter_dist=parameter_distribution,
                linear=True)

    # acquisition = HVI(model=model_f, model_c=model_c , alpha=alpha, space=space, optimizer = acq_opt)
    acquisition = AcquisitionUKG(model=model_f,
                                 model_c=model_c,
                                 alpha=alpha,
                                 space=space,
                                 optimizer=acq_opt,
                                 utility=U,
                                 true_func=f)
    last_step_acquisition = Last_Step(model_f=model_f,
                                      model_c=model_c,
                                      true_f=f,
                                      true_c=c,
                                      n_f=n_f,
                                      n_c=n_c,
                                      acquisition_optimiser=acq_opt,
                                      seed=rep,
                                      path=path)

    evaluator = GPyOpt.core.evaluators.Sequential(acquisition)
    # GPyOpt.core.evaluators.Sequential(last_step_acquisition)
    bo = BO(model_f,
            model_c,
            space,
            f,
            c,
            acquisition,
            evaluator,
            initial_design,
            ref_point=ref_point)

    max_iter = 25
    # print("Finished Initialization")
    X, Y, C, Opportunity_cost = bo.run_optimization(
        max_iter=max_iter,
        rep=rep,
        last_step_evaluator=last_step_acquisition,
        path=path,
        verbosity=True)
    print("Code Ended")

    print("X", X, "Y", Y, "C", C)