Example #1
    def run(self, save_path, init_design_size=1, n_iter=24, plot=False, likelihood_uncert=1., design=None):
        self.save_path = save_path
        self.iter = 0
        
        self.X = list(self.init_X)
        self.Y = list(self.init_Y)
        
        domain = np.sum(self.domain)

#        if self.first_tries is not None:
#            Y = [self.objective(x).flatten() for x in self.first_tries]
#            self.Y = self.Y + Y
#            self.X = self.X + list(self.first_tries)

        if init_design_size > 0:
            if design == 'latin':
                design = LatinHyperCube(init_design_size, domain)
            elif design == 'fac':
                design = FactorialDesign(2, domain)
            elif design is None:
                raise ValueError("design must be 'latin', 'fac', or a Design instance")
            X = list(design.generate())
            Y = [self.objective(x).flatten() for x in X]
            self.Y = self.Y + Y
            self.X = self.X + X
        
        
        self.X, self.Y = np.stack(self.X, axis=0), np.stack(self.Y, axis=0)

        # Drop duplicate (x, y) rows while keeping X and Y aligned
        u = np.concatenate([self.X, self.Y], axis=1)
        _, idx = np.unique(u, return_index=True, axis=0)
        self.X = list(self.X[idx, :])
        self.Y = list(self.Y[idx, :])
        
        self.burnin = len(self.X)
        
        logging.warning("Beginnig search")
#         logging.warning("Initial solutions\n{}".format(list(zip(self.X, self.Y))))
        with gp.defer_build():
            kern = gp.kernels.Matern52(domain.size, ARD=True)  # + gp.kernels.White(domain.size)
            m = gp.models.GPR(np.stack(self.X,axis=0), np.stack(self.Y,axis=0), kern)
            lik_var = log_normal_solve(likelihood_uncert**2, 0.5*likelihood_uncert**2)
            m.likelihood.variance = likelihood_uncert**2  # np.exp(lik_var[0])
            m.likelihood.variance.prior = gp.priors.LogNormal(lik_var[0], lik_var[1]**2)
            m.likelihood.variance.trainable = False
            m.compile()
        self.ei = MomentGeneratingFunctionImprovement(m, self.t)
        opt = optim.StagedOptimizer([optim.MCOptimizer(domain, 5000), optim.SciPyOptimizer(domain)])
        optimizer = BayesianOptimizer(domain, self.ei, optimizer=opt, hyper_draws=1)

        #with optimizer.silent():
        result = optimizer.optimize(self.objective, n_iter=n_iter)
        logging.warning(result)
        if plot:
            self.plot_results()
        result = self.get_kwargs(result.x)
        self.print_top_k(5)
        return result
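The helper log_normal_solve used above is not part of gpflowopt and is not shown in this listing. A minimal sketch of a plausible implementation, assuming it moment-matches a log-normal to a target mean and standard deviation (the returned (mu, sigma) pair is what feeds gp.priors.LogNormal(lik_var[0], lik_var[1]**2) above):

import numpy as np

def log_normal_solve(mean, std):
    # Plausible reconstruction (assumption, not the original helper):
    # solve for (mu, sigma) of a log-normal whose moments match the
    # requested mean and standard deviation.
    # Mean = exp(mu + sigma^2/2), Var = (exp(sigma^2) - 1) * Mean^2
    sigma2 = np.log(1.0 + (std / mean) ** 2)
    return np.log(mean) - 0.5 * sigma2, np.sqrt(sigma2)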
Example #2
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(x):
        y = problem.evaluate(x)
        return np.array([[y['objs'][0]]])

    def constraint_function(x):
        y = problem.evaluate(x)
        return np.array([[y['constraints'][0]]])

    # random seed
    np.random.seed(seed)

    # Initial evaluations
    X_init = LatinHyperCube(initial_runs, domain).generate()
    # X_init = RandomDesign(initial_runs, domain).generate()
    Y_init = np.vstack(
        [objective_function(X_init[i, :]) for i in range(X_init.shape[0])])
    C_init = np.vstack(
        [constraint_function(X_init[i, :]) for i in range(X_init.shape[0])])

    # Use standard Gaussian process Regression
    model = gpflow.gpr.GPR(X_init, Y_init,
                           gpflow.kernels.Matern52(domain.size, ARD=True))
    model.kern.lengthscales.transform = gpflow.transforms.Log1pe(1e-3)

    constraint_model = gpflow.gpr.GPR(
        X_init, C_init, gpflow.kernels.Matern52(domain.size, ARD=True))
    constraint_model.kern.lengthscales.transform = gpflow.transforms.Log1pe(
        1e-3)
    constraint_model.likelihood.variance = 0.01
    constraint_model.likelihood.variance.prior = gpflow.priors.Gamma(
        1. / 4., 1.0)

    # Now create the Bayesian Optimizer
    ei = ExpectedImprovement(model)
    pof = ProbabilityOfFeasibility(constraint_model)
    joint = ei * pof
    acquisition_opt = StagedOptimizer(
        [MCOptimizer(domain, 400),
         SciPyOptimizer(domain)])
    optimizer = BayesianOptimizer_modified(domain,
                                           joint,
                                           optimizer=acquisition_opt,
                                           verbose=True)

    # Run the Bayesian optimization for (max_runs - initial_runs) iterations
    result = optimizer.optimize([objective_function, constraint_function],
                                n_iter=max_runs - initial_runs)

    # Save result
    X, Y = optimizer.acquisition.data

    perf_list = [y[0] for y in Y]
    time_list = [0.] * initial_runs + optimizer.time_list

    return X, perf_list, time_list
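In this constrained setup the joint acquisition ei * pof rescales expected improvement by the probability that the constraint GP predicts a feasible value (c(x) <= 0). A minimal sketch of that factor under a Gaussian predictive distribution (the standard formulation, not gpflowopt's internal code):

import numpy as np
from scipy.stats import norm

def probability_of_feasibility(mean, var, threshold=0.0):
    # P[c(x) <= threshold] when c(x) ~ N(mean, var)
    return norm.cdf(threshold, loc=mean, scale=np.sqrt(var))

# The joint acquisition is then simply a product:
# joint(x) = expected_improvement(x) * probability_of_feasibility(mu_c(x), var_c(x))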
Example #3
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(x):
        y = problem.evaluate(x)
        return np.array([[y]])

    # random seed
    np.random.seed(seed)

    # Initial evaluations
    if init_strategy == 'latin':
        X_init = LatinHyperCube(initial_runs, domain).generate()
    elif init_strategy == 'random':
        X_init = RandomDesign(initial_runs, domain).generate()
    else:
        raise ValueError('Unknown init_strategy: %s' % (init_strategy,))
    Y_init = np.vstack([objective_function(X_init[i, :]) for i in range(X_init.shape[0])])

    # Use standard Gaussian process Regression
    model = gpflow.gpr.GPR(X_init, Y_init, gpflow.kernels.Matern52(domain.size, ARD=True))
    model.kern.lengthscales.transform = gpflow.transforms.Log1pe(1e-3)

    # Now create the Bayesian Optimizer
    alpha = ExpectedImprovement(model)
    acquisition_opt = StagedOptimizer([MCOptimizer(domain, optimizer_mc_times),
                                       SciPyOptimizer(domain)])
    optimizer = BayesianOptimizer_modified(domain, alpha, optimizer=acquisition_opt, verbose=True)

    # Run the Bayesian optimization for (max_runs - initial_runs) iterations
    result = optimizer.optimize(objective_function, n_iter=max_runs-initial_runs)

    # Save result
    X, Y = optimizer.acquisition.data
    perf_list = Y.reshape(-1).tolist()
    time_list = [0.] * initial_runs + optimizer.time_list

    return X, perf_list, time_list
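For reference, the idea behind LatinHyperCube(initial_runs, domain).generate(): stratify each dimension into N equal bins, place one point per bin, and permute the bins independently per dimension. A minimal random-LHS sketch on the unit cube (gpflowopt's own implementation additionally optimizes the design for space-filling and scales it to the domain):

import numpy as np

def latin_hypercube_unit(n, dim, rng=np.random):
    # One point per stratum in every dimension; strata shuffled per dimension
    samples = (np.arange(n)[:, None] + rng.rand(n, dim)) / n
    for d in range(dim):
        rng.shuffle(samples[:, d])
    return samples  # shape (n, dim), entries in [0, 1)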
Example #4
import numpy as np
from gpflowopt.domain import ContinuousParameter

def fx(X):
    # Objective: row-wise squared Euclidean norm, shaped (n, 1)
    X = np.atleast_2d(X)
    return np.sum(np.square(X), axis=1)[:, None]


domain = ContinuousParameter('x1', -2, 2) + ContinuousParameter('x2', -1, 2)
print(domain)

######################################################################################

import gpflow
from gpflowopt.bo import BayesianOptimizer
from gpflowopt.design import LatinHyperCube
from gpflowopt.acquisition import ExpectedImprovement
from gpflowopt.optim import SciPyOptimizer

# Use standard Gaussian process Regression
lhd = LatinHyperCube(21, domain)
X = lhd.generate()
Y = fx(X)
model = gpflow.gpr.GPR(X, Y, gpflow.kernels.Matern52(2, ARD=True))
model.kern.lengthscales.transform = gpflow.transforms.Log1pe(1e-3)

# Now create the Bayesian Optimizer
alpha = ExpectedImprovement(model)
optimizer = BayesianOptimizer(domain, alpha)

# Run the Bayesian optimization
with optimizer.silent():
    r = optimizer.optimize(fx, n_iter=15)

print(r)
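The ExpectedImprovement acquisition driving this loop has a closed form under the GP's Gaussian predictive distribution. A minimal sketch for minimization, with fmin the best objective value observed so far (the textbook formula, not gpflowopt internals):

import numpy as np
from scipy.stats import norm

def expected_improvement(mean, var, fmin):
    # EI(x) = (fmin - mu) * Phi(z) + sigma * phi(z), with z = (fmin - mu) / sigma
    sigma = np.sqrt(var)
    z = (fmin - mean) / sigma
    return (fmin - mean) * norm.cdf(z) + sigma * norm.pdf(z)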
Example #5
import numpy as np
import sys
import os
from gpflowopt.domain import ContinuousParameter
import FUNC
domain = ContinuousParameter('x1', 0, 30) + \
         ContinuousParameter('x2', -30, 0)

import gpflow
from gpflowopt.bo import BayesianOptimizer
from gpflowopt.design import LatinHyperCube
from gpflowopt.acquisition import ExpectedImprovement
from gpflowopt.optim import SciPyOptimizer, StagedOptimizer, MCOptimizer

# Use standard Gaussian process Regression
lhd = LatinHyperCube(int(sys.argv[1]), domain)
X = lhd.generate()

np.savetxt('X_start.dat', X)
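The script stops after writing the initial design to disk. A plausible follow-up (hypothetical; FUNC.objective is an assumed name, not shown in the source) would reload the design and evaluate it:

# Hypothetical continuation: reload the design and evaluate it
X = np.loadtxt('X_start.dat')
# Y = np.vstack([FUNC.objective(x) for x in X])  # assumes FUNC exposes the objective
# np.savetxt('Y_start.dat', Y)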
Example #6
def calcInitialMLPriority(queue,
                          stat,
                          features=['mass', 'Ecoh', 'EN', 'IP'],
                          N_init=50,
                          stable_limit=0.05):

    allmats = getComposition(queue, 0)

    files_allmats = getFile(queue, 0)

    elref = {'mass', 'Ecoh', 'EN', 'IP'}

    elfeatures = set(features).intersection(elref)

    allmats = addElemental(allmats, elfeatures)

    allfeats = [
        x for x in allmats.columns
        if ''.join([i for i in x if not i.isdigit()]) in features
    ]

    # Apply PCA to all materials and to the train set
    pca = PCA(n_components=8)
    train_means1 = np.mean(allmats[allfeats].values, axis=0)
    train_stds1 = np.std(allmats[allfeats].values, axis=0)

    throwinds = np.where(train_stds1 == 0)[0]

    transf = np.delete(allmats[allfeats].values, throwinds, axis=1)
    train_means1 = np.delete(train_means1, throwinds)
    train_stds1 = np.delete(train_stds1, throwinds)

    X_all = pca.fit_transform((transf - train_means1) / train_stds1)

    train_means = np.mean(X_all, axis=0)
    train_stds = np.std(X_all, axis=0)

    X = (X_all - train_means) / train_stds

    domain = gpflowopt.domain.ContinuousParameter('x1', min(X[:, 0]),
                                                  max(X[:, 0]))

    for i in np.arange(1, X.shape[1]):
        domain += gpflowopt.domain.ContinuousParameter('x' + str(i + 1),
                                                       min(X[:, i]),
                                                       max(X[:, i]))

    design = LatinHyperCube(N_init, domain)

    # X0 is the initial sampling plan in continuous space
    X0 = design.generate()

    indices = []

    for x0 in X0:
        for j in range(X.shape[0]):
            index_new = np.linalg.norm(X - x0, axis=1).argsort()[j]
            if index_new not in indices:
                indices.append(index_new)
                break

    priority = X.shape[0] * np.ones((len(indices)), dtype=int)

    priority = pd.DataFrame({
        'id': allmats.id.iloc[indices],
        'priority': priority
    })

    print("priorities of initial sampling plan are set")
    return priority
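The nearest-neighbour loop above re-sorts all pairwise distances for every design point. An equivalent sketch of the same greedy mapping (each design point to its nearest not-yet-used candidate) using a k-d tree from standard SciPy:

import numpy as np
from scipy.spatial import cKDTree

def map_design_to_candidates(X0, X):
    # Greedily assign each design point to its nearest unused candidate row of X
    tree = cKDTree(X)
    chosen = []
    for x0 in X0:
        _, order = tree.query(x0, k=X.shape[0])  # all candidates, nearest first
        for idx in np.atleast_1d(order):
            if idx not in chosen:
                chosen.append(int(idx))
                break
    return chosen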
Example #7
pop_size = 50
F = 0.75
max_iter = 300
max_iter_bayesian = 200
CR = 0.5

num_of_runs_to_eval = 10
run_res_DE_crit = list()
run_res_BO = list()
run_res_actor_critic = list()



############# PERFORMANCE EVALUATION FOR BAYESIAN OPTIMIZATION ################ 
for run in range(4):
    lhd = LatinHyperCube(pop_size, domain)
    X = lhd.generate()
    Y = fname_bayes(X)
    model = gpflow.gpr.GPR(X, Y, gpflow.kernels.Matern52(2, ARD=True))
    model.kern.lengthscales.transform = gpflow.transforms.Log1pe(1e-3)
    # Now create the Bayesian Optimizer
    alpha = ExpectedImprovement(model)
    
    acquisition_opt = StagedOptimizer([MCOptimizer(domain, 200),
                                       SciPyOptimizer(domain)])
    
    optimizer = BayesianOptimizer(domain, alpha, optimizer=acquisition_opt, verbose=True)
    best_iter = np.zeros(max_iter)
    # Run the Bayesian optimization
    for i in range(max_iter-150):
        r = optimizer.optimize(fname_bayes, n_iter=1)
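The listing is cut off inside the loop. A plausible completion (an assumption, not the original code) records the incumbent best after every one-step call; gpflowopt's optimize returns a SciPy-style OptimizeResult whose fun field holds the objective value at the current optimum:

        best_iter[i] = np.min(r.fun)  # hypothetical: track the best value so far
    run_res_BO.append(best_iter)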