Example #1
def evaluateDataset(csv_path,
                    target_index,
                    problem,
                    model,
                    parameter_dict,
                    method='holdout',
                    seed=20,
                    max_iter=50):
    print('Now evaluating {}...'.format(csv_path))
    X, y = build(csv_path, target_index)

    wrapper = loss(model, X, y, method=method, problem=problem)

    print('Evaluating EI')
    np.random.seed(seed)
    sexp = squaredExponential()
    gp = GaussianProcess(sexp, optimize=True, usegrads=True)
    acq_ei = Acquisition(mode='ExpectedImprovement')
    gpgo_ei = GPGO(gp, acq_ei, wrapper.evaluateLoss, parameter_dict, n_jobs=1)
    gpgo_ei.run(max_iter=max_iter)

    # Also evaluate UCB with beta = 0.5 and beta = 1.5
    print('Evaluating UCB beta = 0.5')
    np.random.seed(seed)
    sexp = squaredExponential()
    gp = GaussianProcess(sexp, optimize=True, usegrads=True)
    acq_ucb = Acquisition(mode='UCB', beta=0.5)
    gpgo_ucb = GPGO(gp,
                    acq_ucb,
                    wrapper.evaluateLoss,
                    parameter_dict,
                    n_jobs=1)
    gpgo_ucb.run(max_iter=max_iter)

    print('Evaluating UCB beta = 1.5')
    np.random.seed(seed)
    sexp = squaredExponential()
    gp = GaussianProcess(sexp, optimize=True, usegrads=True)
    acq_ucb2 = Acquisition(mode='UCB', beta=1.5)
    gpgo_ucb2 = GPGO(gp,
                     acq_ucb2,
                     wrapper.evaluateLoss,
                     parameter_dict,
                     n_jobs=1)
    gpgo_ucb2.run(max_iter=max_iter)

    print('Evaluating random')
    np.random.seed(seed)
    r = evaluateRandom(gpgo_ei, wrapper.evaluateLoss, n_eval=max_iter + 1)
    r = cumMax(r)

    return (np.array(gpgo_ei.history),
            np.array(gpgo_ucb.history),
            np.array(gpgo_ucb2.history),
            r)
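
A minimal usage sketch follows; the CSV path, target column index, problem label and the SVC model are hypothetical placeholders, assuming build and loss come from the same benchmark module as evaluateDataset.

from sklearn.svm import SVC

model = SVC()
parameter_dict = {'C': ('cont', (0.1, 5.0)),
                  'gamma': ('cont', (0.1, 10.0))}

ei_hist, ucb_hist, ucb2_hist, random_hist = evaluateDataset(
    'data/some_dataset.csv',  # hypothetical CSV path
    target_index=4,           # hypothetical target column
    problem='binary',         # assumed problem label
    model=model,
    parameter_dict=parameter_dict,
    method='holdout',
    seed=20,
    max_iter=50)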
Example #2
def test_GP():
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 5, 20)[:, np.newaxis]
    y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])

    sexp = squaredExponential()
    gp = GaussianProcessMCMC(sexp, step=pm.Slice)
    gp.fit(X, y)
Example #3
def test_tSP():
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 5, 20)[:, np.newaxis]
    y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])

    sexp = squaredExponential()
    tsp = tStudentProcessMCMC(sexp, step=pm.Slice, niter=100)
    tsp.fit(X, y)
Example #4
def test_GPGO_mcmc():
    np.random.seed(20)
    sexp = squaredExponential()
    gp = GaussianProcessMCMC(sexp, step=pm.Slice, niter=100)
    acq = Acquisition(mode='IntegratedExpectedImprovement')
    params = {'x': ('cont', (0, 1))}
    gpgo = GPGO(gp, acq, f, params)
    gpgo.run(max_iter=10)
    res = gpgo.getResult()[0]
    assert .7 < res['x'] < .8
Example #5
def test_GPGO():
    np.random.seed(20)
    sexp = squaredExponential()
    gp = GaussianProcess(sexp)
    acq = Acquisition(mode='ExpectedImprovement')
    params = {'x': ('cont', (0, 1))}
    gpgo = GPGO(gp, acq, f, params)
    gpgo.run(max_iter=10)
    res = gpgo.getResult()[0]
    assert .6 < res['x'] < .8
Example #6
def test_tSP_opt_nograd():
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 5, 20)[:, np.newaxis]
    y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])

    sexp = squaredExponential()
    tsp = tStudentProcess(sexp, optimize=True)
    tsp.fit(X, y)
    params = tsp.getcovparams()

    assert 0.3 < params['l'] < 0.5
    assert 0.3 < params['sigmaf'] < 0.6
    assert 0.2 < params['sigman'] < 0.4
Example #7
def test_GP_opt_grad():
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 5, 20)[:, np.newaxis]
    y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])

    sexp = squaredExponential()
    gp = GaussianProcess(sexp, optimize=True, usegrads=True)
    gp.fit(X, y)

    params = gp.getcovparams()

    assert 0.36 < params['l'] < 0.37
    assert 0.39 < params['sigmaf'] < 0.41
    assert 0.29 < params['sigman'] < 0.3
Example #8
def main():

    def f(x):
        return np.sin(x)

    sexp = squaredExponential()
    gp = GaussianProcess(sexp)
    acq = Acquisition(mode='ExpectedImprovement')
    # The original snippet had a dangling 'aOp' entry here with no bounds;
    # it is left out because its specification is incomplete.
    param = {'n_hidden_2': ('int', [80, 120])}

    np.random.seed(23)
    gpgo = GPGO(gp, acq, Main_Loop, param)
    gpgo.run(max_iter=20)
    res = gpgo.getResult()[0]
    print(res)
Example #9
def main2():

    sexp = squaredExponential()
    gp = GaussianProcess(sexp)
    acq = Acquisition(mode='ExpectedImprovement')
    param = {
        'r1': ('cont', (0, 1)),
        'r2': ('cont', (0, 1)),
        'r3': ('cont', (0, 1)),
        'r4': ('cont', (0, 1)),
        'r5': ('cont', (0, 1)),
        'r6': ('cont', (0, 1)),
        'r7': ('cont', (0, 1)),
        'r8': ('cont', (0, 1))
    }

    gpgo = GPGO(gp, acq, Main_Loop, param)
    gpgo.run(max_iter=200)
    res = gpgo.getResult()[0]
    print(res)
Example #10
def part_1(max_iter):
    # Define the search space for the two-dimensional objective
    param = OrderedDict()
    param['x'] = ('cont', [-2, 2])
    param['y'] = ('cont', [-2, 2])

    # squared exponential kernel function
    plt.suptitle("Convergence Rate, True Optimum = 0")
    np.random.seed(20)
    plt.subplot(131)
    sqexp = squaredExponential()
    gp = GaussianProcess(sqexp)
    acq = Acquisition(mode='ExpectedImprovement')
    gpgo = GPGO(gp, acq, Part_1a.f, param, n_jobs=-1)
    gpgo.run(max_iter=max_iter)
    plot_convergence(gpgo, "Squared Exponential Kernel")

    # matern52 kernel function
    np.random.seed(20)
    plt.subplot(132)
    matern = matern52()
    gp = GaussianProcess(matern)
    acq = Acquisition(mode='ExpectedImprovement')
    gpgo = GPGO(gp, acq, Part_1a.f, param, n_jobs=-1)
    gpgo.run(max_iter=max_iter)
    plot_convergence(gpgo, "Matern52 Kernel")

    # rational quadratic kernel function
    np.random.seed(20)
    plt.subplot(133)
    ratq = rationalQuadratic()
    gp = GaussianProcess(ratq)
    acq = Acquisition(mode='ExpectedImprovement')
    gpgo = GPGO(gp, acq, Part_1a.f, param, n_jobs=-1)
    gpgo.run(max_iter=max_iter)
    plot_convergence(gpgo, "Rational Quadratic Kernel")
    plt.show()
Example #11
def part_2(max_iter):
    acq_1 = Acquisition(mode='ExpectedImprovement')
    acq_2 = Acquisition(mode='ProbabilityImprovement')
    acq_3 = Acquisition(mode='UCB', beta=0.5)
    acq_4 = Acquisition(mode='UCB', beta=1.5)
    acq_list = [acq_1, acq_2, acq_3, acq_4]
    sqexp = squaredExponential()
    param = OrderedDict()
    param['x'] = ('cont', [-2, 2])
    param['y'] = ('cont', [-2, 2])
    new = True
    colors = ['green', 'red', 'orange', 'black']
    acq_titles = [
        'Expected improvement', 'Probability of Improvement',
        'GP-UCB, beta = .5', 'GP-UCB beta = 1.5'
    ]

    plt.suptitle('Acquisition Functions with Convergence Rates')
    idx = 0

    for index, acq in enumerate(acq_list):
        np.random.seed(200)
        gp = GaussianProcess(sqexp)
        gpgo = GPGO(gp, acq, Part_1a.f, param)
        gpgo.run(max_iter=max_iter)
        plt.subplot(4, 2, idx + 1)
        plot_acquisition(gpgo,
                         param,
                         index + 2,
                         colors,
                         acq_titles[index],
                         new=new)
        plt.subplot(4, 2, idx + 2)
        plot_convergence(gpgo, acq_titles[index])
        new = False
        idx = idx + 2
    plt.show()
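
Both part_1 and part_2 call a plot_convergence helper that these snippets do not show. A minimal sketch of what such a helper might look like, assuming gpgo.history holds the best objective value observed after each iteration (the same attribute Example #1 collects):

import numpy as np
import matplotlib.pyplot as plt

def plot_convergence(gpgo, title):
    # history is assumed to track the best observed value per iteration
    best = np.maximum.accumulate(np.array(gpgo.history))
    plt.plot(np.arange(len(best)), best)
    plt.xlabel('Iteration')
    plt.ylabel('Best value found')
    plt.title(title)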
Example #12

np.random.seed(42)
initialPoints = 30
furtherEvaluations = 120

# defining the hyperparameter search space for the SVC
param = {
    'C': ('cont', [0.1, 5]),
    'gamma': ('cont', [0.1, 10]),
    'coef0': ('cont', [0.1, 10])
}

# creating surrogate models: a GP with a squared exponential covariance
# function (aka kernel), and a random forest
sexp = squaredExponential()
sur_model_1 = GaussianProcess(sexp)
sur_model_2 = RandomForest()

# setting the acquisition function
acq_1 = Acquisition(mode="ExpectedImprovement")
acq_2 = Acquisition(mode="ProbabilityImprovement")
acq_3 = Acquisition(mode="UCB")

# creating the Bayesian optimization objects
gpgo_gaussian_model_1 = GPGO(sur_model_1, acq_1, compute_accuracy_SVC, param)
gpgo_gaussian_model_2 = GPGO(sur_model_1, acq_2, compute_accuracy_SVC, param)
gpgo_gaussian_model_3 = GPGO(sur_model_1, acq_3, compute_accuracy_SVC, param)

gpgo_random_forest_1 = GPGO(sur_model_2, acq_1, compute_accuracy_SVC, param)
gpgo_random_forest_2 = GPGO(sur_model_2, acq_2, compute_accuracy_SVC, param)
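
The snippet defines initialPoints and furtherEvaluations but stops short of running the optimizers. A minimal sketch of the run-and-compare step, assuming compute_accuracy_SVC is defined elsewhere:

optimizers = {'GP + EI': gpgo_gaussian_model_1,
              'GP + PI': gpgo_gaussian_model_2,
              'GP + UCB': gpgo_gaussian_model_3,
              'RF + EI': gpgo_random_forest_1,
              'RF + PI': gpgo_random_forest_2}

for name, optimizer in optimizers.items():
    # 30 random initial evaluations, then 120 guided iterations
    optimizer.run(init_evals=initialPoints, max_iter=furtherEvaluations)
    best_params, best_accuracy = optimizer.getResult()
    print(name, best_params, best_accuracy)

Note that the three GP optimizers share a single GaussianProcess instance (sur_model_1), so running them sequentially reuses a fitted surrogate; a clean comparison would build a fresh surrogate per optimizer.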
Example #13
import numpy as np
from pyGPGO.covfunc import squaredExponential, matern, matern32, matern52, \
                           gammaExponential, rationalQuadratic, expSine, dotProd

covfuncs = [
    squaredExponential(),
    matern(),
    matern32(),
    matern52(),
    gammaExponential(),
    rationalQuadratic(),
    expSine(),
    dotProd()
]

grad_enabled = [
    squaredExponential(),
    matern32(),
    matern52(),
    gammaExponential(),
    rationalQuadratic(),
    expSine()
]

# Some kernels do not have gradient computation enabled, as is the case
# for the generalised Matérn kernel.
#
# All kernels (except dotProd) have a characteristic length-scale l that
# we test for here.
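
A sketch of that check; the K and gradK method names follow pyGPGO's covfunc interface, but treat the exact signatures as an assumption:

rng = np.random.RandomState(0)
X = rng.uniform(0, 5, (10, 1))

for cov in covfuncs:
    # every kernel should return a square covariance matrix
    assert cov.K(X, X).shape == (10, 10)

for cov in grad_enabled:
    # gradient of the covariance w.r.t. the length-scale l
    assert cov.gradK(X, X, param='l').shape == (10, 10)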

Example #14
import numpy as np
from pyGPGO.covfunc import squaredExponential
from pyGPGO.acquisition import Acquisition
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.GPGO import GPGO
from main import uhgs


def f(x):  # defined but unused here; uhgs below is the objective being optimized
    return np.sin(x) - 2


sexp = squaredExponential(sigman=100)
gp = GaussianProcess(sexp)
acq = Acquisition(mode='ExpectedImprovement')
param = {
    'minSol': ('int', [10, 50]),
    'maxSol': ('int', [60, 100]),
    'omega': ('int', [5, 500]),
    'muelite': ('cont', [0.5, 2]),
    'itDiv': ('cont', [0.1, 0.51])
}
np.random.seed(7)
gpgo = GPGO(gp, acq, uhgs, param, n_jobs=1)
gpgo.run(init_evals=6, max_iter=60)
Example #15
param = {  # opening of this dict is truncated in the source; earlier entries are missing
         'lsigma_t':  ('cont', [-3, 1]),
         'l_corr':  ('cont', [10, 1000]),
         't_corr':  ('cont', [1, 20])}
for key, value in json.loads(args.range).items():
    param[key] = value
print("Prameter ranges as below:")
if not args.full:
    del param['l_corr']
    del param['t_corr']
print(param)

# preparation of data_manager
dm = data_manupulation.impute_shield_dm(100)
gene_df = pd.read_csv("data/gene_list/selected_cluster_gene_list.txt")
selected_gene_df = dm.select_gene_df(gene_df)
dm = data_manupulation.standard_dm(args.refnum)
# setting for BO
cov = squaredExponential()
if args.mcmc:
    gp = GaussianProcessMCMC(cov, niter=300, burnin=100, step=pm.Slice)
    acq = Acquisition(mode='IntegratedExpectedImprovement')
else:
    gp = GaussianProcess(cov, optimize=True, usegrads=True)
    acq = Acquisition(mode='ExpectedImprovement')
np.random.seed(100000)
gpgo = GPGO(gp, acq, ts_recovery_correlation, param)
gpgo.run(max_iter=args.boiter)
print(gpgo.getResult())
with open(args.filepath, "w") as f:
    json.dump(gpgo.getResult(), f)
Example #16
import numpy as np
from pyGPGO.covfunc import squaredExponential
from pyGPGO.acquisition import Acquisition
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.GPGO import GPGO

from tube_class import evaluate_tube_design__bayesian


# Sanity-check objective with a known optimum at (2.15, 42.0, -8.75)
def test_quadratic_function(radius, length, submergence):
    x = radius
    y = length
    z = submergence
    return -((x - 2.15)**2 + (y - 42.0)**2 + (z + 8.75)**2)


sq_exp = squaredExponential(l=3, sigman=0.0)
gp = GaussianProcess(sq_exp)
acq = Acquisition(mode='ExpectedImprovement')
design_parameters = {
    'radius': ('cont', [0.05, 3.0]),
    'length': ('cont', [20.0, 200.0]),
    'submergence': ('cont', [-12.0, 3.0])
}

np.random.seed(42)
gpgo = GPGO(gp, acq, evaluate_tube_design__bayesian, design_parameters)
gpgo.run(max_iter=40, init_evals=20)
optimal_design, optimal_power = gpgo.getResult()

# 60 total evaluations: 20 initial + 40 Bayesian optimization iterations
print('Best design after {} evaluations is {} with objective function value {}'.
      format(60, optimal_design, optimal_power))