from collections import OrderedDict

import matplotlib.pyplot as plt
import numpy as np
from pyGPGO.acquisition import Acquisition
from pyGPGO.covfunc import matern52, rationalQuadratic, squaredExponential
from pyGPGO.GPGO import GPGO
from pyGPGO.surrogates.GaussianProcess import GaussianProcess

# Part_1a (which defines the objective f) and plot_convergence are assumed
# to be provided elsewhere in the project (see the sketch after this function).


def part_1(max_iter):
    # Define the 2-D search space: both parameters continuous on [-2, 2]
    param = OrderedDict()
    param['x'] = ('cont', [-2, 2])
    param['y'] = ('cont', [-2, 2])

    # Figure-wide title; each subplot compares one kernel's convergence
    plt.suptitle("Convergence Rate, True Optimum = 0")

    # squared exponential kernel function
    np.random.seed(20)
    plt.subplot(131)
    sqexp = squaredExponential()
    gp = GaussianProcess(sqexp)
    acq = Acquisition(mode='ExpectedImprovement')
    gpgo = GPGO(gp, acq, Part_1a.f, param, n_jobs=-1)
    gpgo.run(max_iter=max_iter)
    plot_convergence(gpgo, "Squared Exponential Kernel")

    # matern52 kernel function
    np.random.seed(20)
    plt.subplot(132)
    matern = matern52()
    gp = GaussianProcess(matern)
    acq = Acquisition(mode='ExpectedImprovement')
    gpgo = GPGO(gp, acq, Part_1a.f, param, n_jobs=-1)
    gpgo.run(max_iter=max_iter)
    plot_convergence(gpgo, "Matern52 Kernel")

    # rational quadratic kernel function
    np.random.seed(20)
    plt.subplot(133)
    ratq = rationalQuadratic()
    gp = GaussianProcess(ratq)
    acq = Acquisition(mode='ExpectedImprovement')
    gpgo = GPGO(gp, acq, Part_1a.f, param, n_jobs=-1)
    gpgo.run(max_iter=max_iter)
    plot_convergence(gpgo, "Rational Quadratic Kernel")
    plt.show()
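The function above leans on two project-specific pieces that are not shown: `Part_1a.f`, the objective being maximised (the figure title implies its true optimum is 0), and `plot_convergence`, which draws the running best into the current subplot. A minimal sketch of what they might look like is given below; the bowl-shaped objective and the use of `gpgo.history` (the list of best-so-far values that pyGPGO's `GPGO` accumulates during `run`) are assumptions made for illustration, not part of the original example.

```python
import matplotlib.pyplot as plt
import numpy as np


class Part_1a:
    # Hypothetical objective: an upside-down 2-D bowl, so the true maximum
    # is 0 at (0, 0), matching the "True Optimum = 0" figure title.
    @staticmethod
    def f(x, y):
        return -(x ** 2 + y ** 2)


def plot_convergence(gpgo, title):
    # Assumption: gpgo.history holds the best objective value found after
    # each iteration of gpgo.run().
    best_so_far = np.array(gpgo.history)
    plt.plot(np.arange(1, len(best_so_far) + 1), best_so_far, marker="o")
    plt.axhline(0.0, color="grey", linestyle="--", linewidth=1)
    plt.xlabel("Iteration")
    plt.ylabel("Best f(x, y) so far")
    plt.title(title)
```

With stand-ins like these, `part_1(max_iter=10)` would produce the 1×3 comparison figure laid out by the subplot calls.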
Example #2
import numpy as np
from pyGPGO.covfunc import squaredExponential, matern, matern32, matern52, \
                           gammaExponential, rationalQuadratic, expSine, dotProd

covfuncs = [
    squaredExponential(),
    matern(),
    matern32(),
    matern52(),
    gammaExponential(),
    rationalQuadratic(),
    expSine(),
    dotProd()
]

grad_enabled = [
    squaredExponential(),
    matern32(),
    matern52(),
    gammaExponential(),
    rationalQuadratic(),
    expSine()
]

# Gradient computation is not enabled for every kernel; in particular the
# generalised Matérn kernel (matern) lacks it, which is why it is left out
# of grad_enabled.
#
# All of the kernels except dotProd have a characteristic length-scale l,
# and that is the hyperparameter whose gradient we test here (see the
# sketch below).
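The comments above describe the test rather than showing it. Below is a minimal sketch of that check, assuming the covariance classes expose `K(X, Xstar)` and `gradK(X, Xstar, param)` (as pyGPGO's kernels do) and store the length-scale as an `l` attribute that can be nudged for a finite-difference comparison; the input data, perturbation size, and tolerance are arbitrary choices.

```python
import numpy as np

# Toy inputs: 10 points in 2-D, the kind of data a GaussianProcess
# surrogate would pass to these kernels.
rng = np.random.RandomState(0)
X = rng.uniform(-2, 2, size=(10, 2))

# Every kernel should return a square, symmetric Gram matrix.
for cov in covfuncs:
    K = cov.K(X, X)
    assert K.shape == (10, 10)
    assert np.allclose(K, K.T)

# For the gradient-enabled kernels, compare the analytical gradient of K
# with respect to the length-scale l against a central finite difference.
eps = 1e-5
for cov in grad_enabled:
    dK = cov.gradK(X, X, param='l')           # analytical gradient
    cov.l += eps
    K_plus = cov.K(X, X)
    cov.l -= 2 * eps
    K_minus = cov.K(X, X)
    cov.l += eps                              # restore the original l
    dK_num = (K_plus - K_minus) / (2 * eps)   # numerical gradient
    assert np.allclose(dK, dK_num, atol=1e-4)
```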