Example #1
import numpy as np
import pymc3 as pm
from matplotlib import pyplot as plt
from pyGPGO.covfunc import squaredExponential
from pyGPGO.acquisition import Acquisition
from pyGPGO.surrogates.GaussianProcessMCMC import GaussianProcessMCMC
from pyGPGO.GPGO import GPGO


def plotGPGO(gpgo, param):
    # Evaluation grid over the search range (assumed; not shown in the snippet)
    x_test = np.linspace(param['x'][1][0], param['x'][1][1], 1000)
    fig = plt.figure()
    a = np.array([-gpgo._acqWrapper(np.atleast_1d(x))
                  for x in x_test]).flatten()
    r = fig.add_subplot(1, 1, 1)
    r.set_title('Acquisition function')
    plt.plot(x_test, a, color='green')
    gpgo._optimizeAcq(method='L-BFGS-B', n_start=25)
    plt.axvline(x=gpgo.best, color='black', label='Found optima')
    plt.legend(loc=0)
    plt.tight_layout()
    plt.show()


if __name__ == '__main__':
    np.random.seed(321)

    def f(x):
        return np.sin(x)

    sexp = squaredExponential()
    gp = GaussianProcessMCMC(sexp, step=pm.Slice)
    acq = Acquisition(mode='IntegratedExpectedImprovement')
    param = {'x': ('cont', [0, 2 * np.pi])}

    gpgo = GPGO(gp, acq, f, param, n_jobs=-1)
    gpgo._firstRun()

    for i in range(6):
        plotGPGO(gpgo, param)
        gpgo.updateGP()
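
Example #1 steps the optimizer by hand (_firstRun, then updateGP inside the loop) so the acquisition function can be plotted at each iteration. In ordinary use the same setup is driven by GPGO.run; a minimal sketch reusing the gp, acq, f and param objects defined above:

    # Standard driver: initial design followed by max_iter BO iterations
    gpgo = GPGO(gp, acq, f, param, n_jobs=-1)
    gpgo.run(max_iter=10)
    best_params, best_value = gpgo.getResult()
    print(best_params, best_value)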
Example #2
        # Load binary truths
        binary_truth = np.loadtxt(
            "./data/" + lang + "/semeval2020_ulscd_" + lang[:3] +
            "/truth/binary.txt",
            dtype=str,
            delimiter="\t",
        )
        # Create a GP surrogate model with a squared exponential
        # covariance function (kernel)
        sexp = squaredExponential()
        sur_model = GaussianProcess(sexp)
        fitness = get_fitness_for_automl(model1, model2, binary_truth, logger)
        # Set the acquisition function
        acq = Acquisition(mode="ExpectedImprovement")

        # Create the Bayesian optimization object
        bo = GPGO(sur_model, acq, fitness, param, n_jobs=4)
        bo._firstRun = functools.partial(myFirstRun, bo)
        bo.updateGP = functools.partial(myUpdateGP, bo)
        bo._firstRun(init_rand_configs=init_rand_configs)
        bo.logger._printInit(bo)

        bo.run(furtherEvaluations, resume=True)
        best = bo.getResult()
        logger.info("BEST PARAMETERS: " +
                    ", ".join([k + ": " + str(v)
                               for k, v in best[0].items()]) + ", ACCU: " +
                    str(best[1]))
        logger.info("OPTIMIZATION HISTORY")
        logger.info(pprint.pformat(bo.history))
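
The param dictionary handed to GPGO maps each parameter name to a (type, bounds) pair, with 'cont' for continuous and 'int' for integer ranges. A sketch of a hypothetical two-parameter search space (the names learning_rate and n_layers are illustrative only):

    # Hypothetical search space mixing a continuous and an integer parameter
    param = {
        'learning_rate': ('cont', [1e-4, 1e-1]),
        'n_layers': ('int', [1, 5]),
    }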
Example #3

import numpy as np
import pymc3 as pm
from matplotlib import pyplot as plt
from pyGPGO.acquisition import Acquisition
from pyGPGO.covfunc import squaredExponential
from pyGPGO.surrogates.tStudentProcessMCMC import tStudentProcessMCMC
from pyGPGO.GPGO import GPGO

if __name__ == '__main__':
    sexp = squaredExponential()
    gp = tStudentProcessMCMC(sexp, step=pm.Slice)

    def f(x):
        return np.sin(x)

    np.random.seed(200)
    param = {'x': ('cont', [0, 6])}
    acq = Acquisition(mode='IntegratedExpectedImprovement')
    gpgo = GPGO(gp, acq, f, param)
    gpgo._firstRun(n_eval=7)

    plt.figure()
    plt.subplot(2, 1, 1)

    # Plot posterior samples drawn from the MCMC surrogate
    Z = np.linspace(0, 6, 100)[:, None]
    post_mean, post_var = gpgo.GP.predict(Z, return_std=True, nsamples=200)
    for i in range(200):
        plt.plot(Z.flatten(), post_mean[i], linewidth=0.4)

    plt.plot(gpgo.GP.X.flatten(),
             gpgo.GP.y,
             'X',
             label='Sampled data',
             markersize=10,
             color='red')
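
The snippet opens a two-row figure but only fills the upper subplot with posterior samples. A minimal sketch of what the lower row could show, reusing the _acqWrapper helper seen in Example #1 to plot the acquisition over the same range:

    # Plot the (negated) acquisition function in the lower subplot
    plt.subplot(2, 1, 2)
    xs = np.linspace(0, 6, 100)
    a = np.array([-gpgo._acqWrapper(np.atleast_1d(x)) for x in xs]).flatten()
    plt.plot(xs, a, color='green', label='Integrated acquisition')
    plt.legend(loc=0)
    plt.show()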
Example #4

import numpy as np
from matplotlib import pyplot as plt
from pyGPGO.covfunc import squaredExponential
from pyGPGO.acquisition import Acquisition
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.GPGO import GPGO

# plotGPGO is assumed to be a user-defined helper (as in Example #1), here
# taking a subplot index and a flag for whether to open a new figure.

if __name__ == '__main__':

    def f(x):
        return np.sin(x)

    acq_1 = Acquisition(mode='ExpectedImprovement')
    acq_2 = Acquisition(mode='ProbabilityImprovement')
    acq_3 = Acquisition(mode='UCB', beta=0.5)
    acq_4 = Acquisition(mode='UCB', beta=1.5)
    acq_list = [acq_1, acq_2, acq_3, acq_4]
    sexp = squaredExponential()
    param = {'x': ('cont', [0, 2 * np.pi])}
    new = True
    colors = ['green', 'red', 'orange', 'black']
    acq_titles = [
        r'Expected improvement', r'Probability of Improvement',
        r'GP-UCB $\beta = .5$', r'GP-UCB $\beta = 1.5$'
    ]

    for index, acq in enumerate(acq_list):
        np.random.seed(200)
        gp = GaussianProcess(sexp)
        gpgo = GPGO(gp, acq, f, param)
        gpgo._firstRun(n_eval=3)
        plotGPGO(gpgo, param, index=index + 2, new=new)
        new = False

    plt.show()
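
Each Acquisition object scores a candidate point from the surrogate's posterior mean and standard deviation, relative to the best target value tau found so far. A minimal sketch of evaluating one directly, assuming the eval(tau, mean, std) signature:

    import numpy as np
    from pyGPGO.acquisition import Acquisition

    acq = Acquisition(mode='ExpectedImprovement')
    # tau: best observed target so far; mean/std: surrogate posterior
    # at the candidate point
    score = acq.eval(tau=0.9, mean=np.array([1.0]), std=np.array([0.3]))
    print(score)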