def plotGPGO(gpgo, param):
    """Plot the current acquisition surface and the optimum it proposes.

    NOTE(review): the original file was collapsed onto a single line, and this
    chunk began mid-way through the helper's body — the ``def`` header and the
    construction of ``x_test`` were missing. The header is implied by the call
    ``plotGPGO(gpgo, param)`` in ``__main__``; ``x_test`` is reconstructed from
    the single continuous parameter's bounds, matching the pyGPGO examples.
    Confirm against the original source.
    """
    param_bounds = list(param.values())[0][1]  # ('cont', [lo, hi]) -> [lo, hi]
    x_test = np.linspace(param_bounds[0], param_bounds[1], 1000).reshape((1000, 1))

    fig = plt.figure()
    # GPGO minimises the negated acquisition internally; flip the sign back so
    # the plotted curve is the acquisition function itself.
    a = np.array([-gpgo._acqWrapper(np.atleast_1d(x)) for x in x_test]).flatten()
    r = fig.add_subplot(1, 1, 1)
    r.set_title('Acquisition function')
    plt.plot(x_test, a, color='green')
    # Re-optimise the acquisition so `gpgo.best` is consistent with the curve.
    gpgo._optimizeAcq(method='L-BFGS-B', n_start=25)
    plt.axvline(x=gpgo.best, color='black', label='Found optima')
    plt.legend(loc=0)
    plt.tight_layout()
    plt.show()


if __name__ == '__main__':
    np.random.seed(321)

    def f(x):
        """Objective to optimise: a plain sine wave."""
        return np.sin(x)

    # Squared-exponential kernel under a GP whose hyperparameters are
    # marginalised via MCMC (Slice sampling from PyMC).
    sexp = squaredExponential()
    gp = GaussianProcessMCMC(sexp, step=pm.Slice)
    # Integrated EI averages Expected Improvement over the MCMC samples.
    acq = Acquisition(mode='IntegratedExpectedImprovement')
    param = {'x': ('cont', [0, 2 * np.pi])}

    gpgo = GPGO(gp, acq, f, param, n_jobs=-1)
    gpgo._firstRun()

    # Six Bayesian-optimisation iterations, plotting the acquisition each time.
    for i in range(6):
        plotGPGO(gpgo, param)
        gpgo.updateGP()
# NOTE(review): this chunk was collapsed onto a single line in the original
# file; it is reformatted below with the statements unchanged. It relies on
# names defined elsewhere in the script (lang, model1, model2, param, logger,
# init_rand_configs, furtherEvaluations, myFirstRun, myUpdateGP).

# Load binary ground-truth labels for this language's SemEval-2020
# unsupervised lexical semantic change detection task.
binary_truth = np.loadtxt(
    "./data/" + lang + "/semeval2020_ulscd_" + lang[:3] + "/truth/binary.txt",
    dtype=str,
    delimiter="\t",
)

# Creating a GP surrogate model with a Squared Exponential
# covariance function, aka kernel.
sexp = squaredExponential()
sur_model = GaussianProcess(sexp)

# Fitness function scoring (model1, model2) against the binary truth.
fitness = get_fitness_for_automl(model1, model2, binary_truth, logger)

# Setting the acquisition function.
acq = Acquisition(mode="ExpectedImprovement")

# Creating a Bayesian-optimisation driver; n_jobs=4 parallelises evaluations.
bo = GPGO(sur_model, acq, fitness, param, n_jobs=4)
# Patch the run hooks so initialisation/GP updates use the custom
# implementations (e.g. predefined initial configurations) instead of
# GPGO's built-ins.
bo._firstRun = functools.partial(myFirstRun, bo)
bo.updateGP = functools.partial(myUpdateGP, bo)

bo._firstRun(init_rand_configs=init_rand_configs)
bo.logger._printInit(bo)
bo.run(furtherEvaluations, resume=True)

best = bo.getResult()
logger.info(
    "BEST PARAMETERS: "
    + ", ".join(k + ": " + str(v) for k, v in best[0].items())
    + ", ACCU: "
    + str(best[1])
)
logger.info("OPTIMIZATION HISTORY")
logger.info(pprint.pformat(bo.history))