def bayesian_search(xmat, y, theta, p, xinit, acq_func, bounds=None, constraints=None):
    """Pick the next point to sample by maximizing the acquisition function.

    Fits the kriging quantities (correlation matrix, its inverse, beta
    estimate) on the data seen so far, then hands them to the acquisition
    optimizer.

    Args:
        xmat (numpy.ndarray): data points sampled so far, shape (n, k)
        y (numpy.ndarray): observed responses, shape (n, 1)
        theta (numpy.ndarray): kernel theta parameters, one per dim, shape (k,)
        p (numpy.ndarray): kernel powers used in the distance, one per dim, shape (k,)
        xinit (numpy.ndarray): starting point for the acquisition maximization, shape (k,)
        acq_func: instance of one of the classes in Acquisition_Functions.py
        bounds (tuple): bounds forwarded to the scipy optimizer, optional
        constraints: accepted for interface compatibility; not used here

    Returns:
        The scipy optimization result whose ``x`` attribute is the new point
        to sample.
    """
    corr_mat = exp_kernel.kernel_mat(xmat, theta, p)
    corr_inv = cho_inv.cholesky_inv(corr_mat)
    beta = pred.beta_est(y, corr_inv)
    return am.opti_acq_func(xmat, y, corr_inv, beta, theta, p, xinit, acq_func, bounds)
def prediction_function_krigging(xnew, y, xmat, theta_vec, p_vec):
    """Kriging prediction of the response at a new point.

    Rebuilds the correlation matrix and beta estimate from the training data,
    then evaluates the kriging predictor at ``xnew``.

    Args:
        xnew (numpy.ndarray): point at which to predict
        y (numpy.ndarray): observed responses, shape (n, 1)
        xmat (numpy.ndarray): training data points, shape (n, k)
        theta_vec (numpy.ndarray): kernel theta parameters, one per dim, shape (k,)
        p_vec (numpy.ndarray): kernel powers, one per dim, shape (k,)

    Returns:
        Predicted response at ``xnew`` (as produced by ``pred.y_est``).
    """
    corr_mat = exp_kernel.kernel_mat(xmat, theta_vec, p_vec)
    corr_inv = cho_inv.cholesky_inv(corr_mat)
    beta = pred.beta_est(y, corr_inv)
    corr_vec = exp_kernel.kernel_rx(xmat, xnew, theta_vec, p_vec)
    return pred.y_est(corr_vec, y, corr_inv, beta)
def bayesian_opti_plot_1d(xmat, y, n_it, theta, p, acq_func, objective_func, bounds=None):
    """Run ``n_it`` iterations of 1D Bayesian optimization, plotting each step.

    At each iteration: maximize the acquisition function, plot the kriging
    model and acquisition surface, mark the chosen point, then evaluate the
    objective there and append it to the data set.

    Args:
        xmat (numpy.ndarray): data points sampled so far, shape (n, 1)
        y (numpy.ndarray): observed responses, shape (n, 1)
        n_it (int): number of optimization iterations to run
        theta (numpy.ndarray): kernel theta parameters, shape (1,)
        p (numpy.ndarray): kernel powers, shape (1,)
        acq_func: acquisition function instance (EI needs fmin refreshed each step)
        objective_func: objective being minimized
        bounds (tuple): search bounds; NOTE(review): the body indexes
            ``bounds[0]``, so despite the ``None`` default a real tuple is
            required — confirm with callers.
    """
    for it in range(n_it):
        print(it)
        xinit = initial.xinit_inbounds(bounds)
        # Expected Improvement depends on the current best observation.
        if acq_func.name == "EI":
            acq_func.set_fmin(np.min(y))
        opti_result = bayesian_search(xmat, y, theta, p, xinit, acq_func, bounds)
        xnew = opti_result.x
        # Refit kriging quantities for the visualization.
        corr_mat = exp_kernel.kernel_mat(xmat, theta, p)
        corr_inv = cho_inv.cholesky_inv(corr_mat)
        beta = pred.beta_est(y, corr_inv)
        axes = viz.bayes_opti_plot_1d(xmat, y, corr_inv, beta, theta, p, bounds[0],
                                      grid_size=1000, acq_func=acq_func,
                                      objective_func=objective_func)
        # The optimizer minimizes the negated acquisition; flip the sign back.
        acq_val = -opti_result.fun
        axes[1].vlines(xnew[0], 0, acq_val, linestyles='dashed', colors='r', linewidth=2)
        plt.show()
        xmat, y = evaluate_add(xmat, xnew, y, objective_func)
def bayesian_optimization(n, nb_it, p_vec, theta_vec, function2Bmin):
    """Bayesian optimization with fixed p and theta hyperparameters.

    Draws ``n`` initial points uniformly on [0, 5]^2, evaluates the test
    objective there, then performs ``nb_it`` EI-driven sampling iterations.

    Args:
        n (int): number of initial sampling observations
        nb_it (int): number of sampling iterations
        p_vec (numpy.ndarray): powers used to compute the distance, shape (2,)
        theta_vec (numpy.ndarray): theta params, one per dim, shape (2,)
        function2Bmin: the function to be minimized

    Returns:
        tuple: (minimum evaluation, all evaluations y, point achieving the
        minimum, all sampled points)
    """
    # TODO: factor a single iteration into an atomic helper and loop over it.
    samples = 5 * np.random.rand(n, 2)
    evals = np.zeros((n, 1))
    for row in range(n):
        evals[row, 0] = test_func.mystery_vec(samples[row, :])
    for it in range(nb_it):
        corr_mat = exp_kernel.kernel_mat(samples, theta_vec, p_vec)
        corr_inv = cho_inv.cholesky_inv(corr_mat)
        beta = pred.beta_est(evals, corr_inv)
        start = 5 * np.random.rand(1, 2)
        ei_result = af.max_EI(samples, evals, corr_inv, beta, theta_vec, p_vec, start, function2Bmin)
        xnew = ei_result["x"].reshape(1, 2)
        ynew = np.array(function2Bmin(xnew.reshape(2, 1))).reshape(1, 1)
        samples = np.concatenate((samples, xnew), axis=0)
        evals = np.concatenate((evals, ynew))
        print(it)
    return min(evals), evals, samples[np.argmin(evals), ], samples
def log_likelihood(xmat, y, params_vec):
    """Concentrated log likelihood of the kriging model (up to a constant).

    Args:
        xmat (numpy.ndarray): data points, shape (n, k)
        y (numpy.ndarray): observations, shape (n, 1)
        params_vec (numpy.ndarray): concatenated (theta, p) parameters, shape (2*k,)

    Returns:
        float: log likelihood value
    """
    theta_vec, p_vec = params_to_vec(params_vec)
    R = exp_kernel.kernel_mat(xmat, theta_vec, p_vec)
    n = R.shape[0]
    Rinv = cho_inv.cholesky_inv(R)
    # BUG FIX: np.linalg.det(R) underflows to 0.0 (or overflows) for large or
    # ill-conditioned correlation matrices, making math.log(detR) raise or
    # return -inf. slogdet computes log|det R| stably; for a positive-definite
    # kernel matrix the sign is +1, so logdetR equals log(det(R)).
    _sign, logdetR = np.linalg.slogdet(R)
    hat_sigz_sqr = hat_sigmaz_sqr_mle(y, Rinv)
    return -0.5 * (n * math.log(hat_sigz_sqr) + logdetR)
opti = max_llk.max_log_likelihood( xmat, y, params_init, fixed_p=False, mins_list=[0.01, 0.01, 0.1, 0.1], maxs_list=[None, None, 1.99, 1.99]) print(opti) theta_vec = opti.x[0:d] p_vec = opti.x[d:] # Plot of initial acquisition function in 2d if plot_acq_2d and (d == 2): # Computation of the necessaries quantities R = exp_kernel.kernel_mat(xmat, theta_vec, p_vec) Rinv = cho_inv.cholesky_inv(R) beta = pred.beta_est(y, Rinv) # Plot acq_func1 viz.plot_acq_func_2d(xmat, y, Rinv, beta, theta_vec, p_vec, bounds, (100, 100), acq_func1) # Plot acq_func2 viz.plot_acq_func_2d(xmat, y, Rinv,