Example #1
def prediction_function_krigging(xnew, y, xmat, theta_vec, p_vec):
    """Kriging prediction of y at a new point xnew, given the data (xmat, y)."""
    # Correlation matrix of the training points and its inverse (via Cholesky)
    R = exp_kernel.kernel_mat(xmat, theta_vec, p_vec)
    Rinv = cho_inv.cholesky_inv(R)
    # Generalized least squares estimate of the constant trend beta
    beta_hat = pred.beta_est(y, Rinv)
    # Correlation vector between xnew and the training points, then the prediction
    rx = exp_kernel.kernel_rx(xmat, xnew, theta_vec, p_vec)
    y_hat = pred.y_est(rx, y, Rinv, beta_hat)
    return y_hat
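
A minimal usage sketch of the predictor above. It assumes the local modules exp_kernel, cho_inv and prediction_formulae (aliased as pred) are importable as in the other examples, and reuses the toy setup of Example #5 (test_func.mystery_vec as the objective, unit-cube inputs, theta_vec and p_vec as per-dimension kernel parameters):

import numpy as np
import exp_kernel
import cho_inv
import prediction_formulae as pred
import test_func

# Toy 2-d design of 10 points and the corresponding observations
n = 10
xmat = np.random.rand(n, 2)
y = np.zeros((n, 1))
for i in range(0, n):
    y[i, 0] = test_func.mystery_vec(xmat[i, :])

theta_vec = [1, 1]
p_vec = [1, 1]

# Kriging prediction at a new point
xnew = np.random.rand(2)
y_hat = prediction_function_krigging(xnew, y, xmat, theta_vec, p_vec)
print(y_hat)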
def complete_acq_func(xmat, xnew, y, Rinv, beta_hat, theta, p, acq_func):
    """
    Evaluate the chosen acquisition function at a new point, making it easy
    to swap one acquisition function for another.

    Args:
        xmat (numpy.ndarray) : the data points, shape = (n, k)
        xnew (numpy.ndarray) : the new data point, shape = (k, )
        y (numpy.ndarray) : y, shape = (n, 1)
        Rinv (numpy.ndarray) : inverse of R, shape = (n, n)
        beta_hat (float) : estimation of beta on the data of xmat
        theta (numpy.ndarray) : vector of theta params, one per dim, shape = (k, )
        p (numpy.ndarray) : powers used to compute the distance, one per dim, shape = (k, )
        acq_func : instance of one of the classes in Acquisition_Functions.py

    Returns:
        float. The value of the acquisition function at xnew
    """
    # Kriging prediction and standard deviation at xnew
    rx = exp_kernel.kernel_rx(xmat, xnew, theta, p)
    hat_y = pred.y_est(rx, y, Rinv, beta_hat)
    hat_sigma = np.power(pred.sigma_sqr_est(y, rx, Rinv, beta_hat), 0.5)
    # Expected Improvement also needs the current best (minimum) observed value
    if acq_func.name == "EI":
        fmin = np.min(y)
        acq_func.set_fmin(fmin)
    return acq_func.evaluate(hat_y, hat_sigma)
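
A sketch of how this helper could be plugged into an optimizer to pick the next evaluation point. It assumes xmat, y, Rinv, beta_hat, theta and p are already computed as in Example #1, that acq_func is an EI-like instance from Acquisition_Functions.py (exact class name not shown here), and that the search domain is the unit hypercube; the negative acquisition value is minimized, which maximizes the acquisition:

import numpy as np
import scipy.optimize

def neg_acq(xnew):
    # Minimizing the negative acquisition value == maximizing the acquisition
    return -complete_acq_func(xmat, xnew, y, Rinv, beta_hat, theta, p, acq_func)

x0 = np.random.rand(xmat.shape[1])
bounds = [(0.0, 1.0)] * xmat.shape[1]  # assumed unit-hypercube domain
opt_res = scipy.optimize.minimize(neg_acq, x0, method="L-BFGS-B", bounds=bounds)
print(opt_res.x, -opt_res.fun)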
Example #3
def EI(xnew, xtest, y, Rinv, beta_hat, theta_vec, p_vec, function2Bmin):

    # With this code logic, every function would have to be re-coded for each
    # acquisition function, which is clearly suboptimal.
    # See the acquisition max file and reuse its logic when reworking this.

    f_min = np.min(y)  # current best (minimum) observed value

    # This is not the right formula for y_hat: even if it does not really
    # matter here, the article deals with minimizing "expensive" functions,
    # so evaluating the objective at every candidate point is cheating.
    # The whole point of expected improvement is to decide where the function
    # will be evaluated next, precisely to avoid explicit evaluation.
    # The article's formula should therefore be used to estimate y(xnew).
    y_hat = function2Bmin(xnew)
    rx = exp_kernel.kernel_rx(xtest, xnew, theta_vec, p_vec)
    sigma_hat = math.sqrt(pred.sigma_sqr_est(y, rx, Rinv, beta_hat))
    if sigma_hat == 0:
        EI = 0
    else:
        z = (f_min - y_hat) / sigma_hat
        EI = float((f_min - y_hat) * stats.norm.cdf(z) +
                   sigma_hat * stats.norm.pdf(z))
    return EI
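
A sketch of the variant the comments above ask for: the same Expected Improvement formula, but with y(xnew) replaced by the kriging prediction pred.y_est, so the expensive objective is never evaluated at candidate points. The function name EI_predicted is introduced here for illustration, assuming the same exp_kernel and prediction_formulae helpers used throughout these examples:

import math
import numpy as np
import scipy.stats as stats
import exp_kernel
import prediction_formulae as pred

def EI_predicted(xnew, xtest, y, Rinv, beta_hat, theta_vec, p_vec):
    # Current best observed value and kriging prediction at xnew
    f_min = np.min(y)
    rx = exp_kernel.kernel_rx(xtest, xnew, theta_vec, p_vec)
    y_hat = pred.y_est(rx, y, Rinv, beta_hat)
    sigma_hat = math.sqrt(pred.sigma_sqr_est(y, rx, Rinv, beta_hat))
    if sigma_hat == 0:
        return 0.0
    z = (f_min - y_hat) / sigma_hat
    return float((f_min - y_hat) * stats.norm.cdf(z) + sigma_hat * stats.norm.pdf(z))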
Example #4
def pred_means_stds(x_grid, xmat, y, Rinv, beta_hat, theta, p):
    """Kriging predictive means and standard deviations over a grid of points."""
    gp_means = np.zeros(shape=x_grid.shape)
    gp_stds = np.zeros(shape=x_grid.shape)
    for i in range(0, x_grid.shape[0]):
        # Correlation vector between the grid point and the training data
        rx = exp_kernel.kernel_rx(xmat, np.array([x_grid[i]]), theta, p)
        y_hat = y_est(rx, y, Rinv, beta_hat)
        sig_hat = np.sqrt(sigma_sqr_est(y, rx, Rinv, beta_hat))
        gp_means[i] = y_hat
        gp_stds[i] = sig_hat
    return gp_means, gp_stds
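
A brief usage sketch for this helper, e.g. to plot the posterior mean with a confidence band. It assumes a 1-d design (each grid value is wrapped in a one-element array before being passed to kernel_rx) and that xmat, y, Rinv, beta_hat, theta and p are already computed as in Example #1:

import numpy as np
import matplotlib.pyplot as plt

x_grid = np.linspace(0.0, 1.0, 200)
gp_means, gp_stds = pred_means_stds(x_grid, xmat, y, Rinv, beta_hat, theta, p)

plt.plot(x_grid, gp_means, label="kriging mean")
plt.fill_between(x_grid, gp_means - 2 * gp_stds, gp_means + 2 * gp_stds,
                 alpha=0.3, label="+/- 2 std")
plt.legend()
plt.show()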
Example #5
import numpy as np
import exp_kernel
import test_func
import cho_inv
import acquisition_functions as af
import bayesian_optimization as bo
import prediction_formulae as pred
import math


#Test for gp_tools
n = 10
xtest = np.random.rand(n, 2)
theta_vec = [1, 1]
p_vec = [1, 1]
#R = gp_tools.kernel_mat_2d(xtest, theta_vec, p_vec)
R = exp_kernel.kernel_mat(xtest, theta_vec, p_vec)
print(R)
xnew = np.random.rand(2)
rx = exp_kernel.kernel_rx(xtest, xnew, theta_vec, p_vec)
print(rx)
image = test_func.mystery_vec(xnew)


#Test for test_func
y = np.zeros((n, 1))
for i in range(0, n):
    y[i, 0] = test_func.mystery_vec(xtest[i, :])
print(y)



#Test for cho_inv
#Rinv = cho_inv.cholesky_inv(R)
#print(np.dot(Rinv, R))
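
A possible continuation of the test above, following the commented-out cho_inv check and the prediction steps from Example #1 (it reuses R, rx and y computed earlier in this script):

#Test for cho_inv and prediction_formulae
Rinv = cho_inv.cholesky_inv(R)
print(np.dot(Rinv, R))  # should be close to the identity matrix
beta_hat = pred.beta_est(y, Rinv)
y_hat = pred.y_est(rx, y, Rinv, beta_hat)
print(y_hat)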