Example #1
import json
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
# The kernel expression stored in the JSON is rebuilt with eval(), so the kernel
# classes it references (RBF, ConstantKernel, ...) must be importable here.
from sklearn.gaussian_process.kernels import *

def load_gp(fname_base):
    # Reconstruct a fitted GaussianProcessRegressor from files written at save time:
    # a JSON file holding the kernel expression and hyperparameters, plus plain-text
    # arrays for the training inputs, targets, and dual coefficients.
    with open(fname_base + ".json", 'r') as f:
        my_json = json.load(f)
    my_X = np.loadtxt(fname_base + "_X.dat")
    my_y = np.loadtxt(fname_base + "_y.dat")
    my_alpha = np.loadtxt(fname_base + "_alpha.dat")
    dict_params = my_json['kernel_params']
    # eval() cannot execute an assignment statement, so evaluate the kernel
    # expression directly and bind the result.
    kernel = eval(my_json['kernel'])
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=0)
    gp.kernel_ = kernel
    dict_params_eval = {}
    for name in dict_params:
        if not ('length' in name or 'constant' in name):
            continue
        if name == "k2__k2__length_scale":
            # The length-scale array was saved as its printed form, e.g. "[1. 2. 3.]";
            # collapse the whitespace and insert commas so it evaluates to a list.
            one_space = ' '.join(dict_params[name].split())
            dict_params_eval[name] = eval(one_space.replace(' ', ','))
        else:
            dict_params_eval[name] = eval(dict_params[name])
    # set_params expects keyword arguments, so unpack the dict.
    gp.kernel_.set_params(**dict_params_eval)
    gp.X_train_ = my_X
    gp.y_train_ = my_y
    gp.alpha_ = my_alpha
    gp._y_train_std = float(my_json['y_train_std'])
    gp._y_train_mean = float(my_json['y_train_mean'])
    return gp
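
For orientation, a minimal save-side counterpart could look like the sketch below; it only illustrates the on-disk layout that load_gp reads back (one JSON file plus the _X.dat, _y.dat, and _alpha.dat arrays). The function name save_gp and the choice of which kernel parameters to serialize are assumptions, not part of the original example.

def save_gp(gp, fname_base):
    # Hypothetical counterpart to load_gp: write the files it expects.
    # The kernel string and parameter values are stored as text because
    # load_gp reconstructs them with eval().
    meta = {
        'kernel': str(gp.kernel_),
        'kernel_params': {name: str(value)
                          for name, value in gp.kernel_.get_params().items()
                          if name.endswith('length_scale') or name.endswith('constant_value')},
        'y_train_std': float(gp._y_train_std),
        'y_train_mean': float(gp._y_train_mean),
    }
    with open(fname_base + ".json", 'w') as f:
        json.dump(meta, f)
    np.savetxt(fname_base + "_X.dat", gp.X_train_)
    np.savetxt(fname_base + "_y.dat", gp.y_train_)
    np.savetxt(fname_base + "_alpha.dat", gp.alpha_)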

Example #2
    def __init__(self, gamma, beta, nugget, kernelName, k_lambda, xTrain,
                 yTrain):
        """
        Create a new GaussianProcess object.

        gamma: Hyperparameter
        beta: Hyperparameter
        k_lambda: Hyperparameter
        nugget: The noise hyperparameter
        kernelName: The name of the covariance kernel
        xTrain: Numpy array containing x training values
        yTrain: Numpy array containing y training values
        """
        self.xTrain = xTrain
        self.yTrain = yTrain
        self.k_lambda = k_lambda
        self.beta = beta
        self.gamma = gamma
        self.nugget = nugget
        self.kernelName = kernelName

        # Set up the regressor as if gp.fit had been called.
        # See https://github.com/scikit-learn/scikit-learn/master/sklearn/gaussian_process/gpr.py
        kernel = self._getKernel()
        gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=0)
        gp.K = kernel(xTrain)                          # covariance of the training inputs
        gp.X_train_ = xTrain
        gp.y_train_ = yTrain
        gp.L_ = cholesky(gp.K, lower=True)             # lower-triangular Cholesky factor of K
        gp.alpha_ = cho_solve((gp.L_, True), yTrain)   # dual coefficients K^-1 y
        # Note: fit() recomputes X_train_, y_train_, L_ and alpha_ (and may re-optimize
        # the kernel hyperparameters); kernel_ is then pinned back to the supplied kernel.
        gp.fit(xTrain, yTrain)
        gp.kernel_ = kernel
        self.gp = gp
        self.kernel = kernel

        # Calculate the matrix inverses once to save time later.
        # This is only used for our own implementation of the scoring engine.
        self.L_inv = solve_triangular(gp.L_.T, np.eye(gp.L_.shape[0]))
        self.K_inv = self.L_inv.dot(self.L_inv.T)
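
The cached alpha_, L_inv, and K_inv correspond to the standard GP prediction equations: the posterior mean is K(x*, X) K(X, X)^-1 y and the posterior variance is K(x*, x*) - K(x*, X) K(X, X)^-1 K(X, x*). A rough sketch of how the class's own scoring engine might use these cached quantities is shown below; the method name predict is hypothetical and not part of the original snippet, and it assumes self.kernel is a scikit-learn kernel object (callable, with a diag method).

    def predict(self, xTest):
        # Hypothetical helper illustrating how the cached factors would be used.
        # Predictive mean:      m(x*) = K(x*, X) @ alpha,  with alpha = K(X, X)^-1 y
        # Predictive variance:  v(x*) = K(x*, x*) - K(x*, X) @ K(X, X)^-1 @ K(X, x*)
        K_star = self.kernel(xTest, self.xTrain)        # cross-covariance, shape (n_test, n_train)
        mean = K_star.dot(self.gp.alpha_)
        var = self.kernel.diag(xTest) - np.einsum('ij,jk,ik->i', K_star, self.K_inv, K_star)
        return mean, var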