예제 #1
0
def compute_gpy_model_rmse(model: 'GP', x_test: np.ndarray,
                           y_test: np.ndarray) -> float:
    """Root-mean-squared error of a GPy model's predictive mean on a test set.

    :param model: Trained GPy model exposing ``predict(X) -> (mean, variance)``.
    :param x_test: Test inputs (2-d array, as expected by GPy's ``predict``).
    :param y_test: Test targets, same leading dimension as ``x_test``.
    :return: RMSE between the predictive mean and ``y_test``.
    """
    # predict() returns (mean, variance); only the mean is needed for RMSE,
    # so the variance is deliberately discarded.
    y_pred, _ = model.predict(x_test)
    # Equivalent to sqrt(mean_squared_error(y_test, y_pred)) but without the
    # sklearn dependency: RMSE = sqrt(mean of squared residuals).
    return float(np.sqrt(np.mean(np.square(np.asarray(y_test) - y_pred))))
예제 #2
0
    def __init__(self, X: np.ndarray, Y: np.ndarray, kernel: GPy.kern.Kern, n_fidelities: int, mf_type: str,
                 likelihood: GPy.likelihoods.Likelihood=None):
        """Validate inputs and construct a multi-fidelity GP model.

        :param X: Training data features with fidelity input appended as last column
        :param Y: Training data targets
        :param kernel: Multi-fidelity kernel
        :param n_fidelities: Number of fidelities in problem
        :param mf_type: Model flavour; must be 'autoregressive' or 'recursive'
        :param likelihood: GPy likelihood object.
                           Defaults to MixedNoise which has different noise levels for each fidelity
        :raises ValueError: if X or Y is not a 2-d numpy array, if any fidelity
                            index in X's last column is >= ``n_fidelities``, or
                            if ``mf_type`` is not a recognised value
        """

        # Input checks
        if not isinstance(X, np.ndarray):
            raise ValueError('X should be an array')

        if not isinstance(Y, np.ndarray):
            raise ValueError('Y should be an array')

        if X.ndim != 2:
            raise ValueError('X should be 2d')

        if Y.ndim != 2:
            raise ValueError('Y should be 2d')

        # Fidelity indices live in the last column of X and are zero-based,
        # so any value >= n_fidelities is out of range.
        if np.any(X[:, -1] >= n_fidelities):
            raise ValueError('One or more points has a higher fidelity index than number of fidelities')

        # Make default likelihood as different noise for each fidelity
        if likelihood is None:
            likelihood = GPy.likelihoods.mixed_noise.MixedNoise(
                [GPy.likelihoods.Gaussian(variance=1.) for _ in range(n_fidelities)])
        # Per-point fidelity index, used by MixedNoise to pick the right
        # noise term. Only consumed by the 'recursive' branch below.
        y_metadata = {'output_index': X[:, -1].astype(int)}

        if mf_type == 'autoregressive':
            # NOTE(review): the constructed GP is never assigned (e.g. to
            # self.model) nor is super().__init__ called — the object is
            # discarded. Confirm whether this should be stored.
            GP(X, Y, kernel, likelihood)
        elif mf_type == 'recursive':
            # NOTE(review): same discarded-result concern as above; also the
            # `likelihood` built earlier is not passed here — verify this is
            # intentional for the recursive variant.
            multiGP(X=X, Y=Y, kernel=kernel, Y_metadata=y_metadata)
        else:
            raise ValueError('mf_type must be autoregressive or recursive')
예제 #3
0
 def log_likelihood(self):
     """Return the marginal log-likelihood, delegating to the GP base class."""
     # Explicit unbound call so the base implementation is used regardless
     # of any overrides further down the MRO.
     base_ll = GP.log_likelihood(self)
     return base_ll
예제 #4
0
 def _log_likelihood_gradients(self):
     """Gradient vector: mapping parameters first, then the base GP's parameters."""
     # Propagate dL/dK back to the inputs, then through the mapping to its
     # parameters (chain rule via dK_dX and df_dtheta).
     grad_wrt_inputs = self.kern.dK_dX(self.dL_dK, self.X)
     mapping_grads = self.mapping.df_dtheta(grad_wrt_inputs, self.likelihood.Y)
     gp_grads = GP._log_likelihood_gradients(self)
     return np.hstack((mapping_grads.flatten(), gp_grads))
예제 #5
0
 def _set_params(self, x):
     """Split *x* between the input mapping's parameters and the GP's."""
     n_mapping = self.mapping.num_params
     # Update the mapping first so the latent inputs X are recomputed
     # before the base GP consumes the remaining parameters.
     self.mapping._set_params(x[:n_mapping])
     self.X = self.mapping.f(self.likelihood.Y)
     GP._set_params(self, x[n_mapping:])
예제 #6
0
 def _get_params(self):
     """Concatenate the mapping's parameter vector with the base GP's."""
     mapping_params = self.mapping._get_params()
     gp_params = GP._get_params(self)
     return np.hstack((mapping_params, gp_params))
예제 #7
0
 def _get_param_names(self):
     """Mapping parameter names followed by the base GP's parameter names."""
     return [*self.mapping._get_param_names(), *GP._get_param_names(self)]