Example #1
def fit_gp_hyperparam(x_train, y_train, x_test):
    k = gptools.SquaredExponentialKernel(param_bounds=[(0, 1e3), (0, 100)])
    gp = gptools.GaussianProcess(k)
    gp.add_data(x_train, y_train)
    gp.optimize_hyperparameters()
    y_star, err_y_star = gp.predict(x_test)

    return y_star, err_y_star
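
# A minimal usage sketch for the function above (a sketch only: assumes numpy
# is imported as np and gptools is installed; the training data are synthetic):
x_train = np.linspace(0.0, 10.0, 25)
y_train = np.sin(x_train)
x_test = np.linspace(0.0, 10.0, 200)
y_star, err_y_star = fit_gp_hyperparam(x_train, y_train, x_test)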
Example #2
    def test_fit_GP_SN1999em(self):
        # add data
        dm = -29.38  # D = 7.5e6 pc
        # dm = -30.4  # D = 12.e6 pc
        curves = sn1999em.read_curves()
        lc = curves.get('V')
        lc.mshift = dm
        t = lc.Time
        y = lc.Mag
        yerr = lc.Err

        # k = gptools.SquaredExponentialKernel()
        # gp = gptools.GaussianProcess(k)
        # k = gptools.SquaredExponentialKernel(param_bounds=[(0, 1e3), (0, 100)])
        k = gptools.SquaredExponentialKernel(param_bounds=[(0., max(np.abs(y))),
                                                           (0, np.std(t))])
        gp = gptools.GaussianProcess(k, mu=gptools.LinearMeanFunction())

        gp.add_data(t, y, err_y=yerr)

        is_mcmc = False  # set to True to sample the hyperparameters with MCMC
        if is_mcmc:
            out = gp.predict(t, use_MCMC=True, full_MCMC=True,
                             return_std=False,
                             num_proc=0,
                             nsamp=200,
                             plot_posterior=True,
                             plot_chains=False,
                             burn=100,
                             thin=1)

        else:
            gp.optimize_hyperparameters(verbose=True)
            out = gp.predict(t, use_MCMC=False)

        y_star, err_y_star = out

        # gp.optimize_hyperparameters()
        # y_star, err_y_star = gp.predict(t)

        fig = plt.figure()
        ax = fig.add_axes((0.1, 0.3, 0.8, 0.65))
        ax.invert_yaxis()

        ax.plot(t, y, color='blue', label='L bol', lw=2.5)
        ax.errorbar(t, y, yerr=yerr, fmt='o', color='blue', label='V obs.')

        #
        # ax.plot(t, y_star, color='red', ls='--', lw=1.5, label='GP')
        # third: plot a constrained function with errors
        ax.plot(t, y_star, '-', color='gray')
        ax.fill_between(t, y_star - 2 * err_y_star, y_star + 2 * err_y_star, color='gray', alpha=0.3)
        # ax.errorbar(t, y_star, err_y_star, fmt='.k', ms=6)

        plt.legend()
        plt.show()
Example #3
    def test_fit_GP_SNRefsdal_all_lc(self):
        # k = gptools.SquaredExponentialKernel(param_bounds=[(0, 1e3), (0, 100)])
        # gp = gptools.GaussianProcess(k, mu=gptools.LinearMeanFunction())

        # add data
        dm = -29.38  # D = 7.5e6 pc
        # dm = -30.4  # D = 12.e6 pc
        image = "S1"
        bands = ['F160W', 'F105W', 'F125W']
        # bands = ('F160W','F140W','F105W', 'F125W')
        curves = snrefsdal.read_curves(snrefsdal.path_data, image)
        for bname in bands:
            lc = curves.get(bname)
            # lc.mshift = dm
            t = lc.Time
            y = lc.Mag
            yerr = lc.Err

            #  Gaussian process
            k = gptools.SquaredExponentialKernel(param_bounds=[(min(np.abs(y)), max(np.abs(y))),
                                                               (0, np.std(t))])
            # k = gptools.SquaredExponentialKernel(param_bounds=[(min(np.abs(y)), max(np.abs(y))),
            #                                                    (0, np.std(t))])
            gp = gptools.GaussianProcess(k)
            # gp = gptools.GaussianProcess(k, mu=gptools.LinearMeanFunction())
            gp.add_data(t, y, err_y=yerr)

            is_mcmc = True
            if is_mcmc:
                out = gp.predict(t, use_MCMC=True, full_MCMC=True, return_std=True,
                                 num_proc=0, nsamp=100,
                                 plot_posterior=True, plot_chains=False,
                                 burn=10, thin=1)

            else:
                gp.optimize_hyperparameters()
                out = gp.predict(t, use_MCMC=False)
            y_star, err_y_star = out
            # y_star, err_y_star = gp.predict(t)

            fig = plt.figure()
            ax = fig.add_axes((0.1, 0.3, 0.8, 0.65))
            ax.invert_yaxis()

            bcolor = band.colors()[bname]
            ax.plot(t, y, color=bcolor, label='L bol', lw=2.5)
            ax.errorbar(t, y, yerr=yerr, fmt='o', color=bcolor, label='%s obs.' % bname)

            #
            # ax.plot(t, y_star, color='red', ls='--', lw=1.5, label='GP')
            ax.plot(t, y_star, '-', color=bcolor)
            ax.fill_between(t, y_star - 2 * err_y_star, y_star + 2 * err_y_star, color=bcolor, alpha=0.3)

        plt.show()
Example #4
    def test_fit_GP_SNRefsdal(self):
        # k = gptools.SquaredExponentialKernel(param_bounds=[(0, 1e3), (0, 100)])
        # gp = gptools.GaussianProcess(k, mu=gptools.LinearMeanFunction())

        # add data
        dm = -29.38  # D = 7.5e6 pc
        # dm = -30.4  # D = 12.e6 pc
        image = "S1"
        bname = 'F160W'
        curves = snrefsdal.read_curves(snrefsdal.path_data, image)
        lc = curves.get(bname)
        # lc.mshift = dm
        t = lc.Time
        y = lc.Mag
        yerr = lc.Err

        #  Gaussian process
        k = gptools.SquaredExponentialKernel(param_bounds=[(0, max(np.abs(y))),
                                                           (0, np.std(t))])
        # k = gptools.SquaredExponentialKernel(param_bounds=[(min(np.abs(y)), max(np.abs(y))),
        #                                                    (0, np.std(t))])
        gp = gptools.GaussianProcess(k)
        # gp = gptools.GaussianProcess(k, mu=gptools.LinearMeanFunction())
        gp.add_data(t, y, err_y=yerr)

        gp.optimize_hyperparameters()
        y_star, err_y_star = gp.predict(t)

        fig = plt.figure()
        ax = fig.add_axes((0.1, 0.3, 0.8, 0.65))
        ax.invert_yaxis()

        ax.plot(t, y, color='blue', label='L bol', lw=2.5)
        ax.errorbar(t, y, yerr=yerr, fmt='o', color='blue', label='%s obs.' % bname)

        #
        # ax.plot(t, y_star, color='red', ls='--', lw=1.5, label='GP')
        ax.plot(t, y_star, '-', color='gray')
        ax.fill_between(t, y_star - 2 * err_y_star, y_star + 2 * err_y_star, color='gray', alpha=0.3)

        plt.show()
Example #5
def profile_fitting(x,
                    y,
                    err_y=None,
                    optimize=True,
                    method='GPR',
                    kernel='SE',
                    num_dim=1,
                    debug_plots=True,
                    noiseLevel=2.,
                    **kwargs):  #sigma_max=10.0, l_min = 0.005,
    """Interpolate profiles and uncertainties over a dense grid. Also return the maximum 
    value of the smoothed data.
    
    This function can use Gaussian process regression or splines. When the former is adopted, both the 
    mean and the standard deviation of the updated profile are returned. Use the spline interpolation
    only as a cross-check.
    
    We allow the use of several GPR kernels so as to assess their performance in confidently 
    predicting profiles and assessing their uncertainty consistency. 

    Parameters
    ----------
    x : array of float
        Abscissa of data to be interpolated.
    y : array of float
        Data to be interpolated.
    err_y : array of float, optional
        Uncertainty in `y`. If absent, the data are interpolated.
    optimize : bool, optional
        Specify whether optimization over hyperparameters should occur or not. Default is True.
    method : {'GPR', 'spline'}, optional
        Method to use when interpolating. Default is 'GPR' (Gaussian process
        regression). Can also use a cubic spline.
    kernel : str or gptools.Kernel, optional
        Type of kernel to be used: one of 'SE', 'gibbs', 'matern52' or 'RQ'. At this stage these
        kernels are created internally, but a pre-built gptools kernel object can also be passed
        directly. More kernels should be added over time.
    num_dim : int, optional
        Number of dimensions of the input/output data. Default is 1
    debug_plots : bool, optional
        Set to True to plot the data, the smoothed curve (with uncertainty) and
        the location of the peak value.
    noiseLevel : float, optional
        Initial guess for a noise multiplier. Default: 2
    kwargs : dictionary
        Keyword arguments passed on to set the hyper-prior bounds for the kernel of choice.
    """
    # grid = scipy.linspace(max(0, x.min()), min(0.08, x.max()), 1000)
    #grid = scipy.linspace(x.min(), x.max(), 1000)
    grid = x
    # Create empty object for results:
    res = type('', (), {})()

    if method == 'GPR':
        # hp is the hyperprior. A product of kernels is a kernel, so the joint hyperprior is
        # just the product of hyperpriors for each of the hyperparameters of the individual
        # priors. gptools offers convenient functionalities to create joint hyperpriors.

        # Define the kernel type amongst the implemented options.
        if kernel == 'SE':
            assert len(kwargs) == 4
            hparams = hyperparams(**kwargs)
            #hparams.set_kwargs(**kwargs)
            # Defaults:
            if not hasattr(hparams, 'sigma_mean'): hparams.sigma_mean = 2.0
            if not hasattr(hparams, 'l_mean'): hparams.l_mean = 0.005
            if not hasattr(hparams, 'sigma_sd'): hparams.sigma_sd = 10.0
            if not hasattr(hparams, 'l_sd'): hparams.l_sd = 0.1

            hprior = (gptools.GammaJointPriorAlt(
                [hparams.sigma_mean, hparams.l_mean],
                [hparams.sigma_sd, hparams.l_sd]))
            k = gptools.SquaredExponentialKernel(
                #= ====== =======================================================================
                #0 sigma  Amplitude of the covariance function
                #1 l      Covariance length scale
                #= ====== =======================================================================
                # param_bounds=[(0, sigma_max), (0, 2.0)],
                hyperprior=hprior,
                initial_params=[
                    10000.0, 400000.0
                ],  # random, doesn't matter because we do random starts anyway
                fixed_params=[False] * 2)

        elif kernel == 'gibbs':
            #if num_dim == 1: assert len(kwargs) == 10
            hparams = hyperparams(**kwargs)
            # Defaults:
            if not hasattr(hparams, 'sigma_min'): hparams.sigma_min = 0.0
            if not hasattr(hparams, 'sigma_max'): hparams.sigma_max = 10.0

            if not hasattr(hparams, 'l1_mean'): hparams.l1_mean = 0.3
            if not hasattr(hparams, 'l1_sd'): hparams.l1_sd = 0.3

            if not hasattr(hparams, 'l2_mean'): hparams.l2_mean = 0.5
            if not hasattr(hparams, 'l2_sd'): hparams.l2_sd = 0.25

            if not hasattr(hparams, 'lw_mean'): hparams.lw_mean = 0.0
            if not hasattr(hparams, 'lw_sd'): hparams.lw_sd = 0.3

            if not hasattr(hparams, 'x0_mean'): hparams.x0_mean = 0.0
            if not hasattr(hparams, 'x0_sd'): hparams.x0_sd = 0.3

            hprior = (gptools.UniformJointPrior([
                (hparams.sigma_min, hparams.sigma_max),
            ]) * gptools.GammaJointPriorAlt([
                hparams.l1_mean, hparams.l2_mean, hparams.lw_mean,
                hparams.x0_mean
            ], [hparams.l1_sd, hparams.l2_sd, hparams.lw_sd, hparams.x0_sd]))

            k = gptools.GibbsKernel1dTanh(
                #= ====== =======================================================================
                #0 sigma  Amplitude of the covariance function
                #1 l1     Small-X saturation value of the length scale.
                #2 l2     Large-X saturation value of the length scale.
                #3 lw     Length scale of the transition between the two length scales.
                #4 x0     Location of the center of the transition between the two length scales.
                #= ====== =======================================================================
                initial_params=[
                    2.0, 0.5, 0.05, 0.1, 0.5
                ],  # for random_starts != 0, the initial state of the hyperparameters is not actually used
                fixed_params=[False] * 5,
                hyperprior=hprior,
            )
        elif kernel == 'matern52':
            if num_dim == 1: assert len(kwargs) == 4
            hparams = hyperparams(**kwargs)
            # Defaults:
            if not hasattr(hparams, 'sigma_mean'): hparams.sigma_mean = 2.0
            if not hasattr(hparams, 'l_mean'): hparams.l_mean = 0.005

            if not hasattr(hparams, 'sigma_sd'): hparams.sigma_sd = 10.0
            if not hasattr(hparams, 'l_sd'): hparams.l_sd = 0.1

            hprior = (gptools.GammaJointPriorAlt(
                [hparams.sigma_mean, hparams.l_mean],
                [hparams.sigma_sd, hparams.l_sd]))
            k = gptools.Matern52Kernel(  # this has 2 hyperparameters in 1D
                #= ===== ===========================================
                #0 sigma Prefactor to the kernel
                #1 l1    Length scale for first dimension
                #2 ...   More length scales for more dimensions
                #= ===== ===========================================
                hyperprior=hprior,
                initial_params=[0.5, 0.5],
                fixed_params=[False] * 2)
        elif kernel == 'RQ':  # rational quadratic
            if num_dim == 1: assert len(kwargs) == 6
            hparams = hyperparams(**kwargs)

            # Defaults:
            if not hasattr(hparams, 'sigma_mean'): hparams.sigma_mean = 2.0
            if not hasattr(hparams, 'alpha_mean'): hparams.alpha_mean = 0.005
            if not hasattr(hparams, 'l1_mean'): hparams.l1_mean = 0.005

            if not hasattr(hparams, 'sigma_sd'): hparams.sigma_sd = 10.0
            if not hasattr(hparams, 'alpha_sd'): hparams.alpha_sd = 0.1
            if not hasattr(hparams, 'l1_sd'): hparams.l1_sd = 0.1

            hprior = (gptools.GammaJointPriorAlt(
                [hparams.sigma_mean, hparams.alpha_mean, hparams.l1_mean],
                [hparams.sigma_sd, hparams.alpha_sd, hparams.l1_sd]))

            k = gptools.RationalQuadraticKernel(
                #= ===== ===========================================
                #0 sigma Prefactor to the kernel
                #1 alpha Order of kernel
                #2 l1    Length scale for first dimension
                #3 l2    Length scale for second dimension
                #4 ...   More length scales for more dimensions
                #= ===== ===========================================
                hyperprior=hprior,
                initial_params=[1.0, 0.5, 1.0],
                fixed_params=[False] * 3)

        elif isinstance(kernel, gptools.Kernel):
            k = kernel
        else:
            raise ValueError("Unknown kernel: %s" % (kernel, ))

        # Create additional noise to optimize over (the first argument is n_dims)
        nk = gptools.DiagonalNoiseKernel(
            1,
            n=0,
            initial_noise=np.mean(err_y) * noiseLevel,
            fixed_noise=True
        )  #, noise_bound=(np.mean(err_y)*noiseLevel*(4.0/5.0),np.mean(err_y)*noiseLevel*(6.0/5.0)))    #(np.min(err_y), np.max(err_y)*noiseLevel))#, enforce_bounds=True)
        #print "noise_bound= [", np.min(err_y), ",",np.max(err_y)*noiseLevel,"]"

        gp = gptools.GaussianProcess(k, X=x, y=y, err_y=err_y, noise_k=nk)

        # Pin the fit where the data are exactly zero by stacking repeated
        # zero-value observations (n=0) and a zero-slope constraint (n=1):
        for i in range(len(y)):
            if y[i] == 0:
                gp.add_data(x[i], 0, n=0, err_y=0.0)
                gp.add_data(x[i], 0, n=0, err_y=0.0)
                gp.add_data(x[i], 0, n=0, err_y=0.0)
                gp.add_data(x[i], 0, n=1, err_y=0.0)

        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", message="invalid value encountered in subtract")
            if optimize:
                res_min, ll_trials = gp.optimize_hyperparameters(
                    verbose=False, random_starts=100)
            else:
                print('Optimization is turned off. Using initial guesses for hyperparameters!')

        m_gp, s_gp = gp.predict(grid, noise=True)
        res.free_params = gp.free_params[:]
        res.free_param_names = gp.free_param_names[:]
        res.free_param_bounds = gp.free_param_bounds[:]

        # Check what fraction of the points falls within 1, 2 and 3 sd.
        # Note: the 2-sd and 3-sd counters below count the annuli (1-2 sd
        # and 2-3 sd), not cumulative fractions.
        points_in_1sd = 0.0
        points_in_2sd = 0.0
        points_in_3sd = 0.0
        for i in range(len(y)):
            # Find value of grid that is the closest to x[i]:
            gidx = np.argmin(abs(grid - x[i]))
            if abs(m_gp[gidx] - y[i]) < s_gp[gidx]:
                points_in_1sd += 1.0
            if abs(m_gp[gidx] - y[i]) > s_gp[gidx] and abs(
                    m_gp[gidx] - y[i]) < 2 * s_gp[gidx]:
                points_in_2sd += 1.0
            if abs(m_gp[gidx] - y[i]) > 2 * s_gp[gidx] and abs(
                    m_gp[gidx] - y[i]) < 3 * s_gp[gidx]:
                points_in_3sd += 1.0

        frac_within_1sd = float(points_in_1sd) / len(y)
        frac_within_2sd = float(points_in_2sd) / len(y)
        frac_within_3sd = float(points_in_3sd) / len(y)

        ###
        print("Estimating AIC, BIC...")
        sum2_diff = 0
        for i in range(len(y)):
            # Find value of grid that is the closest to x[i]:
            gidx = np.argmin(abs(grid - x[i]))
            sum2_diff += (m_gp[gidx] - y[i])**2

        chi_squared = float(sum2_diff) / len(y)
        num_params = len(hparams.__dict__) // 2  # each hyperparameter carries a (mean, sd) pair
        num_data = len(y)

        AIC = chi_squared + 2.0 * num_params
        BIC = chi_squared + num_params * np.log(num_data)

    elif method == 'spline':
        m_gp = scipy.interpolate.UnivariateSpline(
            x,
            y,
            w=1.0 / err_y,
            s=None  #2*len(x)
        )(grid)
        if np.isnan(m_gp).any():
            print(x)
            print(y)
            print(err_y)
        #i = m_gp.argmax()
    else:
        raise ValueError("Undefined method: %s" % (method, ))

    if debug_plots:
        f = plt.figure()
        a = f.add_subplot(1, 1, 1)
        a.errorbar(x, y, yerr=err_y, fmt='.', color='b')
        a.plot(grid, m_gp, color='g')
        if method == 'GPR':
            gptools.univariate_envelope_plot(grid,
                                             m_gp,
                                             s_gp,
                                             ax=a,
                                             label='Inferred')
            #a.fill_between(grid, m_gp - s_gp, m_gp + s_gp, color='g', alpha=0.5)
        #plt.plot(grid[m_gp.argmax()],m_gp.max(),'r*')
        plt.xlabel('time (s)', fontsize=14)
        plt.ylabel('Signal Amplitude (A.U.)', fontsize=14)
        plt.tick_params(axis='both', which='major', labelsize=14)

    if method == 'GPR':
        res.m_gp = m_gp
        res.s_gp = s_gp
        res.frac_within_1sd = frac_within_1sd
        res.frac_within_2sd = frac_within_2sd
        res.frac_within_3sd = frac_within_3sd
        if optimize:
            res.ll = res_min.fun
            res.ll_trials = ll_trials
        res.BIC = BIC
        res.AIC = AIC
    else:
        res.m_gp = m_gp

    return res
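
# Note: `hyperparams` above is a small helper assumed to be defined elsewhere
# in this module. A minimal stand-in consistent with how it is used here
# (keyword arguments stored as attributes) would be:
class hyperparams(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)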
Example #6
def compton_kernel(
    X,
    std,
    ell_omega,
    ell_degrees,
    noise_std=1e-7,
    degrees_zeros=None,
    omega_zeros=None,
    degrees_deriv_zeros=None,
    omega_deriv_zeros=None,
):

    deg = t = X[:, [1]]
    omega = w = X[:, [0]]

    import gptools

    kern_omega = gptools.SquaredExponentialKernel(
        initial_params=[1, ell_omega], fixed_params=[True, True])
    kern_theta = gptools.SquaredExponentialKernel(
        initial_params=[1, ell_degrees], fixed_params=[True, True])
    gp_omega = gptools.GaussianProcess(kern_omega)
    gp_theta = gptools.GaussianProcess(kern_theta)

    if omega_zeros is not None or omega_deriv_zeros is not None:
        w_z = []
        n_w = []

        if omega_zeros is not None:
            w_z.append(omega_zeros)
            n_w.append(np.zeros(len(omega_zeros)))
        if omega_deriv_zeros is not None:
            w_z.append(omega_deriv_zeros)
            n_w.append(np.ones(len(omega_deriv_zeros)))
        w_z = np.concatenate(w_z)[:, None]
        n_w = np.concatenate(n_w)

        gp_omega.add_data(w_z, np.zeros(w_z.shape[0]), n=n_w)
        _, K_omega = gp_omega.predict(w, np.zeros(w.shape[0]), return_cov=True)
    else:
        K_omega = gp_omega.compute_Kij(w, w, np.zeros(w.shape[0]),
                                       np.zeros(w.shape[0]))

    if degrees_zeros is not None or degrees_deriv_zeros is not None:
        t_z = []
        n_t = []

        if degrees_zeros is not None:
            t_z.append(degrees_zeros)
            n_t.append(np.zeros(len(degrees_zeros)))
        if degrees_deriv_zeros is not None:
            t_z.append(degrees_deriv_zeros)
            n_t.append(np.ones(len(degrees_deriv_zeros)))
        t_z = np.concatenate(t_z)[:, None]
        n_t = np.concatenate(n_t)

        gp_theta.add_data(t_z, np.zeros(t_z.shape[0]), n=n_t)
        _, K_degrees = gp_theta.predict(t,
                                        np.zeros(t.shape[0]),
                                        return_cov=True)
    else:
        K_degrees = gp_theta.compute_Kij(t, t, np.zeros(t.shape[0]),
                                         np.zeros(t.shape[0]))

    # kern_omega = RBF(ell_omega)
    # kern_degrees = RBF(ell_degrees)

    # K_omega = kern_omega(omega)
    # K_degrees = kern_degrees(deg)
    #
    # # Create conditional kernels if observables are known to vanish at certain locations
    # if omega_zeros is not None:
    #     omega_zeros = np.atleast_1d(omega_zeros)
    #     if omega_zeros.ndim == 1:
    #         omega_zeros = omega_zeros[:, None]
    #     temp_omega = np.linalg.solve(kern_omega(omega_zeros), kern_omega(omega_zeros, omega))
    #     K_omega = K_omega - kern_omega(omega, omega_zeros) @ temp_omega
    #
    # if degrees_zeros is not None:
    #     degrees_zeros = np.atleast_1d(degrees_zeros)
    #     if degrees_zeros.ndim == 1:
    #         degrees_zeros = degrees_zeros[:, None]
    #     temp = np.linalg.solve(kern_degrees(degrees_zeros), kern_degrees(degrees_zeros, deg))
    #     K_degrees = K_degrees - kern_degrees(deg, degrees_zeros) @ temp

    K = std**2 * K_omega * K_degrees
    K += noise_std**2 * np.eye(K.shape[0])
    return K
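
# A usage sketch for compton_kernel (a sketch only: the grid and the length
# scales below are arbitrary placeholders; assumes numpy as np; column 0 of X
# is omega and column 1 is degrees):
w_grid = np.linspace(0.0, 1.0, 10)
t_grid = np.linspace(0.0, 180.0, 9)
X_grid = np.stack(np.meshgrid(w_grid, t_grid, indexing='ij'), axis=-1).reshape(-1, 2)
K_demo = compton_kernel(X_grid, std=1.0, ell_omega=0.3, ell_degrees=40.0,
                        omega_zeros=np.array([0.0]))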
Example #7
# (Note that the covariance length scale is not in any way the same as the
# gradient scale length!) There are two
# levels of sophistication which can be employed here: specifying a list of
# tuples for the `param_bounds` keyword places a uniform prior over each
# hyperparameter whereas passing a :py:class:`JointPrior` instance for the
# `hyperprior` keyword allows you to specify a far more complicated prior
# distribution if needed.

# First, an example using `param_bounds`:
# The prefactor corresponds roughly to the typical range of variation of the
# data. It is usually sufficient to let it vary from 0 to about 5 or 10 times
# the maximum value in your data. The covariance length scale dictates how
# rapidly the data can change in space. For the present data, a good guess is
# that it can vary from 0 to 5. If you were fitting multivariate data, you
# would use the `num_dim` keyword to indicate how many dimensions there are.
# (The default is 1.)
k_SE = gptools.SquaredExponentialKernel(param_bounds=[(0, 20), (0, 5)])

# A much more powerful approach is to specify a joint (hyper)prior distribution
# for the hyperparameters. When :py:class:`JointPrior` instances are
# multiplied, the two priors are taken as being independent prior
# distributions. So, we can make a uniform prior on [0, 20] for the prefactor
# and a gamma prior with mode 1 and standard deviation 0.7 for the covariance
# length scale as follows:
hp = gptools.UniformJointPrior(0, 20) * gptools.GammaJointPriorAlt(1, 0.7)
k_SE = gptools.SquaredExponentialKernel(hyperprior=hp)

# Now, we can move on to creating the Gaussian process itself:
# When creating a Gaussian process, you must specify the covariance kernel to
# use. Optionally, you may also specify a covariance kernel that you consider
# to be a "noise" component. This allows you to represent correlated noise and
# acts in addition to the heteroscedastic, uncorrelated noise you can specify
# point-by-point via the `err_y` keyword when adding data.
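
# For example, a sketch mirroring the DiagonalNoiseKernel usage in Example #5
# (the initial_noise value here is an arbitrary placeholder):
nk = gptools.DiagonalNoiseKernel(1, n=0, initial_noise=0.1, fixed_noise=True)
gp_with_noise = gptools.GaussianProcess(k_SE, noise_k=nk)
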
def interp_max(x,
               y,
               err_y=None,
               s_guess=0.2,
               s_max=10.0,
               l_guess=0.005,
               fixed_l=False,
               debug_plots=False,
               method='GP'):
    """Compute the maximum value of the smoothed data.
    
    Estimates the uncertainty using Gaussian process regression and returns the
    mean and uncertainty.
    
    Parameters
    ----------
    x : array of float
        Abscissa of data to be interpolated.
    y : array of float
        Data to be interpolated.
    err_y : array of float, optional
        Uncertainty in `y`. If absent, the data are interpolated.
    s_guess : float, optional
        Initial guess for the signal variance. Default is 0.2.
    s_max : float, optional
        Maximum value for the signal variance. Default is 10.0.
    l_guess : float, optional
        Initial guess for the covariance length scale. Default is 0.005.
    fixed_l : bool, optional
        Set to True to hold the covariance length scale fixed during the MAP
        estimate. This helps mitigate the effect of bad points. Default is False.
    debug_plots : bool, optional
        Set to True to plot the data, the smoothed curve (with uncertainty) and
        the location of the peak value.
    method : {'GP', 'spline'}, optional
        Method to use when interpolating. Default is 'GP' (Gaussian process
        regression). Can also use a cubic spline.
    """
    grid = np.linspace(max(0, x.min()), min(0.08, x.max()), 1000)
    if method == 'GP':
        hp = (gptools.UniformJointPrior([
            (0, s_max),
        ]) * gptools.GammaJointPriorAlt([
            l_guess,
        ], [
            0.1,
        ]))
        k = gptools.SquaredExponentialKernel(
            # param_bounds=[(0, s_max), (0, 2.0)],
            hyperprior=hp,
            initial_params=[s_guess, l_guess],
            fixed_params=[False, fixed_l])
        gp = gptools.GaussianProcess(k, X=x, y=y, err_y=err_y)
        gp.optimize_hyperparameters(verbose=True, random_starts=100)
        m_gp, s_gp = gp.predict(grid)
        i = m_gp.argmax()
    elif method == 'spline':
        m_gp = scipy.interpolate.UnivariateSpline(x,
                                                  y,
                                                  w=1.0 / err_y,
                                                  s=2 * len(x))(grid)
        if np.isnan(m_gp).any():
            print(x)
            print(y)
            print(err_y)
        i = m_gp.argmax()
    else:
        raise ValueError("Undefined method %s" % (method, ))

    if debug_plots:
        f = plt.figure()
        a = f.add_subplot(1, 1, 1)
        a.errorbar(x, y, yerr=err_y, fmt='.', color='b')
        a.plot(grid, m_gp, color='g')
        if method == 'GP':
            a.fill_between(grid,
                           m_gp - s_gp,
                           m_gp + s_gp,
                           color='g',
                           alpha=0.5)
        a.axvline(grid[i])

    if method == 'GP':
        return (m_gp[i], s_gp[i])
    else:
        return m_gp[i]
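
# A usage sketch for interp_max (a sketch only: the data are synthetic;
# assumes numpy as np; note the internal grid is clipped to [0, 0.08]):
x_demo = np.linspace(0.0, 0.08, 50)
y_demo = np.exp(-(x_demo - 0.04)**2 / 2e-4)
err_demo = 0.05 * np.ones_like(y_demo)
peak_mean, peak_std = interp_max(x_demo, y_demo, err_y=err_demo)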
Example #9
    def compute_conditional_cov(self, X, gp=None):
        if gp is None:
            gp = gm.ConjugateGaussianProcess(**self.kwargs)
            gp.fit(self.X_train, self.c_train)

        if self.degrees_zeros is None and self.omega_zeros is None:
            return gp.cov(X)

        [ls_omega, ls_degrees] = gp.kernel_.k1.get_params()['length_scale']
        std = np.sqrt(gp.cbar_sq_mean_)

        w = X[:, [0]]
        t = X[:, [1]]

        import gptools

        kern_omega = gptools.SquaredExponentialKernel(
            initial_params=[1, ls_omega], fixed_params=[True, True])
        kern_theta = gptools.SquaredExponentialKernel(
            initial_params=[1, ls_degrees], fixed_params=[True, True])
        gp_omega = gptools.GaussianProcess(kern_omega)
        gp_theta = gptools.GaussianProcess(kern_theta)
        # gp_omega.add_data(np.array([[0], [0]]), np.array([0, 0]), n=np.array([0, 1]))

        if self.omega_zeros is not None or self.omega_deriv_zeros is not None:
            w_z = []
            n_w = []

            if self.omega_zeros is not None:
                w_z.append(self.omega_zeros)
                n_w.append(np.zeros(len(self.omega_zeros)))
            if self.omega_deriv_zeros is not None:
                w_z.append(self.omega_deriv_zeros)
                n_w.append(np.ones(len(self.omega_deriv_zeros)))
            w_z = np.concatenate(w_z)[:, None]
            n_w = np.concatenate(n_w)
            print(w_z, n_w)

            gp_omega.add_data(w_z, np.zeros(w_z.shape[0]), n=n_w)
            _, K_omega = gp_omega.predict(w,
                                          np.zeros(w.shape[0]),
                                          return_cov=True)
        else:
            K_omega = gp_omega.compute_Kij(w, w, np.zeros(w.shape[0]),
                                           np.zeros(w.shape[0]))

        if self.degrees_zeros is not None or self.degrees_deriv_zeros is not None:
            t_z = []
            n_t = []

            if self.degrees_zeros is not None:
                t_z.append(self.degrees_zeros)
                n_t.append(np.zeros(len(self.degrees_zeros)))
            if self.degrees_deriv_zeros is not None:
                t_z.append(self.degrees_deriv_zeros)
                n_t.append(np.ones(len(self.degrees_deriv_zeros)))
            t_z = np.concatenate(t_z)[:, None]
            n_t = np.concatenate(n_t)

            gp_theta.add_data(t_z, np.zeros(t_z.shape[0]), n=n_t)
            _, K_theta = gp_theta.predict(t,
                                          np.zeros(t.shape[0]),
                                          return_cov=True)
        else:
            K_theta = gp_theta.compute_Kij(t, t, np.zeros(t.shape[0]),
                                           np.zeros(t.shape[0]))

        # kernel_omega = RBF(ls_omega)
        # kernel_theta = RBF(ls_degrees)

        # if self.omega_zeros is not None:
        #
        #     w_z = np.atleast_1d(self.omega_zeros)[:, None]
        #
        #     K_omega = kernel_omega(w) - kernel_omega(w, w_z) @ np.linalg.solve(kernel_omega(w_z), kernel_omega(w_z, w))
        # else:
        #     K_omega = kernel_omega(w)
        #
        # if self.degrees_zeros is not None:
        #     t_z = np.atleast_1d(self.degrees_zeros)[:, None]
        #     K_theta = kernel_theta(t) - kernel_theta(t, t_z) @ np.linalg.solve(kernel_theta(t_z), kernel_theta(t_z, t))
        # else:
        #     K_theta = kernel_theta(t)

        return std**2 * K_omega * K_theta
Example #10
def imp_gptools(data, fil, mcmc=True, p=None):
    """
    Perform Gaussian process regression with gptools, optionally sampling the hyperparameters via MCMC.

    input: data, dict
           dictionary of raw data
           output from read_snana_lc
           keys: filters

           fil, str
           filter
        
           mcmc, bool, optional
           if True, sample the kernel hyperparameters using MCMC
           Default is True

           p, list of float, optional
           lower and upper bounds between which the GP fit is evaluated
           if None, use the min and max values of the mjd data
           default is None

    output: data, dict
            updated dictionary with GP results
    """

    # format data
    mjd = data[fil][:, 0]
    flux = data[fil][:, 1]
    fluxerr = data[fil][:, 2]

    absflux = np.abs(flux)

    # setup GP
    k_obj = gptools.SquaredExponentialKernel(
        param_bounds=[(0, max(absflux)), (0, np.std(mjd))])
    data['GP_obj'][fil] = gptools.GaussianProcess(k_obj)
    data['GP_obj'][fil].add_data(mjd, flux, err_y=fluxerr)

    if p is None:
        data['xarr'][fil] = np.arange(min(mjd), max(mjd), 0.2)
    else:
        data['xarr'][fil] = np.arange(p[0], p[1], 0.2)

    if mcmc:
        out = data['GP_obj'][fil].predict(data['xarr'][fil],
                                          use_MCMC=True,
                                          full_MCMC=True,
                                          return_std=False,
                                          num_proc=int(data['n_proc'][0]),
                                          nsamp=int(data['nsamp_mcmc'][0]),
                                          plot_posterior=False,
                                          plot_chains=False,
                                          burn=int(data['burn'][0]),
                                          thin=int(data['thin'][0]))

    else:
        try:
            data['GP_obj'][fil].optimize_hyperparameters()
            out = data['GP_obj'][fil].predict(data['xarr'][fil],
                                              use_MCMC=False)
            data['fitting_flag'].append(True)

        except (ValueError, TypeError):
            data['fitting_flag'].append(False)
            out = [None, None]
            print('Failed fitting!')

    data['GP_fit'][fil] = out[0]
    data['GP_std'][fil] = out[1]

    del out
    del k_obj

    return data
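
# A usage sketch for imp_gptools (a sketch only: the dict layout mirrors the
# keys accessed inside the function; the light curve itself is synthetic):
mjd = np.linspace(0.0, 100.0, 40)
flux = np.exp(-0.5 * ((mjd - 30.0) / 10.0)**2)
fluxerr = 0.05 * np.ones_like(flux)
data = {'r': np.column_stack([mjd, flux, fluxerr]),
        'GP_obj': {}, 'xarr': {}, 'GP_fit': {}, 'GP_std': {},
        'fitting_flag': [],
        'n_proc': [1], 'nsamp_mcmc': [100], 'burn': [50], 'thin': [1]}
data = imp_gptools(data, 'r', mcmc=False)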
Example #11
def test_gradient_inputs():
    # This test checks whether or not gradient inputs are accepted by gptools
    f_X = np.random.RandomState(0).randn(5, 2)
    # List of function evaluations
    f_y = (f_X[:, 0]**2 + f_X[:, 1]**2).tolist()
    # Function evaluations
    f_y_0 = f_X[:, 0]**2 + f_X[:, 1]**2
    # list of Gradients
    g_y = (2 * f_X).tolist()
    # Gradient components
    g_y_0 = 2 * f_X[:, 0]
    g_y_1 = 2 * f_X[:, 1]
    # List of hessians
    h_y = []
    # List of hessian components
    h_y_00 = []
    h_y_01 = []
    h_y_11 = []
    for k in range(len(f_y)):
        h_y.append(np.array([[2, 0], [0, 2]]))
        h_y_00.append(2)
        h_y_01.append(0)
        h_y_11.append(2)

    # List of errors
    err_y = f_y
    # Errors
    err_y_0 = f_y_0
    # Gradient error components
    err_g_0 = f_y_0
    err_g_1 = 2 * f_y_0
    # List of gradient errors
    err_g_full = np.vstack((err_g_0, err_g_1)).T.tolist()

    n_dims = 2
    length_scales = np.random.lognormal(size=n_dims).tolist()
    K1 = gptools.SquaredExponentialKernel(num_dim=2,
                                          initial_params=[10] + length_scales)
    K2 = gptools.SquaredExponentialKernel(num_dim=2,
                                          initial_params=[10] + length_scales)

    gp1 = gptools.GaussianProcess(K1)
    gp2 = gptools.GaussianProcess(K2)

    # Input list of function observations, gradients, and hessians
    gp1.add_data_list(f_X, f_y, err_y=err_y)
    gp1.add_data_list(f_X, g_y, err_y=err_g_full, n=1)
    gp1.add_data_list(f_X, h_y, err_y=err_y, n=2)
    # Input function observations, gradients, and Hessians.
    gp2.add_data(f_X, f_y_0, err_y=err_y)
    gp2.add_data(f_X,
                 g_y_0,
                 err_y=err_g_0,
                 n=np.vstack((np.ones(len(f_X)), np.zeros(len(f_X)))).T)
    gp2.add_data(f_X,
                 g_y_1,
                 err_y=err_g_1,
                 n=np.vstack((np.zeros(len(f_X)), np.ones(len(f_X)))).T)
    gp2.add_data(f_X,
                 h_y_00,
                 err_y=err_y,
                 n=np.vstack((2 * np.ones(len(f_X)), np.zeros(len(f_X)))).T)
    gp2.add_data(f_X,
                 h_y_01,
                 err_y=err_y,
                 n=np.vstack((np.ones(len(f_X)), np.ones(len(f_X)))).T)
    gp2.add_data(f_X,
                 h_y_11,
                 err_y=err_y,
                 n=np.vstack((np.zeros(len(f_X)), 2 * np.ones(len(f_X)))).T)

    k1 = gp1.compute_Kij(gp1.X, None, gp1.n, None)
    k2 = gp2.compute_Kij(gp1.X, None, gp1.n, None)

    print([gp1.predict([1, 2])])
    print([gp2.predict([1, 2])])

    np.testing.assert_array_almost_equal(k1, k2, decimal=8)
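
    # The assertion above verifies that the two data-entry styles are
    # equivalent: add_data_list with full gradient/Hessian lists (gp1) yields
    # the same covariance matrix as per-component add_data calls with
    # derivative-order vectors n (gp2).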
    def __init__(self,
                 density,
                 y,
                 orders,
                 density_interp,
                 std_n,
                 ls_n,
                 std_s,
                 ls_s,
                 ref_n,
                 ref_s,
                 breakdown,
                 err_y=0,
                 derivs=(0, 1, 2),
                 include_3bf=True,
                 verbose=False,
                 rho=None):
        self.density = density
        self.Density = Density = density[:, None]
        self.kf = None
        self.Kf = None

        self.density_interp = density_interp
        self.Density_interp = Density_interp = density_interp[:, None]
        self.kf_interp = None
        self.Kf_interp = None
        self.X_interp = Density_interp

        self.y = y
        self.N_interp = N_interp = len(density_interp)
        err_y = np.broadcast_to(err_y,
                                y.shape[0])  # Turn to vector if not already
        self.err_y = err_y
        self.Sigma_y = np.diag(err_y**2)  # Make a diagonal covariance matrix
        self.derivs = derivs

        self.gps_interp = {}
        self.gps_trunc = {}

        self._y_interp_all_derivs = {}
        self._cov_interp_all_derivs = {}
        self._y_interp_vecs = {}
        self._std_interp_vecs = {}
        self._cov_interp_blocks = {}

        self._dy_dn = {}
        self._d2y_dn2 = {}
        self._dy_dk = {}
        self._d2y_dk2 = {}
        self._y_dict = {}

        d_dn = FinDiff(0, density, 1)
        d2_dn2 = FinDiff(0, density, 2, acc=2)
        # d_dk = FinDiff(0, kf, 1)
        # d2_dk2 = FinDiff(0, kf, 2, acc=2)

        self._cov_total_all_derivs = {}
        self._cov_total_blocks = {}
        self._std_total_vecs = {}

        # The priors on the interpolator parameters
        self.mean0 = 0
        self.cov0 = 0
        self._best_max_orders = {}
        self._start_poly_order = 2

        self.ref_n = ref_n
        self.ref_s = ref_s

        kf_conversion = 2**(1 / 3.)

        if rho is not None:
            ls_s = ls_n / kf_conversion
        else:
            ls_s_scaled = kf_conversion * ls_s

        from functools import partial
        # transform_n = partial(fermi_momentum, degeneracy=2)
        # transform_s = partial(fermi_momentum, degeneracy=4)

        self.coeff_kernel_n = gptools.SquaredExponentialKernel(
            initial_params=[std_n, ls_n], fixed_params=[True, True])
        # Assumes the symmetric nuclear matter kernel takes kf_s as an argument, so use ls_s
        self.coeff_kernel_s = gptools.SquaredExponentialKernel(
            initial_params=[std_s, ls_s], fixed_params=[True, True])

        if rho is not None:
            # only use ls_n, and assume rho is the correlation of the off-diagonal
            std_off = np.sqrt(std_s * std_n) * rho
            ls_off = ls_n
        else:
            # But the off-diagonal will take kf_n as an argument, so use scaled length scale
            std_off = np.sqrt(
                std_s * std_n) * (2 * ls_n * ls_s_scaled /
                                  (ls_n**2 + ls_s_scaled**2))**0.25
            ls_off = np.sqrt((ls_s_scaled**2 + ls_n**2) / 2)
        ref_off = np.sqrt(ref_s * ref_n)
        self.coeff_kernel_off = gptools.SquaredExponentialKernel(
            initial_params=[std_off, ls_off], fixed_params=[True, True])

        print(ls_n, ls_s, ls_off)

        for i, n in enumerate(orders):
            first_omitted = n + 1
            if first_omitted == 1:
                first_omitted += 1  # the Q^1 contribution is zero, so bump to Q^2
            _kern_lower_n = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref_n,
                                  lowest_order=0,
                                  highest_order=n,
                                  include_3bf=include_3bf))
            _kern_lower_s = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref_s,
                                  lowest_order=0,
                                  highest_order=n,
                                  include_3bf=include_3bf))
            _kern_lower_ns = CustomKernel(
                ConvergenceKernel(
                    breakdown=breakdown,
                    ref=ref_off,
                    lowest_order=0,
                    highest_order=n,
                    include_3bf=include_3bf,
                    k_f1_scale=1,
                    k_f2_scale=1. / kf_conversion,  # Will turn kf_n to kf_s
                    # off_diag=True
                ))
            _kern_lower_sn = CustomKernel(
                ConvergenceKernel(
                    breakdown=breakdown,
                    ref=ref_off,
                    lowest_order=0,
                    highest_order=n,
                    include_3bf=include_3bf,
                    k_f1_scale=1. / kf_conversion,
                    k_f2_scale=1,  # Will turn kf_n to kf_s
                    # off_diag=True
                ))
            kern_interp_n = _kern_lower_n * self.coeff_kernel_n
            kern_interp_s = _kern_lower_s * self.coeff_kernel_s
            kern_interp_ns = _kern_lower_ns * self.coeff_kernel_off
            kern_interp_sn = _kern_lower_sn * self.coeff_kernel_off
            kern_interp = SymmetryEnergyKernel(
                kernel_n=kern_interp_n,
                kernel_s=kern_interp_s,
                kernel_ns=kern_interp_ns,
                kernel_sn=kern_interp_sn,
            )

            _kern_upper_n = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref_n,
                                  lowest_order=first_omitted,
                                  include_3bf=include_3bf))
            _kern_upper_s = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref_s,
                                  lowest_order=first_omitted,
                                  include_3bf=include_3bf))
            _kern_upper_ns = CustomKernel(
                ConvergenceKernel(
                    breakdown=breakdown,
                    ref=ref_off,
                    lowest_order=first_omitted,
                    include_3bf=include_3bf,
                    k_f1_scale=1,
                    k_f2_scale=1 / kf_conversion,
                    # off_diag=True
                ))
            _kern_upper_sn = CustomKernel(
                ConvergenceKernel(
                    breakdown=breakdown,
                    ref=ref_off,
                    lowest_order=first_omitted,
                    include_3bf=include_3bf,
                    k_f1_scale=1 / kf_conversion,
                    k_f2_scale=1,
                    # off_diag=True
                ))
            kern_trunc_n = _kern_upper_n * self.coeff_kernel_n
            kern_trunc_s = _kern_upper_s * self.coeff_kernel_s
            kern_trunc_ns = _kern_upper_ns * self.coeff_kernel_off
            kern_trunc_sn = _kern_upper_sn * self.coeff_kernel_off
            kern_trunc = SymmetryEnergyKernel(
                kernel_n=kern_trunc_n,
                kernel_s=kern_trunc_s,
                kernel_ns=kern_trunc_ns,
                kernel_sn=kern_trunc_sn,
            )

            y_n = y[:, i]
            self._y_dict[n] = y_n

            # Interpolating processes
            # mu_n = gptools.ConstantMeanFunction(initial_params=[np.mean(y_n)])
            # mu_n = gptools.ConstantMeanFunction(initial_params=[np.max(y_n)+20])
            mu_n = gptools.ConstantMeanFunction(initial_params=[0])
            gp_interp = gptools.GaussianProcess(kern_interp, mu=mu_n)
            gp_interp.add_data(Density, y_n, err_y=err_y)
            # gp_interp.optimize_hyperparameters(max_tries=10)  # For the mean
            self.gps_interp[n] = gp_interp

            # Finite difference:
            self._dy_dn[n] = d_dn(y_n)
            self._d2y_dn2[n] = d2_dn2(y_n)
            # self._dy_dk[n] = d_dk(y_n)
            # self._d2y_dk2[n] = d2_dk2(y_n)

            # Fractional interpolator polynomials
            self._best_max_orders[n] = self.compute_best_interpolator(
                density,
                y=y_n,
                start_order=self._start_poly_order,
                max_order=10)
            if verbose:
                print(
                    f'For EFT order {n}, the best polynomial has max nu = {self._best_max_orders[n]}'
                )

            # Back to GPs:

            y_interp_all_derivs_n, cov_interp_all_derivs_n = predict_with_derivatives(
                gp=gp_interp, X=Density_interp, n=derivs, return_cov=True)

            y_interp_vecs_n = get_means_map(y_interp_all_derivs_n, N_interp)
            cov_interp_blocks_n = get_blocks_map(cov_interp_all_derivs_n,
                                                 (N_interp, N_interp))
            # for (ii, jj), cov_ij in cov_interp_blocks_n.items():
            #     cov_interp_blocks_n[ii, jj] += 1e-12 * np.eye(cov_ij.shape[0])
            std_interp_vecs_n = get_std_map(cov_interp_blocks_n)

            self._y_interp_all_derivs[n] = y_interp_all_derivs_n
            self._cov_interp_all_derivs[n] = cov_interp_all_derivs_n
            self._y_interp_vecs[n] = y_interp_vecs_n
            self._cov_interp_blocks[n] = cov_interp_blocks_n
            self._std_interp_vecs[n] = std_interp_vecs_n

            # Truncation Processes
            gp_trunc = gptools.GaussianProcess(kern_trunc)
            self.gps_trunc[n] = gp_trunc

            cov_trunc_all_derivs_n = predict_with_derivatives(gp=gp_trunc,
                                                              X=Density_interp,
                                                              n=derivs,
                                                              only_cov=True)
            cov_total_all_derivs_n = cov_interp_all_derivs_n + cov_trunc_all_derivs_n

            cov_total_blocks_n = get_blocks_map(cov_total_all_derivs_n,
                                                (N_interp, N_interp))
            # for (ii, jj), cov_ij in cov_total_blocks_n.items():
            #     cov_total_blocks_n[ii, jj] += 1e-12 * np.eye(cov_ij.shape[0])
            std_total_vecs_n = get_std_map(cov_total_blocks_n)

            self._cov_total_all_derivs[n] = cov_total_all_derivs_n
            self._cov_total_blocks[n] = cov_total_blocks_n
            self._std_total_vecs[n] = std_total_vecs_n
    def __init__(self,
                 density,
                 kf,
                 y,
                 orders,
                 density_interp,
                 kf_interp,
                 std,
                 ls,
                 ref,
                 breakdown,
                 err_y=0,
                 derivs=(0, 1, 2),
                 include_3bf=True,
                 verbose=False):

        self.density = density
        self.kf = kf
        self.Kf = Kf = kf[:, None]

        self.density_interp = density_interp
        self.kf_interp = kf_interp
        self.Kf_interp = Kf_interp = kf_interp[:, None]
        self.X_interp = Kf_interp

        self.y = y
        self.N_interp = N_interp = len(kf_interp)
        err_y = np.broadcast_to(err_y,
                                y.shape[0])  # Turn to vector if not already
        self.err_y = err_y
        self.Sigma_y = np.diag(err_y**2)  # Make a diagonal covariance matrix
        self.derivs = derivs

        self.gps_interp = {}
        self.gps_trunc = {}

        self._y_interp_all_derivs = {}
        self._cov_interp_all_derivs = {}
        self._y_interp_vecs = {}
        self._std_interp_vecs = {}
        self._cov_interp_blocks = {}

        self._dy_dn = {}
        self._d2y_dn2 = {}
        self._dy_dk = {}
        self._d2y_dk2 = {}
        self._y_dict = {}

        d_dn = FinDiff(0, density, 1)
        d2_dn2 = FinDiff(0, density, 2, acc=2)
        d_dk = FinDiff(0, kf, 1)
        d2_dk2 = FinDiff(0, kf, 2, acc=2)

        self._cov_total_all_derivs = {}
        self._cov_total_blocks = {}
        self._std_total_vecs = {}

        # The priors on the interpolator parameters
        self.mean0 = 0
        self.cov0 = 0
        self._best_max_orders = {}
        self._start_poly_order = 2

        # from scipy.interpolate import splrep
        from scipy.interpolate import UnivariateSpline
        self.splines = {}

        self.coeff_kernel = gptools.SquaredExponentialKernel(
            initial_params=[std, ls], fixed_params=[True, True])
        for i, n in enumerate(orders):
            first_omitted = n + 1
            if first_omitted == 1:
                first_omitted += 1  # the Q^1 contribution is zero, so bump to Q^2
            _kern_lower = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref,
                                  lowest_order=0,
                                  highest_order=n,
                                  include_3bf=include_3bf))
            kern_interp = _kern_lower * self.coeff_kernel
            _kern_upper = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref,
                                  lowest_order=first_omitted,
                                  include_3bf=include_3bf))
            kern_trunc = _kern_upper * self.coeff_kernel

            # try:
            #     err_y_i = err_y[i]
            # except TypeError:
            #     err_y_i = err_y

            y_n = y[:, i]
            self._y_dict[n] = y_n

            # Interpolating processes
            # mu_n = gptools.ConstantMeanFunction(initial_params=[np.mean(y_n)])
            # mu_n = gptools.ConstantMeanFunction(initial_params=[np.max(y_n)+20])
            mu_n = gptools.ConstantMeanFunction(initial_params=[0])
            gp_interp = gptools.GaussianProcess(kern_interp, mu=mu_n)
            gp_interp.add_data(Kf, y_n, err_y=err_y)
            # gp_interp.optimize_hyperparameters(max_tries=10)  # For the mean
            self.gps_interp[n] = gp_interp

            # Finite difference:
            self._dy_dn[n] = d_dn(y_n)
            self._d2y_dn2[n] = d2_dn2(y_n)
            self._dy_dk[n] = d_dk(y_n)
            self._d2y_dk2[n] = d2_dk2(y_n)

            # Fractional interpolator polynomials
            self._best_max_orders[n] = self.compute_best_interpolator(
                density,
                y=y_n,
                start_order=self._start_poly_order,
                max_order=10)
            self.splines[n] = UnivariateSpline(density, y_n, s=np.max(err_y))
            if verbose:
                print(
                    f'For EFT order {n}, the best polynomial has max nu = {self._best_max_orders[n]}'
                )

            # Back to GPs:

            y_interp_all_derivs_n, cov_interp_all_derivs_n = predict_with_derivatives(
                gp=gp_interp, X=Kf_interp, n=derivs, return_cov=True)

            y_interp_vecs_n = get_means_map(y_interp_all_derivs_n, N_interp)
            cov_interp_blocks_n = get_blocks_map(cov_interp_all_derivs_n,
                                                 (N_interp, N_interp))
            std_interp_vecs_n = get_std_map(cov_interp_blocks_n)

            self._y_interp_all_derivs[n] = y_interp_all_derivs_n
            self._cov_interp_all_derivs[n] = cov_interp_all_derivs_n
            self._y_interp_vecs[n] = y_interp_vecs_n
            self._cov_interp_blocks[n] = cov_interp_blocks_n
            self._std_interp_vecs[n] = std_interp_vecs_n

            # Truncation Processes
            gp_trunc = gptools.GaussianProcess(kern_trunc)
            self.gps_trunc[n] = gp_trunc

            cov_trunc_all_derivs_n = predict_with_derivatives(gp=gp_trunc,
                                                              X=Kf_interp,
                                                              n=derivs,
                                                              only_cov=True)
            cov_total_all_derivs_n = cov_interp_all_derivs_n + cov_trunc_all_derivs_n

            cov_total_blocks_n = get_blocks_map(cov_total_all_derivs_n,
                                                (N_interp, N_interp))
            std_total_vecs_n = get_std_map(cov_total_blocks_n)

            self._cov_total_all_derivs[n] = cov_total_all_derivs_n
            self._cov_total_blocks[n] = cov_total_blocks_n
            self._std_total_vecs[n] = std_total_vecs_n