Example #1
def logLFunc_gp(params, data, model):
    """
    Calculate the ln-likelihood of the data given the model and its parameters.

    Parameters
    ----------
    params : list
        The variable parameter list of the model.
    data : DataSet
        The data to be fitted.
    model : ModelCombiner
        The model to fit the data.

    Returns
    -------
    lnL : float
        The ln likelihood.

    Notes
    -----
    None.
    """
    #Get the data and error
    xSpc = np.array(data.get_csList("x"))
    yPht = np.array(data.get_dsList("y"))
    ySpc = np.array(data.get_csList("y"))
    ePht = np.array(data.get_dsList("e"))
    eSpc = np.array(data.get_csList("e"))
    #Calculate the model
    model.updateParList(params)
    yDict = Model2Data_gp(model, data)
    yPhtModel = np.array(yDict["pht"])
    ySpcModel = np.array(yDict["spc"])
    nParVary = len(model.get_parVaryList())
    #lnlikelihood for photometric data
    if len(yPhtModel):
        f = np.exp(params[nParVary]) #The parameter to control the model incompleteness
        nParVary += 1
        fltr_non = ePht < 0 #Find those non-detections
        fltr_det = ePht >= 0 #Find those detections
        flag = np.zeros_like(yPht)
        flag[fltr_non] = 1 #Generate the flags for the upper limits.
        ePht[fltr_non] = yPht[fltr_non] / 3.0 #In our data, the upper limits are 3-sigma.
        sPht = np.sqrt(ePht**2 + (yPhtModel * f)**2)
        lnlPht = -0.5 * ChiSq(yPht, yPhtModel, sPht, flag)
    else:
        f = 0
        lnlPht = 0
    #lnlikelihood for spectral data using Gaussian process regression
    if len(ySpcModel):
        a, tau = np.exp(params[nParVary:]) #The covariance for spectral residual
        gp = george.GP(a * kernels.Matern32Kernel(tau))
        sSpc = np.sqrt(eSpc**2 + (ySpcModel * f)**2)
        gp.compute(xSpc, sSpc)
        lnlSpc = gp.lnlikelihood(ySpc - ySpcModel)
    else:
        lnlSpc = 0
    lnL = lnlPht + lnlSpc
    return lnL
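
The GP piece of this likelihood is independent of the surrounding DataSet/ModelCombiner machinery. A minimal, self-contained sketch of the same pattern on synthetic data (note that george >= 0.3 renames lnlikelihood to log_likelihood):

import numpy as np
import george
from george import kernels

# Synthetic "spectrum": a smooth signal plus white noise
x = np.sort(np.random.uniform(0.0, 10.0, 100))
yerr = 0.1 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(100)

a, tau = 0.5, 1.0                        # kernel amplitude and length scale
gp = george.GP(a * kernels.Matern32Kernel(tau))
gp.compute(x, yerr)                      # factorize the covariance matrix
lnl = gp.log_likelihood(y - np.sin(x))   # ln-likelihood of the residuals
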
Example #2
def lnlike_gp(p, x, y, yerr):
    a, tau = numpy.exp(p[:2])
    gp = george.GP(a * kernels.Matern32Kernel(tau))
    gp.compute(x, yerr)
    diff = y - model(p[2:])
    print(" ".join([str(v) for v in p[2:]]))
    return gp.lnlikelihood(diff)
Example #3
    def lnlike(self, p):
        """
        GP likelihood function: probability of the data given the kernel parameters

        :return lnlike: log-likelihood of the data given the kernel amplitude and length-scale parameters
        """
        # Update the kernel and compute the lnlikelihood.
        a, tau = 10.0**p[0], 10.0**p[1:]

        lnlike = 0.0
        try:

            if self.kernel == 'sqexp':
                self.gaussproc = george.GP(
                    a * kernels.ExpSquaredKernel(tau, ndim=len(tau)))
            elif self.kernel == 'matern32':
                self.gaussproc = george.GP(
                    a * kernels.Matern32Kernel(tau, ndim=len(tau)))
            elif self.kernel == 'matern52':
                self.gaussproc = george.GP(
                    a * kernels.Matern52Kernel(tau, ndim=len(tau)))

            self.gaussproc.compute(self.x, self.yerr)

            lnlike = self.gaussproc.log_likelihood(self.y, quiet=True)

        except np.linalg.LinAlgError:

            lnlike = -np.inf

        return lnlike
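
The try/except guard matters because gp.compute factorizes the covariance matrix and can raise np.linalg.LinAlgError for ill-conditioned kernels, while quiet=True makes log_likelihood return -inf rather than raising once the factorization has failed. A hypothetical caller sketch (obj stands for an instance of this class):

import numpy as np

p_trial = np.array([0.0, -1.0, -1.0])  # log10 amplitude and length scales
lnlike = obj.lnlike(p_trial)
if not np.isfinite(lnlike):
    pass  # e.g. reject this proposal in an MCMC step
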
Example #4
def lnlike(x, time, flux, err, map):
    """Log-likelihood with gradients."""
    # Generate the light curve from the params
    amp, tau, alpha, beta, l0, per = x[:6]
    map[0, :] = np.ones_like(time)
    map[1:, :] = x[6:].reshape(-1, len(time))
    model, dmdp, dmdy = phase_curve(map, time, per)

    # Hard bounds
    if (amp < 0) or (tau < 0) or (alpha < 0) or (beta > 0) or (per < 0):
        return -np.inf
    elif (l0 < 0) or (l0 > map.lmax):
        return -np.inf
    else:
        ll = 0

    # Compute the GP prior
    for l in range(0, lmax + 1):
        kernel = power(l, amp=amp, alpha=alpha, beta=beta, l0=l0) ** 2 * \
                 kernels.Matern32Kernel(tau ** 2)
        gp = george.GP(kernel)
        gp.compute(time)
        for m in range(-l, l + 1):
            ll += gp.log_likelihood(y[l ** 2 + l + m, :])

    # Compute the likelihood
    ll += -0.5 * np.sum((model - flux) ** 2) / err ** 2

    return ll
Example #5
def gp_interpolator(x, y, res=1000, Nparam=3, decouple_sfr=False):

    yerr = np.zeros_like(y)
    yerr[2:(2 + Nparam)] = 0.001 / np.sqrt(Nparam)
    if len(yerr) > 26:
        yerr[2:(2 + Nparam)] = 0.1 / np.sqrt(Nparam)
    if decouple_sfr:
        yerr[(2 + Nparam):] = 0.1
    #if decouple_sfr == True:
    #    yerr[-2:] = 0.1/np.sqrt(Nparam)
    #else:
    #    yerr[-2:] = 0.01/np.sqrt(Nparam)

    #kernel = np.var(yax) * kernels.ExpSquaredKernel(np.median(yax)+np.std(yax))
    #k2 = np.var(yax) * kernels.LinearKernel(np.median(yax),order=1)
    #kernel = np.var(y) * kernels.Matern32Kernel(np.median(y)) #+ k2
    kernel = np.var(y) * (kernels.Matern32Kernel(np.median(y)) +
                          kernels.LinearKernel(np.median(y), order=2))
    gp = george.GP(kernel, solver=george.HODLRSolver)

    #print(xax.shape, yerr.shape)
    gp.compute(x.ravel(), yerr.ravel())

    # optimize kernel parameters
    #     p0 = gp.get_parameter_vector()
    #     results = minimize(nll, p0, jac=grad_nll, method="L-BFGS-B", args = (gp, y))
    #     gp.set_parameter_vector(results.x)

    x_pred = np.linspace(np.amin(x), np.amax(x), res)
    y_pred, pred_var = gp.predict(y.ravel(), x_pred, return_var=True)

    return x_pred, y_pred
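
Hypothetical usage of the interpolator above on a coarse curve (any 1-D x and y of equal length work; the values here are made up):

import numpy as np

x = np.linspace(0.0, 1.0, 12)
y = np.sin(2.0 * np.pi * x) + 2.0
x_fine, y_fine = gp_interpolator(x, y, res=500, Nparam=3)
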
Example #6
def run(true_params, t=None, y=None, yerr=None, seed=1):
    '''true_params: alpha, loc, and sigma^2 of the true model.'''

    if t is None or y is None or yerr is None:
        print("Not enough inputs. Simulating data ...")
        np.random.seed(seed)
        t, y, yerr = generate_data(true_params, 50)
        plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
        plt.ylabel(r"$y$")
        plt.xlabel(r"$t$")
        plt.xlim(-5, 5)
        plt.title("simulated data")

    data = (t, y, yerr)
    truth_gp = [0.0, 0.0] + true_params
    print("Fitting GP ...")
    sampler = fit_gp(truth_gp, data)

    # plot 5 fitted curves
    samples = sampler.flatchain
    x = np.linspace(-5, 5, 500)
    plt.figure()
    plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
    for s in samples[np.random.randint(len(samples), size=5)]:
        gp = george.GP(np.exp(s[0]) * kernels.Matern32Kernel(np.exp(s[1])))
        gp.compute(t, yerr)
        m = gp.sample_conditional(y - model(s[2:], t), x) + model(s[2:], x)
        plt.plot(x, m, color="#4682b4", alpha=0.3)
    plt.ylabel(r"$y$")
    plt.xlabel(r"$t$")
    plt.xlim(-5, 5)
    plt.title("results with Gaussian process noise model")
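
This example relies on the external helpers generate_data, fit_gp, and model. Given the docstring's parameters (alpha, loc, and sigma^2), model is presumably a Gaussian feature along these lines (a sketch, not the original definition):

import numpy as np

def model(params, t):
    # Gaussian feature with amplitude alpha, center loc, and variance sigma2
    alpha, loc, sigma2 = params
    return alpha * np.exp(-0.5 * (t - loc) ** 2 / sigma2)
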
Example #7
def logLFunc_gp(params, data, model):
    """
    Calculate the ln-likelihood of the data given the model and its parameters.

    Parameters
    ----------
    params : list
        The variable parameter list of the model.
    data : DataSet
        The data to be fitted.
    model : ModelCombiner
        The model to fit the data.

    Returns
    -------
    lnL : float
        The ln likelihood.

    Notes
    -----
    None.
    """
    #Get the data and error
    xSpc = np.array(data.get_csList("x"))
    yPht = np.array(data.get_dsList("y"))
    ySpc = np.array(data.get_csList("y"))
    ePht = np.array(data.get_dsList("e"))
    eSpc = np.array(data.get_csList("e"))
    fPht = np.array(data.get_dsList("f"))
    #Calculate the model
    model.updateParList(params)
    yDict = Model2Data_gp(model, data)
    yPhtModel = np.array(yDict["pht"])
    ySpcModel = np.array(yDict["spc"])
    nParVary = len(model.get_parVaryList())
    #lnlikelihood for photometric data
    if len(yPhtModel):
        f = np.exp(params[nParVary]) #The parameter to control the model incompleteness
        nParVary += 1
        sPht = np.sqrt(ePht**2 + (yPhtModel * f)**2)
        lnlPht = -0.5 * ChiSq(yPht, yPhtModel, sPht, fPht)
    else:
        f = 0
        lnlPht = 0
    #lnlikelihood for spectral data using Gaussian process regression
    if len(ySpcModel):
        a, tau = np.exp(params[nParVary:]) #The covariance for the spectral residual
        a = a * data.spc_FluxMedian  #Make "a" a relative value
        tau = tau * data.spc_WaveLength  #Make "tau" a relative value
        gp = george.GP(a * kernels.Matern32Kernel(tau))
        sSpc = np.sqrt(eSpc**2 + (ySpcModel * f)**2)
        gp.compute(xSpc, sSpc)
        lnlSpc = gp.lnlikelihood(ySpc - ySpcModel)
    else:
        lnlSpc = 0
    lnL = lnlPht + lnlSpc
    return lnL
Example #8
 def create_kernel(self):
     # This function creates the covariance function kernel for the Gaussian Process
     if self.kern == 'SE':
         return self.sigma_f * kernels.ExpSquaredKernel(self.l_param, ndim=self.n_dim)
     elif self.kern == 'M32':
         return self.sigma_f * kernels.Matern32Kernel(self.l_param, ndim=self.n_dim)
     elif self.kern == 'M52':
         return self.sigma_f * kernels.Matern52Kernel(self.l_param, ndim=self.n_dim)
Example #9
 def create_kernel(self):
     if self.kern == 'SE':
         return self.sigma_f * kernels.ExpSquaredKernel(self.l_param,
                                                        ndim=self.n_dim)
     elif self.kern == 'M32':
         return self.sigma_f * kernels.Matern32Kernel(self.l_param,
                                                      ndim=self.n_dim)
     elif self.kern == 'M52':
         return self.sigma_f * kernels.Matern52Kernel(self.l_param,
                                                      ndim=self.n_dim)
Example #10
def lnlike(theta, m, d, w, sigma, t):
    print(theta)
    m.set_all(*theta[2:])
    a, tau = np.exp(theta[:2])
    gp = george.GP(a * kernels.Matern32Kernel(tau, ndim=2))
    #gp.compute(t, 1/np.sqrt(w))  # doesn't work.
    _m = m.broaden(sigma).flatten()
    #_m = m.source.flatten()
    gp.compute(t, _m / _m.sum() / np.sqrt(w))
    return gp.lnlikelihood(d.flatten() - _m)
Example #11
def fit_gp(sndata):
    bands = [band_to_wave[elem] for elem in sndata['passband']]

    mjdall = sndata['mjd']
    fluxall = sndata['flux']
    flux_errall = sndata['flux_err']

    #Want to compute the scale factor that we will use...
    signal_to_noises = np.abs(fluxall) / np.sqrt(flux_errall**2 +
                                                 (1e-2 * np.max(fluxall))**2)
    scale = np.abs(fluxall[signal_to_noises == np.max(signal_to_noises)])
    if len(scale) < 1:
        return None
    elif len(scale) > 1:
        scale = scale.to_numpy()[0]
    #print(scale)
    kernel = (0.5 * int(scale))**2 * kernels.Matern32Kernel(
        [length_scale**2, 6000**2], ndim=2)

    gp = george.GP(kernel)
    guess_parameters = gp.get_parameter_vector()

    x_data = np.vstack([mjdall, bands]).T
    gp.compute(x_data, flux_errall)

    def neg_ln_like(p):
        gp.set_parameter_vector(p)
        return -gp.log_likelihood(fluxall)

    def grad_neg_ln_like(p):
        gp.set_parameter_vector(p)
        return -gp.grad_log_likelihood(fluxall)

    bounds = [(guess_parameters[0] - 10, guess_parameters[0] + 10),
              (0, np.log(1000**2)),
              (None, None)]
    # check if result with/without bounds are the same

    try:
        fit_result = minimize(neg_ln_like,
                              gp.get_parameter_vector(),
                              jac=grad_neg_ln_like,
                              bounds=bounds)
        gp.set_parameter_vector(fit_result.x)
        gaussian_process = partial(gp.predict, fluxall)
    except ValueError:
        return None
    except np.linalg.LinAlgError:
        return None
    except TypeError:
        return None

    return gaussian_process
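
The function returns gp.predict with the training fluxes already bound, so the result can be evaluated directly on new coordinates. A hypothetical call (band_to_wave and length_scale are globals assumed to be defined elsewhere; the MJD grid and band key are made up):

import numpy as np

predictor = fit_gp(sndata)  # sndata assumed: dict-like light-curve table
if predictor is not None:
    mjd_grid = np.linspace(59000.0, 59100.0, 200)
    x_new = np.vstack([mjd_grid, np.full(200, band_to_wave['r'])]).T
    flux_pred, flux_var = predictor(x_new, return_var=True)
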
Example #12
    def __init__(self, *args, **kwargs):
        kwargs["median"] = False
        super(GPModel, self).__init__(*args, **kwargs)

        # Normalize the fluxes.
        mu = np.median(self.f)
        self.f /= mu
        self.fe /= mu

        # Set up the GP model.
        self.kernel = 1e-6 * kernels.Matern32Kernel(3.0)
        self.gp = george.GP(self.kernel, mean=_mean_function(self.system))
        self.gp.compute(self.t, self.fe)
Example #13
def variable_map(lmax=10, amp=0.1, tau=0.025, npts=300,
                 alpha=2.0, beta=-2.0, l0=2):
    """Generate variability using a GP."""
    time = np.linspace(0.0, 1.0, npts)
    y = np.zeros(((lmax + 1) ** 2, npts))
    y[0, :] = 1.0
    n = 1
    for l in range(1, lmax + 1):
        if hasattr(tau, "__len__"):
            kernel = (power(l, amp=amp, alpha=alpha, beta=beta, l0=l0)) ** 2 * \
                     kernels.Matern32Kernel([tau[0] ** 2,
                                             (tau[1] * (2 * l + 1)) ** 2], ndim=2)
            x = np.array([[ti, m] for ti in time for m in range(-l, l + 1)])
            gp = george.GP(kernel)
            sample = gp.sample(x).reshape(-1, 2 * l + 1).transpose()
        else:
            kernel = (power(l, amp=amp, alpha=alpha, beta=beta, l0=l0)) ** 2 * \
                     kernels.Matern32Kernel(tau ** 2)
            gp = george.GP(kernel)
            sample = np.array([gp.sample(time) for m in range(-l, l + 1)])
        y[n:n + 2 * l + 1, :] += sample
        n += 2 * l + 1
    return y
Example #14
def call_gp(params):
    log_sigma, log_rho, log_error_scale = params
    if GP_CODE == 'celerite':
        kernel = terms.Matern32Term(log_sigma=log_sigma, log_rho=log_rho)
        gp = celerite.GP(kernel, mean=MEAN, fit_mean=False) #log_white_noise=np.log(yerr), 
        gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
        return gp
    elif GP_CODE == 'george':
        kernel = np.exp(log_sigma) * kernels.Matern32Kernel(log_rho)
        gp = george.GP(kernel, mean=MEAN, fit_mean=False) #log_white_noise=np.log(yerr), 
        gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
        return gp
    else:
        raise ValueError("GP_CODE must be 'celerite' or 'george'.")
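
A hedged sketch of driving call_gp (the globals GP_CODE, MEAN, xx, yyerr, and err_norm are assumed to be set elsewhere; yy is a made-up name for the observations at xx):

import numpy as np

gp = call_gp([np.log(0.1), np.log(5.0), 0.0])  # log_sigma, log_rho, log_error_scale
lnl = gp.log_likelihood(yy)  # both backends expose log_likelihood
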
Example #15
def build_gp(guess_length_scale, sn_data, bands):
    """This is  all  taken from Avacado -
    see https://github.com/kboone/avocado/blob/master/avocado/astronomical_object.py
    In this a 2D matern kernal is used  to  model the transient. The kernel
    width in the wavelength direction is fixed. We fit for the kernel width
    in the time direction"""

    mjdall = sn_data['mjd']
    fluxall = sn_data['flux']
    flux_errall = sn_data['flux_err']

    #Want to compute the scale factor that we will use...
    signal_to_noises = np.abs(fluxall) / np.sqrt(flux_errall**2 +
                                                 (1e-2 * np.max(fluxall))**2)
    scale = np.abs(fluxall[np.argmax(signal_to_noises)])

    kernel = (0.5 * scale)**2 * kernels.Matern32Kernel(
        [guess_length_scale**2, 6000**2], ndim=2)

    gp = george.GP(kernel)
    guess_parameters = gp.get_parameter_vector()

    x_data = np.vstack([mjdall, bands]).T
    gp.compute(x_data, flux_errall)

    def neg_ln_like(p):
        gp.set_parameter_vector(p)
        return -gp.log_likelihood(fluxall)

    def grad_neg_ln_like(p):
        gp.set_parameter_vector(p)
        return -gp.grad_log_likelihood(fluxall)

    bounds = [(guess_parameters[0] - 10, guess_parameters[0] + 10),
              (0, np.log(1000**2)),
              (None, None)]
    # check if result with/without bounds are the same

    try:
        fit_result = minimize(neg_ln_like,
                              gp.get_parameter_vector(),
                              jac=grad_neg_ln_like,
                              bounds=bounds)
        gp.set_parameter_vector(fit_result.x)
        gaussian_process = partial(gp.predict, fluxall)
    except ValueError:
        return None

    return gaussian_process
Example #16
def generate_data(kernel, N=50, rng=(-5, 5)):
    var = np.random.randn() * 0.5
    if kernel == 'SE':
        gp = george.GP(0.1 * kernels.ExpSquaredKernel(5 + var))
    elif kernel == 'M32':
        gp = george.GP(0.1 * kernels.Matern32Kernel(5 + var))
    elif kernel == 'PER':
        gp = george.GP(0.1 * kernels.CosineKernel(log_period=0.25 + var / 8))
    elif kernel == 'LIN':
        gp = george.GP(0.1 *
                       kernels.LinearKernel(order=1, log_gamma2=1.25 + var))
    else:
        raise ValueError("kernel must be one of 'SE', 'M32', 'PER', or 'LIN'.")
    x = rng[0] + np.diff(rng) * np.sort(np.random.rand(N))
    y = gp.sample(x)
    return x, y
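
Hypothetical usage, drawing one realization per kernel family:

for name in ('SE', 'M32', 'PER', 'LIN'):
    x, y = generate_data(name, N=100)
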
Example #17
def call_gp(params):
    log_sigma, log_rho, log_error_scale, contrast, rv, fwhm = params
    if GP_CODE == 'celerite':
        mean_model = Model_celerite(Contrast=contrast, RV=rv, FWHM=fwhm)
        kernel = terms.Matern32Term(log_sigma=log_sigma, log_rho=log_rho)
        gp = celerite.GP(kernel, mean=mean_model) 
        gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
        return gp
    elif GP_CODE == 'george':
        mean_model = Model_george(Contrast=contrast, RV=rv, FWHM=fwhm)
        kernel = np.exp(log_sigma) * kernels.Matern32Kernel(log_rho)
        gp = george.GP(kernel, mean=mean_model)
        gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
        return gp
    else:
        raise ValueError('gp_code must be "celerite" or "george".')
Example #18
def lnlikeGP(theta, x, y):
    am, tau = np.exp(theta[:2])
    c, rp, a, inc, sigma = theta[2:7]
    alpha = theta[7:]
    gp = george.GP(am * kernels.Matern32Kernel(tau))
    gp.compute(x, sigma)
    params.rp = rp
    params.a = a
    params.inc = inc
    m = batman.TransitModel(params, x / 24.)
    pca_signals = np.sum(
        [alpha[i] * results.Y[:, i] for i in range(np.shape(alpha)[0])],
        axis=0)
    model = c + np.log(m.light_curve(params) + 1e-8) + pca_signals
    return gp.lnlikelihood(y - model_outGP(theta, x))
Example #19
    def get_kernel(self, p0):
        if george.__version__ == '0.3.1':
            p0 = np.exp(p0)
        k1 = kernels.ExpSquaredKernel(p0, ndim=len(p0))
        k2 = kernels.Matern32Kernel(p0, ndim=len(p0))
        k3 = kernels.ConstantKernel(0.1, ndim=len(p0))
        #k4 = kernels.WhiteKernel(0.1, ndim=len(p0))
        k5 = kernels.ConstantKernel(0.1, ndim=len(p0))

        kernel_dict = {
            'M32ExpConst': k1 * k5 + k2,
            'M32ExpConst2': k1 * k5 + k2 + k3,
            'M32Const': k2 + k5
        }
        assert self.kernel_name in kernel_dict, f"{self.kernel_name} not in dict!"
        kernel = kernel_dict[self.kernel_name]
        return kernel
Example #20
 def kernel_gp(self, kernelname='expsquared'):
     """ Kernel definition
     Standard kernels provided with george; these can be replaced with manually customized kernels.
     Users can change this function to add new kernels or combinations thereof as required.
     :param kernelname: name of kernel, either 'expsquared', 'matern32', or 'rationalq'
     """
     # Kernel for the spatial 2D Gaussian process. Default: Exponential Squared kernel.
     # Kernels are initialised with weight and length scale (1 by default).
     if kernelname == 'expsquared':
         k0 = 1. * kernels.ExpSquaredKernel(1., ndim=2)  # + kernels.WhiteKernel(1., ndim=2)
     # other possible kernels as well as combinations:
     elif kernelname == 'matern32':
         k0 = 1. * kernels.Matern32Kernel(1., ndim=2)
     # k2 = 2.4 ** 2 * kernels.ExpSquaredKernel(90 ** 2) * kernels.ExpSine2Kernel(2.0 / 1.3 ** 2, 1.0)
     elif kernelname == 'rationalq':
         k0 = 1. * kernels.RationalQuadraticKernel(0.78, 1.2, ndim=2)
     # k4 = kernels.WhiteKernel(1., ndim=2)
     return k0  # + k1 + k2 + ...
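
As the docstring notes, george kernels compose with + and *, so new combinations can be added in the same style, e.g. (a hypothetical mixture):

from george import kernels

k_combo = (1.0 * kernels.ExpSquaredKernel(1.0, ndim=2)
           + 0.5 * kernels.Matern32Kernel(2.0, ndim=2))
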
Example #21
def get_kernel(scale, fix_scale, length_scale):
    """Return a Matern 3/2 Kernel

    Args:
        scale        (float): Initial estimate of the scale
        fix_scale     (bool): Fix the scale to the initial estimate
        length_scale (float): Initial kernel length scale in days

    Returns:
        A Matern32Kernel object
    """

    kernel = ((0.5 * scale)**2 *
              kernels.Matern32Kernel([length_scale**2, 6000**2], ndim=2))
    kernel.freeze_parameter('k2:metric:log_M_1_1')

    if fix_scale:
        kernel.freeze_parameter('k1:log_constant')

    return kernel
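
Typical use of this kernel factory (the values are illustrative):

import george

kernel = get_kernel(scale=5.0, fix_scale=False, length_scale=20.0)
gp = george.GP(kernel)
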
Example #22
def gp_interpolator(x, y, res=1000, Nparam=3):

    yerr = np.zeros_like(y)
    yerr[2:(2 + Nparam)] = 0.001 / np.sqrt(Nparam)
    if len(yerr) > 26:
        yerr[2:(2 + Nparam)] = 0.1 / np.sqrt(Nparam)

    #kernel = np.var(yax) * kernels.ExpSquaredKernel(np.median(yax)+np.std(yax))
    #k2 = np.var(yax) * kernels.LinearKernel(np.median(yax),order=1)
    #kernel = np.var(y) * kernels.Matern32Kernel(np.median(y)) #+ k2
    kernel = np.var(y) * (kernels.Matern32Kernel(np.median(y)) + kernels.LinearKernel(np.median(y), order=2))
    gp = george.GP(kernel)

    #print(xax.shape, yerr.shape)

    gp.compute(x.ravel(), yerr.ravel())

    x_pred = np.linspace(np.amin(x), np.amax(x), res)
    y_pred, pred_var = gp.predict(y.ravel(), x_pred, return_var=True)
    return x_pred, y_pred
Example #23
def get_kernel(architecture, kernel_name, domain_name_lst, n_dims):

    if architecture == "trans":
        mapping = trans_domain_kernel_mapping
    else:
        mapping = None

    kernel = None
    initial_ls = np.ones([n_dims])

    if kernel_name == "constant":
        kernel = kernels.ConstantKernel(1, ndim=n_dims)
    elif kernel_name == "polynomial":
        kernel = kernels.PolynomialKernel(log_sigma2=1, order=3, ndim=n_dims)
    elif kernel_name == "linear":
        kernel = kernels.LinearKernel(log_gamma2=1, order=3, ndim=n_dims)
    elif kernel_name == "dotproduct":
        kernel = kernels.DotProductKernel(ndim=n_dims)
    elif kernel_name == "exp":
        kernel = kernels.ExpKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "expsquared":
        kernel = kernels.ExpSquaredKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern32":
        kernel = kernels.Matern32Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern52":
        kernel = kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "rationalquadratic":
        kernel = kernels.RationalQuadraticKernel(log_alpha=1,
                                                 metric=initial_ls,
                                                 ndim=n_dims)
    elif kernel_name == "expsine2":
        kernel = kernels.ExpSine2Kernel(1, 2, ndim=n_dims)
    elif kernel_name == "heuristic":
        kernel = mapping[domain_name_lst[0]](ndim=n_dims, axes=0)
        for i, d in enumerate(domain_name_lst[1:], start=1):
            kernel += mapping[d](ndim=n_dims, axes=i)
    elif kernel_name == "logsquared":
        kernel = kernels.LogSquaredKernel(initial_ls, ndim=n_dims)

    return kernel
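
An illustrative call (the architecture and domain names are made up; the "heuristic" branch additionally requires trans_domain_kernel_mapping):

kernel = get_kernel("mlp", "matern32", ["domain_a"], n_dims=4)
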
Example #24
 def init_gps(self):
     self.gps = []
     for i in range(self.nparams):
         if self.k_type[i] == 0:
             kernel = kernels.ConstantKernel(
                 self.logyvars[i], ndim=6) * kernels.ExpSquaredKernel(
                     metric=np.eye(6), ndim=6)
         elif self.k_type[i] == 1:
             kernel = kernels.ConstantKernel(self.logyvars[i],
                                             ndim=6) * kernels.ExpKernel(
                                                 metric=np.eye(6), ndim=6)
         elif self.k_type[i] == 2:
             kernel = kernels.ConstantKernel(
                 self.logyvars[i], ndim=6) * kernels.Matern32Kernel(
                     metric=np.eye(6), ndim=6)
         elif self.k_type[i] == 3:
             kernel = kernels.ConstantKernel(
                 self.logyvars[i], ndim=6) * kernels.Matern52Kernel(
                     metric=np.eye(6), ndim=6)
         else:
             raise ValueError("k_type entries must be 0, 1, 2, or 3.")
         tmp = george.GP(kernel)  # copy.copy of a fresh GP object was a no-op
         tmp.compute(self.xall, self.yerr[:, i])
         self.gps.append(tmp)
Example #25
    def get_kernel(self, kernel_name, i):
        """get individual kernels"""
        metric = (1. / np.exp(self.coeffs[kernel_name]))**2
        if self.gp_code_name == 'george':
            if kernel_name == 'Matern32':
                kernel = kernels.Matern32Kernel(metric,
                                                ndim=self.nkernels,
                                                axes=i)
            if kernel_name == 'ExpSquared':
                kernel = kernels.ExpSquaredKernel(metric,
                                                  ndim=self.nkernels,
                                                  axes=i)
            if kernel_name == 'RationalQuadratic':
                kernel = kernels.RationalQuadraticKernel(log_alpha=1,
                                                         metric=metric,
                                                         ndim=self.nkernels,
                                                         axes=i)
            if kernel_name == 'Exp':
                kernel = kernels.ExpKernel(metric, ndim=self.nkernels, axes=i)

        if self.gp_code_name == 'tinygp':
            if kernel_name == 'Matern32':
                kernel = tinygp.kernels.Matern32(metric,
                                                 ndim=self.nkernels,
                                                 axes=i)
            if kernel_name == 'ExpSquared':
                kernel = tinygp.kernels.ExpSquared(metric,
                                                   ndim=self.nkernels,
                                                   axes=i)
            if kernel_name == 'RationalQuadratic':
                kernel = tinygp.kernels.RationalQuadratic(alpha=1,
                                                          scale=metric,
                                                          ndim=self.nkernels,
                                                          axes=i)
            if kernel_name == 'Exp':
                kernel = tinygp.kernels.Exp(metric, ndim=self.nkernels, axes=i)

        return kernel
Example #26
    def get_gp(self, Kernel="Exp", amplitude=1e-3, metric=10., gamma=10., period=10.):
        """
        Citlalicue uses the kernels provided by george;
        the current options are "Exp", "Matern32", "Matern52", and the Quasi-Periodic "QP".
        Users can modify the hyperparameters amplitude, metric, gamma, and period.
        """
        import george
        from george import kernels
        if Kernel == "Matern32":
            kernel = amplitude * kernels.Matern32Kernel(metric)
        elif Kernel == "Matern52":
            kernel = amplitude * kernels.Matern52Kernel(metric)
        elif Kernel == "Exp":
            kernel = amplitude * kernels.ExpKernel(metric)
        elif Kernel == "QP":
            log_period = np.log(period)
            kernel = amplitude * kernels.ExpKernel(metric) * kernels.ExpSine2Kernel(gamma, log_period)

        #Save the kernel as an attribute
        self.kernel = kernel
        #Compute the kernel with george
        self.gp = george.GP(self.kernel, mean=1)
        #We compute the covariance matrix using the binned data
        self.gp.compute(self.time_bin, self.ferr_bin)
Example #27
    def __init__(self, x, y, p0, pmin, pmax, kernel='sqexp', yerr=None):
        """
        :param x: simulation coordinates [in unit hypercube space]
        :param y: data values
        :param p0: initial values of kernel paramters
        :param pmin: lower sampling boundaries of kernel parameters
        :param pmax: upper sampling boundaries of kernel parameters
        :param kernel: string denoting kernel function type [default = sqexp]
        :param yerr: uncertainties on data values
        :return gaussproc: instance of a George GP object
        """
        self.x = x  # simulation coordinates
        self.y = y  # data values
        self.yerr = yerr  # uncertainties
        self.kernel = kernel  # kernel-type: other choices are matern32, matern52
        self.gaussproc = None

        self.p0 = p0  # initial value for kernel parameters
        a, tau = 10.0**self.p0[0], 10.0**self.p0[1:]
        if self.kernel == 'sqexp':
            self.gaussproc = george.GP(
                a * kernels.ExpSquaredKernel(tau, ndim=len(tau)))
        elif self.kernel == 'matern32':
            self.gaussproc = george.GP(
                a * kernels.Matern32Kernel(tau, ndim=len(tau)))
        elif self.kernel == 'matern52':
            self.gaussproc = george.GP(
                a * kernels.Matern52Kernel(tau, ndim=len(tau)))

        self.gaussproc.compute(self.x, self.yerr)

        self.pmax = pmax  # sampling max
        self.pmin = pmin  # sampling min
        self.emcee_flatchain = None
        self.emcee_flatlnprob = None
        self.emcee_kernel_map = None
Example #28
def lnlike_gp(p, t, y, yerr):
    a, tau = np.exp(p[:2])
    gp = george.GP(a * kernels.Matern32Kernel(tau))
    gp.compute(t, yerr)
    return gp.lnlikelihood(y - model(p[2:], t))
Example #29
    labels = [r"$\alpha$", r"$\ell$", r"$\sigma^2$"]
    fig = triangle.corner(samples[:, 2:], truths=truth, labels=labels)
    fig.savefig("../_static/model/ind-corner.png", dpi=150)

    # Fit assuming GP.
    print("Fitting GP")
    data = (t, y, yerr)
    truth_gp = [0.0, 0.0] + truth
    sampler = fit_gp(truth_gp, data)

    # Plot the samples in data space.
    print("Making plots")
    samples = sampler.flatchain
    x = np.linspace(-5, 5, 500)
    pl.figure()
    pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
    for s in samples[np.random.randint(len(samples), size=24)]:
        gp = george.GP(np.exp(s[0]) * kernels.Matern32Kernel(np.exp(s[1])))
        gp.compute(t, yerr)
        m = gp.sample_conditional(y - model(s[2:], t), x) + model(s[2:], x)
        pl.plot(x, m, color="#4682b4", alpha=0.3)
    pl.ylabel(r"$y$")
    pl.xlabel(r"$t$")
    pl.xlim(-5, 5)
    pl.title("results with Gaussian process noise model")
    pl.savefig("../_static/model/gp-results.png", dpi=150)

    # Make the corner plot.
    fig = triangle.corner(samples[:, 2:], truths=truth, labels=labels)
    fig.savefig("../_static/model/gp-corner.png", dpi=150)
Example #30
# def lnprob(p):
#     model.set_parameter_vector(p)
#     return model.log_likelihood(y, quiet=True) + model.log_prior()       

#==============================================================================
# GP
#==============================================================================
from george import kernels

truth = dict(P1=8., tau1=1., k1=np.std(y)/100, w1=0., e1=0.4,
             P2=100, tau2=1., k2=np.std(y)/100, w2=0., e2=0.4, offset1=0., offset2=0.)
kwargs = dict(**truth)
kwargs["bounds"] = dict(P1=(7,9), k1=(0,0.1), w1=(-2*np.pi,2*np.pi), e1=(0,0.8),
                        tau2=(-50,50), k2=(0,0.2), w2=(-2*np.pi,2*np.pi), e2=(0,0.8))
mean_model = Model(**kwargs)
gp = george.GP(np.var(y) * kernels.Matern32Kernel(10.0), mean=mean_model)
gp.compute(t, yerr)

def lnprob2(p):
    gp.set_parameter_vector(p)
    return gp.log_likelihood(y, quiet=True) + gp.log_prior()           

#==============================================================================
# MCMC
#==============================================================================
import emcee

initial = gp.get_parameter_vector()
ndim, nwalkers = len(initial), 32
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob2, threads=14)
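
A sketch of the usual next step: scatter the walkers around the initial parameter vector and run the sampler (the step count here is arbitrary):

p0 = initial + 1e-8 * np.random.randn(nwalkers, ndim)
sampler.run_mcmc(p0, 1000)
samples = sampler.flatchain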