Example #1
    def predict(self, xpred):
        """ Realize the GP using the current values of the hyperparameters at values x=xpred.
            Used for making GP plots. Wrapper around `celerite.GP.predict()`.

            Args:
                xpred (np.array): numpy array of x values for realizing the GP
            Returns:
                tuple: tuple containing:
                    np.array: numpy array of predictive means \n
                    np.array: numpy array of predictive standard deviations
        """

        self.update_kernel_params()

        # build celerite kernel with current values of hparams
        kernel = celerite.terms.JitterTerm(
            log_sigma=np.log(self.params[self.jit_param].value))
        for i in np.arange(self.kernel.num_terms):
            kernel = kernel + celerite.terms.ComplexTerm(
                log_a=self.kernel.hparams[i, 0],
                log_b=self.kernel.hparams[i, 1],
                log_c=self.kernel.hparams[i, 2],
                log_d=self.kernel.hparams[i, 3])
        gp = celerite.GP(kernel)
        gp.compute(self.x, self.yerr)
        mu, var = gp.predict(self.y - self.params[self.gamma_param].value,
                             xpred,
                             return_var=True)

        stdev = np.sqrt(var)

        return mu, stdev
Example #2
def GPfit(times, fluxes, errors, nonmask_idxs):
	#t, y, yerr = times[np.isfinite(times)], fluxes[np.isfinite(fluxes)], errors[np.isfinite(errors)]
	t, y, yerr = times[nonmask_idxs], fluxes[nonmask_idxs], errors[nonmask_idxs]
	t, y, yerr = t[np.isfinite(t)], y[np.isfinite(y)], yerr[np.isfinite(yerr)]

	Q = 1.0
	w0 = 3.0
	S0 = np.var(y) / (w0 * Q)
	bounds = dict(log_S0=(-15, 15), log_Q=(-15, 15), log_omega0=(-15, 15))
	kernel = terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0), bounds=bounds)
	#kernel += terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0), bounds=bounds)


	gp = celerite.GP(kernel, mean=np.mean(y))
	gp.compute(t, yerr)  # You always need to call compute once.
	print("Initial log likelihood: {0}".format(gp.log_likelihood(y)))

	initial_params = gp.get_parameter_vector()
	bounds = gp.get_parameter_bounds()

	r = minimize(neg_log_like, initial_params, method="L-BFGS-B", bounds=bounds, args=(y, gp))
	gp.set_parameter_vector(r.x)
	print(r)

	#interpolated_times = np.linspace(np.nanmin(t), np.nanmax(t), 1000)
	#interpolated_epochs = np.linspace(np.nanmin(t), np.nanmax(t), 1000)
	#pred_mean, pred_var = gp.predict(y, interpolated_times, return_var=True)
	pred_mean, pred_var = gp.predict(y, times, return_var=True)
	pred_std = np.sqrt(pred_var)

	return pred_mean, pred_std ### should be the same dimension as times.
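GPfit relies on a module-level neg_log_like objective that is not shown in this snippet. A minimal sketch consistent with the call site (the signature follows from args=(y, gp); the body below is an assumption matching the standard celerite optimization pattern, not the project's own code):

def neg_log_like(params, y, gp):
    # Assumed helper: update the hyperparameters, then return the negative
    # log likelihood for scipy.optimize.minimize.
    gp.set_parameter_vector(params)
    return -gp.log_likelihood(y)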
Example #3
    def _lnlikelihood(self, theta, resample=False):
        """Log-likelihood function for the MCMC."""

        # Unpack the free parameters.

        vrot, noise, lnsigma, lnrho = theta

        # Deproject the data and resample if requested.

        if resample:
            x, y = self.deprojected_spectrum(vrot)
        else:
            x, y = self.deprojected_spectra(vrot)

        # Mask out points beyond the velocity range. This is not strictly
        # necessary, but it speeds up the fit and removes regions with
        # sparse sampling.

        mask = np.logical_and(x > self.velax_mask[0], x < self.velax_mask[1])
        x, y = x[mask], y[mask]

        # Build the GP model.

        k_noise = celerite.terms.JitterTerm(log_sigma=np.log(noise))
        k_line = celerite.terms.Matern32Term(log_sigma=lnsigma, log_rho=lnrho)
        gp = celerite.GP(k_noise + k_line, mean=np.nanmean(y), fit_mean=True)

        # Calculate and return the log-likelihood.

        try:
            gp.compute(x)
        except Exception:
            return -np.inf
        ll = gp.log_likelihood(y, quiet=True)
        return ll if np.isfinite(ll) else -np.inf
Example #4
def baseline_hybrid_GP(*args):
    x, y, yerr_w, xx, params, inst, key = args

    kernel = terms.Matern32Term(log_sigma=1., log_rho=1.)
    gp = celerite.GP(kernel, mean=np.nanmean(y))
    gp.compute(x, yerr=yerr_w)  # condition on x/y/yerr

    def neg_log_like(gp_params, y, gp):
        gp.set_parameter_vector(gp_params)
        return -gp.log_likelihood(y)

    def grad_neg_log_like(gp_params, y, gp):
        gp.set_parameter_vector(gp_params)
        return -gp.grad_log_likelihood(y)[1]

    initial_params = gp.get_parameter_vector()
    bounds = gp.get_parameter_bounds()
    soln = minimize(neg_log_like,
                    initial_params,
                    jac=grad_neg_log_like,
                    method="L-BFGS-B",
                    bounds=bounds,
                    args=(y, gp))
    gp.set_parameter_vector(soln.x)

    baseline = gp.predict(y, xx)[0]  # conditioned on x/y/yerr, evaluated at xx (!)
    return baseline
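A hypothetical usage sketch for baseline_hybrid_GP on synthetic data; all inputs below are made up, and params, inst, and key are passed as placeholders since the GP itself never touches them here:

import numpy as np

x = np.sort(np.random.uniform(0, 10, 200))     # observed times
y = np.sin(x) + 0.1 * np.random.randn(len(x))  # noisy signal
yerr_w = np.full_like(x, 0.1)                  # white-noise uncertainties
xx = np.linspace(0, 10, 500)                   # dense grid for the baseline

baseline = baseline_hybrid_GP(x, y, yerr_w, xx, None, None, None)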
Example #5
def gp_sho(idx, data, resid, params):
    logQ, logw, logS, log_jit = params
    err = data.err / data.flux
    #logerr = np.log(np.median(err))
    logerr = log_jit

    #print "logQ, logw, logS", logQ, logw, logS

    mean_resid = np.mean(resid)
    #resid -= mean_resid

    t = data.t_vis[idx]

    kernel = (terms.SHOTerm(log_S0=logS, log_Q=logQ, log_omega0=logw) +
              terms.JitterTerm(log_sigma=logerr))

    gp = celerite.GP(kernel, fit_mean=True)
    gp.compute(t, err, check_sorted=False)  #t must be ascending!

    mu = gp.predict(resid, t, return_cov=False)
    gp_lnlike = gp.log_likelihood(resid)
    """plt.errorbar(t, resid, err, fmt = '.k')
    plt.plot(t, mu)

    x = np.linspace(np.min(t), np.max(t), 1000)
    pred_mean, pred_var = gp.predict(resid, x, return_var = True)
    pred_std = np.sqrt(pred_var)
    plt.fill_between(x, pred_mean+pred_std, pred_mean-pred_std, color='blue', alpha=0.3)
    
    plt.show()"""

    #np.save("resid", [t,  resid, err])

    #return [1.0 + np.array(mu) + mean_resid, gp_lnlike]
    return [1.0 + np.array(mu), gp_lnlike]
Example #6
    def compute_gp(self, params, white=0, return_kernel=True):

        kernel = self._compute_modes_kernel(params, white=white)
        gp = celerite.GP(kernel)
        if return_kernel:
            return kernel, gp
        return gp
Example #7
def sim_lc(t, ppm, per_aper_ratio, periodic_freq, aperiodic_freq, A, planet=False, mean=1.0, planet_params=[0, 0, 0, 0, 0, 0, 0, 0, 0]):

    if planet:
        mean = transit_model(*planet_params, t)

    aperiodic_sigma = 1/per_aper_ratio
    periodic_sigma = 1
    white_noise_sigma = ppm/1e6

    # set up a gaussian process with two components and white noise

    # non-periodic component
    Q = 1. / np.sqrt(2.0)  # related to the frequency of the variability
    w0 = 2*np.pi*aperiodic_freq
    S0 = (aperiodic_sigma**2.) / (w0 * Q)
    bounds = dict(log_S0=(-15, 15), log_Q=(-15, 15), log_omega0=(-15, 15))
    kernel = terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0), bounds=bounds)

    # periodic component
    Q = 1.0
    w0 = 2*np.pi*periodic_freq
    S0 = (periodic_sigma**2.) / (w0 * Q)
    kernel += terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0), bounds=bounds)

    # white noise
    # kernel += terms.JitterTerm(log_sigma=np.log(white_noise_sigma), bounds=dict(log_sigma=(-15,15)))

    gp = celerite.GP(kernel, mean=0, fit_mean=True, fit_white_noise=True)
    gp.compute(t, white_noise_sigma)

    return A * gp.sample() + mean + white_noise_sigma * np.random.randn(len(t))
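A hypothetical call to sim_lc, simulating a 27-day baseline at 2-minute cadence (values are illustrative only; numpy, celerite, and celerite.terms are assumed to be imported at module level):

import numpy as np

t = np.arange(0, 27.0, 2.0 / 60.0 / 24.0)  # 27 days sampled every 2 minutes
flux = sim_lc(t, ppm=200.0, per_aper_ratio=2.0,
              periodic_freq=1.5, aperiodic_freq=0.5, A=1e-3)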
Example #8
def test_gp(celerite_kernel, seed=1234):
    import celerite
    import celerite.terms as cterms  # NOQA

    celerite_kernel = eval(celerite_kernel)
    np.random.seed(seed)
    x = np.sort(np.random.rand(100))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)
    diag = yerr**2

    celerite_gp = celerite.GP(celerite_kernel)
    celerite_gp.compute(x, yerr)
    celerite_loglike = celerite_gp.log_likelihood(y)
    celerite_mu, celerite_cov = celerite_gp.predict(y)
    _, celerite_var = celerite_gp.predict(y, return_cov=False, return_var=True)

    kernel = _get_theano_kernel(celerite_kernel)
    gp = GP(kernel, x, diag)
    loglike = gp.log_likelihood(y).eval()

    assert np.allclose(loglike, celerite_loglike)

    mu = gp.predict()
    _, var = gp.predict(return_var=True)
    _, cov = gp.predict(return_cov=True)
    assert np.allclose(mu.eval(), celerite_mu)
    assert np.allclose(var.eval(), celerite_var)
    assert np.allclose(cov.eval(), celerite_cov)
Example #9
def generate_solar_fluxes(duration, cadence=60 * u.s, seed=None):
    """
    Generate an array of fluxes with zero mean which mimic the power spectrum of
    the SOHO/VIRGO SPM observations.
    
    Parameters
    ----------
    duration : `~astropy.units.Quantity`
        Duration of simulated observations to generate.
    cadence : `~astropy.units.Quantity`
        Length of time between fluxes
    seed : int, optional
        Random seed.
    
    Returns
    -------
    times : `~astropy.units.Quantity`
        Array of times at cadence ``cadence`` of length ``duration/cadence``
    fluxes : `~numpy.ndarray`
        Array of fluxes at cadence ``cadence`` of length ``duration/cadence``
    kernel : `~celerite.terms.TermSum`
        Celerite kernel used to approximate the solar power spectrum.
    """
    if seed is not None:
        np.random.seed(seed)

    _process_inputs(duration, cadence, 5777 * u.K)

    ##########################
    # Assemble celerite kernel
    ##########################
    parameter_vector = np.copy(PARAM_VECTOR)

    nterms = len(parameter_vector) // 3

    kernel = terms.SHOTerm(log_S0=0, log_omega0=0, log_Q=0)

    for term in range(nterms - 1):
        kernel += terms.SHOTerm(log_S0=0, log_omega0=0, log_Q=0)

    kernel.set_parameter_vector(parameter_vector)

    gp = celerite.GP(kernel)

    times = np.arange(0, duration.to(u.s).value, cadence.to(u.s).value) * u.s
    x = times.value

    gp.compute(x, check_sorted=False)

    ###################################
    # Get samples with the kernel's PSD
    ###################################

    y = gp.sample()
    # Remove a linear trend:
    y -= np.polyval(np.polyfit(x - x.mean(), y, 1), x - x.mean())

    return times, y, kernel
Example #10
def test_consistency(oterm, mean, data):
    x, diag, y, t = data

    # Setup the original GP
    original_gp = original_celerite.GP(oterm, mean=mean)
    original_gp.compute(x, np.sqrt(diag))

    # Setup the new GP
    term = terms.OriginalCeleriteTerm(oterm)
    gp = celerite2.GaussianProcess(term, mean=mean)
    gp.compute(x, diag=diag)

    # "log_likelihood" method
    assert np.allclose(original_gp.log_likelihood(y), gp.log_likelihood(y))

    # "predict" method
    for args in [
            dict(return_cov=False, return_var=False),
            dict(return_cov=False, return_var=True),
            dict(return_cov=True, return_var=False),
    ]:
        assert all(
            np.allclose(a, b) for a, b in zip(
                original_gp.predict(y, **args),
                gp.predict(y, **args),
            ))
        assert all(
            np.allclose(a, b) for a, b in zip(
                original_gp.predict(y, t=t, **args),
                gp.predict(y, t=t, **args),
            ))

    # "sample" method
    seed = 5938
    np.random.seed(seed)
    a = original_gp.sample()
    np.random.seed(seed)
    b = gp.sample()
    assert np.allclose(a, b)

    np.random.seed(seed)
    a = original_gp.sample(size=10)
    np.random.seed(seed)
    b = gp.sample(size=10)
    assert np.allclose(a, b)

    # "sample_conditional" method, numerics make this one a little unstable;
    # just check the shape
    a = original_gp.sample_conditional(y, t=t)
    b = gp.sample_conditional(y, t=t)
    assert a.shape == b.shape

    a = original_gp.sample_conditional(y, size=10)
    b = gp.sample_conditional(y, size=10)
    assert a.shape == b.shape
Example #11
def test_consistency(oterm, mean, data):
    x, diag, y, t = data

    # Setup the original GP
    original_gp = original_celerite.GP(oterm, mean=mean)
    original_gp.compute(x, np.sqrt(diag))

    # Setup the new GP
    term = terms.OriginalCeleriteTerm(oterm)
    gp = celerite2.GaussianProcess(term, mean=mean)
    gp.compute(x, diag=diag)

    # "log_likelihood" method
    assert np.allclose(original_gp.log_likelihood(y), gp.log_likelihood(y))

    # Apply inverse
    assert np.allclose(
        np.squeeze(original_gp.apply_inverse(y)), gp.apply_inverse(y)
    )

    conditional_t = gp.condition(y, t=t)
    mu, cov = original_gp.predict(y, t=t, return_cov=True)
    assert np.allclose(conditional_t.mean, mu)
    assert np.allclose(conditional_t.variance, np.diag(cov))
    assert np.allclose(conditional_t.covariance, cov)

    conditional = gp.condition(y)
    mu, cov = original_gp.predict(y, return_cov=True)
    assert np.allclose(conditional.mean, mu)
    assert np.allclose(conditional.variance, np.diag(cov))
    assert np.allclose(conditional.covariance, cov)

    # "sample" method
    seed = 5938
    np.random.seed(seed)
    a = original_gp.sample()
    np.random.seed(seed)
    b = gp.sample()
    assert np.allclose(a, b)

    np.random.seed(seed)
    a = original_gp.sample(size=10)
    np.random.seed(seed)
    b = gp.sample(size=10)
    assert np.allclose(a, b)

    # "sample_conditional" method, numerics make this one a little unstable;
    # just check the shape
    a = original_gp.sample_conditional(y, t=t)
    b = conditional_t.sample()
    assert a.shape == b.shape

    a = original_gp.sample_conditional(y, size=10)
    b = conditional.sample(size=10)
    assert a.shape == b.shape
Example #12
def generate_solar_fluxes(size,
                          cadence=60 * u.s,
                          parameter_vector=parameter_vector):
    """
    Generate an array of fluxes with zero mean which mimic the power spectrum of
    the SOHO/VIRGO SPM observations.

    Parameters
    ----------
    size : int
        Number of fluxes to generate. Note: assumes ``size`` >> 500.
    cadence : `~astropy.units.Quantity`
        Length of time between fluxes.
    parameter_vector : `~numpy.ndarray`
        Celerite kernel parameters, three per SHO term.

    Returns
    -------
    y : `~numpy.ndarray`
        Array of fluxes at cadence ``cadence`` of length ``size``.
    kernel : `~celerite.terms.TermSum`
        Celerite kernel used to approximate the solar power spectrum.
    """
    nterms = len(parameter_vector) // 3

    kernel = terms.SHOTerm(log_S0=0, log_omega0=0, log_Q=0)

    for term in range(nterms - 1):
        kernel += terms.SHOTerm(log_S0=0, log_omega0=0, log_Q=0)

    kernel.set_parameter_vector(parameter_vector)

    gp = celerite.GP(kernel)

    x = np.arange(0, size // 500, cadence.to(u.s).value)
    gp.compute(x, check_sorted=False)

    y = gp.sample(500)

    y_concatenated = []

    for i, yi in enumerate(y):
        xi = np.arange(len(yi))
        fit = np.polyval(np.polyfit(xi - xi.mean(), yi, 1), xi - xi.mean())
        yi -= fit

        if i == 0:
            y_concatenated.append(yi)
        else:
            offset = yi[0] - y_concatenated[i - 1][-1]
            y_concatenated.append(yi - offset)
    y_concatenated = np.hstack(y_concatenated)

    x_c = np.arange(len(y_concatenated))

    y_concatenated -= np.polyval(
        np.polyfit(x_c - x_c.mean(), y_concatenated, 1), x_c - x_c.mean())

    return y_concatenated, kernel
Example #13
def find_celerite_MAP(t,
                      y,
                      yerr,
                      sigma0=0.1,
                      tau0=100,
                      prior='None',
                      set_bounds=True,
                      sig_lims=[0.02, 0.7],
                      tau_lims=[1, 550],
                      verbose=False):

    kernel = terms.RealTerm(log_a=2 * np.log(sigma0), log_c=np.log(1.0 / tau0))
    gp = celerite.GP(kernel, mean=np.mean(y))
    gp.compute(t, yerr)

    # set initial params
    initial_params = gp.get_parameter_vector()
    if verbose:
        print(initial_params)

    # set boundaries
    if set_bounds:
        if verbose:
            print('sig_lims:', sig_lims, 'tau_lims:', tau_lims)
        tau_bounds, sigma_bounds = tau_lims, sig_lims
        loga_bounds = (2 * np.log(min(sigma_bounds)),
                       2 * np.log(max(sigma_bounds)))
        logc_bounds = (np.log(1 / max(tau_bounds)),
                       np.log(1 / min(tau_bounds)))
        bounds = [loga_bounds, logc_bounds]

    else:  # - inf to + inf
        bounds = gp.get_parameter_bounds()
    if verbose:
        print(bounds)

    # wrap the neg_log_posterior for a chosen prior
    def neg_log_like(params, y, gp):
        return neg_log_posterior(params, y, gp, prior, 'celerite')

    # find MAP solution
    r = minimize(neg_log_like,
                 initial_params,
                 method="L-BFGS-B",
                 bounds=bounds,
                 args=(y, gp))
    gp.set_parameter_vector(r.x)
    res = gp.get_parameter_dict()

    tau_fit = np.exp(-res['kernel:log_c'])
    sigma_fit = np.exp(res['kernel:log_a'] / 2)
    if verbose:
        print('sigma_fit', sigma_fit, 'tau_fit', tau_fit)
    return sigma_fit, tau_fit, gp
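The RealTerm above encodes a damped-random-walk kernel, k(dt) = sigma^2 * exp(-dt / tau) = a * exp(-c * dt), which is why the fit is unpacked as sigma_fit = exp(log_a / 2) and tau_fit = exp(-log_c). A quick standalone sanity check of that round trip (not part of the original module):

import numpy as np

sigma0, tau0 = 0.2, 150.0
log_a, log_c = 2 * np.log(sigma0), np.log(1.0 / tau0)
assert np.isclose(np.exp(log_a / 2), sigma0)  # sigma = exp(log_a / 2)
assert np.isclose(np.exp(-log_c), tau0)       # tau = exp(-log_c)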
Example #14
def _build_kernel(x, y, hyperparams):
    """Build the GP. Returns None if ``gp.compute(x)`` fails."""
    noise, lnsigma, lnrho = hyperparams
    k_noise = celerite.terms.JitterTerm(log_sigma=np.log(noise))
    k_line = celerite.terms.Matern32Term(log_sigma=lnsigma, log_rho=lnrho)
    gp = celerite.GP(k_noise + k_line, mean=np.nanmean(y), fit_mean=True)
    try:
        gp.compute(x)
    except Exception:
        return None
    return gp
Example #15
    def log_probability(self, params, band):
        """
        Calculate log of posterior probability

        Parameters
        -----------
        params : iterable
            list of parameter values
        band : string
            SDSS/HiPERCAM band
        """
        self.set_parameter_vector(params)
        t, _, y, ye, _, _ = np.loadtxt(self.lightcurves[band]).T

        # check model params are valid - checks against bounds
        lp = self.log_prior()
        if not np.isfinite(lp):
            return -np.inf

        # make a GP for this band, if it doesn't already exist
        if not hasattr(self, 'gpdict'):
            self.gpdict = dict()

        # Oscillation params
        pulsation_amp = self.pulse_amp * scale_pulsation(band, self.pulse_temp)

        if band not in self.gpdict:
            kernel = terms.SHOTerm(np.log(pulsation_amp), np.log(self.pulse_q),
                                   np.log(self.pulse_omega))
            gp = celerite.GP(kernel)
            gp.compute(t, ye)
            self.gpdict[band] = gp
        else:
            gp = self.gpdict[band]
            gp.set_parameter_vector(
                (np.log(pulsation_amp), np.log(self.pulse_q),
                 np.log(self.pulse_omega)))
            gp.compute(t, ye)

        # now add prior of Gaussian process
        lp += gp.log_prior()
        if not np.isfinite(lp):
            return -np.inf

        try:
            ym = self.get_value(band)
        except ValueError as err:
            # invalid lcurve params
            print('warning: model failed ', err)
            return -np.inf
        else:
            return gp.log_likelihood(y - ym) + lp
Example #16
    def _lnlike(self, theta):
        """Log-likelihood with chi-squared likelihood function."""
        models = self._calculatemodels(theta)
        if not self.GP:
            return self._chisquared(models)
        lnx2 = 0.0
        _, _, _, _, _, sigs, corrs = self._parse(theta)
        for i in range(self.ntrans):
            rho = M32(log_sigma=sigs[i], log_rho=corrs[i])
            gp = celerite.GP(rho)
            gp.compute(self.velaxs[i], self.rms[i])
            lnx2 += gp.log_likelihood(models[i] - self.spectra[i])
        return np.nansum(lnx2)
Example #17
    def _compute_gp(self):
        # Compute the GP prior on the spectrum
        if self._lnlam is None:
            pass
        if self.s_rho > 0.0:
            kernel = celerite.terms.Matern32Term(np.log(self.s_sig),
                                                 np.log(self.s_rho))
            gp = celerite.GP(kernel)
            s_C = gp.get_matrix(self.lnlam_padded)
        else:
            s_C = np.eye(self.Kp) * self.s_sig**2
        self._s_cho_C = cho_factor(s_C)
        self._s_CInv = cho_solve(self._s_cho_C, np.eye(self.Kp))
Example #18
def call_gp(params):
    log_sigma, log_rho, log_error_scale = params
    if GP_CODE=='celerite':
        kernel = terms.Matern32Term(log_sigma=log_sigma, log_rho=log_rho)
        gp = celerite.GP(kernel, mean=MEAN, fit_mean=False) #log_white_noise=np.log(yerr), 
        gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
        return gp
    elif GP_CODE=='george':
        kernel = np.exp(log_sigma) * kernels.Matern32Kernel(log_rho)
        gp = george.GP(kernel, mean=MEAN, fit_mean=False) #log_white_noise=np.log(yerr), 
        gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
        return gp
    else:
        raise ValueError('GP_CODE must be "celerite" or "george".')
Example #19
File: TESStools.py Project: tzdwi/TESS
def dSHO_maxlikelihood(lc, npeaks=1):
    # Now let's do some of the GP stuff on this
    # A non-periodic component
    Q = 1.0 / np.sqrt(2.0)
    w0 = 3.0
    S0 = np.var(lc['NormFlux']) / (w0 * Q)
    bounds = dict(log_S0=(-16, 16), log_Q=(-15, 15), log_omega0=(-15, 15))
    kernel = terms.SHOTerm(log_S0=np.log(S0),
                           log_Q=np.log(Q),
                           log_omega0=np.log(w0),
                           bounds=bounds)
    kernel.freeze_parameter(
        "log_Q")  # We don't want to fit for "Q" in this term

    # A periodic component
    for i in range(npeaks):
        Q = 1.0
        w0 = 3.0
        S0 = np.var(lc['NormFlux']) / (w0 * Q)
        kernel += terms.SHOTerm(log_S0=np.log(S0),
                                log_Q=np.log(Q),
                                log_omega0=np.log(w0),
                                bounds=bounds)

    sigma = np.median(lc['NormErr'])

    kernel += terms.JitterTerm(log_sigma=np.log(sigma))

    gp = celerite.GP(kernel, mean=np.mean(lc['NormFlux']))
    gp.compute(lc['Time'],
               lc['NormErr'])  # You always need to call compute once.
    print("Initial log likelihood: {0}".format(
        gp.log_likelihood(lc['NormFlux'])))

    initial_params = gp.get_parameter_vector()
    bounds = gp.get_parameter_bounds()

    r = minimize(neg_log_like,
                 initial_params,
                 method="L-BFGS-B",
                 bounds=bounds,
                 args=(lc['NormFlux'], gp))
    gp.set_parameter_vector(r.x)

    print("Final log likelihood: {0}".format(gp.log_likelihood(
        lc['NormFlux'])))

    print("Maximum Likelihood Soln: {}".format(gp.get_parameter_dict()))

    return gp
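A hypothetical usage sketch for dSHO_maxlikelihood: the function only indexes lc['Time'], lc['NormFlux'], and lc['NormErr'], so a plain dict of arrays works, and it relies on the same module-level neg_log_like helper sketched after Example #2:

import numpy as np

t = np.sort(np.random.uniform(0, 27, 2000))
lc = {'Time': t,
      'NormFlux': 1 + 1e-3 * np.sin(2 * np.pi * t / 2.5)
                    + 1e-4 * np.random.randn(len(t)),
      'NormErr': np.full(len(t), 1e-4)}
gp = dSHO_maxlikelihood(lc, npeaks=2)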
Example #20
def wavelet(df):
    # Gaussian Regression
    result = pd.DataFrame()
    mjds = df['mjd'].unique()
    # Prediction grid with two samples per day (0.5-day spacing)
    t = np.arange(np.min(mjds), np.max(mjds), 0.5)
    if (len(t) % 2) == 0:
        t = np.insert(t, len(t), t[len(t) - 1] + 0.5)
    for obj, agg_df in df.groupby('object_id'):
        agg_df = agg_df.sort_values(by=['mjd'])
        X = agg_df['mjd']
        Y = agg_df['flux']
        Yerr = agg_df['flux_err']
        # Start by setting the hyperparameters to unity (log values of zero):
        log_sigma = 0
        log_rho = 0
        kernel = celerite.terms.Matern32Term(log_sigma, log_rho)
        # Following Narayan et al. (2018), we use the Matern 3/2 kernel.
        gp = celerite.GP(kernel, mean=0.0)
        gp.compute(X, Yerr)
        # extract our initial guess at parameters
        # from the celerite kernel and put it in a
        # vector:
        p0 = gp.get_parameter_vector()
        # run optimization:
        results = minimize(nll,
                           p0,
                           method='L-BFGS-B',
                           jac=grad_nll,
                           args=(Y, gp))
        # set your initial guess parameters
        # as the output from the scipy optimiser
        # remember celerite keeps these in ln() form!
        gp.set_parameter_vector(np.abs(results.x))
        # Predict posterior mean and variance
        mu, var = gp.predict(Y, t, return_var=True)
        if (sum(np.isnan(mu)) != 0):
            print('NANs exist in mu vector')
            return [obj, results.x, mu]
        # Wavelet Transform
        # calculate wavelet transform using even numbered array
        (cA2, cD2), (cA1, cD1) = pywt.swt(mu[1:, ], 'sym2', level=2)
        obj_df = pd.DataFrame(list(cA2) + list(cA1) + list(cD2) +
                              list(cD1)).transpose()
        obj_df['object_id'] = obj
        result = pd.concat([result, obj_df])
    result.reset_index(inplace=True)
    result.drop("index", axis=1, inplace=True)
    return result
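wavelet() assumes module-level nll and grad_nll objectives that are not shown here. A minimal pair consistent with the call minimize(nll, p0, jac=grad_nll, args=(Y, gp)); the names come from the call site, and the bodies are assumptions following the usual celerite pattern:

def nll(p, y, gp):
    # Assumed objective: negative log likelihood at parameter vector p.
    gp.set_parameter_vector(p)
    return -gp.log_likelihood(y)

def grad_nll(p, y, gp):
    # Assumed gradient: celerite's grad_log_likelihood returns (ll, grad).
    gp.set_parameter_vector(p)
    return -gp.grad_log_likelihood(y)[1]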
Example #21
def call_gp(params):
    log_sigma, log_rho, log_error_scale, contrast, rv, fwhm = params
    if GP_CODE=='celerite':
        mean_model = Model_celerite(Contrast=contrast, RV=rv, FWHM=fwhm)
        kernel = terms.Matern32Term(log_sigma=log_sigma, log_rho=log_rho)
        gp = celerite.GP(kernel, mean=mean_model) 
        gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
        return gp
    elif GP_CODE=='george':
        mean_model = Model_george(Contrast=contrast, RV=rv, FWHM=fwhm)
        kernel = np.exp(log_sigma) * kernels.Matern32Kernel(log_rho)
        gp = george.GP(kernel, mean=mean_model)
        gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
        return gp
    else:
        raise ValueError('gp_code must be "celerite" or "george".')
Example #22
    def Gaussian_process(self, kernel, plot=False):
        mean = np.mean(self.flux)

        gp = celerite.GP(kernel, mean=mean)

        gp.compute(self.time, self.flux_err)

        initial_params = gp.get_parameter_vector()
        bounds = gp.get_parameter_bounds()
        r = minimize(self.neg_log_like,
                     initial_params,
                     method="L-BFGS-B",
                     bounds=bounds,
                     args=(self.flux, gp))
        gp.set_parameter_vector(r.x)
        #pred_mean, pred_var = gp.predict(self.flux, self.time, return_var=True)

        if (plot):
            x = np.linspace(self.time[0], self.time[-1], 1000)
            pred_mean, pred_var = gp.predict(self.flux, x, return_var=True)
            pred_std = np.sqrt(pred_var)
            color = "#ff7f0e"

            plt.plot(x, pred_mean, label='GP model')
            plt.errorbar(self.time,
                         self.flux,
                         fmt='o',
                         yerr=self.flux_err,
                         label=self.band + ' band',
                         markersize=2.5)
            plt.fill_between(x,
                             pred_mean + pred_std,
                             pred_mean - pred_std,
                             color=color,
                             alpha=0.3,
                             edgecolor="none")
            plt.legend()
            plt.title('')
            plt.xlabel('time (days)')
            plt.ylabel('relative flux')

            plt.ylim(min(pred_mean), max(pred_mean))

            plt.xlim(0 - self.time[2], np.max(self.time) + 10)
            plt.title(self.name + ' ' + self.band + ' data')
            plt.show()
        return self.calc_Rchi2_GP(gp)
Example #23
def trappist1_variability(times):
    alpha = 0.973460343001
    log_a = np.log(np.exp(-26.88111923) * alpha)
    log_c = -1.0890621571818671
    # log_sigma = -5.6551601053314622

    # kernel = (terms.JitterTerm(log_sigma=log_sigma) +
    #           terms.RealTerm(log_a=log_a, log_c=log_c))

    kernel = terms.RealTerm(log_a=log_a, log_c=log_c)

    gp = celerite.GP(kernel, mean=0, fit_white_noise=True, fit_mean=True)
    gp.compute(times)

    sample = gp.sample()
    sample -= np.median(sample)
    return sample + 1
Example #24
def k296_variability(times):
    alpha = 0.854646217641
    log_a = np.log(np.exp(-13.821195) * alpha)
    log_c = -1.0890621571818671
    # log_sigma = -7.3950524

    # kernel = (terms.JitterTerm(log_sigma=log_sigma) +
    #           terms.RealTerm(log_a=log_a, log_c=log_c))

    kernel = terms.RealTerm(log_a=log_a, log_c=log_c)

    gp = celerite.GP(kernel, mean=0, fit_white_noise=True, fit_mean=True)
    gp.compute(times)

    sample = gp.sample()
    sample -= np.median(sample)
    return sample + 1
Example #25
def compute_background(t, amp, freqs, white=0):
    """
    Compute granulation background using Gaussian process
    """

    if white == 0:
        white = 1e-6

    kernel = terms.JitterTerm(log_sigma=np.log(white))

    S0 = calculateS0(amp, freqs)
    print(f"S0: {S0}")
    for i in range(len(amp)):
        kernel += terms.SHOTerm(log_S0=np.log(S0[i]),
                                log_Q=np.log(1 / np.sqrt(2)),
                                log_omega0=np.log(2 * np.pi * freqs[i]))
    gp = celerite.GP(kernel)
    return kernel, gp, S0
Example #26
def pred_lc(t, y, yerr, params, p, t_pred, return_var=True):
    """
    Generate predicted values at particular time stamps given the initial
    time series and a best-fit model.

    Args:
        t (array(float)): Time stamps of the initial time series.
        y (array(float)): y values (i.e., flux) of the initial time series.
        yerr (array(float)): Measurement errors of the initial time series.
        params (array(float)): Best-fit CARMA parameters
        p (int): The AR order (p) of the given best-fit model.
        t_pred (array(float)): Time stamps to generate predicted time series.
        return_var (bool, optional): Whether to return uncertainties in the mean
            prediction. Defaults to True.

    Returns:
        (array(float), array(float), array(float)): t_pred, mean prediction at t_pred
        and uncertainties (variance) of the mean prediction.
    """

    assert p >= len(params) - p, \
        "The AR order (p) must be at least the MA order (q)"

    # get ar, ma
    ar = params[:p]
    ma = params[p:]

    # reposition lc
    y_aln = y - np.median(y)

    # init kernel, gp and compute matrix
    kernel = CARMA_term(np.log(ar), np.log(ma))
    gp = celerite.GP(kernel, mean=0)
    gp.compute(t, yerr)

    try:
        mu, var = gp.predict(y_aln, t_pred, return_var=return_var)
    except FloatingPointError as e:
        print(e)
        print("No (super small) variance will be returned")
        return_var = False
        mu, var = gp.predict(y_aln, t_pred, return_var=return_var)

    return t_pred, mu + np.median(y), var
Example #27
def autocorr_ml(y, thin=1, c=5.0):
    """Compute the autocorrelation using a GP model."""
    # Compute the initial estimate of tau using the standard method
    init = autocorr_new(y, c=c)
    z = y[:, ::thin]
    N = z.shape[1]

    # Build the GP model
    tau = max(1.0, init / thin)
    bounds = [(-5.0, 5.0), (-np.log(N), 0.0)]
    kernel = terms.RealTerm(
        np.log(0.9 * np.var(z)),
        -np.log(tau),
        bounds=bounds
    )
    kernel += terms.RealTerm(
        np.log(0.1 * np.var(z)),
        -np.log(0.5 * tau),
        bounds=bounds,
    )

    gp = celerite.GP(kernel, mean=np.mean(z))
    gp.compute(np.arange(z.shape[1]))

    # Define the objective
    def nll(p):
        # Update the GP model
        gp.set_parameter_vector(p)
        # Loop over the chains and compute the likelihoods
        v, g = zip(*(gp.grad_log_likelihood(z0, quiet=True) for z0 in z))
        # Combine the datasets
        return -np.sum(v), -np.sum(g, axis=0)

    # Optimize the model
    p0 = gp.get_parameter_vector()
    bounds = gp.get_parameter_bounds()
    soln = minimize(nll, p0, jac=True, bounds=bounds)
    gp.set_parameter_vector(soln.x)

    # Compute the maximum likelihood tau
    a, c = kernel.coefficients[:2]
    tau = thin * 2 * np.sum(a / c) / np.sum(a)

    return tau
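A hypothetical usage sketch for autocorr_ml on a set of AR(1) chains with shape (nwalkers, nsteps); it assumes the autocorr_new helper called above is defined elsewhere in the module:

import numpy as np

nwalkers, nsteps = 4, 5000
chains = np.empty((nwalkers, nsteps))
chains[:, 0] = np.random.randn(nwalkers)
for i in range(1, nsteps):
    chains[:, i] = 0.9 * chains[:, i - 1] + np.random.randn(nwalkers)

tau = autocorr_ml(chains, thin=4)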
Example #28
    def predict(self, xpred):
        """ Realize the GP using the current values of the hyperparameters at values x=xpred.
            Used for making GP plots. Wrapper for `celerite.GP.predict()`.

            Args:
                xpred (np.array): numpy array of x values for realizing the GP
            Returns:
                tuple: tuple containing:
                    np.array: numpy array of predictive means \n
                    np.array: numpy array of predictive standard deviations
        """

        self.update_kernel_params()

        B = self.kernel.hparams['gp_B'].value
        C = self.kernel.hparams['gp_C'].value
        L = self.kernel.hparams['gp_L'].value
        Prot = self.kernel.hparams['gp_Prot'].value

        # build celerite kernel with current values of hparams
        kernel = celerite.terms.JitterTerm(
                log_sigma = np.log(self.params[self.jit_param].value)
                )

        kernel += celerite.terms.RealTerm(
            log_a=np.log(B*(1+C)/(2+C)),
            log_c=np.log(1/L)
        )

        kernel += celerite.terms.ComplexTerm(
            log_a=np.log(B/(2+C)),
            log_b=-np.inf,
            log_c=np.log(1/L),
            log_d=np.log(2*np.pi/Prot)
        )

        gp = celerite.GP(kernel)
        gp.compute(self.x, self.yerr)
    #    mu, var = gp.predict(self.y-self.params[self.gamma_param].value, xpred, return_var=True)
        mu, var = gp.predict(self._resids(), xpred, return_var=True)

        stdev = np.sqrt(var)

        return mu, stdev
Example #29
def get_rotation_gp(t, y, yerr, period, min_period, max_period):
    kernel = get_basic_kernel(t, y, yerr)
    kernel += MixtureOfSHOsTerm(log_a=np.log(np.var(y)),
                                log_Q1=np.log(15),
                                mix_par=-1.0,
                                log_Q2=np.log(15),
                                log_P=np.log(period),
                                bounds=dict(
                                    log_a=(-20.0, 10.0),
                                    log_Q1=(-0.5 * np.log(2.0), 11.0),
                                    mix_par=(-5.0, 5.0),
                                    log_Q2=(-0.5 * np.log(2.0), 11.0),
                                    log_P=(np.log(min_period),
                                           np.log(max_period)),
                                ))

    gp = celerite.GP(kernel=kernel, mean=0.)
    gp.compute(t)
    return gp
Example #30
def test_gp(celerite_kernel, seed=1234):
    import celerite
    import celerite.terms as cterms  # NOQA
    celerite_kernel = eval(celerite_kernel)
    np.random.seed(seed)
    x = np.sort(np.random.rand(100))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)
    diag = yerr**2

    celerite_gp = celerite.GP(celerite_kernel)
    celerite_gp.compute(x, yerr)
    celerite_loglike = celerite_gp.log_likelihood(y)

    kernel = _get_theano_kernel(celerite_kernel)
    gp = GP(kernel, x, diag)
    loglike = gp.log_likelihood(y).eval()

    assert np.allclose(loglike, celerite_loglike)