Example 1
def sim_lc(t, ppm, per_aper_ratio, periodic_freq, aperiodic_freq, A, planet=False, mean=1.0, planet_params=(0, 0, 0, 0, 0, 0, 0, 0, 0)):

    # use the transit model as the mean function when a planet is injected
    if planet:
        mean = transit_model(*planet_params, t)

    aperiodic_sigma = 1/per_aper_ratio
    periodic_sigma = 1
    white_noise_sigma = ppm/1e6

    # set up a gaussian process with two components and white noise

    # non-periodic component
    Q = 1. / np.sqrt(2.0)  # quality factor; Q = 1/sqrt(2) makes this term non-periodic
    w0 = 2*np.pi*aperiodic_freq
    S0 = (aperiodic_sigma**2.) / (w0 * Q)
    bounds = dict(log_S0=(-15, 15), log_Q=(-15, 15), log_omega0=(-15, 15))
    kernel = terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0), bounds=bounds)

    # periodic component
    Q = 1.0
    w0 = 2*np.pi*periodic_freq
    S0 = (periodic_sigma**2.) / (w0 * Q)
    kernel += terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0), bounds=bounds)

    # white noise
    # kernel += terms.JitterTerm(log_sigma=np.log(white_noise_sigma), bounds=dict(log_sigma=(-15,15)))

    gp = celerite.GP(kernel, mean=0, fit_mean=True, fit_white_noise=True)
    gp.compute(t, white_noise_sigma)

    return A * gp.sample() + mean + white_noise_sigma * np.random.randn(len(t))
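
For reference, every SHOTerm in these examples is the stochastically driven, damped harmonic oscillator kernel of Foreman-Mackey et al. (2017), with power spectral density

    S(\omega) = \sqrt{2/\pi}\, \frac{S_0\, \omega_0^4}{(\omega^2 - \omega_0^2)^2 + \omega_0^2 \omega^2 / Q^2}

so log_S0 sets the amplitude, log_omega0 the characteristic angular frequency, and log_Q the quality factor. Q = 1/sqrt(2) reduces the PSD to a non-periodic, Harvey-like profile, which is why several examples below freeze log_Q at that value for granulation components.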
Example 2
def generate_solar_fluxes(duration, cadence=60 * u.s, seed=None):
    """
    Generate an array of fluxes with zero mean which mimic the power spectrum of
    the SOHO/VIRGO SPM observations.
    
    Parameters
    ----------
    duration : `~astropy.units.Quantity`
        Duration of simulated observations to generate.
    cadence : `~astropy.units.Quantity`
        Length of time between fluxes
    seed : int, optional
        Seed for the random number generator.
    
    Returns
    -------
    times : `~astropy.units.Quantity`
        Array of times at cadence ``cadence`` of length ``duration/cadence``
    fluxes : `~numpy.ndarray`
        Array of fluxes at cadence ``cadence`` of length ``duration/cadence``
    kernel : `~celerite.terms.TermSum`
        Celerite kernel used to approximate the solar power spectrum.
    """
    if seed is not None:
        np.random.seed(seed)

    _process_inputs(duration, cadence, 5777 * u.K)

    ##########################
    # Assemble celerite kernel
    ##########################
    parameter_vector = np.copy(PARAM_VECTOR)

    nterms = len(parameter_vector) // 3

    kernel = terms.SHOTerm(log_S0=0, log_omega0=0, log_Q=0)

    for term in range(nterms - 1):
        kernel += terms.SHOTerm(log_S0=0, log_omega0=0, log_Q=0)

    kernel.set_parameter_vector(parameter_vector)

    gp = celerite.GP(kernel)

    times = np.arange(0, duration.to(u.s).value, cadence.to(u.s).value) * u.s
    x = times.value

    gp.compute(x, check_sorted=False)

    ###################################
    # Get samples with the kernel's PSD
    ###################################

    y = gp.sample()
    # Remove a linear trend:
    y -= np.polyval(np.polyfit(x - x.mean(), y, 1), x - x.mean())

    return times, y, kernel
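
A minimal usage sketch for the function above (assuming astropy.units is available; PARAM_VECTOR and _process_inputs are module-level objects not shown here):

import astropy.units as u

# simulate one day of solar-like variability at one-minute cadence
times, fluxes, kernel = generate_solar_fluxes(1 * u.day, cadence=60 * u.s, seed=42)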
Example 3
def generate_solar_fluxes(size,
                          cadence=60 * u.s,
                          parameter_vector=parameter_vector):
    """
    Generate an array of fluxes with zero mean which mimic the power spectrum of
    the SOHO/VIRGO SPM observations.
    
    Parameters
    ----------
    size : int
        Number of fluxes to generate. Note: Assumes ``size``>>500.
    cadence : `~astropy.units.Quantity`
        Length of time between fluxes
    
    Returns
    -------
    y : `~numpy.ndarray`
        Array of fluxes at cadence ``cadence`` of length ``size``.
    kernel : `~celerite.terms.TermSum`
        Celerite kernel used to approximate the solar power spectrum.
    """
    nterms = len(parameter_vector) // 3

    kernel = terms.SHOTerm(log_S0=0, log_omega0=0, log_Q=0)

    for term in range(nterms - 1):
        kernel += terms.SHOTerm(log_S0=0, log_omega0=0, log_Q=0)

    kernel.set_parameter_vector(parameter_vector)

    gp = celerite.GP(kernel)

    x = np.arange(0, size // 500, cadence.to(u.s).value)
    gp.compute(x, check_sorted=False)

    y = gp.sample(500)

    y_concatenated = []

    for i, yi in enumerate(y):
        xi = np.arange(len(yi))
        fit = np.polyval(np.polyfit(xi - xi.mean(), yi, 1), xi - xi.mean())
        yi -= fit

        if i == 0:
            y_concatenated.append(yi)
        else:
            offset = yi[0] - y_concatenated[i - 1][-1]
            y_concatenated.append(yi - offset)
    y_concatenated = np.hstack(y_concatenated)

    x_c = np.arange(len(y_concatenated))

    y_concatenated -= np.polyval(
        np.polyfit(x_c - x_c.mean(), y_concatenated, 1), x_c - x_c.mean())

    return y_concatenated, kernel
Example 4
def dSHO_maxlikelihood(lc, npeaks=1):
    # Now let's do some of the GP stuff on this
    # A non-periodic component
    Q = 1.0 / np.sqrt(2.0)
    w0 = 3.0
    S0 = np.var(lc['NormFlux']) / (w0 * Q)
    bounds = dict(log_S0=(-16, 16), log_Q=(-15, 15), log_omega0=(-15, 15))
    kernel = terms.SHOTerm(log_S0=np.log(S0),
                           log_Q=np.log(Q),
                           log_omega0=np.log(w0),
                           bounds=bounds)
    kernel.freeze_parameter(
        "log_Q")  # We don't want to fit for "Q" in this term

    # A periodic component
    for i in range(npeaks):
        Q = 1.0
        w0 = 3.0
        S0 = np.var(lc['NormFlux']) / (w0 * Q)
        kernel += terms.SHOTerm(log_S0=np.log(S0),
                                log_Q=np.log(Q),
                                log_omega0=np.log(w0),
                                bounds=bounds)

    sigma = np.median(lc['NormErr'])

    kernel += terms.JitterTerm(log_sigma=np.log(sigma))

    gp = celerite.GP(kernel, mean=np.mean(lc['NormFlux']))
    gp.compute(lc['Time'],
               lc['NormErr'])  # You always need to call compute once.
    print("Initial log likelihood: {0}".format(
        gp.log_likelihood(lc['NormFlux'])))

    initial_params = gp.get_parameter_vector()
    bounds = gp.get_parameter_bounds()

    r = minimize(neg_log_like,
                 initial_params,
                 method="L-BFGS-B",
                 bounds=bounds,
                 args=(lc['NormFlux'], gp))
    gp.set_parameter_vector(r.x)

    print("Final log likelihood: {0}".format(gp.log_likelihood(
        lc['NormFlux'])))

    print("Maximum Likelihood Soln: {}".format(gp.get_parameter_dict()))

    return gp
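
Examples 4 and 6 call a neg_log_like helper that is not shown; a minimal sketch consistent with how it is invoked (args=(flux, gp)), matching the definition that appears in Examples 14 and 18:

def neg_log_like(params, y, gp):
    # negative log-likelihood of the data under the current kernel parameters
    gp.set_parameter_vector(params)
    return -gp.log_likelihood(y)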
Example 5
def gp_sho(idx, data, resid, params):
    logQ, logw, logS, log_jit = params
    err = data.err / data.flux
    #logerr = np.log(np.median(err))
    logerr = log_jit

    #print "logQ, logw, logS", logQ, logw, logS

    mean_resid = np.mean(resid)
    #resid -= mean_resid

    t = data.t_vis[idx]

    kernel = (terms.SHOTerm(log_S0=logS, log_Q=logQ, log_omega0=logw) +
              terms.JitterTerm(log_sigma=logerr))

    gp = celerite.GP(kernel, fit_mean=True)
    gp.compute(t, err, check_sorted=False)  #t must be ascending!

    mu = gp.predict(resid, t, return_cov=False)
    gp_lnlike = gp.log_likelihood(resid)
    """plt.errorbar(t, resid, err, fmt = '.k')
    plt.plot(t, mu)

    x = np.linspace(np.min(t), np.max(t), 1000)
    pred_mean, pred_var = gp.predict(resid, x, return_var = True)
    pred_std = np.sqrt(pred_var)
    plt.fill_between(x, pred_mean+pred_std, pred_mean-pred_std, color='blue', alpha=0.3)
    
    plt.show()"""

    #np.save("resid", [t,  resid, err])

    #return [1.0 + np.array(mu) + mean_resid, gp_lnlike]
    return [1.0 + np.array(mu), gp_lnlike]
Example 6
def GPfit(times, fluxes, errors, nonmask_idxs):
	#t, y, yerr = times[np.isfinite(times)], fluxes[np.isfinite(fluxes)], errors[np.isfinite(errors)]
	t, y, yerr = times[nonmask_idxs], fluxes[nonmask_idxs], errors[nonmask_idxs]
	t, y, yerr = t[np.isfinite(t)], y[np.isfinite(y)], yerr[np.isfinite(yerr)]

	Q = 1.0
	w0 = 3.0
	S0 = np.var(y) / (w0 * Q)
	bounds = dict(log_S0=(-15, 15), log_Q=(-15, 15), log_omega0=(-15, 15))
	kernel = terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0), bounds=bounds)
	#kernel += terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0) bounds=bounds)


	gp = celerite.GP(kernel, mean=np.mean(y))
	gp.compute(t, yerr)  # You always need to call compute once.
	print("Initial log likelihood: {0}".format(gp.log_likelihood(y)))

	initial_params = gp.get_parameter_vector()
	bounds = gp.get_parameter_bounds()

	r = minimize(neg_log_like, initial_params, method="L-BFGS-B", bounds=bounds, args=(y, gp))
	gp.set_parameter_vector(r.x)
	print(r)

	#interpolated_times = np.linspace(np.nanmin(t), np.nanmax(t), 1000)
	#interpolated_epochs = np.linspace(np.nanmin(t), np.nanmax(t), 1000)
	#pred_mean, pred_var = gp.predict(y, interpolated_times, return_var=True)
	pred_mean, pred_var = gp.predict(y, times, return_var=True)
	pred_std = np.sqrt(pred_var)

	return pred_mean, pred_std ### should be the same dimension as times.
Example 7
    def __init__(self, params):
        Ampl, Plife, Prot = params

        # map amplitude, mode lifetime, and rotation period onto SHO parameters
        S0 = (Ampl * (Prot**2)) / (2. * (np.pi**2) * Plife)
        w0 = (2.0 * np.pi) / Prot
        Q = (np.pi * Plife) / Prot

        kernel = terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0))
Example 8
def simple_kernel():
    # A non-periodic component
    Q = 1.0 / np.sqrt(2.0)
    w0 = 3.0
    S0 = np.var(y) / (w0 * Q)
    kernel = terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q),
                           log_omega0=np.log(w0),
                           bounds=[(-15, 15), (-15, 15), (-15, 15)])
    kernel.freeze_parameter("log_Q")  # We don't want to fit for "Q" in this term

    # A periodic component
    Q = 1.0
    w0 = 3.0
    S0 = np.var(y) / (w0 * Q)
    kernel += terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q),
                            log_omega0=np.log(w0),
                            bounds=[(-15, 15), (-15, 15), (-15, 15)])
    return kernel
Example 9
    def log_probability(self, params, band):
        """
        Calculate log of posterior probability

        Parameters
        -----------
        params : iterable
            list of parameter values
        band : string
            SDSS/HiPERCAM band
        """
        self.set_parameter_vector(params)
        t, _, y, ye, _, _ = np.loadtxt(self.lightcurves[band]).T

        # check model params are valid - checks against bounds
        lp = self.log_prior()
        if not np.isfinite(lp):
            return -np.inf

        # make a GP for this band, if it doesn't already exist
        if not hasattr(self, 'gpdict'):
            self.gpdict = dict()

        # Oscillation params
        pulsation_amp = self.pulse_amp * scale_pulsation(band, self.pulse_temp)

        if band not in self.gpdict:
            kernel = terms.SHOTerm(np.log(pulsation_amp), np.log(self.pulse_q),
                                   np.log(self.pulse_omega))
            gp = celerite.GP(kernel)
            gp.compute(t, ye)
            self.gpdict[band] = gp
        else:
            gp = self.gpdict[band]
            gp.set_parameter_vector(
                (np.log(pulsation_amp), np.log(self.pulse_q),
                 np.log(self.pulse_omega)))
            gp.compute(t, ye)

        # now add prior of Gaussian process
        lp += gp.log_prior()
        if not np.isfinite(lp):
            return -np.inf

        try:
            ym = self.get_value(band)
        except ValueError as err:
            # invalid lcurve params
            print('warning: model failed ', err)
            return -np.inf
        else:
            return gp.log_likelihood(y - ym) + lp
Example 10
File: gp.py Project: dfm/rotate
def get_basic_kernel(t, y, yerr):
    kernel = terms.SHOTerm(
        log_S0=np.log(np.var(y)),
        log_Q=-np.log(4.0),
        log_omega0=np.log(2*np.pi/10.),
        bounds=dict(
            log_S0=(-20.0, 10.0),
            log_omega0=(np.log(2*np.pi/80.0), np.log(2*np.pi/2.0)),
        ),
    )
    kernel.freeze_parameter('log_Q')

    # Finally some jitter
    kernel += terms.JitterTerm(log_sigma=np.log(yerr),
                               bounds=[(-20.0, 5.0)])

    return kernel
Example 11
def compute_background(t, amp, freqs, white=0):
    """
    Compute granulation background using Gaussian process
    """

    if white == 0:
        white = 1e-6

    kernel = terms.JitterTerm(log_sigma=np.log(white))

    S0 = calculateS0(amp, freqs)
    print(f"S0: {S0}")
    for i in range(len(amp)):
        kernel += terms.SHOTerm(log_S0=np.log(S0[i]),
                                log_Q=np.log(1 / np.sqrt(2)),
                                log_omega0=np.log(2 * np.pi * freqs[i]))
    gp = celerite.GP(kernel)
    return kernel, gp, S0
Example 12
def get_basic_kernel(t, y, yerr, period=None):
    if period is None:
        period = 0.5  # note: period is not used further in this snippet
    kernel = terms.SHOTerm(
        log_S0=np.log(np.var(y)),
        log_Q=-np.log(4.0),
        log_omega0=np.log(2 * np.pi / 20.),
        bounds=dict(
            log_S0=(-20.0, 10.0),
            log_omega0=(np.log(2 * np.pi / 100.), np.log(2 * np.pi / (10))),
        ),
    )
    kernel.freeze_parameter('log_Q')
    ##  tau = 2*np.exp(-1*np.log(4.0))/np.exp(log_omega0)

    # Finally some jitter
    ls = np.log(np.median(yerr))
    kernel += terms.JitterTerm(log_sigma=ls, bounds=[(ls - 5.0, ls + 5.0)])

    return kernel
Example 13
def test_product(seed=42):
    np.random.seed(seed)
    t = np.sort(np.random.uniform(0, 5, 100))
    tau = t[:, None] - t[None, :]

    k1 = terms.RealTerm(log_a=0.1, log_c=0.5)
    k2 = terms.ComplexTerm(0.2, -3.0, 0.5, 0.01)
    k3 = terms.SHOTerm(1.0, 0.2, 3.0)

    K1 = k1.get_value(tau)
    K2 = k2.get_value(tau)
    K3 = k3.get_value(tau)

    assert np.allclose((k1 + k2).get_value(tau), K1 + K2)
    assert np.allclose((k3 + k2).get_value(tau), K3 + K2)
    assert np.allclose((k1 + k2 + k3).get_value(tau), K1 + K2 + K3)

    for (a, b), (A, B) in zip(
            product((k1, k2, k3, k1 + k2, k1 + k3, k2 + k3), (k1, k2, k3)),
            product((K1, K2, K3, K1 + K2, K1 + K3, K2 + K3), (K1, K2, K3))):
        assert np.allclose((a * b).get_value(tau), A * B)
Example 14
mean_model = Model(**kwargs)
# mean_model = Model(P1=8., tau1=1., k1=np.std(y)/100, w1=0., e1=0.4, 
#                    P2=100, tau2=1., k2=np.std(y)/100, w2=0., e2=0.4, offset1=0., offset2=0.)


#==============================================================================
# The fit
#==============================================================================
from scipy.optimize import minimize

import celerite
from celerite import terms

# Set up the GP model
# kernel = terms.RealTerm(log_a=np.log(np.var(y)), log_c=-np.log(10.0))
kernel  = terms.SHOTerm(log_S0=np.log(2), log_Q=np.log(2), log_omega0=np.log(5))
gp = celerite.GP(kernel, mean=mean_model, fit_mean=True)
gp.compute(x, yerr)
print("Initial log-likelihood: {0}".format(gp.log_likelihood(y)))

# Define a cost function
def neg_log_like(params, y, gp):
    gp.set_parameter_vector(params)
    return -gp.log_likelihood(y)

# def grad_neg_log_like(params, y, gp):
#     gp.set_parameter_vector(params)
#     return -gp.grad_log_likelihood(y)[1]

# Fit for the maximum likelihood parameters
initial_params = gp.get_parameter_vector()
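
Examples 14 and 18 stop just before the optimizer call; a typical continuation, following the same pattern as the other fits in this collection:

soln = minimize(neg_log_like, initial_params, method="L-BFGS-B", args=(y, gp))
gp.set_parameter_vector(soln.x)
print("Final log-likelihood: {0}".format(gp.log_likelihood(y)))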
Example 15
            #ecosw=self.ecosw,  # eccentricity vector
            #esinw=self.esinw,
            occ=0.0)  # a secondary eclipse depth in ppm

        M.add_data(time=t)

        return M.transitmodel


#set the GP parameters
Q = 1.0 / np.sqrt(2.0)
w0 = muhz2omega(13)
S0 = np.var(y) / (w0 * Q)
kernel = terms.SHOTerm(log_S0=np.log(S0),
                       log_Q=np.log(Q),
                       log_omega0=np.log(w0),
                       bounds=[(-25, 0), (-15, 15),
                               (np.log(muhz2omega(3)), np.log(muhz2omega(50)))
                               ])  #omega upper bound: 275 muhz
kernel.freeze_parameter("log_Q")  #to make it a Harvey model

Q = 1.0 / np.sqrt(2.0)
w0 = muhz2omega(61.0)
S0 = np.var(y) / (w0 * Q)
kernel += terms.SHOTerm(log_S0=np.log(S0),
                        log_Q=np.log(Q),
                        log_omega0=np.log(w0),
                        bounds=[(-25, 0), (-15, 15),
                                (np.log(muhz2omega(30)),
                                 np.log(muhz2omega(1000)))])
kernel.freeze_parameter("terms[1]:log_Q")  #to make it a Harvey model
Example 16
    plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
    plt.ylabel(r"RV $[m/s]$")
    plt.xlabel(r"$t$")
    plt.title("Simulated data -- planet and jitter")
    plt.show()

#==============================================================================
# Modelling
#==============================================================================

import celerite

print(celerite.__version__)
from celerite import terms

kernel = terms.SHOTerm(np.log(2), np.log(2), np.log(25))
#gp      = celerite.GP(kernel, mean=Model(amp=np.var(y)/2, P=7, phase=0))
gp = celerite.GP(kernel, mean=Model(**truth), fit_mean=True)
#gp  = celerite.GP(kernel, mean=Model(**truth ), log_white_noise=np.log(1), fit_white_noise=True)
gp.compute(t, yerr)


def lnprob2(p):
    # Set the parameters to the given vector, then return the log of the
    # marginalized likelihood under the GP model plus the log-prior.
    gp.set_parameter_vector(p)
    return gp.log_likelihood(y, quiet=True) + gp.log_prior()


#==============================================================================
# run MCMC on this model
#==============================================================================
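
A minimal emcee driver for lnprob2 above (a sketch; the walker and step counts are arbitrary choices, not from the original):

import emcee

initial = gp.get_parameter_vector()
ndim, nwalkers = len(initial), 32
p0 = initial + 1e-8 * np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob2)
sampler.run_mcmc(p0, 2000)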
Example 17
                       log_period=0.0,
                       bounds=[(-10, 10), (-10, 10), (-10, 10)])

# Simulate a dataset from the true model
np.random.seed(42)
N = 100
t = np.sort(np.random.uniform(0, 20, N))
yerr = 0.5
K = true_model.get_K(t)
K[np.diag_indices_from(K)] += yerr**2
y = np.random.multivariate_normal(np.zeros(N), K)

# Set up the celerite model that we will use to fit - product of two SHOs
log_Q = 1.0
kernel = terms.SHOTerm(log_S0=np.log(np.var(y)) - 2 * log_Q,
                       log_Q=log_Q,
                       log_omega0=np.log(2 * np.pi))
kernel *= terms.SHOTerm(log_S0=0.0, log_omega0=0.0, log_Q=-0.5 * np.log(2))
kernel.freeze_parameter("k2:log_S0")
kernel.freeze_parameter("k2:log_Q")

gp = celerite.GP(kernel)
gp.compute(t, yerr)


# Fit for the maximum likelihood
def nll(params, gp, y):
    gp.set_parameter_vector(params)
    if not np.isfinite(gp.log_prior()):
        return 1e10
    ll = gp.log_likelihood(y)
Example 18
truth = dict(P1=8., tau1=1., k1=np.std(y)/100, w1=0., e1=0.4, offset1=0., offset2=0.)
kwargs = dict(**truth)
kwargs["bounds"] = dict(P1=(7.5,8.5), k1=(0,0.1), w1=(-2*np.pi,2*np.pi), e1=(0,0.8))
mean_model = Model(**kwargs)

#==============================================================================
# The fit
#==============================================================================
from scipy.optimize import minimize

import celerite
from celerite import terms

# Set up the GP model
# kernel = terms.RealTerm(log_a=np.log(np.var(y)), log_c=-np.log(10.0))
kernel  = terms.SHOTerm(log_S0=np.log(2), log_Q=np.log(20), log_omega0=np.log(1/3000))
gp = celerite.GP(kernel, mean=mean_model, fit_mean=True)
gp.compute(x, yerr)
print("Initial log-likelihood: {0}".format(gp.log_likelihood(y)))

# Define a cost function
def neg_log_like(params, y, gp):
    gp.set_parameter_vector(params)
    return -gp.log_likelihood(y)

# def grad_neg_log_like(params, y, gp):
#     gp.set_parameter_vector(params)
#     return -gp.grad_log_likelihood(y)[1]

# Fit for the maximum likelihood parameters
initial_params = gp.get_parameter_vector()
Example 19
test_terms = [
    cterms.RealTerm(log_a=np.log(2.5), log_c=np.log(1.1123)),
    cterms.RealTerm(log_a=np.log(12.345), log_c=np.log(1.5)) +
    cterms.RealTerm(log_a=np.log(0.5), log_c=np.log(1.1234)),
    cterms.ComplexTerm(log_a=np.log(10.0),
                       log_c=np.log(5.6),
                       log_d=np.log(2.1)),
    cterms.ComplexTerm(
        log_a=np.log(7.435),
        log_b=np.log(0.5),
        log_c=np.log(1.102),
        log_d=np.log(1.05),
    ),
    cterms.SHOTerm(log_S0=np.log(1.1),
                   log_Q=np.log(0.1),
                   log_omega0=np.log(1.2)),
    cterms.SHOTerm(log_S0=np.log(1.1),
                   log_Q=np.log(2.5),
                   log_omega0=np.log(1.2)),
    cterms.SHOTerm(
        log_S0=np.log(1.1), log_Q=np.log(2.5), log_omega0=np.log(1.2)) +
    cterms.RealTerm(log_a=np.log(1.345), log_c=np.log(2.4)),
    cterms.SHOTerm(
        log_S0=np.log(1.1), log_Q=np.log(2.5), log_omega0=np.log(1.2)) *
    cterms.RealTerm(log_a=np.log(1.345), log_c=np.log(2.4)),
    cterms.Matern32Term(log_sigma=0.1, log_rho=0.4),
]


@pytest.mark.parametrize("oterm", test_terms)
Example 20
    return k_out


def neo_update_kernel(theta, params):
    gp = george.GP(mean=0.0, fit_mean=False, white_noise=jitt)
    pass


from celerite import terms as cterms

#  2 or sp.log(10.) ?
T = {
    'Constant': 1.**2,
    'RealTerm': cterms.RealTerm(log_a=2., log_c=2.),
    'ComplexTerm': cterms.ComplexTerm(log_a=2., log_b=2., log_c=2., log_d=2.),
    'SHOTerm': cterms.SHOTerm(log_S0=2., log_Q=2., log_omega0=2.),
    'Matern32Term': cterms.Matern32Term(log_sigma=2., log_rho=2.0),
    'JitterTerm': cterms.JitterTerm(log_sigma=2.0)
}


def neo_term(terms):
    t_out = T[terms[0][0]]
    for f in range(len(terms[0])):
        if f == 0:
            pass
        else:
            t_out *= T[terms[0][f]]

    for i in range(len(terms)):
        if i == 0:
Example 21
pad = 25
log_omega0 = 3
log_S0 = 0
log_Q = 2.0
amp = 10
nrot = 30
npts = 1000
nsamples = 5

x = np.linspace(0, nrot * 2 * np.pi / np.exp(log_omega0), npts, endpoint=False)

yerr = 1e2 * np.ones_like(x)
yerr[:pad] = np.logspace(-12, 2, pad)
yerr[-pad:] = np.logspace(2, -12, pad)

kernel = terms.SHOTerm(log_S0=log_S0, log_Q=log_Q, log_omega0=log_omega0)
gp = celerite.GP(kernel)
gp.compute(x, yerr)

y = [None for k in range(nsamples)]
for k in range(nsamples):
    y0 = (amp * np.exp(log_S0) *
          np.sin(np.exp(log_omega0) * x + 2 * np.pi * np.random.random()))
    y[k] = gp.sample_conditional(y0)
    y[k] = (y[k] - y[k].min()) / (y[k].max() - y[k].min())

fig, ax = plt.subplots(figsize=(3.6, 3.2), facecolor="#e5e5e5")
fig.patch.set_facecolor("#e5e5e5")
fig.subplots_adjust(left=0, bottom=0, top=1, right=1)
ax.set(xlim=(0, 1), ylim=(-0.1, 1.1))
ax.axis("off")
Example 22

class CustomTerm(terms.Term):
    parameter_names = ("amp", "P", "phase", "offset")


bounds = dict(amp=(0, 0.002),
              P=(3, 10),
              phase=(0, 2 * np.pi),
              offset=(min(y), max(y)))
kernel1 = CustomTerm(amp=0.001,
                     P=8,
                     phase=1,
                     offset=np.median(y),
                     bounds=bounds)
kernel2 = terms.SHOTerm(np.log(1.1), np.log(1.1), np.log(25))
kernel = kernel1 * kernel2
#gp      = celerite.GP(kernel, mean=Model(amp=np.var(y)/2, P=7, phase=0))
gp = celerite.GP(kernel,
                 mean=Model(amp=(np.var(y))**0.5,
                            P=7.75,
                            phase=1,
                            offset=np.median(y)),
                 fit_mean=True)
#gp  = celerite.GP(kernel, mean=Model(**truth ), fit_mean = True)
#gp  = celerite.GP(kernel, mean=Model(**truth ), fit_mean = True, log_white_noise=np.log(1), fit_white_noise=True)
gp.compute(t, yerr)


def lnprob2(p):
    gp.set_parameter_vector(p)  # Set the parameter values to the given vector
Example 23
    assert all(np.allclose(a, b) for a, b in zip(b0, bounds))

    kernel = terms.RealTerm(log_a=0.1,
                            log_c=0.5,
                            bounds=dict(zip(["log_a", "log_c"], bounds)))
    assert all(
        np.allclose(a, b) for a, b in zip(b0, kernel.get_parameter_bounds()))


@pytest.mark.parametrize("k", [
    terms.RealTerm(log_a=0.1, log_c=0.5),
    terms.RealTerm(log_a=0.1, log_c=0.5) +
    terms.RealTerm(log_a=-0.1, log_c=0.7),
    terms.ComplexTerm(log_a=0.1, log_c=0.5, log_d=0.1),
    terms.ComplexTerm(log_a=0.1, log_b=-0.2, log_c=0.5, log_d=0.1),
    terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
    terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
    terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
    terms.RealTerm(log_a=0.1, log_c=0.4),
    terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
    terms.RealTerm(log_a=0.1, log_c=0.4),
])
def test_jacobian(k, eps=1.34e-7):
    if not terms.HAS_AUTOGRAD:
        with pytest.raises(ImportError):
            jac = k.get_coeffs_jacobian()
        return

    v = k.get_parameter_vector()
    c = np.concatenate(k.coefficients)
    jac = k.get_coeffs_jacobian()
Example 24
def generate_stellar_fluxes(size,
                            M,
                            T_eff,
                            L,
                            cadence=60 * u.s,
                            parameter_vector=parameter_vector):
    """
    Generate an array of fluxes with zero mean which mimic the power spectrum of
    the SOHO/VIRGO SPM observations.
    
    Parameters
    ----------
    size : int
        Number of fluxes to generate. Note: Assumes ``size``>>500.
    M : `~astropy.units.Quantity`
        Stellar mass
    T_eff : `~astropy.units.Quantity`
        Stellar effective temperature
    L : `~astropy.units.Quantity`
        Stellar luminosity
    cadence : `~astropy.units.Quantity`
        Length of time between fluxes
    
    Returns
    -------
    y : `~numpy.ndarray`
        Array of fluxes at cadence ``cadence`` of length ``size``.
    kernel : `~celerite.terms.TermSum`
        Celerite kernel used to approximate the scaled stellar power spectrum.
    """
    parameter_vector = parameter_vector.copy()

    tunable_amps = np.exp(parameter_vector[::3][2:])
    tunable_freqs = np.exp(parameter_vector[2::3][2:]) * 1e6 / 2 / np.pi
    peak_ind = np.argmax(tunable_amps)
    peak_freq = tunable_freqs[peak_ind]
    delta_freqs = tunable_freqs - peak_freq

    T_eff_solar = 5777 * u.K
    nu_max_sun = peak_freq * u.uHz
    delta_nu_sun = 135.1 * u.uHz

    # Huber 2011 Eqn 1
    nu_factor = ((M / M_sun) * (T_eff / T_eff_solar)**3.5 / (L / L_sun))
    # Huber 2011 Eqn 2
    delta_nu_factor = ((M / M_sun)**0.5 * (T_eff / T_eff_solar)**3 /
                       (L / L_sun)**0.75)

    new_peak_freq = nu_factor * peak_freq
    new_delta_freqs = delta_freqs * delta_nu_factor

    new_freqs = new_peak_freq + new_delta_freqs

    new_log_omegas = np.log(2 * np.pi * new_freqs * 1e-6).value

    parameter_vector[2::3][2:] = new_log_omegas

    nterms = len(parameter_vector) // 3

    kernel = terms.SHOTerm(log_S0=0, log_omega0=0, log_Q=0)

    for term in range(nterms - 1):
        kernel += terms.SHOTerm(log_S0=0, log_omega0=0, log_Q=0)

    kernel.set_parameter_vector(parameter_vector)

    gp = celerite.GP(kernel)

    x = np.arange(0, size // 500, cadence.to(u.s).value)
    gp.compute(x, check_sorted=False)

    y = gp.sample(500)

    y_concatenated = []

    for i, yi in enumerate(y):
        xi = np.arange(len(yi))
        fit = np.polyval(np.polyfit(xi - xi.mean(), yi, 1), xi - xi.mean())
        yi -= fit

        if i == 0:
            y_concatenated.append(yi)
        else:
            offset = yi[0] - y_concatenated[i - 1][-1]
            y_concatenated.append(yi - offset)
    y_concatenated = np.hstack(y_concatenated)

    x_c = np.arange(len(y_concatenated))

    y_concatenated -= np.polyval(
        np.polyfit(x_c - x_c.mean(), y_concatenated, 1), x_c - x_c.mean())

    return y_concatenated, kernel
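
The nu_factor and delta_nu_factor computed above implement the asteroseismic scaling relations of Huber et al. (2011), Eqns. 1 and 2:

    \frac{\nu_\mathrm{max}}{\nu_{\mathrm{max},\odot}} = \frac{(M/M_\odot)\,(T_\mathrm{eff}/T_{\mathrm{eff},\odot})^{3.5}}{L/L_\odot}, \qquad \frac{\Delta\nu}{\Delta\nu_\odot} = \frac{(M/M_\odot)^{1/2}\,(T_\mathrm{eff}/T_{\mathrm{eff},\odot})^{3}}{(L/L_\odot)^{3/4}}

which shift the oscillation-term frequencies of the solar kernel to those expected for a star of the given mass, temperature, and luminosity.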
Example 25
print('guess v: ',np.median(slit['rv']))
print('guess K: ',np.std(slit['rv']-np.median(slit['rv'])))

print('guess v: ',np.median(fiber['rv']))
print('guess K: ',np.std(fiber['rv']-np.median(fiber['rv'])))
# print(len(slit))   -> 67
# print(len(fiber))  -> 82


#set the GP parameters-FROM SAM RAW
#First granulation
Q = 1.0 / np.sqrt(2.0)
w0 = muhz2omega(20)
S0 = np.var(fiber['rv']) / (w0*Q)

kernel = terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0),
                       bounds=[(-20, 20), (-15, 15), (np.log(muhz2omega(0.1)), np.log(muhz2omega(100)))]) #omega upper bound: 10 muhz
kernel.freeze_parameter("log_Q") #to make it a Harvey model


#numax
Q = np.exp(3.0)
w0 = muhz2omega(25) #peak of oscillations 
S0 = np.var(fiber['rv']) / (w0*Q)

kernel += terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0),
                      bounds=[(-20, 20), (0.1, 5), (np.log(muhz2omega(5)), np.log(muhz2omega(50)))])

kernel += terms.JitterTerm(log_sigma=1, bounds=[(-20,40)])


#initial guess of RV model
Example 26
def iterGP(x,
           y,
           yerr,
           period_guess,
           acf_1pk,
           num_iter=20,
           ax=None,
           n_samp=4000):
    # Here is the kernel we will use for the GP regression.
    # It consists of a sum of two stochastically driven damped harmonic
    # oscillators. One of the terms has Q fixed at 1/sqrt(2), which
    # forces it to be non-periodic.

    # Do some aggressive sigma clipping
    m = np.ones(len(x), dtype=bool)
    while True:
        mu = np.mean(y[m])
        sig = np.std(y[m])
        m0 = y - mu < 3 * sig
        if np.all(m0 == m):
            break
        m = m0
    x_clip, y_clip, yerr_clip = x[m], y[m], yerr[m]

    if len(x_clip) < n_samp:
        n_samp = len(x_clip)

    # Randomly select n points from the light curve for the GP fit
    x_ind_rand = np.random.choice(len(x_clip), n_samp, replace=False)
    x_ind = x_ind_rand[np.argsort(x_clip[x_ind_rand])]

    x_gp = x_clip[x_ind]
    y_gp = y_clip[x_ind]
    yerr_gp = yerr_clip[x_ind]

    # A non-periodic component
    Q = 1.0 / np.sqrt(2.0)
    w0 = 3.0
    S0 = np.var(y_gp) / (w0 * Q)
    bounds = dict(log_S0=(-20, 15), log_Q=(-15, 15), log_omega0=(-15, 15))
    kernel = terms.SHOTerm(log_S0=np.log(S0),
                           log_Q=np.log(Q),
                           log_omega0=np.log(w0),
                           bounds=bounds)
    kernel.freeze_parameter('log_Q')

    # A periodic component
    Q = 1.0
    w0 = 2 * np.pi / period_guess
    S0 = np.var(y_gp) / (w0 * Q)
    kernel += terms.SHOTerm(log_S0=np.log(S0),
                            log_Q=np.log(Q),
                            log_omega0=np.log(w0),
                            bounds=bounds)

    # Now calculate the covariance matrix using the initial
    # kernel parameters
    gp = celerite.GP(kernel, mean=np.mean(y_gp))
    gp.compute(x_gp, yerr_gp)

    def neg_log_like(params, y, gp, m):
        gp.set_parameter_vector(params)
        return -gp.log_likelihood(y[m])

    def grad_neg_log_like(params, y, gp, m):
        gp.set_parameter_vector(params)
        return -gp.grad_log_likelihood(y[m])[1]

    bounds = gp.get_parameter_bounds()
    initial_params = gp.get_parameter_vector()

    if ax:
        ax.plot(x_gp, y_gp)

    # Find the best fit kernel parameters. We want to try to ignore the flares
    # when we do the fit. To do this, we will repeatedly find the best fit
    # solution to the kernel model, calculate the covariance matrix, predict
    # the flux and then mask out points based on how far they deviate from
    # the model. After a few passes, this should cause the model to fit mostly
    # to periodic features.
    m = np.ones(len(x_gp), dtype=bool)
    for i in range(num_iter):
        n_pts_prev = np.sum(m)
        gp.compute(x_gp[m], yerr_gp[m])
        soln = minimize(neg_log_like,
                        initial_params,
                        jac=grad_neg_log_like,
                        method='L-BFGS-B',
                        bounds=bounds,
                        args=(y_gp, gp, m))
        gp.set_parameter_vector(soln.x)
        initial_params = soln.x
        mu, var = gp.predict(y_gp[m], x_gp, return_var=True)
        sig = np.sqrt(var + yerr_gp**2)

        if ax:
            ax.plot(x_gp, mu)

        m0 = y_gp - mu < sig
        m[m == 1] = m0[m == 1]
        n_pts = np.sum(m)
        print(n_pts, n_pts_prev)
        if n_pts <= 10:
            raise ValueError('GP iteration threw out too many points')
        if (n_pts == n_pts_prev):
            break

    gp.compute(x_gp[m], yerr_gp[m])
    mu, var = gp.predict(y_gp[m], x_gp, return_var=True)

    return x_gp, mu, var, gp.get_parameter_vector()
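
A usage sketch for iterGP (hypothetical array names; note that acf_1pk is accepted but never used in the body shown):

# time, flux, flux_err: light-curve arrays; 3.5 d is a hypothetical period guess
x_fit, mu, var, best_params = iterGP(time, flux, flux_err, period_guess=3.5, acf_1pk=3.5)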
Example 27
final = TransitModel(lgror, lgroa, lgt0, lgper, lgb).get_value(time)
phase = fold(time, np.exp(lgper), np.exp(lgt0), 0.5) - 0.5
idx = np.argsort(phase)

################################################
###############~FUNKY PLOTS~####################
################################################

#set the GP parameters-FROM SAM RAW
#First granulation
Q = 1.0 / np.sqrt(2.0)
w0 = muhz2omega(13)
S0 = np.var(lc) / (w0 * Q)

kernel = terms.SHOTerm(log_S0=np.log(S0),
                       log_Q=np.log(Q),
                       log_omega0=np.log(w0))
kernel.freeze_parameter("log_Q")  #to make it a Harvey model

#Second granulation
Q = 1.0 / np.sqrt(2.0)
w0 = muhz2omega(35)
S0 = np.var(lc) / (w0 * Q)
kernel += terms.SHOTerm(log_S0=np.log(S0),
                        log_Q=np.log(Q),
                        log_omega0=np.log(w0))
kernel.freeze_parameter("terms[1]:log_Q")  #to make it a Harvey model

#numax
Q = np.exp(3.0)
w0 = muhz2omega(135)  #peak of oscillations at 133 uhz
Example 28
	"N": terms.Term(),
	"B": terms.RealTerm(log_a=-6., log_c=-np.inf,
				bounds={"log_a": [-30, 30],
						"log_c": [-np.inf, np.inf]}),
	"W": terms.JitterTerm(log_sigma=-25,
				bounds={"log_sigma": [-30, 30]}),
	"Mat32": terms.Matern32Term(
				log_sigma=1.,
				log_rho=1.,
				bounds={"log_sigma": [-30, 30],
						# The `celerite` version of the Matern-3/2
						# kernel has problems with very large `log_rho`
						# values. -7.4 is empirical.
						"log_rho": [-7.4, 16]}),
	"SHO0": terms.SHOTerm(log_S0=-6, log_Q=1.0 / np.sqrt(2.), log_omega0=0.,
				bounds={"log_S0": [-30, 30],
						"log_Q": [-30, 30],
						"log_omega0": [-30, 30]}),
	"SHO1": terms.SHOTerm(log_S0=-6, log_Q=-2., log_omega0=0.,
				bounds={"log_S0": [-10, 10],
						"log_omega0": [-10, 10]}),
	"SHO2": terms.SHOTerm(log_S0=-6, log_Q=0.5, log_omega0=0.,
				bounds={"log_S0": [-10, 10],
						"log_Q": [-10, 10],
						"log_omega0": [-10, 10]}),
	# see Foreman-Mackey et al. 2017, AJ 154, 6, pp. 220
	# doi: 10.3847/1538-3881/aa9332
	# Eq. (53)
	"SHO3": terms.SHOTerm(log_S0=-6, log_Q=0., log_omega0=0.,
				bounds={"log_S0": [-15, 5],
						"log_Q": [-10, 10],
						"log_omega0": [-10, 10]}) *
Example 29
            x = obs_time
            y = obs_flux
            yerr = obs_err

            Q = 1.0 / np.sqrt(2.0)
            log_w0 = 5 #3.0
            log_S0 = 10

            log_cadence_min = None # np.log(2*np.pi/(2./24))
            log_cadence_max = np.log(2*np.pi/(0.25/24))

            bounds = dict(log_S0=(-15, 30), log_Q=(-15, 15),
                          log_omega0=(log_cadence_min, log_cadence_max))

            kernel = terms.SHOTerm(log_S0=log_S0, log_Q=np.log(Q),
                                   log_omega0=log_w0, bounds=bounds)

            kernel.freeze_parameter("log_Q")  # We don't want to fit for "Q" in this term

            gp = celerite.GP(kernel, mean=mean_model, fit_mean=True)
            gp.compute(x, yerr)
            print("Initial log-likelihood: {0}".format(gp.log_likelihood(y)))

            # Define a cost function
            def neg_log_like(params, y, gp):
                gp.set_parameter_vector(params)
                return -gp.log_likelihood(y)

            def grad_neg_log_like(params, y, gp):
                gp.set_parameter_vector(params)
                return -gp.grad_log_likelihood(y)[1]
Example 30
from __future__ import division, print_function

import emcee
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize

import celerite
from celerite import terms
from celerite import plot_setup

np.random.seed(42)
plot_setup.setup(auto=True)

# Simulate some data
kernel = terms.SHOTerm(log_S0=0.0, log_omega0=2.0, log_Q=2.0,
                       bounds=[(-10, 10), (-10, 10), (-10, 10)])
gp = celerite.GP(kernel)
true_params = np.array(gp.get_parameter_vector())
omega = 2*np.pi*np.exp(np.linspace(-np.log(10.0), -np.log(0.1), 5000))
true_psd = gp.kernel.get_psd(omega)
N = 200
t = np.sort(np.random.uniform(0, 10, N))
yerr = 2.5
y = gp.sample(t, diag=yerr**2)

# Find the maximum likelihood model
gp.compute(t, yerr)

def nll(params, gp, y):
    gp.set_parameter_vector(params)
    if not np.isfinite(gp.log_prior()):