Example #1
import numpy as np
import george
from george import kernels


# Model is a george.modeling.Model subclass defined elsewhere in the source
# file (see Examples #8 and #10 for the pattern).
def generate_data(params, N, rng=(-5, 5)):

    # Create GP object
    # It needs a kernel to be supplied.
    gp = george.GP(0.1 * kernels.ExpSquaredKernel(3.3))

    # Generate an array for the independent variable.
    # In this case, the array goes from -5 to +5.
    # For a spectrum, this would be the wavelength.
    t = rng[0] + np.diff(rng) * np.sort(np.random.rand(N))

    # Generate the dependent variable array (the flux, in the case of spectra).
    # The sample method draws a random realization (smooth correlated noise)
    # from the GP prior defined above.
    y = gp.sample(t)

    # Add the Gaussian "absorption" or "emission" feature to that sample.
    y += Model(**params).get_value(t)

    # Generate the error-bar array: a base error of 0.05 plus a random
    # component between 0 and 0.05, then perturb y by Gaussian noise of
    # that size (randn samples the standard normal distribution).
    yerr = 0.05 + 0.05 * np.random.rand(N)
    y += yerr * np.random.randn(N)
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.errorbar(t, y, yerr=yerr, fmt='.k', capsize=0)
    plt.show()
    """

    return t, y, yerr
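
A minimal usage sketch, assuming the Gaussian-feature mean model from Example #10 (the Model subclass below mirrors the george model-fitting tutorial and is an assumption, not part of this snippet):

import numpy as np
from george.modeling import Model


class Model(Model):
    parameter_names = ("amp", "location", "log_sigma2")

    def get_value(self, t):
        # Gaussian feature: amp * exp(-(t - location)^2 / (2 * sigma^2))
        return self.amp * np.exp(
            -0.5 * (t - self.location) ** 2 * np.exp(-self.log_sigma2))


truth = dict(amp=-1.0, location=0.1, log_sigma2=np.log(0.4))
t, y, yerr = generate_data(truth, 50)
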
Example #2
def generate_data(params, N, rng=(-5, 5)):
    gp = george.GP(0.1 * kernels.ExpSquaredKernel(3.3))
    t = rng[0] + np.diff(rng) * np.sort(np.random.rand(N))
    y = gp.sample(t)
    y += Model(**params).get_value(t)
    yerr = 0.05 + 0.05 * np.random.rand(N)
    y += yerr * np.random.randn(N)
    return t, y, yerr
Example #3
             tau2=1.,
             k2=np.std(y) / 100,
             w2=0.,
             e2=0.4,
             offset1=0.,
             offset2=0.)
kwargs = dict(**truth)
kwargs["bounds"] = dict(P1=(7.5, 8.5),
                        k1=(0, 0.1),
                        w1=(-2 * np.pi, 2 * np.pi),
                        e1=(0, 0.9),
                        tau2=(-50, 50),
                        k2=(0, 0.2),
                        w2=(-2 * np.pi, 2 * np.pi),
                        e2=(0, 0.9))
mean_model = Model(**kwargs)
gp = george.GP(kernel,
               mean=mean_model,
               fit_mean=True,
               white_noise=np.log(0.5**2),
               fit_white_noise=True)
gp.compute(t, yerr)


def lnprob2(p):
    gp.set_parameter_vector(p)
    return gp.log_likelihood(y, quiet=True) + gp.log_prior()


#==============================================================================
# MCMC
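
A minimal sketch of the MCMC step that the header announces, feeding lnprob2 to emcee exactly as Example #6 does (walker count and chain lengths are assumptions):

import emcee
import numpy as np

initial = gp.get_parameter_vector()
ndim, nwalkers = len(initial), 32
p0 = initial + 1e-8 * np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob2)

print("Running burn-in...")
p0, _, _ = sampler.run_mcmc(p0, 500)
sampler.reset()

print("Running production...")
sampler.run_mcmc(p0, 1000)
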
Example #4
                 k2=np.std(y) / 100,
                 w2=0.,
                 e2=0.5,
                 d_harps1=0.,
                 d_harps2=0.)
    kwargs = dict(**truth)
    kwargs["bounds"] = dict(P1=(12.5, 13.0),
                            k1=(0, 1.),
                            w1=(-2 * np.pi, 2 * np.pi),
                            e1=(0.7, 0.95),
                            P2=(35, 45),
                            k2=(0, 1.),
                            w2=(-2 * np.pi, 2 * np.pi),
                            e2=(0.4, 0.65))

mean_model = Model(**kwargs)
gp = george.GP(kernel,
               mean=mean_model,
               fit_mean=True,
               white_noise=np.log(0.5**2),
               fit_white_noise=True)
# Freeze the amplitude constant of the second kernel term so it is held
# fixed (excluded from the parameter vector) during fitting.
gp.freeze_parameter('kernel:k2:k1:log_constant')
gp.compute(x, yerr)


def lnprob2(p):
    gp.set_parameter_vector(p)
    return gp.log_likelihood(y, quiet=True) + gp.log_prior()


#==============================================================================
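
Because one kernel parameter was frozen above, it is excluded from the vector the sampler explores; a quick sanity check using george's parameter API:

# Frozen parameters no longer appear among the fitted parameters.
print(gp.get_parameter_names())        # 'kernel:k2:k1:log_constant' is absent
print(len(gp.get_parameter_vector()))  # dimensionality seen by the sampler
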
Example #5
        # Apply a different instrumental offset before and after t = 57300.
        idx = t < 57300
        offset[idx] = self.offset1
        offset[~idx] = self.offset2

        return rv1 + rv2 + offset


#==============================================================================
# Priors
#==============================================================================
model = george.GP(mean=Model(P1=8.,
                             tau1=1.,
                             k1=np.std(y) / 100,
                             w1=0.,
                             e1=0.4,
                             P2=100,
                             tau2=1.,
                             k2=np.std(y) / 100,
                             w2=0.,
                             e2=0.4,
                             offset1=0.,
                             offset2=0.))
model.compute(t, yerr)


def lnprob(p):
    model.set_parameter_vector(p)
    return model.log_likelihood(y, quiet=True) + model.log_prior()


#==============================================================================
# GP
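
The trailing "# GP" header points at the correlated-noise counterpart of this model; a minimal sketch, assuming the Matern-3/2 kernel of Example #6 (the kernel choice and metric are assumptions):

import numpy as np
import george
from george import kernels

kernel = np.var(y) * kernels.Matern32Kernel(10.0)
gp = george.GP(kernel, mean=model.mean, fit_mean=True)  # reuse the mean model built above
gp.compute(t, yerr)


def lnprob_gp(p):
    gp.set_parameter_vector(p)
    return gp.log_likelihood(y, quiet=True) + gp.log_prior()
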
Example #6
    p0 = initial + 1e-8 * np.random.randn(nwalkers, ndim)
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)

    print("Running burn-in...")
    p0, _, _ = sampler.run_mcmc(p0, 500)
    sampler.reset()

    print("Running production...")
    sampler.run_mcmc(p0, 1000)

#==============================================================================
# Modelling correlated noise
#==============================================================================

# mean: An object (following the modeling protocol) that specifies the mean function of the GP.
gp = george.GP(np.var(y) * kernels.Matern32Kernel(10.0), mean=Model(**truth))

# compute(x, yerr=0.0, **kwargs). Pre-compute the covariance matrix and factorize it for a set of times and uncertainties.
gp.compute(t, yerr)


def lnprob2(p):

    # Set the parameter values to the given vector
    gp.set_parameter_vector(p)

    # Compute the logarithm of the marginalized likelihood of a set of observations under the Gaussian process model.
    return gp.log_likelihood(y, quiet=True) + gp.log_prior()


#==============================================================================
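
After sampling, the conditional prediction of the GP can be evaluated at new locations; a minimal sketch using gp.predict (the prediction grid is an assumption):

import numpy as np
import matplotlib.pyplot as plt

x_pred = np.linspace(t.min(), t.max(), 500)
mu, var = gp.predict(y, x_pred, return_var=True)  # conditional mean and variance
std = np.sqrt(var)

plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
plt.fill_between(x_pred, mu - std, mu + std, color="g", alpha=0.3)
plt.plot(x_pred, mu, "g", lw=1.5)
plt.show()
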
Example #7
# tau = time of pericenter passage ?
tau = 62.19

# k = amplitude of radial velocity (m/s)
K = 186.8

# offset 
offset = 0
'''

t = np.linspace(min(RV_ALL[:, 0]), max(RV_ALL[:, 0]), num=10000, endpoint=True)
truth = dict(n=0.0151661049, tau=62.19, k=186.8, w=0, e0=0.856, offset=0)
#truth       = dict(amp=5, P=25*0.31, phase=0.1)

# y		= Model(**truth).get_value(RV_ALL[:,0])
y = Model(**truth).get_value(t)

# Plot the simulated RV curve.
# plt.plot(RV_ALL[:,0], y, '-')
plt.plot(t, y, '-')
plt.ylabel(r'RV $[m/s]$')
plt.xlabel('t')
plt.title('Simulated RV')
plt.show()

#==============================================================================
# GP Modelling
#==============================================================================

from george.modeling import Model
from george import kernels
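
The Keplerian Model used with the truth dict above is not part of this excerpt; a hedged sketch of what a mean model with these parameter names could look like (the fixed-point Kepler solver is illustrative, not the author's code):

import numpy as np
from george.modeling import Model


class Model(Model):
    parameter_names = ("n", "tau", "k", "w", "e0", "offset")

    def get_value(self, t):
        # Mean anomaly from mean motion n and time of pericenter passage tau.
        M = self.n * (t - self.tau)
        # Solve Kepler's equation E - e*sin(E) = M by fixed-point iteration
        # (converges for e < 1, though slowly at high eccentricity).
        E = np.copy(M)
        for _ in range(200):
            E = M + self.e0 * np.sin(E)
        # True anomaly from the eccentric anomaly.
        f = 2.0 * np.arctan2(np.sqrt(1.0 + self.e0) * np.sin(E / 2.0),
                             np.sqrt(1.0 - self.e0) * np.cos(E / 2.0))
        # Radial velocity: K * (cos(w + f) + e * cos(w)) plus a constant offset.
        return self.k * (np.cos(self.w + f) +
                         self.e0 * np.cos(self.w)) + self.offset
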
Example #8
#==============================================================================
# Inject a planet
#==============================================================================

from george.modeling import Model


class Model(Model):  # subclasses george.modeling.Model imported above
    parameter_names = ("amp", "P", "phase")

    def get_value(self, t):
        return self.amp * np.sin(2 * np.pi * t / self.P + self.phase)


truth = dict(amp=5, P=25 * 0.31, phase=0.1)
y_planet = Model(**truth).get_value(t)
yerr = 0.5 + 0.5 * np.random.rand(N)  # size of error bar
delta_y = np.zeros(N)

# Draw Gaussian noise point by point (a vectorized equivalent is shown
# after this example).
for i in range(N):
    delta_y[i] = np.random.normal(0, yerr[i], 1)[0]

y_planet = y_planet + delta_y

if 1:  # planet
    plt.errorbar(t, y_planet, yerr=yerr, fmt=".k", capsize=0)
    plt.ylabel(r"$y$ [m/s]")
    plt.xlabel(r"$t$ [days]")
    plt.ylim((-8, 12))
    plt.title("Planet induced radial velocity")
    plt.show()
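
The per-point noise loop above is equivalent to the single vectorized draw already used in Example #1:

delta_y = yerr * np.random.randn(N)
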
Example #9
    from george import kernels

    # k1      = kernels.ExpSine2Kernel(gamma = 1, log_period = np.log(100), bounds=dict(gamma=(0,10), log_period=(1,6)))
    k2 = np.std(y) * kernels.ExpSquaredKernel(100)
    # k3      = 0.66**2 * kernels.RationalQuadraticKernel(log_alpha=np.log(0.78), metric=1.2**2)
    kernel = k2

    truth = dict(a=np.log(2.), k=np.log(0.7), phi=1., b=0., m=-0.2)
    kwargs = dict(**truth)
    kwargs["bounds"] = dict(a=(0, 5),
                            k=(-3, 3),
                            phi=(-2 * np.pi, 2 * np.pi),
                            b=(-2, 2),
                            m=(-1, 3))
    mean_model = Model(**kwargs)
    gp = george.GP(kernel, mean=mean_model, fit_mean=True)
    gp.compute(x, yerr)
    names = gp.get_parameter_names()

    def lnprob(p):
        gp.set_parameter_vector(p)
        return gp.log_likelihood(y, quiet=True) + gp.log_prior()

    #==============================================================================
    # MCMC with jitter correction (5 parameters)
    #==============================================================================

    if (mode == 5):
        print('# MCMC with jitter correction (5 parameters) #')
Example #10
def main():

    truth = dict(amp=-1.0, location=0.1, log_sigma2=np.log(0.4))
    t, y, yerr = generate_data(truth, 50)

    model = george.GP(mean=PolynomialModel(
        m=0, b=0, amp=-1, location=0.1, log_sigma2=np.log(0.4)))
    model.compute(t, yerr)

    initial = model.get_parameter_vector()
    ndim, nwalkers = len(initial), 32
    p0 = initial + 1e-8 * np.random.randn(nwalkers, ndim)
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(y, model))

    print "Fitting assuming uncorrelated errors."

    print("Running burn-in...")
    p0, _, _ = sampler.run_mcmc(p0, 500)
    sampler.reset()

    print("Running production...")
    sampler.run_mcmc(p0, 1000)

    # Plot
    fig = plt.figure()
    ax = fig.add_subplot(111)

    ax.set_title("Fit assuming uncorrelated errors.")

    # plot data
    ax.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)

    # The positions where the prediction should be computed.
    x = np.linspace(-5, 5, 500)

    # Plot 24 posterior samples.
    samples = sampler.flatchain
    for s in samples[np.random.randint(len(samples), size=24)]:
        model.set_parameter_vector(s)
        ax.plot(x, model.mean.get_value(x), color="#4682b4", alpha=0.3)

    #plt.show()

    # Corner plot for case assuming uncorrelated noise
    tri_cols = ["amp", "location", "log_sigma2"]
    tri_labels = [r"$\alpha$", r"$\ell$", r"$\ln\sigma^2$"]
    tri_truths = [truth[k] for k in tri_cols]
    tri_range = [(-2, -0.01), (-3, -0.5), (-1, 1)]
    names = model.get_parameter_names()
    inds = np.array([names.index("mean:" + k) for k in tri_cols])
    corner.corner(sampler.flatchain[:, inds],
                  truths=tri_truths,
                  labels=tri_labels)

    # plt.show() seems necessary for the corner plot to actually show up.
    plt.show()

    # --------------------- Now assuming correlated errors --------------------- #
    print "\n", "Fitting assuming correlated errors modeled with GP noise model."
    kwargs = dict(**truth)
    kwargs["bounds"] = dict(location=(-2, 2))
    mean_model = Model(**kwargs)
    gp = george.GP(np.var(y) * kernels.Matern32Kernel(10.0), mean=mean_model)
    gp.compute(t, yerr)

    # Again run MCMC
    initial = gp.get_parameter_vector()
    ndim, nwalkers = len(initial), 32
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob2, args=(y, gp))

    print("Running first burn-in...")
    p0 = initial + 1e-8 * np.random.randn(nwalkers, ndim)
    p0, lp, _ = sampler.run_mcmc(p0, 2000)

    print("Running second burn-in...")
    p0 = p0[np.argmax(lp)] + 1e-8 * np.random.randn(nwalkers, ndim)
    sampler.reset()
    p0, _, _ = sampler.run_mcmc(p0, 2000)
    sampler.reset()

    print("Running production...")
    sampler.run_mcmc(p0, 2000)

    # Plot
    fig1 = plt.figure()
    ax1 = fig1.add_subplot(111)

    ax1.set_title("Fit assuming correlated errors and GP noise model.")

    # plot data
    ax1.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)

    # Plot 24 posterior samples.
    samples = sampler.flatchain
    for s in samples[np.random.randint(len(samples), size=24)]:
        gp.set_parameter_vector(s)
        mu = gp.sample_conditional(y, x)
        ax1.plot(x, mu, color="#4682b4", alpha=0.3)

    names = gp.get_parameter_names()
    inds = np.array([names.index("mean:" + k) for k in tri_cols])
    corner.corner(sampler.flatchain[:, inds],
                  truths=tri_truths,
                  labels=tri_labels)

    plt.show()

    return None
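
lnprob and lnprob2 are passed to the samplers with args=(y, model) and args=(y, gp) but are not part of this excerpt; from the call signatures they presumably follow the pattern of Examples #3 through #6 (a sketch):

def lnprob(p, y, model):
    model.set_parameter_vector(p)
    return model.log_likelihood(y, quiet=True) + model.log_prior()


def lnprob2(p, y, gp):
    gp.set_parameter_vector(p)
    return gp.log_likelihood(y, quiet=True) + gp.log_prior()
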
Example #11
        rot_prof = self.a * np.exp(
            (-flatx * self.b - self.c / self.b)) + self.d
        vals = np.array([
            splittings(rot_prof, 1),
            splittings(rot_prof, 2),
            splittings(rot_prof, 3)
        ])
        return vals


truth = dict(a=400, b=-12, c=0.6, d=50)  # log_sigma=np.log(0.4) disabled

kwargs = dict(**truth)
kwargs["bounds"] = dict(a=(0, 700), b=(-15, 15), c=(-1, 1), d=(0, 100))
#kwargs["bounds"] = dict(a=(-5,5), b = (0,15),c = (0,10.))
mean_model = Model(**kwargs)

plt.figure()
ao, bo, co, do = mean_model.get_parameter_vector()
plt.plot(r[0] * flatx, (ao * np.exp((-flatx * bo - co / bo)) + do))
plt.title('Pre Optimization Rotation Curve')
plt.xlabel('r/R')
plt.ylabel(r'$\Omega$')
#plt.ylim(0,500)
plt.show()

plt.figure()
actual_rot = 330 * np.ones(4800)
actual_rot[3360:-1] = actual_rot[3360:-1] + 30  # NB: [3360:-1] leaves the last element unchanged

actual_1 = splittings(actual_rot, 1)