Example #1
File: data.py Project: cciaschi/bart
def optimize_hyperpars(self, n=None, **kwargs):
    if n is None:
        n = len(self.time)
    gp = GaussianProcess(self.hyperpars)
    flag, hyperpars = gp.optimize(self.time[:n], self.ferr[:n],
                                  self.flux[:n]-1, **kwargs)
    self.hyperpars = hyperpars
    return flag, hyperpars
Example #2
File: data.py Project: cciaschi/bart
def gp(self):
    if self._gp is None:
        self._gp = GaussianProcess(ExpSquaredKernel(*self.hyperpars))
    if not self._gp.computed:
        std = np.sqrt(self.ferr**2 + self.jitter**2)
        self._gp.compute(self.time, std)
    return self._gp
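This property builds the GP lazily and, whenever the covariance has to be (re)computed, folds the extra white-noise ``jitter`` term into the per-point errors in quadrature. A minimal sketch of that combination, with made-up values:

import numpy as np

ferr = np.array([1e-4, 2e-4, 1.5e-4])  # hypothetical per-point flux errors
jitter = 5e-5                          # hypothetical extra white-noise term
std = np.sqrt(ferr**2 + jitter**2)     # the uncertainties handed to gp.compute(time, std)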
Example #3
File: oned.py Project: rossfadely/george
import numpy as np
import matplotlib.pyplot as pl
from george import GaussianProcess

np.random.seed(42)


def f(x):
    return x * np.sin(x)


X = 10 * np.random.rand(50)

# Observations
y = f(X).ravel()
yerr = np.ones_like(y)

x = np.atleast_2d(np.linspace(-5, 15, 1001)).T

gp = GaussianProcess([0.1, 1.0])
gp.optimize(X, y, yerr=yerr)

mu, var = gp.predict(x)

print(gp.evaluate())

std = np.sqrt(var.diagonal())

vals = np.random.multivariate_normal(mu, var, 100)

pl.plot(x, mu, "k")
pl.plot(x, vals.T, "k", alpha=0.1)

pl.plot(x, mu + std, ":r")
pl.plot(x, mu - std, ":r")
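The two dotted red curves trace a one-sigma band around the predictive mean. A shaded band is often easier to read; a small optional addition ("oned.png" is a made-up filename):

pl.fill_between(x.ravel(), mu - std, mu + std, color="r", alpha=0.2)
pl.savefig("oned.png")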
Example #4
File: hyperpars.py Project: cciaschi/bart
t0s = np.sort(np.max(dataset.time) * np.random.rand(N))
depths = 2e-5 + (2e-4 - 2e-5) * np.random.rand(N)
durations = 0.3 + 0.2 * np.random.rand(N)
datasets = []
for t0, depth, duration in zip(t0s, depths, durations):
    ds = deepcopy(dataset)
    model = np.ones_like(ds.time)
    model[(ds.time < t0+duration)*(ds.time > t0-duration)] *= 1.0 - depth
    ds.flux *= model
    datasets.append(ds)

[pl.plot(ds.time, ds.flux, ".") for ds in datasets]
pl.savefig("data.png")

# Compute the Gaussian processes on the dataset.
gp = GaussianProcess([1e-3, 3.0, 10.])
gp.compute(dataset.time, dataset.ferr)

# Loop over true epochs.
d = 0.2
correct = []
incorrect = []
for t0, depth, duration, data in zip(t0s, depths, durations, datasets):
    null = gp.lnlikelihood(data.flux - 1.0)

    # Compute the correct model.
    model = np.ones_like(data.time)
    model[(data.time < t0+duration)*(data.time > t0-duration)] *= 1.0 - 0.0001
    correct.append(gp.lnlikelihood(data.flux - model) - null)

    for i, t in enumerate(data.time):
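        # (The excerpt ends here. A hedged guess at the loop body, modeled on
        # the very similar epoch scan in Example #6 below -- not the original
        # code: score candidate epochs away from the true one as "incorrect".)
        if np.abs(t - t0) < 2 * d:
            continue
        model = np.ones_like(data.time)
        model[(data.time < t+d)*(data.time > t-d)] *= 1.0 - 0.0001
        incorrect.append(gp.lnlikelihood(data.flux - model) - null)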
Example #5
File: data.py Project: cciaschi/bart
class GPLightCurve(LightCurve):
    """
    An extension to :class:`LightCurve` with a Gaussian Process likelihood
    function. This does various nice things like masking NaNs and Infs and
    normalizing the fluxes by the median.

    :param time:
        The time series in days.

    :param flux:
        The flux measurements in arbitrary units.

    :param ferr:
        The error bars on ``flux``.

    :param hyperpars:
        The parameters for the ``GaussianProcess`` kernel.

    :param texp: (optional)
        The integration time (in seconds). (default: 1626.0, the Kepler
        long-cadence integration time)

    :param K: (optional)
        The number of bins to use in the approximate exposure time integral.
        (default: 3)

    """

    def __init__(self, time, flux, ferr, hyperpars, jitter=0.0, **kwargs):
        if GaussianProcess is None:
            raise ImportError("You need to install george to use GPs")
        super(GPLightCurve, self).__init__(time, flux, ferr, **kwargs)
        self._hyperpars = hyperpars
        self._gp = None
        self._jitter = jitter

    def __getstate__(self):
        return dict([(k, v) for k, v in self.__dict__.items() if k != "_gp"])

    def __setstate__(self, d):
        for k, v in d.items():
            setattr(self, k, v)
        self._gp = None

    @property
    def gp(self):
        if self._gp is None:
            self._gp = GaussianProcess(ExpSquaredKernel(*self.hyperpars))
        if not self._gp.computed:
            std = np.sqrt(self.ferr**2 + self.jitter**2)
            self._gp.compute(self.time, std)
        return self._gp

    @property
    def jitter(self):
        return self._jitter

    @jitter.setter
    def jitter(self, value):
        self._gp = None
        self._jitter = value

    @property
    def hyperpars(self):
        return self._hyperpars

    @hyperpars.setter
    def hyperpars(self, value):
        self._hyperpars = value
        self._gp = None

    # Helper functions for the individual hyperparameters.
    @property
    def alpha(self):
        return self._hyperpars[0]

    @alpha.setter
    def alpha(self, value):
        h = self._hyperpars
        h[0] = value
        self.hyperpars = h

    @property
    def scale(self):
        return self._hyperpars[1]

    @scale.setter
    def scale(self, value):
        h = self._hyperpars
        h[1] = value
        self.hyperpars = h

    def optimize_hyperpars(self, n=None, **kwargs):
        if n is None:
            n = len(self.time)
        gp = GaussianProcess(self.hyperpars)
        flag, hyperpars = gp.optimize(self.time[:n], self.ferr[:n],
                                      self.flux[:n]-1, **kwargs)
        self.hyperpars = hyperpars
        return flag, hyperpars

    def lnlike(self, lc):
        """
        Get the likelihood of this dataset given a particular :class:`Model`.

        :param lc:
            The predicted light curve model evaluated at ``eval_time``.

        """
        return self.gp.lnlikelihood(self.flux - lc)

    def predict(self, lc, size=None, resample=None):
        """
        Generate a sample from the light curve probability function for a
        particular :class:`Model`.

        :param lc:
            The predicted light curve model evaluated at ``eval_time``.

        """
        if resample is None:
            mu, cov = self.gp.predict(self.flux - lc, self.time)
            return mu + lc
        t = self.time[::resample]
        mu, cov = self.gp.predict(self.flux - lc, t)
        mu += lc[::resample]
        return np.interp(self.time, t, mu)
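A minimal usage sketch for the class above. The variable names and hyperparameter values are made up for illustration; the two-element ``hyperpars`` list follows the ``alpha``/``scale`` helpers defined on the class.

# Hypothetical arrays: time (days), flux, and flux errors from some light curve.
lc = GPLightCurve(time, flux, ferr, hyperpars=[1e-3, 3.0], jitter=1e-4)
print(lc.lnlike(model_flux))       # GP likelihood of a candidate transit model
smoothed = lc.predict(model_flux)  # prediction conditioned on the residuals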
Example #6
File: test_grid.py Project: cciaschi/bart
pl.plot(data.time, data.flux, ".")
pl.savefig("data.png")

t = data.time
f = data.flux
d = 0.2

x = np.linspace(-6, 0, 10)
y = np.linspace(-1.0, 10, 12)
s2n = np.zeros((len(x), len(y)))
ll = np.zeros((len(x), len(y)))
delta_ll = np.zeros((len(x), len(y)))

for ix, a in enumerate(x):
    for iy, l in enumerate(y):
        gp = GaussianProcess([10 ** a, 10 ** l, 1e6])
        gp.compute(data.time, data.ferr)
        null = gp.lnlikelihood(f - 1.0)
        results = []
        for i, t0 in enumerate(t):
            if np.abs(t0 - truth[2]) < 2*d:
                continue
            model = np.ones_like(f)
            model[(t < t0+d) * (t > t0-d)] *= 1.0 - 0.0001
            results.append((t0, gp.lnlikelihood(f - model)))
        results = np.array(results)

        model = np.ones_like(f)
        model[(t < truth[2]+d) * (t > truth[2]-d)] *= 1.0 - 0.0001
        ll_true = gp.lnlikelihood(f - model)
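        # (The excerpt ends here. A hedged guess, not the original code, at how
        # the pre-allocated grids might be filled from the quantities above.)
        ll[ix, iy] = ll_true - null
        delta_ll[ix, iy] = ll_true - results[:, 1].max()
        s2n[ix, iy] = (ll_true - np.mean(results[:, 1])) / np.std(results[:, 1])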
Example #7
File: fake_data.py Project: cciaschi/bart
# -*- coding: utf-8 -*-

from __future__ import division, print_function, absolute_import, unicode_literals

__all__ = []

import time
import numpy as np
import matplotlib.pyplot as pl
from george import GaussianProcess

import bart
from bart.data import GPLightCurve

# Generate fake data.
gp = GaussianProcess([5e-4, 20.0, 25.0])
t = np.arange(100, 120, 0.5 / 24.0)
yerr = 1e-4 * np.ones_like(t)
y = gp.sample_prior(t)[0] + yerr * np.random.randn(len(yerr)) + 1

# Inject a transit.
t0_true = 110.0
planet = bart.Planet(r=0.02, period=400.0, t0=t0_true)
star = bart.Star(ldp=bart.ld.QuadraticLimbDarkening(0.3, 0.1))
ps = bart.PlanetarySystem(star)
ps.add_planet(planet)
model = ps.light_curve(t)
y *= model

# Plot the data.
pl.plot(t, y, ".k")
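A hedged continuation, reusing only the GaussianProcess calls that appear elsewhere on this page, to check how strongly the GP likelihood prefers the injected transit over a flat model ("fake_data.png" is a made-up filename):

pl.savefig("fake_data.png")

gp.compute(t, yerr)
null = gp.lnlikelihood(y - 1.0)
print(gp.lnlikelihood(y - model) - null)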
Example #8
File: twod.py Project: rossfadely/george
import numpy as np
from matplotlib import pyplot as pl

from george import GaussianProcess

np.random.seed(42)


def f(x):
    return np.exp(-0.5 * (x[:, 0]**2 + x[:, 1]**2 / 0.1))


x = np.random.randn(200).reshape([100, 2])
y = f(x)
yerr = 0.01 * np.ones_like(y)

gp = GaussianProcess([0.1, 1.0, 1.0])
gp.optimize(x, y, yerr=yerr)
print(gp.pars)

# Grid.
X, Y = np.meshgrid(np.linspace(-3, 3, 40), np.linspace(-3, 3, 40))
mu, var = gp.predict(np.vstack([X.ravel(), Y.ravel()]).T)

ymin, ymax = y.min(), y.max()
colors = (y - ymin) / ymax

pl.figure(figsize=[4, 4])
samples = np.random.multivariate_normal(mu, var, 100)
for i in range(samples.shape[0]):
    pl.clf()
    pl.imshow((samples[i].reshape([40, 40]) - ymin) / ymax,
Example #9
File: periodic.py Project: iancze/george
import os
import sys

d = os.path.dirname
sys.path.insert(0, d(d(os.path.abspath(__file__))))

import numpy as np
import matplotlib.pyplot as pl
from george import GaussianProcess
from george.kernels import ExpSquaredKernel, CosineKernel

np.random.seed(123)

kernel = ExpSquaredKernel(1.0, 2.3)
gp = GaussianProcess(kernel)

# Generate some fake data.
period = 0.956
x = 10 * np.sort(np.random.rand(75))
yerr = 0.1 + 0.1 * np.random.rand(len(x))
y = gp.sample_prior(x)
y += 0.8 * np.cos(2 * np.pi * x / period)
y += yerr * np.random.randn(len(yerr))

# Set up a periodic kernel.
pk = ExpSquaredKernel(np.sqrt(0.8), 1000.0) * CosineKernel(period)
kernel2 = kernel + pk
gp2 = GaussianProcess(kernel2)

# Condition on this data.
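The excerpt stops right at the conditioning step. Following the compute/lnlikelihood pattern used by the other examples on this page, a hedged continuation (not the original code) might read:

gp2.compute(x, yerr)
print(gp2.lnlikelihood(y))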