Example 1
    def __init__(self, x, y, yerr, subdivisions):
        """
        Initialize global variables of Gaussian Process Regression Interpolator
    
        Args:
            x (array): Independent variable
            y (array): Dependent variable
            yerr (array): Uncertainty on y
            subdivisions: The number of subdivisions between data points
        """

        # Define kernels
        kernel_expsq = 38**2 * kernels.ExpSquaredKernel(metric=10**2)
        kernel_periodic = 150**2 * kernels.ExpSquaredKernel(
            2**2) * kernels.ExpSine2Kernel(gamma=0.05, log_period=np.log(11))
        kernel_poly = 5**2 * kernels.RationalQuadraticKernel(
            log_alpha=np.log(.78), metric=1.2**2)
        kernel_extra = 5**2 * kernels.ExpSquaredKernel(1.6**2)
        kernel = kernel_expsq + kernel_periodic + kernel_poly + kernel_extra

        # Create GP object
        self.gp = george.GP(kernel, mean=np.mean(y), fit_mean=False)
        self.gp.compute(x, yerr)

        # Set instance attributes
        self.ndim = len(self.gp)
        self.x = x
        self.y = y
        self.yerr = yerr
        self.subdivisions = subdivisions
        self.priors = [prior.Prior(0, 1) for i in range(self.ndim)]
        self.x_predict = np.linspace(min(self.x), max(self.x),
                                     subdivisions * (len(self.x) - 1) + 1)
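
    # A minimal sketch (not in the original class) of the matching prediction
    # step, assuming the standard george API and the attributes set above:
    def predict(self):
        """Evaluate the GP mean and 1-sigma width on the dense grid."""
        mu, var = self.gp.predict(self.y, self.x_predict, return_var=True)
        return mu, np.sqrt(var)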
Example 2
def create_data(gamma=10, period=1, amp=1):
    """Create a randomized periodic-sinusoidal function.

    Some example inputs are:
    gamma = 10
    period = 1
    amp = 1
    
    Returns the grid of possible x-values, the y-values sampled at those
    points, and the gp object generated with the given gamma, period, and amplitude.
    """

    #generate 10 random numbers for x
    pre_x = 10 * np.sort(np.random.rand(10))
    #determine the y error and make it an array as long as pre_x
    #have the error scale with the amplitude
    pre_yerr = 0.2 * amp * np.ones_like(pre_x)
    x_possible = np.linspace(0, 10, 1000)

    #create a sinusoidal plot based on inputs
    #establish the kernel type we are using
    kernel = amp * kernels.ExpSine2Kernel(gamma, period)
    gp = george.GP(kernel)
    #factorize the GP covariance at our x-values (0-10) with the given y error
    gp.compute(pre_x, pre_yerr)
    #note: pre_x is never used again after this

    #draw one realization of y-values from the GP prior
    pre_y = gp.sample(x_possible)  #one sample at every point of x_possible
    #return our simulated data with the original function
    return (x_possible, pre_y, gp)
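
A quick smoke test for create_data, assuming numpy, matplotlib, and the george imports used above are in scope:

import matplotlib.pyplot as plt

x_possible, pre_y, gp = create_data(gamma=10, period=1, amp=1)
plt.plot(x_possible, pre_y)
plt.xlabel("x")
plt.ylabel("sampled y")
plt.show()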
Example 3
def kern_QP(p):
    nper = (len(p) - 1) // 4  # integer division: four parameters per component
    for i in range(nper):
        log_a, log_gamma, period, log_tau = p[i * 4:i * 4 + 4]
        a_sq = 10.0**(2 * log_a)
        gamma = 10.0**log_gamma
        tau_sq = 10.0**(2 * log_tau)
        if i == 0:
            kern = a_sq * kernels.ExpSine2Kernel(gamma, period) * \
              kernels.ExpSquaredKernel(tau_sq)
        else:
            kern += a_sq * kernels.ExpSine2Kernel(gamma, period) * \
              kernels.ExpSquaredKernel(tau_sq)
    log_sig_ppt = p[-1]
    sig = 10.0**(log_sig_ppt - 3)
    return kern, sig
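
kern_QP expects four parameters per quasi-periodic component plus one trailing noise term, so len(p) must equal 4*nper + 1. An illustrative two-component call (the values are made up):

import numpy as np

p = np.array([0.0, 0.5, 11.0, 1.5,   # component 1: log_a, log_gamma, period, log_tau
              -0.5, 0.3, 25.0, 2.0,  # component 2
              0.0])                  # log_sig_ppt (noise in log10 parts per thousand)
kern, sig = kern_QP(p)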
Example 4
def computeModel():
    global gp
    kernel = np.var(y) * kernels.ExpSquaredKernel(0.5) \
        * kernels.ExpSine2Kernel(log_period=0.5, gamma=1)
    gp = george.GP(kernel)
    # compute() takes per-point uncertainties as its second argument, not y;
    # none are available here, so let george use its default jitter
    gp.compute(x)
    result = minimize(neg_ln_like, gp.get_parameter_vector(), jac=grad_neg_ln_like)
    gp.set_parameter_vector(result.x)
    # predict only after the optimized parameters have been set
    return gp.predict(y, x, return_var=True)
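
neg_ln_like and grad_neg_ln_like are defined elsewhere in the source; the usual definitions (the same ones that appear in Example 12 below) are, assuming scipy's minimize is imported:

from scipy.optimize import minimize

def neg_ln_like(p):
    gp.set_parameter_vector(p)
    return -gp.log_likelihood(y)

def grad_neg_ln_like(p):
    gp.set_parameter_vector(p)
    return -gp.grad_log_likelihood(y)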
Example 5
    def __init__(self, P=None, *args, **kwargs):
        """Initialises the priors, initial hp values, and kernel."""

        # Set up kernel
        # -------------
        k_spatial = 1.0 * kernels.ExpSquaredKernel(
            metric=[1.0, 1.0], ndim=3, axes=[0, 1])
        k_temporal = 1.0 * kernels.ExpSquaredKernel(
                                            metric=1.0,
                                            ndim=3, axes=2) \
                         * kernels.ExpSine2Kernel(
                                             gamma=2, log_period=1,
                                            ndim=3, axes=2)
        k_total = k_spatial + k_temporal

        if P is None:
            P = 0.5

        # NOTE: sigt always starts as a multiple of the period
        super().__init__(kernel=k_total,
                         parameter_names=('ln_Axy', '2ln_sigx', '2ln_sigy',
                                          'ln_At', '2ln_sigt', 'gamma', 'lnP'),
                         default_values=(-12.86, -3.47, -4.34, -12.28,
                                         2 * np.log(4 * P), 1.0, np.log(P)),
                         *args,
                         **kwargs)

        # self.set_hyperpriors(keyword=keyword)
        # self.set_bounds(keyword=keyword)
        # self.parameter_names = ('ln_Axy', '2ln_sigx', '2ln_sigy',
        # 				 		'ln_At', '2ln_sigt', 'gamma', 'lnP')
        # self.default_values = (-12.86, -3.47, -4.34, -12.28,
        # 					   max(2*np.log(4*P), self.bounds[4][0] + 1e-6),
        # 					   1.0, np.log(P))
        # self.kernel = k_total
        # self.kernel.set_parameter_vector(np.array(self.default_values))

        # # Additional potential tools
        # self.get_parameter_vector = self.kernel.get_parameter_vector
        # self.set_parameter_vector = self.kernel.set_parameter_vector

        if np.log(P) < self.bounds[-1][0] or np.log(P) > self.bounds[-1][1]:
            raise PriorInitialisationError(
                ("Initial period is out of bounds\nperiod: {},\n"
                 "lnP: {}, \nbounds: {}".format(P, np.log(P),
                                                self.bounds[-1])))
        elif not np.isfinite(self.log_prior(self.default_values)):
            raise PriorInitialisationError(
                ("Initial hyperparameter values are out of "
                 "prior bounds.\n"
                 "hp_default: {}\n"
                 "bounds: {}\n"
                 "P: {}".format(self.default_values, self.bounds, P)))

        self.default_X_cols = ['x', 'y', 't']
Example 6
def test_bounds():
    kernel = 10 * kernels.ExpSquaredKernel(1.0, metric_bounds=[(None, 4.0)])
    kernel += 0.5 * kernels.RationalQuadraticKernel(log_alpha=0.1, metric=5.0)
    gp = GP(kernel, white_noise=LinearWhiteNoise(1.0, 0.1))

    # Test bounds length.
    assert len(gp.get_parameter_bounds()) == len(gp.get_parameter_vector())
    gp.freeze_all_parameters()
    gp.thaw_parameter("white_noise:m")
    assert len(gp.get_parameter_bounds()) == len(gp.get_parameter_vector())

    # Test invalid bounds specification.
    with pytest.raises(ValueError):
        kernels.ExpSine2Kernel(gamma=0.1, log_period=5.0, bounds=[10.0])
Example 7
def angus_kernel(theta):
    """
    Use the kernel that Ruth Angus uses; be sure to cite her.
    """
    theta = np.exp(theta)
    A = theta[0]
    l = theta[1]
    G = theta[2]
    P = theta[3]
    sigma = theta[4]
    kernel = (A * kernels.ExpSquaredKernel(l) *
              kernels.ExpSine2Kernel(G, P) +
              kernels.WhiteKernel(sigma)
              )
    return kernel
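
Every entry of theta is a log-parameter (the first line exponentiates the whole vector). A sketch call with illustrative values, ordered as indexed above; note that WhiteKernel requires a george release that still ships it:

import numpy as np

# theta = ln([A, l, G, P, sigma])
theta = np.log([1.0, 10.0, 1.0, 25.0, 0.1])
kernel = angus_kernel(theta)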
Example 8
def get_kernel(architecture, kernel_name, domain_name_lst, n_dims):

    if architecture == "trans":
        mapping = trans_domain_kernel_mapping
    else:
        mapping = None  # only the "heuristic" branch below uses mapping

    kernel = None
    initial_ls = np.ones([n_dims])

    if kernel_name == "constant":
        kernel = kernels.ConstantKernel(1, ndim=n_dims)
    elif kernel_name == "polynomial":
        kernel = kernels.PolynomialKernel(log_sigma2=1, order=3, ndim=n_dims)
    elif kernel_name == "linear":
        kernel = kernels.LinearKernel(log_gamma2=1, order=3, ndim=n_dims)
    elif kernel_name == "dotproduct":
        kernel = kernels.DotProductKernel(ndim=n_dims)
    elif kernel_name == "exp":
        kernel = kernels.ExpKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "expsquared":
        kernel = kernels.ExpSquaredKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern32":
        kernel = kernels.Matern32Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern52":
        kernel = kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "rationalquadratic":
        kernel = kernels.RationalQuadraticKernel(log_alpha=1,
                                                 metric=initial_ls,
                                                 ndim=n_dims)
    elif kernel_name == "expsine2":
        kernel = kernels.ExpSine2Kernel(1, 2, ndim=n_dims)
    elif kernel_name == "heuristic":
        kernel = mapping[domain_name_lst[0]](ndim=n_dims, axes=0)
        for i in range(len(domain_name_lst[1:])):
            d = domain_name_lst[1:][i]
            kernel += mapping[d](ndim=n_dims, axes=i)
    elif kernel_name == "logsquared":
        kernel = kernels.LogSquaredKernel(initial_ls, ndim=n_dims)

    return kernel
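
An illustrative call, assuming get_kernel's module-level imports are in place; the architecture and domain list only matter for the "heuristic" branch:

k = get_kernel(architecture=None, kernel_name="expsquared",
               domain_name_lst=[], n_dims=3)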
Example 9
    def add_spots(self,QP=[5e-5,0.5,30.,28.]):
        """
        This attribute add stellar variability using a Quasi-periodic Kernel
        The activity is added using a george Kernel
        """

        if not hasattr(self,'flux_spots'):

            A  = QP[0]
            le = QP[1]
            lp = QP[2]
            P  = QP[3]

            from george import kernels, GP
            k = A * kernels.ExpSine2Kernel(gamma=1./2/lp,log_period=np.log(P)) * \
            kernels.ExpSquaredKernel(metric=le)
            gp = GP(k)
            self.flux_spots = 1 + gp.sample(self.time)

        self.flux = self.flux * self.flux_spots

        self.spots = True
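
A hypothetical usage on a simulation object `sim` that already has `time` and `flux` attributes; the QP list is [A, le, lp, P] as unpacked above:

sim.add_spots(QP=[5e-5, 0.5, 30., 28.])  # amplitude, decay length, periodic scale, period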
Example 10
    def __init__(self, P=None, **kwargs):
        """Initialises the priors, initial hp values, and kernel."""

        # Set up kernel
        k_temporal = 1.0 * kernels.ExpSquaredKernel(metric=1.0, ndim=1) \
            * kernels.ExpSine2Kernel(gamma=2, log_period=1, ndim=1)

        if P is None:
            P = 0.5

        # NOTE: sigt always starts as a multiple of the period
        super().__init__(kernel=k_temporal,
                         parameter_names=('ln_At', '2ln_sigt', 'g', 'lnP'),
                         default_values=(-12.28, 2 * np.log(4 * P), 1.0,
                                         np.log(P)),
                         **kwargs)

        if not np.isfinite(self.log_prior(self.default_values)):
            raise PriorInitialisationError(
                "Initial hyperparameter values are out of prior bounds.")

        self.default_X_cols = 't'
Example 11
    def get_gp(self,Kernel="Exp",amplitude=1e-3,metric=10.,gamma=10.,period=10.):
        """
        Citlalicue uses the kernels provided by george,
        now the options are "Exp", "Matern32", "Matern52", and Quasi-Periodic "QP"
        User can modify hyper parameters amplitude, metric, gamma, period.
        """
        import george
        from george import kernels
        if Kernel == "Matern32":
            kernel = amplitude * kernels.Matern32Kernel(metric)
        elif Kernel == "Matern52":
            kernel = amplitude * kernels.Matern52Kernel(metric)
        elif Kernel == "Exp":
            kernel = amplitude * kernels.ExpKernel(metric)
        elif Kernel == "QP":
            log_period = np.log(period)
            kernel = amplitude * kernels.ExpKernel(metric)*kernels.ExpSine2Kernel(gamma,log_period)

        #Save the kernel as an attribute
        self.kernel = kernel
        #Create the GP object with george
        self.gp = george.GP(self.kernel,mean=1)
        #Compute the covariance matrix using the binned data
        self.gp.compute(self.time_bin, self.ferr_bin)
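
Hypothetical usage on a Citlalicue light-curve object `lc`; `flux_bin` is an assumed attribute holding the binned fluxes that `time_bin` and `ferr_bin` refer to:

lc.get_gp(Kernel="QP", amplitude=1e-3, metric=10., gamma=10., period=10.)
mu, var = lc.gp.predict(lc.flux_bin, lc.time_bin, return_var=True)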
Example 12
        def kernel4(data):
            def neg_ln_like(p):
                gp.set_parameter_vector(p)
                return -gp.log_likelihood(y)

            def grad_neg_ln_like(p):
                gp.set_parameter_vector(p)
                return -gp.grad_log_likelihood(y)

            try:
                x, y, err = data
                ls = get_ls(x, y, err)
                k = np.var(y) * kernels.ExpSquaredKernel(ls**2)
                k2 = kernels.ExpSquaredKernel(90**2) * kernels.ExpSine2Kernel(
                    gamma=ls, log_period=ls)

                kernel = k + k2

                gp = george.GP(kernel,
                               fit_mean=True,
                               white_noise=np.max(err)**2,
                               fit_white_noise=True)

                gp.compute(x, err)
                results = optimize.minimize(neg_ln_like,
                                            gp.get_parameter_vector(),
                                            jac=grad_neg_ln_like,
                                            method="L-BFGS-B",
                                            tol=1e-5)
                # Update the kernel and print the final log-likelihood.
                gp.set_parameter_vector(results.x)

            except Exception:
                # fall back to the simpler kernel if anything above fails
                gp, results = kernel1(data)

            return gp, results
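
get_ls and the kernel1 fallback are defined elsewhere in the source. A hypothetical stand-in for get_ls, guessing a characteristic length scale from the strongest Lomb-Scargle peak, might look like:

import numpy as np
from astropy.timeseries import LombScargle

def get_ls(x, y, err):
    # Hypothetical helper: take the period of the highest
    # Lomb-Scargle peak as a length-scale guess.
    frequency, power = LombScargle(x, y, err).autopower()
    return 1.0 / frequency[np.argmax(power)]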
Example 13
            offset = np.zeros(len(t))
            idx = t < 57161
            offset[idx] = self.d_harps1
            offset[~idx] = self.d_harps2

            return rv1 + rv2 + offset


#==============================================================================
# GP
#==============================================================================
from george import kernels

if star == 'HD117618':
    k1 = kernels.ExpSine2Kernel(gamma=1,
                                log_period=np.log(100),
                                bounds=dict(gamma=(0, 100),
                                            log_period=(0, 10)))
    k2 = np.std(y) * kernels.ExpSquaredKernel(100)
    kernel = k1 * k2
    truth = dict(P1=0.25,
                 tau1=0.1,
                 k1=np.std(y) / 100,
                 w1=0.,
                 e1=0.4,
                 P2=3.1,
                 tau2=0.1,
                 k2=np.std(y) / 100,
                 w2=0.,
                 e2=0.4,
                 d_aat=0.,
                 d_harps1=0.,
Example 14
            offset = np.zeros(len(t))
            idx = t < 57161
            offset[idx] = self.d_harps1
            offset[~idx] = self.d_harps2

            return rv1 + rv2 + offset


#==============================================================================
# GP
#==============================================================================
from george import kernels

if star == 'HD117618':
    k1 = kernels.ExpSine2Kernel(gamma=1,
                                log_period=np.log(100),
                                bounds=dict(gamma=(-1, 100),
                                            log_period=(0, 10)))
    k2 = np.std(y) * kernels.ExpSquaredKernel(1.)
    k3 = 0.66**2 * kernels.RationalQuadraticKernel(log_alpha=np.log(0.78),
                                                   metric=1.2**2)
    kernel = k1 * k2 + k3
    truth = dict(P1=0.25,
                 tau1=0.1,
                 k1=np.std(y) / 100,
                 w1=0.,
                 e1=0.4,
                 P2=3.1,
                 tau2=0.1,
                 k2=np.std(y) / 100,
                 w2=0.,
                 e2=0.4,
Example 15
    def gaussian_process_smooth(self,
                                per=None,
                                minobs=10,
                                phase_offset=None,
                                recompute=False,
                                scalemin=None,
                                scalemax=None):
        """
        per = cheaty hackish thing to get a gaussian process with some
        continuity at the end points

        minobs = minimum number of observations in each filter before we fit
        """
        outgp = getattr(self, 'outgp', None)

        if outgp is not None:
            if not recompute:
                return outgp
        else:
            outgp = {}

        # note that we explicitly ask for non-smoothed and non-gpr lc here, since we're trying to compute the gpr
        outlc = self.get_lc(recompute=False,
                            per=per,
                            smoothed=False,
                            gpr=False,
                            phase_offset=phase_offset)

        for i, pb in enumerate(outlc):
            thislc = outlc.get(pb)
            thisphase, thisFlux, thisFluxErr = thislc

            nobs = len(thisphase)

            # if we do not have enough observations in this passband, skip it
            if nobs < minobs:
                continue

            # TODO : REVISIT KERNEL CHOICE
            if per == 1:
                # periodic
                kernel = kernels.ConstantKernel(1.) * kernels.ExpSine2Kernel(
                    1.0, 0.0)
            elif per == 2:
                # quasiperiodic
                kernel = kernels.ConstantKernel(1.) * kernels.ExpSquaredKernel(
                    100.) * kernels.ExpSine2Kernel(1.0, 0.0)
            else:
                # non-periodic
                kernel = kernels.ConstantKernel(1.) * kernels.ExpSquaredKernel(
                    1.)

            gp = george.GP(kernel,
                           mean=thisFlux.mean(),
                           fit_mean=True,
                           fit_white_noise=True,
                           white_noise=np.log(thisFluxErr.mean()**2.))

            # define the objective function
            def nll(p):
                gp.set_parameter_vector(p)
                ll = gp.lnlikelihood(thisFlux, quiet=True)
                return -ll if np.isfinite(ll) else 1e25

            # define the gradient of the objective function.
            def grad_nll(p):
                gp.set_parameter_vector(p)
                return -gp.grad_lnlikelihood(thisFlux, quiet=True)

            # pre-compute the kernel
            gp.compute(thisphase, thisFluxErr)
            p0 = gp.get_parameter_vector()

            max_white_noise = np.log((3. * np.median(thisFluxErr))**2.)
            min_white_noise = np.log((0.3 * np.median(thisFluxErr))**2)

            # coarse optimization with scipy.optimize
            # TODO : almost anything is better than scipy.optimize
            if per == 1:
                # mean, white_noise, amplitude, gamma, FIXED_period
                results = op.minimize(nll, p0, jac=grad_nll, bounds=[(None, None), (min_white_noise, max_white_noise),\
                        (None, None), (None, None), (0.,0.)])
            elif per == 2:
                # mean white_noise, amplitude, variation_timescale, gamma, FIXED_period
                results = op.minimize(nll, p0, jac=grad_nll, bounds=[(None, None), (min_white_noise, max_white_noise),\
                        (None, None), (scalemin, scalemax), (None, None), (0.,0.)])
            else:
                # mean white_noise, amplitude, variation_timescale
                results = op.minimize(nll, p0, jac=grad_nll, bounds=[(None, None), (min_white_noise, max_white_noise),\
                        (None, None), (scalemin,scalemax)])

            gp.set_parameter_vector(results.x)
            # george is a little different than sklearn in that the prediction stage needs the input data
            outgp[pb] = (gp, thisphase, thisFlux, thisFluxErr)

        self.outgp = outgp
        return outgp
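
Each outgp entry bundles the fitted GP with the data it was conditioned on, since george needs the training data again at prediction time. A sketch of evaluating one passband, where `pb` is any key present in outgp:

gp_pb, phase, flux, flux_err = outgp[pb]
phase_grid = np.linspace(phase.min(), phase.max(), 500)
mu, var = gp_pb.predict(flux, phase_grid, return_var=True)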
Example 16
# Plot the data using the star.plot function
# plt.plot(RV_ALL[:,0], y, '-')
plt.plot(t, y, '-')
plt.ylabel(r'RV $[m/s]$')
plt.xlabel('t')
plt.title('Simulated RV')
plt.show()

#==============================================================================
# GP Modelling
#==============================================================================

from george.modeling import Model
from george import kernels

k1 = kernels.ExpSine2Kernel(gamma=1, log_period=np.log(415.4))
k2 = np.var(y) * kernels.ExpSquaredKernel(1)
kernel = k1 * k2

gp = george.GP(kernel,
               mean=Model(**truth),
               white_noise=np.log(1),
               fit_white_noise=True)

# gp  	= george.GP(kernel, mean=Model(**truth))
gp.compute(RV_ALL[:, 0], RV_ALL[:, 2])


def lnprob2(p):
    # Set the parameter values to the given vector
    gp.set_parameter_vector(p)
Example 17
	t 	 = t_bin
	y 	 = y_bin
	yerr = err_bin
else:
	t 	 = BJD
	y 	 = Jitter
	yerr = err


################
# Optimization # 
################
from george import kernels

# kernel1 = 2.0 * kernels.Matern32Kernel(5)
kernel2 = 10 * kernels.ExpSine2Kernel(gamma=10.0, log_period=np.log(8))
kernel2 *= kernels.ExpSquaredKernel(5)
# kernel = kernel1 + kernel2
kernel = kernel2

import george
# gp = george.GP(kernel)
# y_test = gp.sample(BJD)
# gp.compute(BJD, yerr)
# plt.errorbar(t, y_test, yerr=yerr, fmt=".k", capsize=0)
# plt.show()


#gp = george.GP(kernel, mean=np.mean(y), fit_mean=True,
#               white_noise=np.log(0.19**2), fit_white_noise=True)
gp = george.GP(kernel, mean=np.mean(y), fit_mean=True)
Example 18
import numpy as np

import george
from george import kernels

from celerite import terms

__all__ = ["george_kernels", "george_solvers",
		"celerite_terms",
		"setup_george_kernel", "setup_celerite_terms"]

george_kernels = {
	"Exp2": kernels.ExpSquaredKernel(10**2),
	"Exp2ESin2": (kernels.ExpSquaredKernel(10**2) *
				kernels.ExpSine2Kernel(2 / 1.3**2, 1.0)),
	"ESin2": kernels.ExpSine2Kernel(2 / 1.3**2, 1.0),
	"27dESin2": kernels.ExpSine2Kernel(2 / 1.3**2, 27.0 / 365.25),
	"RatQ": kernels.RationalQuadraticKernel(0.8, 0.1**2),
	"Mat32": kernels.Matern32Kernel((0.5)**2),
	"Exp": kernels.ExpKernel((0.5)**2),
	# "W": kernels.WhiteKernel,  # deprecated, delegated to `white_noise`
	"B": kernels.ConstantKernel,
}
george_solvers = {
	"basic": george.BasicSolver,
	"HODLR": george.HODLRSolver,
}

celerite_terms = {
	"N": terms.Term(),
Example 19
    plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
    plt.ylabel(r"$y$ [m/s]")
    plt.xlabel(r"$t$ [days]")
    plt.ylim((-8, 12))
    plt.title("Simulated observed data")
    plt.show()

#==============================================================================
# Modelling
#==============================================================================

import george
george.__version__  # no-op outside an interactive session; left from a notebook
from george import kernels

k1 = kernels.ExpSine2Kernel(gamma=6, log_period=np.log(25))
k2 = np.var(y) * kernels.ExpSquaredKernel(1)
kernel = k1 * k2
# mean: An object (following the modeling protocol) that specifies the mean function of the GP.
#gp = george.GP(k, mean=Model(amp=4.4, P=7.6, phase=0))
gp = george.GP(kernel,
               mean=Model(**truth),
               white_noise=np.log(1),
               fit_white_noise=True)
gp.compute(t, yerr)


def lnprob2(p):
    # Set the parameter values to the given vector
    gp.set_parameter_vector(p)
    # Compute the logarithm of the marginalized likelihood of a set of observations under the Gaussian process model.
Example 20
    t = t_bin
    y = y_bin
    yerr = err_bin
else:
    t = BJD
    y = Jitter
    yerr = RV_noise

#==============================================================================
# Gaussian Processes
#==============================================================================
import george
from george import kernels

# kernel1 =  kernels.Matern32Kernel(5)
kernel2 = np.var(y) * kernels.ExpSine2Kernel(gamma=2.,
                                             log_period=np.log(23.81 / 3))
kernel2 *= kernels.ExpSquaredKernel(5)
# kernel = kernel1 + kernel2
kernel = kernel2
# kernel.freeze_parameter("k1:k2:log_period")

if 0:  # generate a test GP time series
    gp = george.GP(kernel)
    y_test = gp.sample(BJD)
    plt.errorbar(t, y_test, yerr=yerr, fmt=".k", capsize=0)
    plt.show()

#gp = george.GP(kernel, mean=np.mean(y), fit_mean=True,
#               white_noise=np.log(0.19**2), fit_white_noise=True)
# gp = george.GP(kernel, mean=np.mean(y), fit_mean=True)
gp = george.GP(kernel)
Example 21
    x = np.random.rand(100)
    gp.compute(x, 1e-2)


kernels_to_test = [
    kernels.ConstantKernel(log_constant=0.1),
    kernels.ConstantKernel(log_constant=10.0, ndim=2),
    kernels.ConstantKernel(log_constant=5.0, ndim=5),
    kernels.DotProductKernel(),
    kernels.DotProductKernel(ndim=2),
    kernels.DotProductKernel(ndim=5, axes=0),
    kernels.CosineKernel(log_period=1.0),
    kernels.CosineKernel(log_period=0.5, ndim=2),
    kernels.CosineKernel(log_period=0.5, ndim=2, axes=1),
    kernels.CosineKernel(log_period=0.75, ndim=5, axes=[2, 3]),
    kernels.ExpSine2Kernel(gamma=0.4, log_period=1.0),
    kernels.ExpSine2Kernel(gamma=12., log_period=0.5, ndim=2),
    kernels.ExpSine2Kernel(gamma=17., log_period=0.5, ndim=2, axes=1),
    kernels.ExpSine2Kernel(gamma=13.7, log_period=-0.75, ndim=5, axes=[2, 3]),
    kernels.ExpSine2Kernel(gamma=-0.7, log_period=0.75, ndim=5, axes=[2, 3]),
    kernels.ExpSine2Kernel(gamma=-10, log_period=0.75),
    kernels.LocalGaussianKernel(log_width=0.5, location=1.0),
    kernels.LocalGaussianKernel(log_width=0.1, location=0.5, ndim=2),
    kernels.LocalGaussianKernel(log_width=1.5, location=-0.5, ndim=2, axes=1),
    kernels.LocalGaussianKernel(log_width=2.0,
                                location=0.75,
                                ndim=5,
                                axes=[2, 3]),
    kernels.LinearKernel(order=0, log_gamma2=0.0),
    kernels.LinearKernel(order=2, log_gamma2=0.0),
    kernels.LinearKernel(order=2, log_gamma2=0.0),
Example 22
#==============================================================================
# GP
#==============================================================================

t = t[idx]
y = XY[idx]
yerr = yerr[idx]

from george import kernels

# k1 = 1**2 * kernels.ExpSquaredKernel(metric=10**2)
# k2 = 1**2 * kernels.ExpSquaredKernel(80**2) * kernels.ExpSine2Kernel(gamma=8, log_period=np.log(36.2))
# boundary doesn't seem to take effect
k2 = 1**2 * kernels.ExpSquaredKernel(80**2) * kernels.ExpSine2Kernel(
    gamma=11,
    log_period=np.log(36.2),
    bounds=dict(gamma=(-3, 30),
                log_period=(np.log(36.2 - 5), np.log(36.2 + 6))))
# k3 = 0.66**2 * kernels.RationalQuadraticKernel(log_alpha=np.log(0.78), metric=1.2**2)
# k4 = 1**2 * kernels.ExpSquaredKernel(40**2)
# kernel = k1 + k2 + k3 + k4
kernel = k2

import george
# gp = george.GP(kernel, mean=np.mean(y), fit_mean=True,
#               white_noise=np.log(0.19**2), fit_white_noise=True)
gp = george.GP(kernel, mean=np.mean(y), fit_mean=True)
# gp.freeze_parameter('kernel:k2:log_period')
# gp.freeze_parameter('kernel:k2:gamma')

#==============================================================================
Example 23
    '''
    mu = 0.07 # mean of the Normal prior
    sigma = 0.004 # standard deviation of the Normal prior
    prob_r = np.log(gaussian(r - mu, sigma))

    mu = 2456915.70
    sigma =  0.005
    prob_t0 = np.log(gaussian(T0 - mu, sigma))
    prob = prob_t0
    '''
    return 0

K = {'Constant': 2. ** 2,
     'ExpSquaredKernel': kernels.ExpSquaredKernel(metric=1.**2),
     'ExpSine2Kernel': kernels.ExpSine2Kernel(gamma=1.0, log_period=1.0),
     'Matern32Kernel': kernels.Matern32Kernel(2.)}

def neo_init_george(kernels):
    # NOTE: this `kernels` argument (a list of lists of kernel names)
    # shadows the george.kernels module within the function body
    k_out = K['Constant']
    for func in kernels[0]:
        k_out *= K[func]
    # each remaining inner list becomes another product term in the sum
    for funcs in kernels[1:]:
        k = K['Constant']
        for func in funcs:
            k *= K[func]
        k_out += k
    gp = george.GP(k_out)
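
neo_init_george takes a list of lists of kernel names: each inner list is multiplied into one product term (seeded with the Constant kernel) and the products are summed. The snippet is truncated before any return, so an illustrative call is just:

neo_init_george([["ExpSquaredKernel", "ExpSine2Kernel"],  # one quasi-periodic product term
                 ["Matern32Kernel"]])                     # plus a second, Matern term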
Example 24
if 0:  # plot the jitter time series
    plt.errorbar(t, y, yerr=yerr, fmt="o", capsize=0, alpha=0.5)
    plt.ylabel('RV [m/s]')
    plt.xlabel('time [day]')
    plt.show()

#==============================================================================
# GP
#==============================================================================

from george import kernels

k1 = 10 * kernels.ExpSquaredKernel(metric=10**2)
# k2 = 1**2 * kernels.ExpSquaredKernel(90**2) * kernels.ExpSine2Kernel(gamma=1, log_period=1.9)
k2 = 20 * kernels.ExpSquaredKernel(200**2) * kernels.ExpSine2Kernel(
    gamma=2,
    log_period=np.log(17),
    bounds=dict(gamma=(-3, 30), log_period=(np.log(17 - 5), np.log(17 + 5))))
k3 = 20 * kernels.RationalQuadraticKernel(log_alpha=np.log(100), metric=120**2)
k4 = 10 * kernels.ExpSquaredKernel(1000**2)
# kernel = k1 + k2 + k3 + k4
kernel = k3 + k2 + k4
import george
#gp = george.GP(kernel, mean=np.mean(y), fit_mean=True,
#               white_noise=np.log(0.19**2), fit_white_noise=True)
gp = george.GP(kernel, mean=np.mean(y), fit_mean=True)

gp.compute(t, yerr)

#==============================================================================
# Optimization
#==============================================================================
Example 25
        f2      = 2*np.arctan( np.sqrt((1+self.e2)/(1-self.e2))*np.tan(e_anom2*.5) )
        rv2     = 100*self.k2*(np.cos(f2 + self.w2) + self.e2*np.cos(self.w2))

        offset      = np.zeros(len(t))
        idx         = t < 57161
        offset[idx] = self.offset1
        offset[~idx]= self.offset2

        return rv1 + rv2 + offset

#==============================================================================
# GP
#==============================================================================
from george import kernels

k1 = kernels.ExpSine2Kernel(gamma=1, log_period=np.log(3200),
                            bounds=dict(gamma=(-3, 1), log_period=(0, 10)))
k2 = kernels.ConstantKernel(log_constant=np.log(1.),
                            bounds=dict(log_constant=(-5, 5))) \
    * kernels.ExpSquaredKernel(1.)
kernel = k1 * k2

truth = dict(P1=8., tau1=1., k1=np.std(y)/100, w1=0., e1=0.4,
             P2=100, tau2=1., k2=np.std(y)/100, w2=0., e2=0.4,
             offset1=0., offset2=0.)
kwargs = dict(**truth)
kwargs["bounds"] = dict(P1=(7.5, 8.5), k1=(0, 0.1), w1=(-2*np.pi, 2*np.pi), e1=(0, 0.9),
                        tau2=(-50, 50), k2=(0, 0.2), w2=(-2*np.pi, 2*np.pi), e2=(0, 0.9))
mean_model = Model(**kwargs)
gp = george.GP(kernel, mean=mean_model, fit_mean=True)
# gp = george.GP(kernel, mean=mean_model, fit_mean=True, white_noise=np.log(0.5**2), fit_white_noise=True)
gp.compute(t, yerr)

def lnprob2(p):
    gp.set_parameter_vector(p)
Example 26
# Plot the data.
fig = pl.figure(figsize=(6, 3.5))
ax = fig.add_subplot(111)
ax.plot(t, y, ".k", ms=2)
ax.set_xlim(min(t), 1999)
ax.set_ylim(min(y), 369)
ax.set_xlabel("year")
ax.set_ylabel("CO$_2$ in ppm")
fig.subplots_adjust(left=0.15, bottom=0.2, right=0.99, top=0.95)
fig.savefig("../_static/hyper/data.png", dpi=150)

# Initialize the kernel.
k1 = 66.0**2 * kernels.ExpSquaredKernel(67.0**2)
k2 = 2.4**2 * kernels.ExpSquaredKernel(90**2) \
    * kernels.ExpSine2Kernel(2.0 / 1.3**2, 1.0)
k3 = 0.66**2 * kernels.RationalQuadraticKernel(0.78, 1.2**2)
k4 = 0.18**2 * kernels.ExpSquaredKernel(1.6**2) + kernels.WhiteKernel(0.19)
kernel = k1 + k2 + k3 + k4

# Set up the Gaussian process and maximize the marginalized likelihood.
gp = george.GP(kernel, mean=np.mean(y))

# Define the objective function (negative log-likelihood in this case).
def nll(p):
    # Update the kernel parameters and compute the likelihood.
    gp.kernel[:] = p
    ll = gp.lnlikelihood(y, quiet=True)

    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25
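
In the tutorial this example comes from, the objective is paired with its gradient and handed to scipy; a sketch of that continuation, using the same pre-0.3 george API as the code above:

import scipy.optimize as op

def grad_nll(p):
    # Gradient of the negative log-likelihood w.r.t. the kernel parameters.
    gp.kernel[:] = p
    return -gp.grad_lnlikelihood(y, quiet=True)

gp.compute(t)
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll)
gp.kernel[:] = results.x
print(gp.lnlikelihood(y))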
Example 27
        offset = np.zeros(len(t))
        idx = t < 57300
        offset[idx] = self.offset1
        offset[~idx] = self.offset2

        return rv1 + rv2 + offset


#==============================================================================
# GP
#==============================================================================
from george import kernels

k1 = 1**2 * kernels.ExpSquaredKernel(metric=10**2)
k2 = 1**2 * kernels.ExpSquaredKernel(90**2) * kernels.ExpSine2Kernel(
    gamma=1, log_period=1.8)
k3 = 0.66**2 * kernels.RationalQuadraticKernel(log_alpha=np.log(0.78),
                                               metric=1.2**2)
kernel = k1 + k2 + k3

truth = dict(P1=8.,
             tau1=1.,
             k1=np.std(y) / 100,
             w1=0.,
             e1=0.4,
             P2=100,
             tau2=1.,
             k2=np.std(y) / 100,
             w2=0.,
             e2=0.4,
             offset1=0.,
Example 28
def predict_Q_data(input_dir_2, filename4="training.txt"):

    nu, lam_squared, stokesQ, stokesU = read_data(input_dir_2, filename4)

    nu_R = nu

    stokesQ_R = stokesQ

    lam_R = lam_squared

    # Squared exponential kernel
    k1 = 0.3**2 * kernels.ExpSquaredKernel(0.02**2)

    # periodic covariance kernel with an exponential component to allow decay away from strict periodicity

    k2 = 0.6**2 * kernels.ExpSquaredKernel(0.5**2) * kernels.ExpSine2Kernel(
        gamma=2 / 2.5**2, log_period=0.0)
    # vary the gamma value to widen the funnel

    # rational quadratic kernel for medium term irregularities.

    k3 = 0.3**2 * kernels.RationalQuadraticKernel(log_alpha=np.log(0.1),
                                                  metric=0.1**2)

    # noise kernel: includes correlated noise & uncorrelated noise

    k4 = 0.18**2 * kernels.ExpSquaredKernel(1.6**2)

    kernel = k1 + k2  #+k3 + k4

    gp = george.GP(kernel,
                   mean=np.mean(stokesQ),
                   fit_mean=True,
                   white_noise=np.log(0.02**2),
                   fit_white_noise=True)
    #gp = george.GP(kernel)

    gp.compute(lam_R)

    # range of wavelength-squared values for prediction:

    x = np.linspace(np.min(lam_squared) - 0.01, np.max(lam_squared),
                    1024)  #extend to smaller wavelengths

    # calculate expectation and variance at each point:
    mu_initial, cov = gp.predict(stokesQ_R, x)
    #mu, cov = gp.predict(stokesQ_2Gz, x, return_var = True)

    std_initial = np.sqrt(np.diag(cov))

    print("Initial prediction of missing Q values: \n {}".format(mu_initial))

    # Define the objective function (negative log-likelihood in this case).
    def nll(p):
        gp.set_parameter_vector(p)
        ll = gp.log_likelihood(stokesQ_R, quiet=True)
        return -ll if np.isfinite(ll) else 1e25

    # And the gradient of the objective function.
    def grad_nll(p):
        gp.set_parameter_vector(p)
        return -gp.grad_log_likelihood(stokesQ_R, quiet=True)

    gp.compute(lam_R)  # already computed above; redundant but harmless

    p0 = gp.get_parameter_vector()

    results = op.minimize(nll, p0, jac=grad_nll, method="L-BFGS-B")

    # run optimization:
    #results = op.minimize(nll, p0, jac=grad_nll)

    gp.set_parameter_vector(results.x)

    x = np.linspace(np.min(lam_squared) - 0.01, np.max(lam_squared),
                    1024)  #extend to smaller wavelengths

    mu_optimized, cov = gp.predict(stokesQ_R, x)

    std_optimized = np.sqrt(np.diag(cov))

    print("Final prediction of missing Q values: \n {}".format(mu_optimized))

    return nu_R, lam_R, stokesQ_R, mu_initial, std_initial, mu_optimized, std_optimized
Example 29
#Load data (assuming co2 comes from statsmodels.datasets, as in the george tutorial)
from statsmodels.datasets import co2
data = co2.load_pandas().data
t = 2000 + (np.array(data.index.to_julian_date()) - 2451545.0) / 365.25
y = np.array(data.co2)
m = np.isfinite(t) & np.isfinite(y) & (t < 1996)
t, y = t[m][::4], y[m][::4]

plt.plot(t, y, ".k")
plt.xlim(t.min(), t.max())
plt.xlabel("year")
plt.ylabel("CO$_2$ in ppm")

#Load kernels
k1 = 66**2 * kernels.ExpSquaredKernel(metric=67**2)
k2 = 2.4**2 * kernels.ExpSquaredKernel(90**2) * kernels.ExpSine2Kernel(
    gamma=2 / 1.3**2, log_period=0.0)
k3 = 0.66**2 * kernels.RationalQuadraticKernel(log_alpha=np.log(0.78),
                                               metric=1.2**2)
k4 = 0.18**2 * kernels.ExpSquaredKernel(1.6**2)
kernel = k1 + k2 + k3 + k4

gp = george.GP(kernel,
               mean=np.mean(y),
               fit_mean=True,
               white_noise=np.log(0.19**2),
               fit_white_noise=True)
gp.compute(t)
print(gp.log_likelihood(y))
print(gp.grad_log_likelihood(y))

import scipy.optimize as op
Example 30
strLC = KeplerLightCurveFile.from_archive(206208968)
strPDC = strLC.PDCSAP_FLUX.remove_outliers()

y = strPDC.flux[:300]
x = strPDC.time[:300]
y = (y / np.median(y)) - 1  # sets the function to begin at 0
x = x[np.isfinite(y)]
y = y[np.isfinite(y)]  # removes NaN values

pl.plot(x, y)
#pl.show()
print(median_absolute_deviation(y))
print(np.var(y))

kernel = np.var(y) * kernels.ExpSquaredKernel(0.5) * kernels.ExpSine2Kernel(
    log_period=0.5, gamma=1)
gp = george.GP(kernel)
# compute() takes the per-point uncertainties as its second argument, not the
# fluxes; no errors are available here, so let george use its default jitter
gp.compute(x)
result = minimize(neg_ln_like, gp.get_parameter_vector(), jac=grad_neg_ln_like)
gp.set_parameter_vector(result.x)

#def plotModel ():
pred_mean, pred_var = gp.predict(y, x, return_var=True)
pred_std = np.sqrt(pred_var)
#pl.errorbar(x,y, yerr=np.var(y) ** 0.5/10, fmt='k.')
pl.plot(x, pred_mean, '--r')
pl.plot(x, y, lw=0.1, color='blue')
pl.ylabel('Relative Flux')
pl.xlabel('Time - BKJD')
pl.show()