Example #1
        def kernel2(data):
            def neg_ln_like(p):
                gp.set_parameter_vector(p)
                return -gp.log_likelihood(y)

            def grad_neg_ln_like(p):
                gp.set_parameter_vector(p)
                return -gp.grad_log_likelihood(y)

            try:
                x, y, err = data
                ls = get_ls(x, y, err)
                k = np.var(y) * kernels.ExpSquaredKernel(ls**2)
                k2 = kernels.ExpKernel(ls)

                kernel = k + k2

                gp = george.GP(kernel,
                               fit_mean=True,
                               white_noise=np.max(err)**2,
                               fit_white_noise=True)

                gp.compute(x, err)
                results = optimize.minimize(neg_ln_like,
                                            gp.get_parameter_vector(),
                                            jac=grad_neg_ln_like,
                                            method="L-BFGS-B",
                                            tol=1e-5)
                # Update the kernel and print the final log-likelihood.
                gp.set_parameter_vector(results.x)
            except Exception:
                # Fall back to the simpler single-kernel fit if the optimisation fails.
                gp, results = kernel1(data)

            return gp, results
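For orientation, a minimal usage sketch, assuming get_ls and kernel1 (the fallback fitter) are defined in the same module and x, y, err are NumPy arrays of equal length:

import numpy as np

data = (x, y, err)
gp, results = kernel2(data)
# Predict the conditioned GP mean and variance on a fine grid.
x_pred = np.linspace(x.min(), x.max(), 500)
mu, var = gp.predict(y, x_pred, return_var=True)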
Example #2
    def get_gp(self, Kernel="Exp", amplitude=1e-3, metric=10., gamma=10., period=10.):
        """
        Citlalicue uses the kernels provided by george;
        the available options are "Exp", "Matern32", "Matern52", and the quasi-periodic "QP".
        The user can modify the hyperparameters amplitude, metric, gamma, and period.
        """
        import numpy as np
        import george
        from george import kernels
        if Kernel == "Matern32":
            kernel = amplitude * kernels.Matern32Kernel(metric)
        elif Kernel == "Matern52":
            kernel = amplitude * kernels.Matern52Kernel(metric)
        elif Kernel == "Exp":
            kernel = amplitude * kernels.ExpKernel(metric)
        elif Kernel == "QP":
            log_period = np.log(period)
            kernel = amplitude * kernels.ExpKernel(metric) * kernels.ExpSine2Kernel(gamma, log_period)
        else:
            raise ValueError("Unknown kernel: {}".format(Kernel))

        #Save the kernel object as an attribute
        self.kernel = kernel
        #Create the GP object with george
        self.gp = george.GP(self.kernel, mean=1)
        #We compute the covariance matrix using the binned data
        self.gp.compute(self.time_bin, self.ferr_bin)
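A hedged usage sketch; lc is assumed to be an instance of the class that defines get_gp, with time_bin and ferr_bin already populated (flux_bin is a hypothetical attribute holding the binned fluxes):

lc.get_gp(Kernel="QP", amplitude=1e-3, metric=10., gamma=10., period=5.)
# The conditioned GP is stored as lc.gp and can then be used for prediction.
mu, var = lc.gp.predict(lc.flux_bin, lc.time_bin, return_var=True)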
Example #3
def get_kernel(architecture, kernel_name, domain_name_lst, n_dims):

    if architecture == "trans":
        mapping = trans_domain_kernel_mapping
    else:
        mapping = None

    kernel = None
    initial_ls = np.ones([n_dims])

    if kernel_name == "constant":
        kernel = kernels.ConstantKernel(1, ndim=n_dims)
    elif kernel_name == "polynomial":
        kernel = kernels.PolynomialKernel(log_sigma2=1, order=3, ndim=n_dims)
    elif kernel_name == "linear":
        kernel = kernels.LinearKernel(log_gamma2=1, order=3, ndim=n_dims)
    elif kernel_name == "dotproduct":
        kernel = kernels.DotProductKernel(ndim=n_dims)
    elif kernel_name == "exp":
        kernel = kernels.ExpKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "expsquared":
        kernel = kernels.ExpSquaredKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern32":
        kernel = kernels.Matern32Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern52":
        kernel = kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "rationalquadratic":
        kernel = kernels.RationalQuadraticKernel(log_alpha=1,
                                                 metric=initial_ls,
                                                 ndim=n_dims)
    elif kernel_name == "expsine2":
        kernel = kernels.ExpSine2Kernel(1, 2, ndim=n_dims)
    elif kernel_name == "heuristic":
        kernel = mapping[domain_name_lst[0]](ndim=n_dims, axes=0)
        # Add one kernel per remaining domain, each acting on its own axis.
        for i, d in enumerate(domain_name_lst[1:], start=1):
            kernel += mapping[d](ndim=n_dims, axes=i)
    elif kernel_name == "logsquared":
        kernel = kernels.LogSquaredKernel(initial_ls, ndim=n_dims)

    return kernel
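A sketch of how the returned kernel might be used with george, assuming a 3-dimensional input space and illustrative arrays X_train (shape (n, 3)), y_train, yerr_train, and a test grid X_test; the architecture and domain names are placeholders:

import george

n_dims = 3
kernel = get_kernel("mlp", "matern52", ["d0", "d1", "d2"], n_dims)
gp = george.GP(kernel)
gp.compute(X_train, yerr_train)
mu, var = gp.predict(y_train, X_test, return_var=True)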
Example #4
    def init_gps(self):
        self.gps = []
        for i in range(self.nparams):
            if self.k_type[i] == 0:
                kernel = (kernels.ConstantKernel(self.logyvars[i], ndim=6) *
                          kernels.ExpSquaredKernel(metric=np.eye(6), ndim=6))
            elif self.k_type[i] == 1:
                kernel = (kernels.ConstantKernel(self.logyvars[i], ndim=6) *
                          kernels.ExpKernel(metric=np.eye(6), ndim=6))
            elif self.k_type[i] == 2:
                kernel = (kernels.ConstantKernel(self.logyvars[i], ndim=6) *
                          kernels.Matern32Kernel(metric=np.eye(6), ndim=6))
            elif self.k_type[i] == 3:
                kernel = (kernels.ConstantKernel(self.logyvars[i], ndim=6) *
                          kernels.Matern52Kernel(metric=np.eye(6), ndim=6))
            tmp = copy.copy(george.GP(kernel))
            tmp.compute(self.xall, self.yerr[:, i])
            self.gps.append(tmp)
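Once init_gps has run, each stored GP can be conditioned on the corresponding target column; a sketch, where yall (the training targets, one column per parameter) and x_new are assumed attributes/inputs:

i = 0  # index of the parameter of interest
mu, var = self.gps[i].predict(self.yall[:, i], x_new, return_var=True)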
Example #5
    def get_kernel(self, kernel_name, i):
        """get individual kernels"""
        metric = (1. / np.exp(self.coeffs[kernel_name]))**2
        if self.gp_code_name == 'george':
            if kernel_name == 'Matern32':
                kernel = kernels.Matern32Kernel(metric,
                                                ndim=self.nkernels,
                                                axes=i)
            if kernel_name == 'ExpSquared':
                kernel = kernels.ExpSquaredKernel(metric,
                                                  ndim=self.nkernels,
                                                  axes=i)
            if kernel_name == 'RationalQuadratic':
                kernel = kernels.RationalQuadraticKernel(log_alpha=1,
                                                         metric=metric,
                                                         ndim=self.nkernels,
                                                         axes=i)
            if kernel_name == 'Exp':
                kernel = kernels.ExpKernel(metric, ndim=self.nkernels, axes=i)

        if self.gp_code_name == 'tinygp':
            if kernel_name == 'Matern32':
                kernel = tinygp.kernels.Matern32(metric,
                                                 ndim=self.nkernels,
                                                 axes=i)
            if kernel_name == 'ExpSquared':
                kernel = tinygp.kernels.ExpSquared(metric,
                                                   ndim=self.nkernels,
                                                   axes=i)
            if kernel_name == 'RationalQuadratic':
                kernel = tinygp.kernels.RationalQuadratic(alpha=1,
                                                          scale=metric,
                                                          ndim=self.nkernels,
                                                          axes=i)
            if kernel_name == 'Exp':
                kernel = tinygp.kernels.Exp(metric, ndim=self.nkernels, axes=i)

        return kernel
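A sketch of how these single-axis kernels might be summed into one additive model, assuming george is imported, self.coeffs has an entry for every kernel name, and self.nkernels equals the number of terms:

kernel_names = ['Matern32', 'ExpSquared']
kernel = self.get_kernel(kernel_names[0], 0)
for i, name in enumerate(kernel_names[1:], start=1):
    kernel = kernel + self.get_kernel(name, i)
gp = george.GP(kernel)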
Example #6
    def lnlikelihood(self):

        lnprob = 0.0

        for data_set_name in self.data.keys():

            t, y, yerr = self.data[data_set_name]
            mag = self.magnification(t)

            if self.use_gaussian_process_model:
                a = np.exp(self.ln_a[data_set_name])
                tau = np.exp(self.ln_tau[data_set_name])
                gp = george.GP(a * kernels.ExpKernel(tau))
                gp.compute(t, yerr)
                self.cov = gp.get_matrix(t)
                result, lp = self.linear_fit(data_set_name, mag)
                model = self.compute_lightcurve(data_set_name, t)
                lnprob += gp.lnlikelihood(y - model)

            else:
                result, lp = self.linear_fit(data_set_name, mag)
                lnprob += lp

        return lnprob
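The heart of the GP branch is george's marginal likelihood of the residuals under an exponential kernel; a standalone sketch of that computation with synthetic numbers:

import numpy as np
import george
from george import kernels

t = np.sort(np.random.uniform(0., 100., 50))
resid = np.random.normal(0., 0.01, t.size)   # stands in for y - model
yerr = np.full(t.size, 0.01)

a, tau = 1e-3, 5.0                           # exp(ln_a), exp(ln_tau)
gp = george.GP(a * kernels.ExpKernel(tau))
gp.compute(t, yerr)
lnprob = gp.log_likelihood(resid)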
Example #7
import numpy as np

import george
from george import kernels
from celerite import terms

__all__ = ["george_kernels", "george_solvers",
		"celerite_terms",
		"setup_george_kernel", "setup_celerite_terms"]

george_kernels = {
	"Exp2": kernels.ExpSquaredKernel(10**2),
	"Exp2ESin2": (kernels.ExpSquaredKernel(10**2) *
				kernels.ExpSine2Kernel(2 / 1.3**2, 1.0)),
	"ESin2": kernels.ExpSine2Kernel(2 / 1.3**2, 1.0),
	"27dESin2": kernels.ExpSine2Kernel(2 / 1.3**2, 27.0 / 365.25),
	"RatQ": kernels.RationalQuadraticKernel(0.8, 0.1**2),
	"Mat32": kernels.Matern32Kernel((0.5)**2),
	"Exp": kernels.ExpKernel((0.5)**2),
	# "W": kernels.WhiteKernel,  # deprecated, delegated to `white_noise`
	"B": kernels.ConstantKernel,
}
george_solvers = {
	"basic": george.BasicSolver,
	"HODLR": george.HODLRSolver,
}

celerite_terms = {
	"N": terms.Term(),
	"B": terms.RealTerm(log_a=-6., log_c=-np.inf,
				bounds={"log_a": [-30, 30],
						"log_c": [-np.inf, np.inf]}),
	"W": terms.JitterTerm(log_sigma=-25,
				bounds={"log_sigma": [-30, 30]}),
Example #8
    def plot_lightcurves(self):

        plt.figure()

        colour = iter(plt.cm.jet(np.linspace(0, 1, len(self.data))))

        xmin = self.initial_parameters[1] - 2 * self.initial_parameters[2]
        xmax = self.initial_parameters[1] + 2 * self.initial_parameters[2]

        n_data = len(self.data)
        for i, data_set_name in enumerate(self.data.keys()):

            t, y, yerr = self.data[data_set_name]
            #c=next(colour)
            c = 'r'

            if i == 0:
                plt.subplot(n_data, 1, i + 1)
                ax1 = plt.gca()
            else:
                plt.subplot(n_data, 1, i + 1, sharex=ax1)

            # Copy so the stored data are not modified in place below.
            y_cond = np.copy(y)
            if self.eigen_lightcurves is not None:
                if data_set_name in self.eigen_lightcurves:
                    coeffs, _ = self.linear_fit(data_set_name,
                                                self.magnification(t))
                    ci = 1
                    if self.fit_blended:
                        ci = 2
                    eigs = self.eigen_lightcurves[data_set_name]
                    for j in range(eigs.shape[0]):
                        y_cond -= coeffs[ci + j] * eigs[j, :]

            plt.errorbar(t, y_cond, yerr=yerr, fmt=".", color=c, capsize=0)
            ax = plt.gca()
            ax.set_xlim(xmin, xmax)
            plt.xlabel(r"$\Delta t (d)$")
            plt.ylabel(data_set_name + r"  $\Delta F$")

            x = np.linspace(xmin, xmax, 3000)

            if not (self.use_gaussian_process_model):
                plt.plot(x,
                         self.compute_lightcurve(data_set_name, x),
                         color="k")
                ylim = ax.get_ylim()

            # Plot posterior samples.
            for s in self.samples[np.random.randint(len(self.samples),
                                                    size=self.n_plot_samples)]:

                if self.use_gaussian_process_model:

                    # Set up the GP for this sample.
                    a, tau = np.exp(s[3 + 2 * i:3 + 2 * i + 2])
                    gp = george.GP(a * kernels.ExpKernel(tau))
                    gp.compute(t, yerr)
                    self.cov = gp.get_matrix(t)
                    modelt = self.compute_lightcurve(data_set_name,
                                                     t,
                                                     params=s)
                    modelx = self.compute_lightcurve(data_set_name,
                                                     x,
                                                     params=s)

                    # Compute the prediction conditioned on the observations
                    # and plot it.
                    m = gp.sample_conditional(y - modelt, x) + modelx
                    plt.plot(x, m, color="#4682b4", alpha=0.3)

                else:

                    plt.plot(x,
                             self.compute_lightcurve(data_set_name, x, params=s),
                             color="#4682b4", alpha=0.3)

            if not (self.use_gaussian_process_model):
                ax.set_ylim(ylim)

        plt.savefig(self.plotprefix + '-lc.png')

        plt.close()
Example #9
def find_george_MAP(t,
                    y,
                    yerr,
                    sigma0,
                    tau0,
                    prior='None',
                    set_bounds=True,
                    sig_lims=[0.02, 0.7],
                    tau_lims=[1, 550],
                    verbose=False):
    '''
    A wrapper to find the MAP estimate of the DRW parameters
    as expressed by the george ExpKernel,

    k(dt) = a * exp(-|dt| / sqrt(l2)),

    where l2 = metric = tau^2 and a = sigma^2.

    The parameters of this kernel are

    'k1:log_constant', 'k2:metric:log_M_0_0',

    i.e.  k1 = np.log(a)  = np.log(sigma^2) = 2 * np.log(sigma)
    and   k2 = np.log(l2) = np.log(tau^2)   = 2 * np.log(tau).

    These are the hyperparameters for which the log-posterior is
    optimized (the negative log-posterior is minimized, to be exact),
    and the original DRW parameters are recovered from the result as:

    k1 = res[0]
    k2 = res[1]

    sigma = exp(k1 / 2)
    tau   = exp(k2 / 2)

    Parameters
    ----------
    t, y, yerr : arrays of time, photometry, and photometric uncertainty
    sigma0, tau0 : starting values used to initialize the DRW kernel
    prior : name of the prior passed to neg_log_posterior ('None' by default)
    set_bounds : True or False, whether to set boundaries on the parameters.
                 True by default.
    sig_lims, tau_lims : 2-element arrays giving the [min, max] values of the parameters
    verbose : True or False, whether to print more info about the fit.
              False by default.
    '''
    a = sigma0**2.0
    kernel = a * kernels.ExpKernel(metric=tau0**2.0)

    gp = george.GP(kernel, mean=np.mean(y))
    gp.compute(t, yerr)

    # set initial params
    initial_params = gp.get_parameter_vector()
    if verbose:
        print('Initial params:', initial_params)

    # set boundaries
    if set_bounds:
        if verbose:
            print('sig_lims:', sig_lims, 'tau_lims:', tau_lims)
        tau_bounds, sigma_bounds = tau_lims, sig_lims

        sig_min = min(sigma_bounds)
        sig_max = max(sigma_bounds)
        tau_min = min(tau_bounds)
        tau_max = max(tau_bounds)

        log_const_bounds = (np.log(sig_min**2.0), np.log(sig_max**2.0))
        log_M00_bounds = (np.log(tau_min**2.0), np.log(tau_max**2.0))
        bounds = [log_const_bounds, log_M00_bounds]

    else:  # - inf to + inf
        bounds = gp.get_parameter_bounds()
    if verbose:
        print('bounds for fitted params are ', bounds)
        print('for params ', gp.get_parameter_dict().keys())

    # wrap the neg_log_posterior for a chosen prior
    def neg_log_like(params, y, gp):
        return neg_log_posterior(params, y, gp, prior, 'george')

    # find MAP solution
    r = minimize(neg_log_like,
                 initial_params,
                 method="L-BFGS-B",
                 bounds=bounds,
                 args=(y, gp))
    if verbose:
        print(r)
    gp.set_parameter_vector(r.x)
    #res = gp.get_parameter_dict()

    #a = np.exp(r.x[0])
    #l2 =  np.exp(r.x[1])
    #sigma_fit = np.sqrt(a)
    #tau_fit = np.sqrt(l2)
    sigma_fit = np.exp(r.x[0] / 2.0)
    tau_fit = np.exp(r.x[1] / 2.0)
    if verbose:
        print('sigma_fit=', sigma_fit, 'tau_fit=', tau_fit)

    return sigma_fit, tau_fit, gp
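A usage sketch on synthetic data; neg_log_posterior is assumed to be defined in the same module, as referenced inside the function above:

import numpy as np

t = np.sort(np.random.uniform(0., 1000., 200))
y = 19.0 + np.random.normal(0., 0.1, t.size)
yerr = np.full(t.size, 0.02)

sigma_fit, tau_fit, gp = find_george_MAP(t, y, yerr,
                                         sigma0=0.2, tau0=100.,
                                         prior='None', verbose=True)
print('sigma =', sigma_fit, 'tau =', tau_fit)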