Example No. 1
def showResults(datafolder, out_folder, **fitKwargs):
    dataset = juliet.load(input_folder=out_folder)
    try:
        with open(out_folder + '/results.pkl', 'rb') as f:
            results = pickle.load(f)
    except FileNotFoundError:
        results = dataset.fit(
            use_dynesty=False, dynamic=True,
            **fitKwargs)  # has to be ~same call as during fit

    # plot posteriors
    fig = plots.plot_cornerPlot(results,
                                pl=results.pl,
                                pu=results.pu,
                                quantiles=[0.16, 0.5, 0.84],
                                show_titles=True,
                                title_kwargs={"fontsize": 16},
                                title_fmt='.2f',
                                rasterized=True,
                                label_kwargs={"fontsize": 16})
    fig.savefig(out_folder + '/cornerPosteriors.pdf')

    # plot single posterior plots
    plots.plot_posteriors(results, out_folder)

    # Plot the photometry with fit
    if dataset.ninstruments_lc > 0:
        fig, axs = plots.plot_photometry(dataset, results)
        axs[0].legend(loc='lower left',
                      ncol=99,
                      bbox_to_anchor=(0., 1.),
                      frameon=False,
                      columnspacing=1.6)
        fig.savefig(out_folder + '/photometryFitted.pdf')

        phasedPlots = plots.plot_phasedPhotometry(dataset,
                                                  results,
                                                  instrument=None)
        for inst in phasedPlots:
            phasedPlots[inst][0].savefig(out_folder +
                                         '/phasedPhot_{}.pdf'.format(inst))

    # plot RVs with fit
    if dataset.ninstruments_rv > 0:
        fig, ax = plots.plot_rv_fit(dataset, results)
        fig.savefig(out_folder + '/RVsFitted.pdf')

    # plot periodograms
    fig, axs = plots.plot_periodograms(
        datafolder + 'TIC237913194_activity.dat', out_folder, results)

    # plot RV-BS
    fig, ax = plots.plot_RV_BS(datafolder + 'TIC237913194_activity.dat',
                               out_folder, results)

    lnZstr = r'Log-evidence: {0:.2f} $\pm$ {1:.2f}'.format(
        results.posteriors['lnZ'], results.posteriors['lnZerr'])
    print(lnZstr)
    return results
Example No. 2
def printTables(out_folder, **fitKwargs):
    """ print Latex-formatted tables for priors, posteriors to files"""
    dataset = juliet.load(input_folder=out_folder)
    try:
        with open(out_folder + '/results.pkl', 'rb') as f:
            results = pickle.load(f)
    except FileNotFoundError:
        results = dataset.fit(
            use_dynesty=False, dynamic=True,
            **fitKwargs)  # has to be ~same call as during fit
    latextable.print_prior_table(dataset)
    latextable.print_posterior_table(dataset, results, precision=2)
Example No. 3
def main(datafolder, out_folder, GP):
    """ run the fits and save the results to the `out_folder`"""

    priors, params = get_priors(GP)
    times_lc, fluxes, fluxes_error, gp_times_lc = read_photometry(
        datafolder,
        plotPhot=False,
        outlierIndices=outlierIndices,
        instruments_lc=instruments_lc)
    times_rv, rvs, rvs_error = read_rv(datafolder,
                                       kms=True,
                                       subtract_median=True)

    if GP:
        GP_regressors = gp_times_lc
    else:
        GP_regressors = None

    dataset = juliet.load(
        priors=priors,
        t_lc=times_lc,
        y_lc=fluxes,
        yerr_lc=fluxes_error,
        t_rv=times_rv,
        y_rv=rvs,
        yerr_rv=rvs_error,
        GP_regressors_lc=GP_regressors,
        lc_instrument_supersamp=['TESSERACT+TESS'],
        lc_exptime_supersamp=[0.020434],
        lc_n_supersamp=[20],  # supersample to account for the ~30 min long cadence (exposure time 0.020434 d)
        out_folder=out_folder,
        verbose=True)

    results = dataset.fit(use_dynesty=False,
                          n_live_points=1500,
                          ecclim=0.7,
                          dynamic=True,
                          pl=pl,
                          pu=pu)

    lnZstr = r'Log-evidence: {0:.2f} $\pm$ {1:.2f}'.format(
        results.posteriors['lnZ'], results.posteriors['lnZerr'])
    print(lnZstr)
    with open(out_folder + "/lnZ={:.2f}.txt".format(results.posteriors['lnZ']),
              "w") as text_file:
        print(lnZstr, file=text_file)

    return results
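# A minimal driver sketch; the folder paths, the GP flag and the extra fit keywords below
# are assumptions for illustration, not part of the original script:
if __name__ == '__main__':
    results = main('data/', 'out/', GP=True)
    showResults('data/', 'out/', n_live_points=1500)
    printTables('out/')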
Example No. 4
    def _juliet_detrend(self):
        """
        Calls juliet and performs the Gaussian Process on the data.
        """
        try:
            import juliet
        except ModuleNotFoundError:
            print('Juliet is not present. Skipping')
            return np.ones(self.lc.flux.size)

        out_folder = 'TIC%d' % self.TIC

        times, fluxes, fluxes_error = {}, {}, {}
        times['TESS'], fluxes['TESS'], fluxes_error['TESS'] = self.lc.time+2457000, self.lc.flux,\
                                                              self.lc.flux_err

        # Name of the parameters to be fit:
        params_inst = ['mdilution_TESS', 'mflux_TESS', 'sigma_w_TESS']
        params_gp = ['GP_sigma_TESS', 'GP_timescale_TESS']

        dists_inst = ['fixed', 'normal', 'loguniform']
        dists_gp = ['loguniform', 'loguniform']

        hyperps_inst = [1.0, [0., 0.1], [0.1, 1000.]]
        hyperps_gp = [[1e-6, 1e6], [1e-3, 1e3]]

        params = params_inst
        dists = dists_inst
        hyperps = hyperps_inst

        params += params_gp
        dists += dists_gp
        hyperps += hyperps_gp

        priors = {}
        for param, dist, hyperp in zip(params, dists, hyperps):
            priors[param] = {}
            priors[param]['distribution'], priors[param][
                'hyperparameters'] = dist, hyperp

        dataset = juliet.load(priors=priors, t_lc = times, y_lc = fluxes,\
                              yerr_lc = fluxes_error, GP_regressors_lc=times,\
                              out_folder = out_folder, verbose=True)
        results = dataset.fit()
        model_fit = results.lc.evaluate('TESS')

        del dataset, results, times, fluxes, fluxes_error
        return model_fit
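A possible use of the returned model (an assumption; the calling code is not part of the snippet):

        # inside the same class, after the light curve has been loaded:
        gp_model = self._juliet_detrend()
        detrended_flux = self.lc.flux / gp_model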
Example No. 5
    'GP_rho_inst', 'GP_timescale_inst'
]
dists = [
    'fixed', 'normal', 'loguniform', 'loguniform', 'loguniform', 'loguniform'
]
hyper = [1, [0, 0.1], [0.1, 1e4], [1e-8, 1e4], [1e-3, 1e3], [1e-6, 1e6]]

priors = {}
for par, dis, hyp in zip(params, dists, hyper):
    priors[par] = {}
    priors[par]['distribution'], priors[par]['hyperparameters'] = dis, hyp
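# For reference, the loop above builds the nested dictionary juliet expects; written out by
# hand for the last visible parameter (assuming the truncated params list lines up with dists/hyper):
# priors['GP_timescale_inst'] = {'distribution': 'loguniform',
#                                'hyperparameters': [1e-6, 1e6]}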

dataset = juliet.load(priors=priors,
                      t_lc=time,
                      y_lc=flux,
                      yerr_lc=ferr,
                      verbose=True,
                      GP_regressors_lc=time,
                      out_folder='GPO_' + args.File.split('.')[0])

results = dataset.fit(
    n_live_points=750)  #, use_dynesty=True, dynesty_nthreads=30)

model_fit = results.lc.evaluate('inst', t=t, GPregressors=t)
#gp_fit    = results.lc.model['inst']['GP']

fig, ax = plt.subplots(figsize=[15, 6], nrows=2, sharex=True)

ax[0].errorbar(t, f, yerr=e, fmt='.', color='k')
ax[0].plot(t, model_fit, color='r', zorder=100)
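# A possible second panel for the figure above (a sketch, not in the original snippet):
# residuals of the GP fit on the lower axis.
# ax[1].errorbar(t, f - model_fit, yerr=e, fmt='.', color='k')
# ax[1].axhline(0., color='r', zorder=100)
# ax[1].set_xlabel('Time')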
Example No. 6
starting_point = {}
starting_point['t0_p1'] = 0.
starting_point['p_p1'] = 0.1
starting_point['q1_inst'] = 0.5
starting_point['q2_inst'] = 0.5
starting_point['a_p1'] = 3.6
starting_point['mflux_inst'] = 0.
starting_point['sigma_w_inst'] = 100.

# And fit:
samplers = [
    'dynamic_dynesty', 'dynesty', 'emcee', 'ultranest',
    'slicesampler_ultranest', 'zeus', 'multinest'
]
all_times = {}
for sampler in samplers:
    dataset = juliet.load(priors=priors,
                          t_lc=times,
                          y_lc=fluxes,
                          yerr_lc=errors,
                          out_folder=sampler + '-test',
                          starting_point=starting_point)
    t0 = time.time()
    dataset.fit(sampler=sampler, progress=True)
    t1 = time.time()
    total = t1 - t0
    all_times[sampler] = total
    print(sampler, ' took ', total, ' seconds to run.')
print('timing results (in seconds):')
print(all_times)
Example No. 7
    def do_ttv_tfit_juliet(self,
                           direct,
                           time=0.,
                           flux=0.,
                           flux_err=0.,
                           mptsintransit=0.,
                           nsupersample=29,
                           exptimesupersample=0.0201389,
                           nlive=400,
                           priors=None,
                           reset_out=False):
        """One-step Kepler transit light curve fitting, for transits with TTVs. Using Juliet w/ pymultinest. Only fits circular transits (e, w fixed at 0!).
        Fixes TTV midpoints. Uses Kepler catalog orbital period + errorbars to simluate a fit period distribution.

        Parameters
        ----------
        direct: str
            Output path.
        mptsintransit: array or float, default 0.
            Input irregular transit midpoints with TTVs. If 0, uses default, calculated midpoints.
        nsupersample: int, default 29
            Number of flux points over which to supersample, default 29 (for Kepler long cadence.)
        exptimesupersample: float, default 0.0201389 (29 min, for Kepler long cadence)
            Exposure time over which to supersample, in days.
        nlive: int, default 400
            Number of pymultinest live points
        priors: default None
            Custom transit fitting priors for juliet fit
        reset_out: boolean, default False
            Reset output directory?

        Returns
        -------
        dataset: juliet object
            Juliet dataset
        results: juliet object
            Juliet results

        """

        import juliet

        times, fluxes, fluxes_error = {}, {}, {}
        if type(time) == float:
            times['KEPLER'], fluxes['KEPLER'], fluxes_error[
                'KEPLER'] = self.time_intransit, self.flux_intransit, self.fluxerr_intransit
        else:
            times['KEPLER'], fluxes['KEPLER'], fluxes_error[
                'KEPLER'] = time, flux, flux_err

        if priors is None:
            priors = {}

            params = ['p_p1','b_p1','q1_KEPLER','q2_KEPLER','ecc_p1','omega_p1',\
                          'a_p1', 'mdilution_KEPLER', 'mflux_KEPLER', 'sigma_w_KEPLER']

            dists = ['normal','uniform','uniform','uniform','fixed','fixed',\
                             'uniform', 'fixed', 'normal', 'loguniform']

            hyperps = [[self.rprs, 0.001], [0., 1.], [0., 1.], [0., 1.], 0.0,
                       90., [0., 200.], 1.0, [0., 0.1], [0.1, 1000.]]

            ttvparams = []
            ttvdists = []
            ttvhyperps = []

            if type(mptsintransit) == float:
                mptsintransit = self.midpoints
            for mpt in range(1, len(mptsintransit) + 1):
                ttvparams.append('T_p1_KEPLER_' + str(mpt))
                ttvdists.append('fixed')
                ttvhyperps.append(self.midpoints[mpt - 1])

            params = params + ttvparams
            dists = dists + ttvdists
            hyperps = hyperps + ttvhyperps
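            # Illustrative note: with, say, three stored midpoints this appends fixed priors
            # T_p1_KEPLER_1, T_p1_KEPLER_2 and T_p1_KEPLER_3 held at those transit times,
            # so only the shared transit-shape parameters are fit.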

            for param, dist, hyperp in zip(params, dists, hyperps):
                priors[param] = {}
                priors[param]['distribution'], priors[param][
                    'hyperparameters'] = dist, hyperp

            if reset_out:
                import os
                if os.path.exists(direct):
                    os.rmdir(direct)

            if nsupersample is None or exptimesupersample is None:
                dataset = juliet.load(priors=priors,
                                      t_lc=times,
                                      y_lc=fluxes,
                                      yerr_lc=fluxes_error,
                                      out_folder=direct)
            else:
                dataset = juliet.load(
                    priors=priors,
                    t_lc=times,
                    y_lc=fluxes,
                    yerr_lc=fluxes_error,
                    out_folder=direct,
                    lc_instrument_supersamp=['KEPLER'],
                    lc_n_supersamp=[nsupersample],
                    lc_exptime_supersamp=[exptimesupersample])
            print("Fitting KOI " + str(self.nkoi))
            results = dataset.fit(ecclim=0., n_live_points=nlive)

            self.per_dist = np.random.normal(
                self.period,
                np.mean((abs(self.period_uerr), abs(self.period_lerr))),
                size=len(results.posteriors['posterior_samples']['p_p1']))
            self.rprs_dist = results.posteriors['posterior_samples']['p_p1']
            self.ars_dist = results.posteriors['posterior_samples']['a_p1']
            self.i_dist = np.arccos(
                results.posteriors['posterior_samples']['b_p1'] *
                (1. / self.ars_dist)) * (180. / np.pi)

            self.tfit_period = mode(self.per_dist)
            self.tfit_rprs = mode(self.rprs_dist)
            self.tfit_ars = mode(self.ars_dist)
            self.tfit_i = mode(self.i_dist)

            return dataset, results
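A hypothetical call of the method above (the koi instance and the output path are assumptions, not from the source):

    dataset, results = koi.do_ttv_tfit_juliet(direct='ttv_fit_output',
                                               nsupersample=29,
                                               exptimesupersample=0.0201389,
                                               nlive=400)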
Example No. 8
    def do_tfit_juliet(self,
                       direct,
                       nsupersample=29,
                       exptimesupersample=0.0201389,
                       nlive=400,
                       priors=None,
                       reset_out=False):
        """One-step Kepler transit light curve fitting. Using Juliet w/ pymultinest. Only fits circular transits (e, w fixed at 0!).

        Parameters
        ----------
        direct: str
            Output path.
        nsupersample: int, default 29
            Number of flux points over which to supersample, default 29 (for Kepler long cadence.)
        exptimesupersample: float, default 0.0201389 (29 min, for Kepler long cadence)
            Exposure time over which to supersample, in days.
        nlive: int, default 400
            Number of pymultinest live points
        priors: default None
            Custom transit fitting priors for juliet fit
        reset_out: boolean, default False
            Reset output directory?

        Returns
        -------
        dataset: juliet object
            Juliet dataset
        results: juliet object
            Juliet results


        """
        import juliet

        times, fluxes, fluxes_error = {}, {}, {}
        times['KEPLER'], fluxes['KEPLER'], fluxes_error[
            'KEPLER'] = self.time_intransit, self.flux_intransit, self.fluxerr_intransit

        if priors is None:
            priors = {}

            params = ['P_p1','t0_p1','p_p1','b_p1','q1_KEPLER','q2_KEPLER','ecc_p1','omega_p1',\
                          'a_p1', 'mdilution_KEPLER', 'mflux_KEPLER', 'sigma_w_KEPLER']

            dists = ['normal','normal','normal','uniform','uniform','uniform','fixed','fixed',\
                             'uniform', 'fixed', 'normal', 'loguniform']

            hyperps = [[self.period, 0.001], [self.epoch, 0.001],
                       [self.rprs, 0.001], [0., 1.], [0., 1.], [0., 1.], 0.0,
                       90., [0., 200.], 1.0, [0., 0.1], [0.1, 1000.]]

            for param, dist, hyperp in zip(params, dists, hyperps):
                priors[param] = {}
                priors[param]['distribution'], priors[param][
                    'hyperparameters'] = dist, hyperp

            if reset_out:
                import os
                if os.path.exists(direct):
                    os.rmdir(direct)

            if nsupersample is None or exptimesupersample is None:
                dataset = juliet.load(priors=priors,
                                      t_lc=times,
                                      y_lc=fluxes,
                                      yerr_lc=fluxes_error,
                                      out_folder=direct)
            else:
                dataset = juliet.load(
                    priors=priors,
                    t_lc=times,
                    y_lc=fluxes,
                    yerr_lc=fluxes_error,
                    out_folder=direct,
                    lc_instrument_supersamp=['KEPLER'],
                    lc_n_supersamp=[nsupersample],
                    lc_exptime_supersamp=[exptimesupersample])

            results = dataset.fit(ecclim=0., n_live_points=nlive)

            self.per_dist = results.posteriors['posterior_samples']['P_p1']
            self.rprs_dist = results.posteriors['posterior_samples']['p_p1']
            self.ars_dist = results.posteriors['posterior_samples']['a_p1']
            self.i_dist = np.arccos(
                results.posteriors['posterior_samples']['b_p1'] *
                (1. / self.ars_dist)) * (180. / np.pi)
            self.t0_dist = results.posteriors['posterior_samples']['t0_p1']

            self.tfit_period = mode(self.per_dist)
            self.tfit_rprs = mode(self.rprs_dist)
            self.tfit_ars = mode(self.ars_dist)
            self.tfit_i = mode(self.i_dist)
            self.tfit_t0 = mode(self.t0_dist)

            return dataset, results
Example No. 9
def fit(t,
        f,
        ferr,
        sector,
        P,
        P_err,
        t0,
        t0_err,
        ecc,
        omega,
        GPmodel='ExpMatern',
        outpath='planetfit',
        method='',
        in_transit_length=0.,
        fit_catwoman=False):

    # Scale t0 to the transit closest to the center of the TESS observations:
    n = int((np.mean(t) - t0) / P)
    t0 += n * P
    t0_err = np.sqrt(t0_err**2 + (n * P_err)**2)
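    # For example (illustrative numbers only): with t0 = 2458300.0, P = 3.5 d and
    # mean(t) = 2458450.0, n = int(150.0 / 3.5) = 42, so t0 shifts to 2458447.0 and its
    # uncertainty grows by the 42 * P_err term added in quadrature.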

    # Define priors:
    priors = {}

    # First define parameter names, distributions and hyperparameters for GP-independent parameters:
    if not fit_catwoman:

        params1 = ['P_p1', 't0_p1', 'p_p1', 'b_p1', 'q1_TESS', 'q2_TESS', \
                   'ecc_p1', 'omega_p1', 'a_p1']

        params1_instrument = ['mdilution_TESS', 'mflux_TESS', 'sigma_w_TESS']

        dists1 = ['normal', 'normal', 'uniform', 'uniform', 'uniform', 'uniform', \
                   'fixed','fixed','loguniform']

        dists1_instrument = ['fixed', 'normal', 'loguniform']

        hyperps1 = [[P,P_err], [t0, 0.1], [0., 1.], [0., 2.], [0., 1.], [0., 1.], \
                   ecc, omega, [1., 100.]]

    else:

        params1 = ['P_p1', 't0_p1', 'p1_p1', 'p2_p1', 'phi_p1', 'b_p1', 'q1_TESS', 'q2_TESS', \
                   'ecc_p1', 'omega_p1', 'a_p1']

        params1_instrument = ['mdilution_TESS', 'mflux_TESS', 'sigma_w_TESS']

        dists1 = ['normal', 'normal', 'uniform', 'uniform', 'fixed', 'uniform', 'uniform', 'uniform', \
                   'fixed','fixed','loguniform']

        dists1_instrument = ['fixed', 'normal', 'loguniform']

        hyperps1 = [[P,P_err], [t0, 0.1], [0., 1.], [0., 1.], 90., [0., 2.], [0., 1.], [0., 1.], \
                   ecc, omega, [1., 100.]]

    hyperps1_instrument = [1., [0., 0.1], [0.1, 10000.]]

    # Now define hyperparameters of the GP depending on the chosen kernel:
    if GPmodel == 'ExpMatern':
        params2 = ['GP_sigma_TESS', 'GP_timescale_TESS', 'GP_rho_TESS']
        dists2 = ['loguniform', 'loguniform', 'loguniform']
        hyperps2 = [[1e-5, 10000.], [1e-3, 1e2], [1e-3, 1e2]]
    elif GPmodel == 'Matern':
        params2 = ['GP_sigma_TESS', 'GP_rho_TESS']
        dists2 = ['loguniform', 'loguniform']
        hyperps2 = [[1e-5, 10000.], [1e-3, 1e2]]
    elif GPmodel == 'QP':
        params2 = ['GP_B_TESS', 'GP_C_TESS', 'GP_L_TESS', 'GP_Prot_TESS']
        dists2 = ['loguniform', 'loguniform', 'loguniform', 'loguniform']
        hyperps2 = [[1e-5, 1e3], [1e-5, 1e4], [1e-3, 1e3], [1., 1e2]]

    # If method is blank, fit the transit and GP simultaneously. If set to "fit_out", fit the out-of-transit
    # lightcurve first and use the posteriors of that fit as priors for an in-transit fit. in_transit_length
    # measures, in days, what counts as "in-transit", centered around t0:
    if method == '':
        params = params1 + params1_instrument + params2
        dists = dists1 + dists1_instrument + dists2
        hyperps = hyperps1 + hyperps1_instrument + hyperps2

        # Populate the priors dictionary:
        for param, dist, hyperp in zip(params, dists, hyperps):
            priors[param] = {}
            priors[param]['distribution'], priors[param][
                'hyperparameters'] = dist, hyperp

        # Port data in the juliet format:
        tt, ff, fferr = {}, {}, {}
        tt['TESS'], ff['TESS'], fferr['TESS'] = t, f, ferr

        # Run fit:
        dataset = juliet.load(priors=priors, t_lc = tt, y_lc = ff, \
                              yerr_lc = fferr, GP_regressors_lc = tt, out_folder = outpath+'_'+GPmodel)
        results = dataset.fit(n_live_points=500, verbose=True)
    else:
        # Perform GP fit first:
        params = params1_instrument + params2
        dists = dists1_instrument + dists2
        hyperps = hyperps1_instrument + hyperps2

        # Populate priors dict:
        for param, dist, hyperp in zip(params, dists, hyperps):
            priors[param] = {}
            priors[param]['distribution'], priors[param][
                'hyperparameters'] = dist, hyperp

        # Select only out-of-transit data. For this, work on phase-space:
        phases = juliet.utils.get_phases(t, P, t0)
        idx_oot = np.where(np.abs(phases * P) >= in_transit_length * 0.5)[0]
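        # e.g. for P = 3.0 d and in_transit_length = 0.2 d, points whose phase times the
        # period lies at least 0.1 d away from mid-transit count as out-of-transit.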

        # Save data dict:
        tt, ff, fferr = {}, {}, {}
        tt['TESS'], ff['TESS'], fferr['TESS'] = t[idx_oot], f[idx_oot], ferr[
            idx_oot]

        # Run GP-only fit:
        dataset = juliet.load(priors=priors, t_lc = tt, y_lc = ff, \
                              yerr_lc = fferr, GP_regressors_lc = tt, out_folder = outpath+'_'+GPmodel+'_out_of_transit')
        results = dataset.fit(n_live_points=500, verbose=True)

        # Now use posteriors of that fit to fit the in-transit data. Assume truncated normals for the GP hyperparameters:
        for i in range(len(params2)):
            posterior = results.posteriors['posterior_samples'][params2[i]]
            mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
            dists2[i] = 'truncatednormal'
            hyperps2[i] = [mu, sigma, hyperps2[i][0], hyperps2[i][1]]
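        # Illustrative (made-up numbers, ExpMatern case): if the out-of-transit posterior of
        # GP_sigma_TESS has median 5.0 and standard deviation 0.3, its in-transit prior becomes
        # 'truncatednormal' with hyperparameters [5.0, 0.3, 1e-5, 10000.].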

        # Same for sigma_w and mflux:
        dists1_instrument[2] = 'truncatednormal'
        posterior = results.posteriors['posterior_samples']['sigma_w_TESS']
        mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
        hyperps1_instrument[2] = [
            mu, sigma, hyperps1_instrument[2][0], hyperps1_instrument[2][1]
        ]

        # Normal for mflux:
        dists1_instrument[1] = 'normal'
        posterior = results.posteriors['posterior_samples']['mflux_TESS']
        mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
        hyperps1_instrument[1] = [mu, sigma]

        # Populate prior dict:
        params = params1 + params1_instrument + params2
        dists = dists1 + dists1_instrument + dists2
        hyperps = hyperps1 + hyperps1_instrument + hyperps2

        # Populate the priors dictionary:
        for param, dist, hyperp in zip(params, dists, hyperps):
            priors[param] = {}
            priors[param]['distribution'], priors[param][
                'hyperparameters'] = dist, hyperp

        # And with those changes, fit the in-transit data:
        idx_in = np.where(np.abs(phases * P) < in_transit_length * 0.5)[0]

        # Save data dict:
        tt['TESS'], ff['TESS'], fferr['TESS'] = t[idx_in], f[idx_in], ferr[
            idx_in]

        # Run fit:
        if not fit_catwoman:

            dataset = juliet.load(priors=priors, t_lc = tt, y_lc = ff, \
                    yerr_lc = fferr, GP_regressors_lc = tt, out_folder = outpath+'_'+GPmodel+'_in_transit_batman')

        else:

            dataset = juliet.load(priors=priors, t_lc = tt, y_lc = ff, \
                    yerr_lc = fferr, GP_regressors_lc = tt, out_folder = outpath+'_'+GPmodel+'_in_transit_catwoman')

        results = dataset.fit(n_live_points=500, verbose=True)
Example No. 10
def multisector_fit(tt,
                    ff,
                    fferr,
                    P,
                    P_err,
                    t0,
                    t0_err,
                    ecc,
                    omega,
                    GPmodel='ExpMatern',
                    outpath='planetfit',
                    method='',
                    in_transit_length=0.,
                    good_sectors=None,
                    fit_catwoman=False,
                    nthreads=4):

    if good_sectors is not None:
        t, f, ferr = {}, {}, {}
        for goodsector in good_sectors:
            t[goodsector], f[goodsector], ferr[goodsector] = np.copy(
                tt[goodsector]), np.copy(ff[goodsector]), np.copy(
                    fferr[goodsector])

    else:
        t, f, ferr = tt.copy(), ff.copy(), fferr.copy()

    # Go through sectors, mask in_transit data if method is not '':
    if method != '':
        for sector in t.keys():
            phases = juliet.utils.get_phases(t[sector], P, t0)
            idx_in = np.where(np.abs(phases * P) < in_transit_length * 0.5)[0]
            t[sector], f[sector], ferr[sector] = t[sector][idx_in], f[sector][
                idx_in], ferr[sector][idx_in]

    # Put all times in a big time-array:
    all_t = np.array([])
    for sector in t.keys():
        all_t = np.append(all_t, t[sector])

    # Scale t0 to the transit closest to the maximum of the TESS observations:
    print('all_t:', all_t)
    print('max t:', np.max(all_t))
    print('t0:', t0)
    print('P:', P)
    n = int((np.max(all_t) - t0) / P)
    t0 += n * P
    t0_err = np.sqrt(t0_err**2 + (n * P_err)**2)

    # Define priors:
    priors = {}

    # All sectors string:
    all_sectors = '_'.join(list(t.keys()))
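    # e.g. with sectors 'TESS14' and 'TESS15' this becomes 'TESS14_TESS15', so the
    # limb-darkening and dilution parameters below are shared across all sectors.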

    # First define parameter names, distributions and hyperparameters for sector-independent parameters:

    if not fit_catwoman:

        params = ['P_p1', 't0_p1', 'p_p1', 'b_p1', 'q1_'+all_sectors, 'q2_'+all_sectors, \
                   'ecc_p1', 'omega_p1', 'a_p1', 'mdilution_'+all_sectors]

        dists = ['normal', 'normal', 'uniform', 'uniform', 'uniform', 'uniform', \
                   'fixed','fixed','loguniform', 'fixed']

        hyperps = [[P,P_err], [t0, 0.1], [0., 1.], [0., 1.], [0., 1.], [0., 1.], \
                   ecc, omega, [1., 100.], 1.]

    else:

        params = ['P_p1', 't0_p1', 'p1_p1', 'p2_p1', 'phi_p1', 'b_p1', 'q1_'+all_sectors, 'q2_'+all_sectors, \
                   'ecc_p1', 'omega_p1', 'a_p1', 'mdilution_'+all_sectors]

        dists = ['normal', 'normal', 'uniform', 'uniform', 'fixed', 'uniform', 'uniform', 'uniform', \
                   'fixed','fixed','loguniform', 'fixed']

        hyperps = [[P,P_err], [t0, 0.1], [0., 1.], [0., 1.], 90., [0., 2.], [0., 1.], [0., 1.], \
                   ecc, omega, [1., 100.], 1.]

    # Now, depending on the method, iterate through the sectors to set the priors for the GP, mflux and
    # sigma_w parameters of each sector:
    if method == '':
        for sector in t.keys():

            if GPmodel == 'ExpMatern':
                gpparams = ['GP_sigma', 'GP_timescale', 'GP_rho']
                gplimits = [[1e-5, 10000.], [1e-3, 1e2], [1e-3, 1e2]]
            elif GPmodel == 'Matern':
                gpparams = ['GP_sigma', 'GP_rho']
                gplimits = [[1e-5, 10000.], [1e-3, 1e2]]
            elif GPmodel == 'QP':
                gpparams = ['GP_B', 'GP_C', 'GP_L', 'GP_Prot']
                gplimits = [[1e-5, 1e3], [1e-5, 1e4], [1e-3, 1e2], [1., 1e2]]
            for i in range(len(gplimits)):
                gpparam = gpparams[i]
                params += [gpparam + '_' + sector]
                dists += ['loguniform']
                hyperps += [[gplimits[i][0], gplimits[i][1]]]

            params += ['mflux' + '_' + sector]
            dists += ['normal']
            hyperps += [[0., 0.1]]

            params += ['sigma_w' + '_' + sector]
            dists += ['loguniform']
            hyperps += [[0.1, 10000.]]

        # Populate the priors dictionary:
        for param, dist, hyperp in zip(params, dists, hyperps):
            priors[param] = {}
            priors[param]['distribution'], priors[param][
                'hyperparameters'] = dist, hyperp
        # Run fit:
        dataset = juliet.load(priors=priors, t_lc = t, y_lc = f, \
                              yerr_lc = ferr, GP_regressors_lc = t, out_folder = outpath+'/multisector_FULL_'+GPmodel)

        # If 4 or more sectors are fit, the number of free parameters exceeds ~30 --- so use dynamic dynesty:
        if len(t.keys()) >= 4:
            results = dataset.fit(sampler='dynamic_dynesty',
                                  bound='single',
                                  n_effective=100,
                                  use_stop=False,
                                  nthreads=4)
        else:
            results = dataset.fit(n_live_points=1000, verbose=True)
    else:
        for sector in t.keys():

            # Extract GP hyperparameters; add them to the params, dists and hyperps lists:
            posteriors = pickle.load(
                open(
                    outpath + '/' + sector + '_' + GPmodel +
                    '_out_of_transit/posteriors.pkl', 'rb'))

            if GPmodel == 'ExpMatern':
                gpparams = ['GP_sigma', 'GP_timescale', 'GP_rho']
                gplimits = [[1e-5, 10000.], [1e-3, 1e2], [1e-3, 1e2]]
            elif GPmodel == 'Matern':
                gpparams = ['GP_sigma', 'GP_rho']
                gplimits = [[1e-5, 10000.], [1e-3, 1e2]]
            elif GPmodel == 'QP':
                gpparams = ['GP_B', 'GP_C', 'GP_L', 'GP_Prot']
                gplimits = [[1e-5, 1e3], [1e-5, 1e4], [1e-3, 1e2], [1., 1e2]]
            for i in range(len(gplimits)):
                gpparam = gpparams[i]
                posterior = posteriors['posterior_samples'][gpparam + '_TESS']
                mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
                params += [gpparam + '_' + sector]
                dists += ['truncatednormal']
                hyperps += [[mu, sigma, gplimits[i][0], gplimits[i][1]]]

            # Add mflux and sigma_w:
            params += ['mflux' + '_' + sector]
            dists += ['normal']
            posterior = posteriors['posterior_samples']['mflux_TESS']
            mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
            hyperps += [[mu, sigma]]

            params += ['sigma_w' + '_' + sector]
            dists += ['truncatednormal']
            posterior = posteriors['posterior_samples']['sigma_w_TESS']
            mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
            hyperps += [[mu, sigma, 0.1, 10000.]]

        # Populate the priors dictionary:
        for param, dist, hyperp in zip(params, dists, hyperps):
            priors[param] = {}
            priors[param]['distribution'], priors[param][
                'hyperparameters'] = dist, hyperp

        # Run fit:
        if not fit_catwoman:

            dataset = juliet.load(priors=priors, t_lc = t, y_lc = f, \
                                  yerr_lc = ferr, GP_regressors_lc = t, out_folder = outpath+'/multisector_in_transit_'+GPmodel+'_batman')

        else:

            dataset = juliet.load(priors=priors, t_lc = t, y_lc = f, \
                                  yerr_lc = ferr, GP_regressors_lc = t, out_folder = outpath+'/multisector_in_transit_'+GPmodel+'_catwoman')

        # If 4 or more sectors are fit, the number of free parameters exceeds ~30 --- so use dynamic dynesty:
        if len(t.keys()) >= 4:
            results = dataset.fit(sampler='dynamic_dynesty',
                                  bound='single',
                                  n_effective=100,
                                  use_stop=False,
                                  nthreads=nthreads)
        else:
            results = dataset.fit(n_live_points=1000, verbose=True)
Example No. 11
def fit_transit_by_transit(P,
                           P_err,
                           t0,
                           t0_err,
                           ecc,
                           omega,
                           GPmodel='ExpMatern',
                           outpath='planetfit',
                           in_transit_length=0.):

    # First, extract both sectors and folders of those sectors which have out-of-transit fits already done:
    oot_folders = glob.glob(outpath + '/TESS*_' + GPmodel + '_out_of_transit')

    for oot_folder in oot_folders:
        print('Working on', oot_folder)
        it_folder = oot_folder.split('out_of_transit')[0] + 'in_transit'

        # Define priors:
        priors = {}

        # First define parameter names, distributions and hyperparameters for GP-independent parameters:
        params1 = ['P_p1', 't0_p1', 'r1_p1', 'r2_p1', 'q1_TESS', 'q2_TESS', \
                   'ecc_p1', 'omega_p1', 'a_p1']

        params1_instrument = ['mdilution_TESS', 'mflux_TESS', 'sigma_w_TESS']

        dists1 = ['normal', 'normal', 'uniform', 'uniform', 'uniform', 'uniform', \
                   'fixed','fixed','loguniform']

        dists1_instrument = ['fixed', 'normal', 'loguniform']

        hyperps1 = [[P,P_err], [t0, 0.1], [0., 1.], [0., 1.], [0., 1.], [0., 1.], \
                   ecc, omega, [1., 100.]]

        hyperps1_instrument = [1., [0., 0.1], [0.1, 10000.]]

        # Now define hyperparameters of the GP depending on the chosen kernel:
        if GPmodel == 'ExpMatern':
            params2 = ['GP_sigma_TESS', 'GP_timescale_TESS', 'GP_rho_TESS']
            dists2 = ['loguniform', 'loguniform', 'loguniform']
            hyperps2 = [[1e-5, 10000.], [1e-3, 1e2], [1e-3, 1e2]]
        elif GPmodel == 'Matern':
            params2 = ['GP_sigma_TESS', 'GP_rho_TESS']
            dists2 = ['loguniform', 'loguniform']
            hyperps2 = [[1e-5, 10000.], [1e-3, 1e2]]
        elif GPmodel == 'QP':
            params2 = ['GP_B_TESS', 'GP_C_TESS', 'GP_L_TESS', 'GP_Prot_TESS']
            dists2 = ['loguniform', 'loguniform', 'loguniform', 'loguniform']
            hyperps2 = [[1e-5, 1e3], [1e-5, 1e4], [1e-3, 1e3], [1., 1e2]]

        # Extract posteriors from out-of-transit GP fit first:
        params = params1_instrument + params2
        dists = dists1_instrument + dists2
        hyperps = hyperps1_instrument + hyperps2

        # Populate priors dict:
        for param, dist, hyperp in zip(params, dists, hyperps):
            priors[param] = {}
            priors[param]['distribution'], priors[param][
                'hyperparameters'] = dist, hyperp

        dataset = juliet.load(input_folder=oot_folder)
        results = dataset.fit()

        for i in range(len(params2)):
            posterior = results.posteriors['posterior_samples'][params2[i]]
            mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
            dists2[i] = 'truncatednormal'
            hyperps2[i] = [mu, sigma, hyperps2[i][0], hyperps2[i][1]]

        # Same for sigma_w and mflux:
        dists1_instrument[2] = 'truncatednormal'
        posterior = results.posteriors['posterior_samples']['sigma_w_TESS']
        mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
        hyperps1_instrument[2] = [
            mu, sigma, hyperps1_instrument[2][0], hyperps1_instrument[2][1]
        ]

        # Normal for mflux:
        dists1_instrument[1] = 'normal'
        posterior = results.posteriors['posterior_samples']['mflux_TESS']
        mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
        hyperps1_instrument[1] = [mu, sigma]

        # Populate prior dict:
        params = params1 + params1_instrument + params2
        dists = dists1 + dists1_instrument + dists2
        hyperps = hyperps1 + hyperps1_instrument + hyperps2

        # Populate the priors dictionary:
        for param, dist, hyperp in zip(params, dists, hyperps):
            priors[param] = {}
            priors[param]['distribution'], priors[param][
                'hyperparameters'] = dist, hyperp

        # Now extract in-transit data from in-transit fit to sector:
        dataset = juliet.load(input_folder=it_folder)

        # Iterate through each of the transits in the sector:
        idx = np.where(np.abs(np.diff(dataset.t_lc)) > 0.5)[0]
        start_idx = -1

        print('Detected', len(idx), 'transits')
        for i in idx:
            tt, ff, fferr = {}, {}, {}
            tt['TESS'], ff['TESS'], fferr['TESS'] = dataset.t_lc[
                start_idx +
                1:i], dataset.y_lc[start_idx +
                                   1:i], dataset.yerr_lc[start_idx + 1:i]

            # Guess which t0 this dataset corresponds to:
            mid_idx = int(len(tt['TESS']) * 0.5)
            tmid = tt['TESS'][mid_idx]
            n = int(round((tmid - t0) / P))  # nearest transit epoch number
            tc = t0 + n * P

            # Check if there is any time-datapoint that covers, at least, an hour around mid-transit:
            n_onehour = len(np.where(np.abs(tt['TESS'] - tc) < 1. / 24.)[0])

            # If there are datapoints, fit the dataset. Use that central time as the t0 mean on the prior:
            if n_onehour > 0:
                priors['t0_p1']['hyperparameters'][0] = tc

                # Run fit:
                transit_dataset = juliet.load(priors=priors, t_lc = tt, y_lc = ff, \
                          yerr_lc = fferr, GP_regressors_lc = tt, out_folder = outpath+'/transit_'+str(n)+'_'+GPmodel+'_in_transit')
                results = transit_dataset.fit(n_live_points=500, verbose=True)
            else:
                print('Transit at', tc, "doesn't have any points within an hour of mid-transit:",
                      np.abs(tt['TESS'] - tc))
            start_idx = i
Example No. 12
# an exponential model in non-log space). We set rv_intercept to zero because we are already fitting for it with
# mu_chile:
params = ['K_p1', 'P_p1', 't0_p1', 'ecc_p1', 'omega_p1',
          'mu_chile', 'sigma_w_chile', 'GP_sigma_chile', 'GP_alpha0_chile', 'rv_slope','rv_intercept']

dists = ['fixed', 'fixed', 'fixed', 'fixed', 'fixed',
         'normal', 'loguniform', 'loguniform', 'loguniform', 'uniform','fixed']

hyperps = [0., 1., 0., 0., 90.,
          [-100.,100.], [1e-5,100.], [1e-5,100.], [1e-5,100.], [-1e3,1e3],0.]

# Gather the priors in a dictionary:
priors = juliet.utils.generate_priors(params,dists,hyperps)

# Define the dataset:
dataset = juliet.load(priors = priors, t_rv = t, y_rv = data, yerr_rv = errors, 
                      GP_regressors_rv = t, out_folder = 'fit_'+str(int(np.max(days))))

# Fit (ta defines a zero-point for the times --- in our case it is 0, but for exoplanet data it is typically
# an arbitrary Julian date):
results = dataset.fit(n_live_points = 1000, ta=0.)

# Now that the fit has run, generate a set of model_times and extrapolate the model a bit:
model_times = np.linspace(np.min(days),np.max(days)+4.,1000)
# Evaluate the model in those times, get mean of sampled models and 68% credibility bands:
model, m_up, m_down = results.rv.evaluate('chile',t = model_times, GPregressors = model_times, return_err = True, all_samples = True)
# Same, to get 95% credibility bands:
model, m_up95, m_down95 = results.rv.evaluate('chile',t = model_times, GPregressors = model_times, return_err = True, alpha = 0.95, all_samples = True)

# Plot data in original non-log space; convert model evaluations above as well:
plt.plot(days,np.exp(infected),'o',color='black',mfc='white')
plt.fill_between(model_times,np.exp(m_down),np.exp(m_up),color='cornflowerblue',alpha=0.5)