예제 #1
0
    def sample_mod(self):
        '''Draw posterior samples from ``self.model`` with exoplanet's
        tuned PyMC3 sampler, storing the result on ``self.trace``.

        Tuning starts from ``self.soln`` (a previously computed MAP
        solution) and runs 4 chains; 2000 draws are kept per chain.
        '''
        # Sampling model
        # Fixed seed so repeated runs produce the same chains.
        np.random.seed(42)
        sampler = xo.PyMC3Sampler(finish=300, chains=4)

        with self.model:
            #burnin = sampler.tune(tune=800, start=self.soln, step_kwargs=dict(target_accept=0.9))
            # Long tuning phase; target_accept=0.9 trades step size for
            # fewer divergences.
            burnin = sampler.tune(tune=4500, start=self.soln, step_kwargs=dict(target_accept=0.9))
            self.trace = sampler.sample(draws=2000)
예제 #2
0
파일: lc.py 프로젝트: tagordon/k2rot
 def mcmc(self,
          draws=500,
          tune=500,
          target_accept=0.9,
          progressbar=False,
          cores=4):
     """Tune and sample the model's posterior; return the trace.

     Tuning starts from ``self.map_soln``. ``target_accept`` is passed
     through to the NUTS step; ``progressbar`` and ``cores`` apply to
     both the tuning and the sampling phases.
     """
     tuner = xo.PyMC3Sampler(finish=200)
     run_opts = dict(progressbar=progressbar, cores=cores)
     with self.model:
         tuner.tune(tune=tune,
                    start=self.map_soln,
                    step_kwargs=dict(target_accept=target_accept),
                    **run_opts)
         return tuner.sample(draws=draws, **run_opts)
예제 #3
0
def best_model(model):
    """Tune and sample the given PyMC3 model; return the posterior trace."""
    mcmc = xo.PyMC3Sampler(finish=200)
    with model:
        mcmc.tune(tune=2000, step_kwargs=dict(target_accept=0.9))
        return mcmc.sample(draws=2000)
예제 #4
0
    def gp_rotation(self,
                    init_period=None,
                    tune=2000,
                    draws=2000,
                    prediction=True,
                    cores=None):
        """
        Calculate a rotation period using a Gaussian process method.

        Args:
            init_period (Optional[float]): Your initial guess for the rotation
                period. The default is the Lomb-Scargle period.
            tune (Optional[int]): The number of tuning samples. Default is
                2000.
            draws (Optional[int]): The number of samples. Default is 2000.
            prediction (Optional[Bool]): If true, a prediction will be
                calculated for each sample. This is useful for plotting the
                prediction but will slow down the whole calculation.
            cores (Optional[int]): The number of cores to use. Default is
                None (for running one process).

        Returns:
            gp_period (float): The GP rotation period in days.
            errp (float): The upper uncertainty on the rotation period.
            errm (float): The lower uncertainty on the rotation period.
            logQ (float): The Q factor.
            Qerrp (float): The upper uncertainty on the Q factor.
            Qerrm (float): The lower uncertainty on the Q factor.

        Side effects:
            Stores ``samples``, ``period_samples``, ``trace`` and the
            summary statistics above as attributes on ``self``.
        """
        self.prediction = prediction

        x = np.array(self.time, dtype=float)
        # Median of data must be zero
        y = np.array(self.flux, dtype=float) - np.median(self.flux)
        yerr = np.array(self.flux_err, dtype=float)

        if init_period is None:
            # Calculate ls period
            init_period = self.ls_rotation()

        with pm.Model() as model:

            # The mean flux of the time series
            mean = pm.Normal("mean", mu=0.0, sd=10.0)

            # A jitter term describing excess white noise
            # (prior centred on the smallest reported flux error).
            logs2 = pm.Normal("logs2", mu=2 * np.log(np.min(yerr)), sd=5.0)

            # The parameters of the RotationTerm kernel
            logamp = pm.Normal("logamp", mu=np.log(np.var(y)), sd=5.0)
            logperiod = pm.Normal("logperiod", mu=np.log(init_period), sd=5.0)
            logQ0 = pm.Normal("logQ0", mu=1.0, sd=10.0)
            logdeltaQ = pm.Normal("logdeltaQ", mu=2.0, sd=10.0)
            mix = pm.Uniform("mix", lower=0, upper=1.0)

            # Track the period as a deterministic
            period = pm.Deterministic("period", tt.exp(logperiod))

            # Set up the Gaussian Process model
            kernel = xo.gp.terms.RotationTerm(log_amp=logamp,
                                              period=period,
                                              log_Q0=logQ0,
                                              log_deltaQ=logdeltaQ,
                                              mix=mix)
            # Diagonal is measurement variance plus the jitter term.
            gp = xo.gp.GP(kernel, x, yerr**2 + tt.exp(logs2), J=4)

            # Compute the Gaussian Process likelihood and add it into the
            # the PyMC3 model as a "potential"
            pm.Potential("loglike", gp.log_likelihood(y - mean))

            # Compute the mean model prediction for plotting purposes
            if prediction:
                pm.Deterministic("pred", gp.predict())

            # Optimize to find the maximum a posteriori parameters
            self.map_soln = xo.optimize(start=model.test_point)
            # print(self.map_soln)
            # print(xo.utils.eval_in_model(model.logpt, self.map_soln))
            # assert 0

            # Sample from the posterior
            # Fixed seed for reproducible chains.
            np.random.seed(42)
            sampler = xo.PyMC3Sampler()
            # NOTE(review): we are already inside the ``with pm.Model()``
            # context, so re-entering ``with model:`` here is redundant
            # (though harmless).
            with model:
                print("sampling...")
                sampler.tune(tune=tune,
                             start=self.map_soln,
                             step_kwargs=dict(target_accept=0.9),
                             cores=cores)
                trace = sampler.sample(draws=draws, cores=cores)

            # Save samples
            samples = pm.trace_to_dataframe(trace)
            self.samples = samples

            # Summarise the period posterior with the median and the
            # 16th/84th percentiles (1-sigma equivalent bounds).
            self.period_samples = trace["period"]
            self.gp_period = np.median(self.period_samples)
            lower = np.percentile(self.period_samples, 16)
            upper = np.percentile(self.period_samples, 84)
            self.errm = self.gp_period - lower
            self.errp = upper - self.gp_period
            self.logQ = np.median(trace["logQ0"])
            upperQ = np.percentile(trace["logQ0"], 84)
            lowerQ = np.percentile(trace["logQ0"], 16)
            self.Qerrp = upperQ - self.logQ
            self.Qerrm = self.logQ - lowerQ

        self.trace = trace

        return self.gp_period, self.errp, self.errm, self.logQ, self.Qerrp, \
            self.Qerrm
예제 #5
0
    # Save the current separation/position angle (computed earlier, on what
    # is presumably a dense time grid — confirm against the preceding code)
    # as named deterministics so they are recorded with each sample.
    rho_save_sky = pm.Deterministic("rhoSaveSky", rho)
    theta_save_sky = pm.Deterministic("thetaSaveSky", theta)

    # Re-evaluate the relative angles at the observed epochs and save those too.
    rho, theta = orbit.get_relative_angles(t_data, parallax)
    rho_save_data = pm.Deterministic("rhoSaveData", rho)
    theta_save_data = pm.Deterministic("thetaSaveData", theta)

    # save RV plots
    # Dense time grid (phase * period + reference JD) and the corresponding
    # radial-velocity curves for both components; index [2] selects the
    # line-of-sight velocity component, offset by the Keck zero-point.
    t_dense = pm.Deterministic("tDense", xs_phase * P + jd0)
    rv1_dense = pm.Deterministic(
        "RV1Dense",
        conv * orbit.get_star_velocity(t_dense - jd0)[2] + gamma_keck)
    rv2_dense = pm.Deterministic(
        "RV2Dense",
        conv * orbit.get_planet_velocity(t_dense - jd0)[2] + gamma_keck)

with model:
    # Staged MAP optimization: start with the angular semi-major axis and
    # phase only, then progressively free more orbital parameters, and
    # finish with a full optimization over everything.
    map_sol0 = xo.optimize(vars=[a_ang, phi])
    map_sol1 = xo.optimize(map_sol0, vars=[a_ang, phi, omega, Omega])
    map_sol2 = xo.optimize(map_sol1,
                           vars=[a_ang, logP, phi, omega, Omega, incl, e])
    map_sol3 = xo.optimize(map_sol2)

# now let's actually explore the posterior for real
sampler = xo.PyMC3Sampler(finish=500, chains=4)
with model:
    burnin = sampler.tune(tune=2000, step_kwargs=dict(target_accept=0.9))
    trace = sampler.sample(draws=3000)

# Persist the chains to disk so the run can be re-analysed later.
pm.backends.ndarray.save_trace(trace, directory="current", overwrite=True)
예제 #6
0
def PLD(tpf,
        planet_mask=None,
        aperture=None,
        return_soln=False,
        return_quick_corrected=False,
        sigma=5,
        trim=0,
        ndraws=1000):
    ''' Use exoplanet, pymc3 and theano to perform PLD correction

    Parameters
    ----------
    tpf : lk.TargetPixelFile
        Target Pixel File to Correct
    planet_mask : np.ndarray
        Boolean array. Cadences where planet_mask is False will be excluded from the
        PLD correction. Use this to mask out planet transits.
    aperture : np.ndarray
        Boolean pixel aperture mask. Defaults to ``tpf.pipeline_mask``.
    return_soln : bool
        If True, skip sampling and return ``(model, map_soln, motion, stellar)``.
    return_quick_corrected : bool
        If True, skip sampling and return a light curve corrected using
        only the MAP solution.
    sigma : float
        Sigma-clipping threshold used to reject outlier cadences between
        the first and second fits.
    trim : int
        Number of pixels trimmed from each edge of the TPF before building
        the PLD basis.
    ndraws : int
        Number of posterior draws (burn-in is max(0.3 * ndraws, 150)).

    Returns
    -------
    clc : lk.KeplerLightCurve
        PLD-corrected light curve with sampling metadata attached (unless
        ``return_soln`` or ``return_quick_corrected`` is set, see above).
    '''
    if planet_mask is None:
        planet_mask = np.ones(len(tpf.time), dtype=bool)
    if aperture is None:
        aperture = tpf.pipeline_mask

    time = np.asarray(tpf.time, np.float64)
    if trim > 0:
        flux = np.asarray(tpf.flux[:, trim:-trim, trim:-trim], np.float64)
        flux_err = np.asarray(tpf.flux_err[:, trim:-trim, trim:-trim],
                              np.float64)
        aper = np.asarray(aperture, bool)[trim:-trim, trim:-trim]
    else:
        flux = np.asarray(tpf.flux, np.float64)
        flux_err = np.asarray(tpf.flux_err, np.float64)
        aper = np.asarray(aperture, bool)

    # Simple aperture photometry, normalised to zero-mean flux.
    raw_flux = np.asarray(np.nansum(flux[:, aper], axis=(1)), np.float64)
    raw_flux_err = np.asarray(
        np.nansum(flux_err[:, aper]**2, axis=(1))**0.5, np.float64)

    raw_flux_err /= np.median(raw_flux)
    raw_flux /= np.median(raw_flux)
    raw_flux -= 1

    # Setting to Parts Per Thousand keeps us from hitting machine precision errors...
    raw_flux *= 1e3
    raw_flux_err *= 1e3

    # Build the first order PLD basis, excluding saturated pixels
    # (saturation threshold 175000 counts at the brightest cadence).
    saturation = (np.nanpercentile(flux, 100, axis=0) > 175000)
    X_pld = np.reshape(flux[:, aper & ~saturation], (len(tpf.flux), -1))

    # Add one basis vector per saturated column (summed over the column),
    # so saturated charge bleed is still tracked.
    extra_pld = np.zeros((len(time), np.any(saturation, axis=0).sum()))
    idx = 0
    for column in saturation.T:
        if column.any():
            extra_pld[:, idx] = np.sum(flux[:, column, :], axis=(1, 2))
            idx += 1
    X_pld = np.hstack([X_pld, extra_pld])

    # Remove NaN pixels
    X_pld = X_pld[:, ~((~np.isfinite(X_pld)).all(axis=0))]
    X_pld = X_pld / np.sum(flux[:, aper], axis=-1)[:, None]

    # Build the second order PLD basis and run PCA to reduce the number of dimensions
    X2_pld = np.reshape(X_pld[:, None, :] * X_pld[:, :, None], (len(flux), -1))
    # Remove NaN pixels
    X2_pld = X2_pld[:, ~((~np.isfinite(X2_pld)).all(axis=0))]
    U, _, _ = np.linalg.svd(X2_pld, full_matrices=False)
    X2_pld = U[:, :X_pld.shape[1]]

    # Construct the design matrix and fit for the PLD model
    X_pld = np.concatenate((np.ones((len(flux), 1)), X_pld, X2_pld), axis=-1)

    # Create a matrix with small numbers along diagonal to ensure that
    # X.T * sigma^-1 * X is not singular, and prevent it from being non-invertable
    diag = np.diag((1e-8 * np.ones(X_pld.shape[1])))

    if (~np.isfinite(X_pld)).any():
        raise ValueError('NaNs in components.')
    if (np.any(raw_flux_err == 0)):
        raise ValueError('Zeros in raw_flux_err.')

    def build_model(mask=None, start=None):
        ''' Build a PYMC3 model

        Parameters
        ----------
        mask : np.ndarray
            Boolean array to mask cadences. Cadences that are False will be excluded
            from the model fit
        start : dict
            MAP Solution from exoplanet

        Returns
        -------
        model : pymc3.model.Model
            A pymc3 model
        map_soln : dict
            Best fit solution
        gp : xo.gp.GP
            The Gaussian Process used to model stellar variability
        '''

        if mask is None:
            mask = np.ones(len(time), dtype=bool)

        with pm.Model() as model:
            # GP
            # --------
            logs2 = pm.Normal("logs2", mu=np.log(np.var(raw_flux[mask])), sd=4)

            logsigma = pm.Normal("logsigma",
                                 mu=np.log(np.std(raw_flux[mask])),
                                 sd=4)
            logrho = pm.Normal("logrho", mu=np.log(150), sd=4)
            kernel = xo.gp.terms.Matern32Term(log_rho=logrho,
                                              log_sigma=logsigma)
            gp = xo.gp.GP(kernel, time[mask],
                          tt.exp(logs2) + raw_flux_err[mask]**2)

            # Motion model: generalised least squares for the PLD weights,
            # solving (X^T C^-1 X) w = X^T C^-1 y with the GP covariance.
            #------------------
            A = tt.dot(X_pld[mask].T, gp.apply_inverse(X_pld[mask]))
            A = A + diag  # Add small numbers to diagonal to prevent it from being singular
            B = tt.dot(X_pld[mask].T, gp.apply_inverse(raw_flux[mask, None]))
            C = tt.slinalg.solve(A, B)
            motion_model = pm.Deterministic("motion_model",
                                            tt.dot(X_pld[mask], C)[:, 0])

            # Likelihood
            #------------------
            pm.Potential("obs",
                         gp.log_likelihood(raw_flux[mask] - motion_model))

            # gp predicted flux
            gp_pred = gp.predict()
            pm.Deterministic("gp_pred", gp_pred)
            pm.Deterministic("weights", C)

            # Optimize
            #------------------
            if start is None:
                start = model.test_point
            # NOTE(review): the first four optimize calls all restart from
            # ``start`` and discard each other's results; only the last of
            # them feeds the following stages. Possibly intended as
            # independent restarts — confirm before chaining them.
            map_soln = xo.optimize(start=start, vars=[logsigma])
            map_soln = xo.optimize(start=start, vars=[logrho, logsigma])
            map_soln = xo.optimize(start=start, vars=[logsigma])
            map_soln = xo.optimize(start=start, vars=[logrho, logsigma])
            map_soln = xo.optimize(start=map_soln, vars=[logs2])
            map_soln = xo.optimize(start=map_soln,
                                   vars=[logrho, logsigma, logs2])
            return model, map_soln, gp

    # First rough correction
    log.info('Optimizing roughly')
    with silence():
        model0, map_soln0, gp = build_model(mask=planet_mask)

    # Remove outliers, make sure to remove a few nearby points incase of flares.
    with model0:
        motion = np.dot(X_pld, map_soln0['weights']).reshape(-1)
        stellar = xo.eval_in_model(gp.predict(time), map_soln0)
        corrected = raw_flux - motion - stellar
        mask = ~sigma_clip(corrected, sigma=sigma).mask
        mask = ~(convolve(mask, Box1DKernel(3), fill_value=1) != 1)
        mask &= planet_mask

    # Optimize PLD
    log.info('Optimizing without outliers')
    with silence():
        model, map_soln, gp = build_model(mask, map_soln0)
    lc_fig = _plot_light_curve(map_soln, model, mask, time, raw_flux,
                               raw_flux_err, X_pld, gp)

    if return_soln:
        motion = np.dot(X_pld, map_soln['weights']).reshape(-1)
        with model:
            stellar = xo.eval_in_model(gp.predict(time), map_soln)
        return model, map_soln, motion, stellar

    if return_quick_corrected:
        raw_lc = tpf.to_lightcurve()
        # estimate_centroids() returns (column, row); use each for its
        # own field (previously the column array was passed for both).
        centroid_col, centroid_row = tpf.estimate_centroids()
        clc = lk.KeplerLightCurve(
            time=time,
            flux=(raw_flux - stellar - motion) * 1e-3 + 1,
            flux_err=(raw_flux_err) * 1e-3,
            time_format=raw_lc.time_format,
            centroid_col=centroid_col,
            centroid_row=centroid_row,
            quality=raw_lc.quality,
            channel=raw_lc.channel,
            campaign=raw_lc.campaign,
            quarter=raw_lc.quarter,
            mission=raw_lc.mission,
            cadenceno=raw_lc.cadenceno,
            targetid=raw_lc.targetid,
            ra=raw_lc.ra,
            dec=raw_lc.dec,
            label='{} PLD Corrected'.format(raw_lc.targetid))
        return clc

    # Burn in
    sampler = xo.PyMC3Sampler()
    with model:
        burnin = sampler.tune(tune=np.max([int(ndraws * 0.3), 150]),
                              start=map_soln,
                              step_kwargs=dict(target_accept=0.9),
                              chains=4)
    # Sample
    with model:
        trace = sampler.sample(draws=ndraws, chains=4)

    varnames = ["logrho", "logsigma", "logs2"]
    pm.traceplot(trace, varnames=varnames)

    samples = pm.trace_to_dataframe(trace, varnames=varnames)
    corner.corner(samples)

    # Generate 50 realizations of the prediction sampling randomly from the chain
    N_pred = 50
    pred_mu = np.empty((N_pred, len(time)))
    pred_motion = np.empty((N_pred, len(time)))
    with model:
        pred = gp.predict(time)
        for i, sample in enumerate(
                tqdm(xo.get_samples_from_trace(trace, size=N_pred),
                     total=N_pred)):
            pred_mu[i] = xo.eval_in_model(pred, sample)
            pred_motion[i, :] = np.dot(X_pld, sample['weights']).reshape(-1)

    # Posterior mean and scatter of the total systematics model.
    star_model = np.mean(pred_mu + pred_motion, axis=0)
    star_model_err = np.std(pred_mu + pred_motion, axis=0)

    raw_lc = tpf.to_lightcurve()

    meta = {
        'samples': samples,
        'trace': trace,
        'pred_mu': pred_mu,
        'pred_motion': pred_motion
    }

    # estimate_centroids() returns (column, row); use each for its own
    # field (previously the column array was passed for both).
    centroid_col, centroid_row = tpf.estimate_centroids()
    clc = lk.KeplerLightCurve(
        time=time,
        flux=(raw_flux - star_model) * 1e-3 + 1,
        flux_err=((raw_flux_err**2 + star_model_err**2)**0.5) * 1e-3,
        time_format=raw_lc.time_format,
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        quality=raw_lc.quality,
        channel=raw_lc.channel,
        campaign=raw_lc.campaign,
        quarter=raw_lc.quarter,
        mission=raw_lc.mission,
        cadenceno=raw_lc.cadenceno,
        targetid=raw_lc.targetid,
        ra=raw_lc.ra,
        dec=raw_lc.dec,
        label='{} PLD Corrected'.format(raw_lc.targetid),
        meta=meta)
    return clc
예제 #7
0
def fit_planets(lc,
                period_value,
                t0_value,
                depth_value,
                R_star,
                M_star,
                T_star,
                texp=0.0204335,
                ndraws=1000):
    '''Fit planet parameters using exoplanet

    Parameters
    ----------
    lc : light curve object
        Object with ``time``, ``flux`` and ``flux_err`` attributes; a copy
        is taken so the input is not modified.
    period_value, t0_value, depth_value : array-like
        Per-planet initial guesses for the orbital period, transit mid-time
        and transit depth. Must all have the same length.
    R_star, M_star, T_star : tuple of (value, error)
        Gaussian priors on the stellar radius, mass and effective temperature.
    texp : float
        Exposure time used when evaluating the transit model.
    ndraws : int
        Number of posterior draws (burn-in is max(0.3 * ndraws, 150)).

    Returns
    -------
    trace : pymc3 MultiTrace
        Posterior samples.
    mask : np.ndarray
        Boolean cadence mask after outlier rejection.

    Raises
    ------
    ValueError
        If the per-planet parameter arrays do not all have the same length.
    '''

    lc = lc.copy()
    shape = len(period_value)
    # The shaped priors below assume one entry per planet in every array;
    # validate up front instead of failing deep inside theano.
    if not (len(t0_value) == shape and len(depth_value) == shape):
        raise ValueError('All planet parameters must have the same shape!')

    x, y, yerr = np.asarray(lc.time, float), np.asarray(lc.flux,
                                                        float), np.asarray(
                                                            lc.flux_err, float)

    # Normalise to zero-mean flux in parts-per-thousand to avoid
    # machine-precision issues.
    yerr /= np.median(y)
    y /= np.median(y)
    y -= 1

    y *= 1e3
    yerr *= 1e3

    def build_model(mask=None, start=None):
        '''Build the pymc3 transit model; returns (model, map_soln).'''
        if mask is None:
            mask = np.ones(len(x), dtype=bool)
        with pm.Model() as model:

            # Parameters for the stellar properties
            mean = pm.Normal("mean", mu=0.0, sd=10.0)
            u_star = xo.distributions.QuadLimbDark("u_star")

            m_star = pm.Normal("m_star", mu=M_star[0], sd=M_star[1])
            r_star = pm.Normal("r_star", mu=R_star[0], sd=R_star[1])
            t_star = pm.Normal("t_star", mu=T_star[0], sd=T_star[1])

            # Prior to require physical parameters
            pm.Potential("m_star_prior", tt.switch(m_star > 0, 0, -np.inf))
            pm.Potential("r_star_prior", tt.switch(r_star > 0, 0, -np.inf))

            # Orbital parameters for the planets
            logP = pm.Normal("logP",
                             mu=np.log(period_value),
                             sd=0.01,
                             shape=shape)
            t0 = pm.Normal("t0", mu=t0_value, sd=0.01, shape=shape)
            b = pm.Uniform("b", lower=0, upper=1, testval=0.5, shape=shape)
            # Radius prior centred on sqrt(depth) * R_star.
            logr = pm.Normal("logr",
                             sd=1.0,
                             mu=0.5 * np.log(np.array(depth_value)) +
                             np.log(R_star[0]),
                             shape=shape)
            r_pl = pm.Deterministic("r_pl", tt.exp(logr))
            ror = pm.Deterministic("ror", r_pl / r_star)

            # Tracking planet parameters
            period = pm.Deterministic("period", tt.exp(logP))

            # Orbit model
            orbit = xo.orbits.KeplerianOrbit(r_star=r_star,
                                             m_star=m_star,
                                             period=period,
                                             t0=t0,
                                             b=b)

            incl = pm.Deterministic('incl', orbit.incl)
            a = pm.Deterministic('a', orbit.a)

            # Planet equilibrium temperature (zero albedo, full
            # heat redistribution).
            teff = pm.Deterministic('teff', t_star * tt.sqrt(0.5 * (1 / a)))

            # Compute the model light curve using starry (scaled to ppt
            # to match the normalised flux).
            light_curves = xo.StarryLightCurve(u_star).get_light_curve(
                orbit=orbit, r=r_pl, t=x[mask], texp=texp) * 1e3
            light_curve = pm.math.sum(light_curves, axis=-1) + mean
            pm.Deterministic("light_curves", light_curves)

            pm.Normal('obs', mu=light_curve, sd=yerr[mask], observed=y[mask])

            # Optimize
            #------------------
            # Staged MAP fit: one parameter group at a time, then everything.
            if start is None:
                start = model.test_point
            map_soln = xo.optimize(start=start, vars=[logr])
            map_soln = xo.optimize(start=map_soln, vars=[b])
            map_soln = xo.optimize(start=map_soln, vars=[logP, t0])
            map_soln = xo.optimize(start=map_soln, vars=[u_star])
            map_soln = xo.optimize(start=map_soln, vars=[logr])
            map_soln = xo.optimize(start=map_soln, vars=[b])
            map_soln = xo.optimize(start=map_soln, vars=[mean])
            map_soln = xo.optimize(start=map_soln)
            return model, map_soln

    # First pass: fit using all cadences to get an initial solution.
    with silence():
        model0, map_soln0 = build_model()

    # Subtract the MAP transit models and sigma-clip the residuals
    # (asymmetric limits keep transits from being clipped); also drop
    # cadences adjacent to clipped ones.
    corrected = y.copy()
    for planet in map_soln0['light_curves'].T:
        corrected -= planet
    mask = ~sigma_clip(corrected, sigma_upper=4, sigma_lower=10).mask
    mask = ~(convolve(mask, Box1DKernel(2), fill_value=1) != 1)

    # Refit without the outliers, warm-starting from the first solution.
    with silence():
        model, map_soln = build_model(mask=mask, start=map_soln0)

    # Burn in
    sampler = xo.PyMC3Sampler()
    with model:
        burnin = sampler.tune(tune=np.max([int(ndraws * 0.3), 150]),
                              start=map_soln,
                              step_kwargs=dict(target_accept=0.9),
                              chains=4)
    # Sample
    with model:
        trace = sampler.sample(draws=ndraws, chains=4)

    return trace, mask
예제 #8
0
    # Optimize
    # Two-stage MAP fit: first optimize only the trend parameters from the
    # model's test point, then refine all free parameters from that solution.
    map_soln = pm.find_MAP(start=model.test_point, vars=[trend])
    map_soln = pm.find_MAP(start=map_soln)


def check_convergence(samples):
    """Check autocorrelation-based convergence of an MCMC chain.

    The chain is considered converged when the effective sample size
    exceeds the module-level ``target_n_eff`` for every parameter AND the
    chain is at least 50 integrated autocorrelation times long.

    Parameters
    ----------
    samples : np.ndarray
        Chain of shape (n_steps, n_walkers, ...), as produced by emcee.

    Returns
    -------
    converged : bool-like
        Whether both criteria are satisfied.
    n_eff : np.ndarray
        Effective number of samples per parameter (total samples / tau).
    """
    tau = emcee.autocorr.integrated_time(samples, tol=0)
    total = samples.shape[0] * samples.shape[1]
    enough_eff = np.all(tau * target_n_eff < total)
    long_enough = np.all(len(samples) > 50 * tau)
    return enough_eff & long_enough, total / tau


# Run the PyMC3 sampler
chains = 2
# Windowed tuning schedule: 500-step start/finish phases with 500-step
# doubling windows in between (see exoplanet's PyMC3Sampler).
sampler = xo.PyMC3Sampler(start=500, finish=500, window=500)
with model:
    # Large tuning budget; single core with the progress bar disabled,
    # presumably for batch/log-file output — confirm against the run setup.
    burnin = sampler.tune(tune=100000,
                          start=map_soln,
                          chains=chains,
                          cores=1,
                          progressbar=False)

tottime = 0
trace = None
with model:
    while True:
        strt = time.time()
        trace = sampler.sample(draws=2000,
                               trace=trace,
                               chains=chains,