Example #1
def run_mcmc_1d(t, data, logS0_init, 
                logw0_init, logQ_init, 
                logsig_init, t0_init, 
                r_init, d_init, tin_init):
    
    with pm.Model() as model:
        #logsig = pm.Uniform("logsig", lower=-20.0, upper=0.0, testval=logsig_init)

        # The parameters of the SHOTerm kernel
        #logS0 = pm.Uniform("logS0", lower=-50.0, upper=0.0, testval=logS0_init)
        #logQ = pm.Uniform("logQ", lower=-50.0, upper=20.0, testval=logQ_init)
        #logw0 = pm.Uniform("logw0", lower=-50.0, upper=20.0, testval=logw0_init)
        
        # The parameters for the transit mean function
        t0 = pm.Uniform("t0", lower=t[0], upper=t[-1], testval=t0_init)
        r = pm.Uniform("r", lower=0.0, upper=1.0, testval=r_init)
        d = pm.Uniform("d", lower=0.0, upper=10.0, testval=d_init)
        tin = pm.Uniform("tin", lower=0.0, upper=10.0, testval=tin_init)
            
        # Deterministics
        # mean = pm.Deterministic("mean", utils.transit(t, t0, r, d, tin))
        transit = utils.theano_transit(t, t0, r, d, tin)

        # Set up the Gaussian Process model
        kernel = xo.gp.terms.SHOTerm(
            log_S0=logS0_init,
            log_w0=logw0_init,
            log_Q=logQ_init
        )
    
        diag = np.exp(2*logsig_init)*tt.ones((1, len(t)))
        gp = GP(kernel, t, diag, J=2)

        # Compute the Gaussian Process likelihood and add it into the
        # PyMC3 model as a "potential"
        pm.Potential("loglike", gp.log_likelihood(data - transit))

        # Compute the mean model prediction for plotting purposes
        #pm.Deterministic("mu", gp.predict())
        map_soln = xo.optimize(start=model.test_point, verbose=False)

    with model:
        trace = pm.sample(
            tune=500,
            draws=500,
            start=map_soln,
            cores=2,
            chains=2,
            step=xo.get_dense_nuts_step(target_accept=0.9),
        )
    return trace
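The pattern in run_mcmc_1d (an SHOTerm kernel, the GP log-likelihood added as a pm.Potential, MAP optimization, then dense-mass NUTS) can be exercised end to end without the project's utils module. A minimal sketch on synthetic data, assuming the same exoplanet (~0.2/0.3) and PyMC3 3.x APIs used throughout these examples:

import numpy as np
import pymc3 as pm
import exoplanet as xo

np.random.seed(42)
t = np.sort(np.random.uniform(0, 10, 100))
yerr = 0.1 * np.ones_like(t)
data = np.sin(t) + yerr * np.random.randn(len(t))

with pm.Model() as model:
    logS0 = pm.Normal("logS0", mu=0.0, sd=5.0)
    logw0 = pm.Normal("logw0", mu=0.0, sd=5.0)
    logQ = pm.Normal("logQ", mu=0.0, sd=5.0)
    kernel = xo.gp.terms.SHOTerm(log_S0=logS0, log_w0=logw0, log_Q=logQ)
    gp = xo.gp.GP(kernel, t, yerr ** 2)

    # The GP marginal likelihood enters the model as a "potential"
    pm.Potential("loglike", gp.log_likelihood(data))

    map_soln = xo.optimize(start=model.test_point, verbose=False)
    trace = pm.sample(tune=500, draws=500, start=map_soln, cores=2, chains=2,
                      step=xo.get_dense_nuts_step(target_accept=0.9))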
Example #2
def run_optimizer(pm_model, return_logl=False):
    with pm_model:
        map_params = xo.optimize(start=pm_model.test_point, vars=[pm_model.m_b])
        map_params = xo.optimize(start=map_params, vars=[pm_model.ln_delta_t0])
        map_params = xo.optimize(
            start=map_params,
            vars=[pm_model.ln_delta_t0, pm_model.ln_A0, pm_model.ln_tE],
        )
        map_params, info = xo.optimize(start=map_params, return_info=True)

    if return_logl:
        return map_params, info.fun
    else:
        return map_params
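run_optimizer is a staged optimization: each xo.optimize call frees only the parameters named in vars, and the final call polishes everything jointly from the best point so far. A self-contained toy version of the idiom (the linear model here is invented purely for illustration):

import numpy as np
import pymc3 as pm
import exoplanet as xo

np.random.seed(0)
x = np.linspace(0, 1, 50)
y_obs = 2.0 * x + 1.0 + 0.1 * np.random.randn(50)

with pm.Model() as toy_model:
    slope = pm.Normal("slope", mu=0.0, sd=10.0)
    intercept = pm.Normal("intercept", mu=0.0, sd=10.0)
    pm.Normal("obs", mu=slope * x + intercept, sd=0.1, observed=y_obs)

    # Optimize one parameter at a time, then everything together
    sol = xo.optimize(start=toy_model.test_point, vars=[slope])
    sol = xo.optimize(start=sol, vars=[intercept])
    sol, info = xo.optimize(start=sol, return_info=True)
    print(info.fun)  # objective (negative log-probability) at the optimum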
Example #3
    def granulation_model(self):
        peak = self.period_prior()
        x = self.lc.lcf.time
        y = self.lc.lcf.flux
        yerr = self.lc.lcf.flux_err
        with pm.Model() as model:
            # The mean flux of the time series
            mean = pm.Normal("mean", mu=0.0, sd=10.0)

            # A jitter term describing excess white noise
            logs2 = pm.Normal("logs2", mu=2*np.log(np.min(sigmaclip(yerr)[0])), sd=1.0)

            logw0 = pm.Bound(pm.Normal, lower=-0.5, upper=np.log(2 * np.pi / self.min_period))("logw0", mu=0.0, sd=5)
            logSw4 = pm.Normal("logSw4", mu=np.log(np.var(y)), sd=5)
            kernel = xo.gp.terms.SHOTerm(log_Sw4=logSw4, log_w0=logw0, Q=1 / np.sqrt(2))

            #GP model
            gp = xo.gp.GP(kernel, x, yerr**2 + tt.exp(logs2))

            # Compute the Gaussian Process likelihood and add it into the
            # PyMC3 model as a "potential"
            pm.Potential("loglike", gp.log_likelihood(y - mean))

            # Compute the mean model prediction for plotting purposes
            pm.Deterministic("pred", gp.predict())

            # Optimize to find the maximum a posteriori parameters
            map_soln = xo.optimize(start=model.test_point)
        return model, map_soln
Example #4
    def singleband_gp(self, lower=5, upper=50, seed=42):
        # x, y, yerr = make_data_nice(x, y, yerr)
        np.random.seed(seed)

        with pm.Model() as model:

            # The mean flux of the time series
            mean = pm.Normal("mean", mu=0.0, sd=10.0)

            # A jitter term describing excess white noise
            logs2 = pm.Normal("logs2",
                              mu=2 * np.log(np.mean(self.yerr)),
                              sd=2.0)

            # A term to describe the non-periodic variability
            logSw4 = pm.Normal("logSw4", mu=np.log(np.var(self.y)), sd=5.0)
            logw0 = pm.Normal("logw0", mu=np.log(2 * np.pi / 10), sd=5.0)

            # The parameters of the RotationTerm kernel
            logamp = pm.Normal("logamp", mu=np.log(np.var(self.y)), sd=5.0)
            BoundedNormal = pm.Bound(pm.Normal,
                                     lower=np.log(lower),
                                     upper=np.log(upper))
            logperiod = BoundedNormal("logperiod",
                                      mu=np.log(self.init_period),
                                      sd=5.0)
            logQ0 = pm.Normal("logQ0", mu=1.0, sd=10.0)
            logdeltaQ = pm.Normal("logdeltaQ", mu=2.0, sd=10.0)
            mix = xo.distributions.UnitUniform("mix")

            # Track the period as a deterministic
            period = pm.Deterministic("period", tt.exp(logperiod))

            # Set up the Gaussian Process model
            kernel = xo.gp.terms.SHOTerm(log_Sw4=logSw4,
                                         log_w0=logw0,
                                         Q=1 / np.sqrt(2))
            kernel += xo.gp.terms.RotationTerm(log_amp=logamp,
                                               period=period,
                                               log_Q0=logQ0,
                                               log_deltaQ=logdeltaQ,
                                               mix=mix)
            gp = xo.gp.GP(kernel,
                          self.x,
                          self.yerr**2 + tt.exp(logs2),
                          mean=mean)

            # Condition the GP on the observations and add the marginal
            # likelihood to the PyMC3 model
            gp.marginal("gp", observed=self.y)

            # Compute the mean model prediction for plotting purposes
            pm.Deterministic("pred", gp.predict())

            # Optimize to find the maximum a posteriori parameters
            map_soln = xo.optimize(start=model.test_point)

        self.model = model
        self.map_soln = map_soln
        return map_soln, model
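Unlike Example #1, singleband_gp conditions the GP with gp.marginal rather than a pm.Potential, which also lets the GP own a mean parameter. A stripped-down sketch of that construction on synthetic data, assuming the same SHOTerm keywords (log_Sw4, log_w0, fixed Q) as above:

import numpy as np
import pymc3 as pm
import exoplanet as xo

np.random.seed(0)
x = np.sort(np.random.uniform(0, 27, 200))
yerr = 0.05 * np.ones_like(x)
y = 0.1 * np.sin(2 * np.pi * x / 3.0) + yerr * np.random.randn(len(x))

with pm.Model() as model:
    mean = pm.Normal("mean", mu=0.0, sd=1.0)
    logSw4 = pm.Normal("logSw4", mu=np.log(np.var(y)), sd=5.0)
    logw0 = pm.Normal("logw0", mu=np.log(2 * np.pi / 3.0), sd=5.0)
    kernel = xo.gp.terms.SHOTerm(log_Sw4=logSw4, log_w0=logw0, Q=1 / np.sqrt(2))
    gp = xo.gp.GP(kernel, x, yerr ** 2, mean=mean)

    # Condition on the data: the GP marginal likelihood is an observed variable
    gp.marginal("gp", observed=y)
    pm.Deterministic("pred", gp.predict())

    map_soln = xo.optimize(start=model.test_point)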
Example #5
def run_pymc3_model(pos, pos_err, proper, proper_err, mean, cov):

    M = get_tangent_basis(pos[0] * 2 * np.pi / 360, pos[1] * 2 * np.pi / 360)
    # mean, cov = get_prior()

    with pm.Model() as model:

        vxyzD = pm.MvNormal("vxyzD", mu=mean, cov=cov, shape=4)
        vxyz = pm.Deterministic("vxyz", vxyzD[:3])
        log_D = pm.Deterministic("log_D", vxyzD[3])
        D = pm.Deterministic("D", tt.exp(log_D))

        xyz = pm.Deterministic("xyz", tt_eqtogal(pos[0], pos[1], D)[:, 0])

        pm_from_v, rv_from_v = tt_get_icrs_from_galactocentric(
            xyz, vxyz, pos[0], pos[1], D, M)

        pm.Normal("proper_motion",
                  mu=pm_from_v,
                  sigma=np.array(proper_err),
                  observed=np.array(proper))
        pm.Normal("parallax", mu=1. / D, sigma=pos_err[2], observed=pos[2])

        map_soln = xo.optimize()
        trace = pm.sample(tune=1500,
                          draws=1000,
                          start=map_soln,
                          step=xo.get_dense_nuts_step(target_accept=0.9))

    return trace
Example #6
    def fit_ttv(self, n, run_MCMC=False, ttv_start=None, verbose=True):
        """
        Fit a single transit with a transit timing variation, using the shape given by best-fit orbital parameters.
        """
        if verbose: print("Fitting ttv for transit number", n)

        # Get the transit lightcurve
        transit = self.lightcurve.get_transit(n, self.p_ref, self.t0_ref)
        t = transit.time * self.p_ref
        y = transit.flux
        sd = transit.flux_err

        if ttv_start is None:
            ttv_start = np.median(self.pars['ttvs'])

        with pm.Model() as model:
            ttv = pm.Normal("ttv", mu=ttv_start, sd=0.025)  # sd = 36 minutes

            orbit = xo.orbits.KeplerianOrbit(period=self.p_ref,
                                             t0=ttv,
                                             b=self.pars['b'])

            light_curves = xo.LimbDarkLightCurve(
                self.pars['u']).get_light_curve(orbit=orbit,
                                                r=self.pars['r'],
                                                t=t)
            light_curve = pm.math.sum(light_curves, axis=-1) + 1
            pm.Deterministic("transit_" + str(n), light_curve)

            pm.Normal("obs", mu=light_curve, sd=sd, observed=y)

            map_soln = xo.optimize(start=model.test_point,
                                   verbose=False,
                                   progress_bar=False)

        self.pars['ttvs'][n] = float(map_soln['ttv'])
        if verbose: print(f"\t ttv {n} = {self.pars['ttvs'][n]}")

        if run_MCMC:
            np.random.seed(42)
            with model:
                trace = pm.sample(
                    tune=500,
                    draws=500,
                    start=map_soln,
                    cores=1,
                    chains=2,
                    step=xo.get_dense_nuts_step(target_accept=0.9),
                )

            self.pars['ttvs'][n] = np.median(trace['ttv'])
            self.pars['e_ttvs'][n] = self.pars['ttvs'][n] - np.percentile(
                trace['ttv'], 16, axis=0)
            self.pars['E_ttvs'][n] = -self.pars['ttvs'][n] + np.percentile(
                trace['ttv'], 84, axis=0)

            if verbose:
                print(
                    f"\t ttv {n} = {self.pars['ttvs'][n]} "
                    f"+{self.pars['E_ttvs'][n]} -{self.pars['e_ttvs'][n]}"
                )
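The transit piece of fit_ttv (a KeplerianOrbit, a LimbDarkLightCurve summed over planets and offset to a baseline of 1, and a Normal likelihood) also stands on its own. A minimal sketch with invented orbital numbers; the flat fake data exist only to make it runnable:

import numpy as np
import pymc3 as pm
import exoplanet as xo

np.random.seed(42)
t = np.linspace(-0.3, 0.3, 500)
yerr = 5e-4
y = 1.0 + yerr * np.random.randn(len(t))  # flat; real data would contain a transit

with pm.Model() as model:
    t0 = pm.Normal("t0", mu=0.0, sd=0.02)
    orbit = xo.orbits.KeplerianOrbit(period=10.0, t0=t0, b=0.3)
    light_curves = xo.LimbDarkLightCurve(np.array([0.3, 0.2])).get_light_curve(
        orbit=orbit, r=0.08, t=t)
    light_curve = pm.math.sum(light_curves, axis=-1) + 1

    pm.Normal("obs", mu=light_curve, sd=yerr, observed=y)
    map_soln = xo.optimize(start=model.test_point, verbose=False)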
Example #7
File: utils.py Project: megbedell/pmodes
def gp_fit(t, y, yerr, t_grid, integrated=False, exp_time=60.):
    # optimize kernel hyperparameters and return fit + predictions
    with pm.Model() as model:
        logS0 = pm.Normal("logS0", mu=0.4, sd=5.0, testval=np.log(np.var(y)))
        logw0 = pm.Normal("logw0", mu=-3.9, sd=0.1)
        logQ = pm.Normal("logQ", mu=3.5, sd=5.0)

        # Set up the kernel and GP
        kernel = terms.SHOTerm(log_S0=logS0, log_w0=logw0, log_Q=logQ)
        if integrated:
            kernel_int = terms.IntegratedTerm(kernel, exp_time)
            gp = GP(kernel_int, t, yerr**2)
        else:
            gp = GP(kernel, t, yerr**2)

        # Add a custom "potential" (log probability function) with the GP likelihood
        pm.Potential("gp", gp.log_likelihood(y))

    with model:
        map_soln = xo.optimize(start=model.test_point)
        mu, var = xo.eval_in_model(gp.predict(t_grid, return_var=True),
                                   map_soln)
        sd = np.sqrt(var)
        y_pred = xo.eval_in_model(gp.predict(t), map_soln)

    return map_soln, mu, sd, y_pred
Example #8
def get_map_soln(model, verbose=False, ignore_warnings=True):
    """
    Get the maximum a posteriori probability estimate of the parameters.

    Parameters
    ----------
    model : `~pymc3.model`
        The model object.

    verbose : bool, optional
        Print details of optimization.

    ignore_warnings : bool, optional
        Silence warnings.

    Returns
    -------
    map_soln : dict
        A dictionary with the maximum a posteriori estimates of the variables.
    """
    # Ignore warnings from theano, unless specified otherwise
    if ignore_warnings:
        warnings.filterwarnings(action='ignore',
                                category=FutureWarning,
                                module='theano')

    with model:
        # Fit for the maximum a posteriori parameters
        map_soln = xo.optimize(start=model.test_point, verbose=verbose)
        map_soln = xo.optimize(
            start=map_soln,
            vars=[model.f0, model.period, model.t0, model.r],
            verbose=verbose)
        map_soln = xo.optimize(start=map_soln,
                               vars=[model.rho_star],
                               verbose=verbose)
        map_soln = xo.optimize(start=map_soln, vars=[model.t14], verbose=verbose)
        map_soln = xo.optimize(start=map_soln, verbose=verbose)

    # Reset warning filter
    warnings.resetwarnings()

    return map_soln
Example #9
def loglike(t0):
    print("t0={0}".format(t0))
    h = copy.deepcopy(holds)
    h['t0m'] = t0
    m = getmodel(holds=h, mu=mu, sig=sig)
    with m:
        newmap_soln = xo.optimize(start=start, verbose=False)
        ll = m.logp(newmap_soln)
        #r[i] = np.exp(newmap_soln['logrm'])
    return ll
Example #10
    def find_optimum(self):
        """
        Optimize to find the MAP solution.

        Returns:
            map_soln (dict): a dictionary containing the optimized parameters.
        """

        with self.model:
            map_soln = xo.optimize(start=self.model.test_point)

        self.map_soln = map_soln
        success = True
        if self.map_soln["step1"] == self.steps[0]:
            success = False
        return map_soln, success
Example #11
    def rotation_model(self):
        peak = self.period_prior()
        x = self.lc.lcf.time
        y = self.lc.lcf.flux
        yerr = self.lc.lcf.flux_err
        with pm.Model() as model:
            # The mean flux of the time series
            mean = pm.Normal("mean", mu=0.0, sd=10.0)

            # A jitter term describing excess white noise
            logs2 = pm.Normal("logs2",
                              mu=2 * np.log(np.min(sigmaclip(yerr)[0])),
                              sd=1.0)

            # The parameters of the RotationTerm kernel
            logamp = pm.Normal("logamp", mu=np.log(np.var(y)), sd=5.0)
            logperiod = pm.Bound(pm.Normal,
                                 lower=np.log(self.min_period),
                                 upper=np.log(self.max_period))(
                                     "logperiod",
                                     mu=np.log(peak["period"]),
                                     sd=2.0)
            logQ0 = pm.Uniform("logQ0", lower=-15, upper=5)
            logdeltaQ = pm.Uniform("logdeltaQ", lower=-15, upper=5)
            mix = pm.Uniform("mix", lower=0, upper=1.0)

            # Track the period as a deterministic
            period = pm.Deterministic("period", tt.exp(logperiod))

            kernel = xo.gp.terms.RotationTerm(log_amp=logamp,
                                              period=period,
                                              log_Q0=logQ0,
                                              log_deltaQ=logdeltaQ,
                                              mix=mix)
            gp = xo.gp.GP(kernel, x, yerr**2 + tt.exp(logs2), J=4)

            # Compute the Gaussian Process likelihood and add it into the
            # PyMC3 model as a "potential"
            pm.Potential("loglike", gp.log_likelihood(y - mean))

            # Compute the mean model prediction for plotting purposes
            pm.Deterministic("pred", gp.predict())

            # Optimize to find the maximum a posteriori parameters
            map_soln = xo.optimize(start=model.test_point)
        return model, map_soln
Example #12
File: test.py Project: tagordon/k2rot_v2
                period=period,
                log_Q0=logQ0,
                log_deltaQ=logdQ,
                mix=mix
            )
            kernel += xo.gp.terms.SHOTerm(
                log_S0=logS0,
                log_w0=logw,
                log_Q=-np.log(np.sqrt(2))
            )
                      
            gp = xo.gp.GP(kernel, x, yerr**2 * np.ones_like(x), mean=mean, J=6)
            gp.marginal("gp", observed=y)

            start = model.test_point
            map_soln = xo.optimize(start=start, verbose=True)
            trace = pm.sample(
                tune=1000,
                draws=500,
                start=start,
                cores=28,
                chains=28,
                step=xo.get_dense_nuts_step(target_accept=0.9),
                progressbar=False
            )
            
        plotting.cornerplot(lc, trace, 'EPIC', smooth=True, truth_color=red)
        pl.savefig("{0}/{1}".format(outdir, cornerfile), dpi=200)
        
        acf_kwargs = {'color': 'k', 'linewidth': 3}
        pk_kwargs = {'color': red, 'linewidth': 1, 'linestyle': '--'}
Example #13
def worker(task):
    (i1, i2), data, model_kw, basename = task

    g = GaiaData(data)

    cache_filename = os.path.abspath(f'../cache/tmp-{basename}_{i1}-{i2}.fits')
    if os.path.exists(cache_filename):
        print(f"({pid}) cache filename exists for index range: "
              f"{cache_filename}")
        return cache_filename

    print(f"({pid}) setting up model")
    helper = ComovingHelper(g)

    niter = 0
    while niter < 10:
        try:
            model = helper.get_model(**model_kw)
            break
        except OSError:
            print(f"{pid} failed to compile - trying again in 2sec...")
            time.sleep(5)
            niter += 1
            continue
    else:
        print(f"{pid} never successfully compiled. aborting")
        import socket
        print(socket.gethostname(), socket.getfqdn(),
              os.path.exists("/cm/shared/sw/pkg/devel/gcc/7.4.0/bin/g++"))
        return ''

    print(f"({pid}) done init model - running {len(g)} stars")

    probs = np.full(helper.N, np.nan)
    for n in range(helper.N):
        with model:
            pm.set_data({
                'y': helper.ys[n],
                'Cinv': helper.Cinvs[n],
                'M': helper.Ms[n]
            })

            test_pt = {
                'vxyz': helper.test_vxyz[n],
                'r': helper.test_r[n],
                'w': np.array([0.5, 0.5])
            }
            try:
                print("starting optimize")
                res = xo.optimize(start=test_pt,
                                  progress_bar=False,
                                  verbose=False)

                print("done optimize - starting sample")
                trace = pm.sample(
                    start=res,
                    tune=2000,
                    draws=1000,
                    cores=1,
                    chains=1,
                    step=xo.get_dense_nuts_step(target_accept=0.95),
                    progressbar=False)
            except Exception as e:
                print(str(e))
                continue

            # print("done sample - computing prob")
            ll_fg = trace.get_values(model.group_logp)
            ll_bg = trace.get_values(model.field_logp)
            post_prob = np.exp(ll_fg - np.logaddexp(ll_fg, ll_bg))
            probs[n] = post_prob.sum() / len(post_prob)

    # write probs to cache filename
    tbl = at.Table()
    tbl['source_id'] = g.source_id
    tbl['prob'] = probs
    tbl.write(cache_filename)

    return cache_filename
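The membership probability at the end of worker is a two-component mixture posterior, p = exp(ll_fg) / (exp(ll_fg) + exp(ll_bg)), evaluated in log space so that large negative log-likelihoods don't underflow. The same arithmetic in isolation, with made-up numbers:

import numpy as np

# Per-sample log-likelihoods under the "group" and "field" components
ll_fg = np.array([-10.0, -8.0, -12.0])
ll_bg = np.array([-11.0, -9.5, -9.0])

# exp(ll_fg - logaddexp(ll_fg, ll_bg)) == exp(ll_fg) / (exp(ll_fg) + exp(ll_bg))
post_prob = np.exp(ll_fg - np.logaddexp(ll_fg, ll_bg))
print(post_prob.sum() / len(post_prob))  # average over posterior samples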
Example #14
        def run_fitting():
            times = self.light_curve_data_source.data['Time (BTJD)'].astype(
                np.float32)
            flux_errors = self.light_curve_data_source.data[
                'Normalized PDCSAP flux error']
            fluxes = self.light_curve_data_source.data[
                'Normalized PDCSAP flux']
            relative_times = self.light_curve_data_source.data['Time (days)']
            nan_indexes = np.union1d(
                np.argwhere(np.isnan(fluxes)),
                np.union1d(np.argwhere(np.isnan(times)),
                           np.argwhere(np.isnan(flux_errors))))
            fluxes = np.delete(fluxes, nan_indexes)
            flux_errors = np.delete(flux_errors, nan_indexes)
            times = np.delete(times, nan_indexes)
            relative_times = np.delete(relative_times, nan_indexes)
            with pm.Model() as model:
                # Stellar parameters
                mean = pm.Normal("mean", mu=0.0, sigma=10.0 * 1e-3)
                u = xo.distributions.QuadLimbDark("u")
                star_params = [mean, u]

                # Gaussian process noise model
                sigma = pm.InverseGamma("sigma",
                                        alpha=3.0,
                                        beta=2 * np.nanmedian(flux_errors))
                log_Sw4 = pm.Normal("log_Sw4", mu=0.0, sigma=10.0)
                log_w0 = pm.Normal("log_w0",
                                   mu=np.log(2 * np.pi / 10.0),
                                   sigma=10.0)
                kernel = xo.gp.terms.SHOTerm(log_Sw4=log_Sw4,
                                             log_w0=log_w0,
                                             Q=1.0 / 3)
                noise_params = [sigma, log_Sw4, log_w0]

                # Planet parameters
                log_ror = pm.Normal("log_ror",
                                    mu=0.5 * np.log(self_.depth),
                                    sigma=10.0 * 1e-3)
                ror = pm.Deterministic("ror", tt.exp(log_ror))
                depth = pm.Deterministic('Transit depth (relative flux)',
                                         tt.square(ror))
                planet_radius = pm.Deterministic('Planet radius (solar radii)',
                                                 ror * self_.star_radius)

                # Orbital parameters
                log_period = pm.Normal("log_period",
                                       mu=np.log(self_.period),
                                       sigma=1.0)
                t0 = pm.Normal('Transit epoch (BTJD)',
                               mu=self_.transit_epoch,
                               sigma=1.0)
                log_dur = pm.Normal("log_dur", mu=np.log(0.1), sigma=10.0)
                b = xo.distributions.ImpactParameter("b", ror=ror)

                period = pm.Deterministic('Transit period (days)',
                                          tt.exp(log_period))
                dur = pm.Deterministic('Transit duration (days)',
                                       tt.exp(log_dur))

                # Set up the orbit
                orbit = xo.orbits.KeplerianOrbit(period=period,
                                                 duration=dur,
                                                 t0=t0,
                                                 b=b,
                                                 r_star=self.star_radius)

                # We're going to track the implied density for reasons that will become clear later
                pm.Deterministic("rho_circ", orbit.rho_star)

                # Set up the mean transit model
                star = xo.LimbDarkLightCurve(u)

                def lc_model(t):
                    return mean + tt.sum(star.get_light_curve(
                        orbit=orbit, r=ror * self.star_radius, t=t),
                                         axis=-1)

                # Finally the GP observation model
                gp = xo.gp.GP(kernel,
                              times, (flux_errors**2) + (sigma**2),
                              mean=lc_model)
                gp.marginal("obs", observed=fluxes)

                # Double check that everything looks good - we shouldn't see any NaNs!
                print(model.check_test_point())

                # Optimize the model
                map_soln = model.test_point
                map_soln = xo.optimize(map_soln, [sigma])
                map_soln = xo.optimize(map_soln, [log_ror, b, log_dur])
                map_soln = xo.optimize(map_soln, noise_params)
                map_soln = xo.optimize(map_soln, star_params)
                map_soln = xo.optimize(map_soln)

            with model:
                gp_pred, lc_pred = xo.eval_in_model(
                    [gp.predict(), lc_model(times)], map_soln)

            x_fold = (times - map_soln['Transit epoch (BTJD)'] +
                      0.5 * map_soln['Transit period (days)']
                      ) % map_soln['Transit period (days)'] - 0.5 * map_soln[
                          'Transit period (days)']
            inds = np.argsort(x_fold)
            bokeh_document.add_next_tick_callback(
                partial(update_initial_fit_figure, fluxes, gp_pred, inds,
                        lc_pred, map_soln, relative_times, times, x_fold))

            self.bokeh_document.add_next_tick_callback(
                partial(fit, self, map_soln, model))
Example #15
    plt.plot(t, xo.eval_in_model(model.bkg_pred), ":k", alpha=0.5)
    plt.plot(t, xo.eval_in_model(model.rv_model_pred), label="model")

plt.legend(fontsize=10)
plt.xlim(t.min(), t.max())
plt.xlabel("time [days]")
plt.ylabel("radial velocity [m/s]")
_ = plt.title("initial model")
# -

# In this plot, the background is the dotted line, the individual planets are the dashed lines, and the full model is the blue line.
#
# It doesn't look amazing so let's fit for the maximum a posteriori parameters.

with model:
    map_soln = xo.optimize(start=model.test_point, vars=[trend])
    map_soln = xo.optimize(start=map_soln)

# +
plt.errorbar(x, y, yerr=yerr, fmt=".k")
plt.plot(t, map_soln["vrad_pred"], "--k", alpha=0.5)
plt.plot(t, map_soln["bkg_pred"], ":k", alpha=0.5)
plt.plot(t, map_soln["rv_model_pred"], label="model")

plt.legend(fontsize=10)
plt.xlim(t.min(), t.max())
plt.xlabel("time [days]")
plt.ylabel("radial velocity [m/s]")
_ = plt.title("MAP model")
# -
Example #16
def spectrum_theano_model(obs):
    """ Build a spectrum estimate, old theano model """
    dat = (obs.data / obs.basic_model)
    dat /= obs.vsr_grad_model
    err = (obs.error / obs.basic_model)
    err /= obs.vsr_grad_model

    y = (dat).ravel()
    ye = (err).ravel()
    ye[obs.cosmic_rays.ravel()] = 1e10
    ye[ye / y > 0.01] = 1e10
    ye[ye < 1e-4] = 1e10

    in_transit = obs.in_transit

    xshift = obs.xshift
    xshift -= np.mean(xshift)
    xshift /= (np.max(xshift) - np.min(xshift))
    xshift = np.atleast_3d(xshift).transpose([1, 0, 2]) * np.ones(
        obs.data.shape)

    # This will help us mask the transit later.
    cadence_mask = (np.atleast_3d(~in_transit).transpose([1, 0, 2]) *
                    np.ones(obs.data.shape, bool)).ravel()

    with pm.Model() as model:
        # Track each dimension in theano tensors
        X_t = tt.as_tensor(obs.X[~in_transit]).flatten()
        Y_t = tt.as_tensor(obs.Y[~in_transit]).flatten()
        T_t = tt.as_tensor(obs.T[~in_transit]).flatten()
        xshift_t = tt.as_tensor(xshift[~in_transit]).flatten()
        g_spat_t = tt.as_tensor(
            (np.atleast_3d(obs.spec_grad_simple).transpose([0, 2, 1]) *
             np.ones(obs.data.shape))[~in_transit]).flatten()

        # Stellar spectrum gradient
        sg = pm.Normal("spectrum_gradient",
                       mu=obs.spec_grad_simple,
                       sd=np.ones(obs.data.shape[2]) * 1,
                       testval=obs.spec_grad_simple,
                       shape=obs.data.shape[2])

        # Reshaping to the correct dimensions of the image
        sg_3d = sg + tt.zeros(obs.data[~in_transit].shape)
        sg_flat = sg_3d.flatten()

        # Design matrix with all the shifts and tilts
        A = tt.stack([

            # Zeropoints and trends
            Y_t,
            T_t,
            Y_t * X_t,
            Y_t * X_t * T_t,

            # Spectrum tilts
            sg_flat,
            sg_flat * Y_t,
            sg_flat * X_t * Y_t,

            # Spectrum shifts
            sg_flat * xshift_t,
            sg_flat * xshift_t**2,
            sg_flat * xshift_t * X_t,

            # Spectrum stretches
            tt.abs_(sg_flat) * Y_t,
            tt.abs_(sg_flat) * xshift_t * X_t,
            tt.ones_like(sg_flat),
        ]).T

        # Linear algebra to find the best fitting shifts
        sigma_w_inv = A.T.dot(A / ye[cadence_mask, None]**2)
        B = A.T.dot((y[cadence_mask] / ye[cadence_mask]**2))
        w = pm.Deterministic('w', tt.slinalg.solve(sigma_w_inv, B))
        y_model = A.dot(w)

        pm.Normal("obs",
                  mu=y_model,
                  sd=ye[cadence_mask],
                  observed=y[cadence_mask])
        # Optimize to find the best fitting spectral gradient
        map_soln = xo.optimize(start=model.test_point, vars=[sg])

    sg_n = (np.atleast_3d(map_soln['spectrum_gradient']) *
            np.ones(dat.shape).transpose([0, 2, 1])).transpose([0, 2, 1])
    A = np.vstack([
        obs.Y.ravel(),
        obs.T.ravel(),
        obs.Y.ravel() * obs.X.ravel(),
        obs.Y.ravel() * obs.X.ravel() * obs.T.ravel(),
        sg_n.ravel(), (sg_n * obs.Y).ravel(), (sg_n * obs.X * obs.Y).ravel(),
        (sg_n * xshift).ravel(), (sg_n * xshift**2).ravel(),
        (sg_n * xshift * obs.Y).ravel(), (np.abs(sg_n) * obs.Y).ravel(),
        (np.abs(sg_n) * obs.Y * xshift).ravel(),
        np.ones(np.prod(dat.shape))
    ]).T
    model_n = A.dot(map_soln['w']).reshape(obs.data.shape)
    return model_n
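The "linear algebra to find the best fitting shifts" block in spectrum_theano_model is a weighted least-squares solve through the normal equations, (A^T C^-1 A) w = A^T C^-1 y with C = diag(ye^2). The same computation in plain numpy on fabricated data:

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(100, 3))           # design matrix
w_true = np.array([1.0, -2.0, 0.5])
ye = 0.1 * np.ones(100)                 # per-point uncertainties
y = A.dot(w_true) + ye * rng.normal(size=100)

sigma_w_inv = A.T.dot(A / ye[:, None] ** 2)  # A^T C^-1 A
B = A.T.dot(y / ye ** 2)                     # A^T C^-1 y
w = np.linalg.solve(sigma_w_inv, B)          # best-fit weights, close to w_true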
Example #17
    def hybrid_model(self):
        def submodel1(x, y, yerr, parent):
            with pm.Model(name="rotation_", model=parent) as submodel:
                # The mean flux of the time series
                mean1 = pm.Normal("mean1", mu=0.0, sd=10.0)

                # A jitter term describing excess white noise
                logs21 = pm.Normal("logs21", mu=np.log(np.mean(yerr)), sd=2.0)

                # The parameters of the RotationTerm kernel
                logamp = pm.Normal("logamp", mu=np.log(np.var(y)), sd=5.0)
                #logperiod = pm.Uniform("logperiod", lower=np.log(vgp.min_period), upper=np.log(vgp.max_period))
                logperiod = pm.Bound(pm.Normal,
                                     lower=np.log(self.min_period),
                                     upper=np.log(self.max_period))(
                                         "logperiod",
                                         mu=np.log(peak["period"]),
                                         sd=1.0)
                logQ0 = pm.Uniform("logQ0", lower=-15, upper=5)
                logdeltaQ = pm.Uniform("logdeltaQ", lower=-15, upper=5)
                mix = pm.Uniform("mix", lower=0, upper=1.0)

                # Track the period as a deterministic
                period = pm.Deterministic("period", tt.exp(logperiod))

                kernel1 = xo.gp.terms.RotationTerm(log_amp=logamp,
                                                   period=period,
                                                   log_Q0=logQ0,
                                                   log_deltaQ=logdeltaQ,
                                                   mix=mix)

                gp1 = xo.gp.GP(kernel1, x, yerr**2 + tt.exp(logs21))

                # Compute the Gaussian Process likelihood and add it into the
                # PyMC3 model as a "potential"
                loglike1 = gp1.log_likelihood(y - mean1)
                #pred1  = pm.Deterministic("pred1", gp1.predict())

            return logperiod, logQ0, gp1, loglike1

        def submodel2(x, y, yerr, parent):
            with pm.Model(name="granulation", model=parent) as submodel:
                # The parameters of the SHOTerm kernel for non-periodic granulation
                mean2 = pm.Normal("mean2", mu=0.0, sd=10.0)
                #logz = pm.Uniform("logz", lower=np.log(2 * np.pi / 4), upper=np.log(2*np.pi/vgp.min_period))
                #sigma = pm.HalfCauchy("sigma", 3.0)
                #logw0 = pm.Normal("logw0", mu=logz, sd=2.0)
                logw0 = pm.Bound(pm.Normal,
                                 lower=np.log(2 * np.pi / 2.5),
                                 upper=np.log(2 * np.pi / self.min_period))(
                                     "logw0", mu=np.log(2 * np.pi / 0.8), sd=1)
                logSw4 = pm.Normal("logSw4",
                                   mu=np.log(np.var(y) * (2 * np.pi / 2)**4),
                                   sd=5)
                logs22 = pm.Normal("logs22", mu=np.log(np.mean(yerr)), sd=2.0)
                logQ = pm.Bound(pm.Normal,
                                lower=np.log(1 / 2),
                                upper=np.log(2))("logQ",
                                                 mu=np.log(1 / np.sqrt(2)),
                                                 sd=1)

                kernel2 = xo.gp.terms.SHOTerm(log_Sw4=logSw4,
                                              log_w0=logw0,
                                              log_Q=logQ)
                gp2 = xo.gp.GP(kernel2, x, yerr**2 + tt.exp(logs22))

                loglike2 = gp2.log_likelihood(y - mean2)

            return logw0, logQ, gp2, loglike2

        peak = self.period_prior()
        x = self.lc.lcf.time
        y = self.lc.lcf.flux
        yerr = self.lc.lcf.flux_err

        y1_old = self.lc.lcf.flatten(window_length=self.window_width,
                                     return_trend=True)[1].flux
        y2_old = y - y1_old
        y2 = sigmaclip(y2_old)[0]
        idx = np.in1d(y2_old, y2)
        self.y_new = y[idx]
        self.yerr_new = yerr[idx]
        self.x_new = x[idx]

        y1_old_ave = np.nanmean(y1_old[idx])
        #y1_old_std = np.nanstd(y1_old[idx])
        self.y1 = (y1_old[idx] - y1_old_ave) * 1e6
        self.y1_err = window_rms(yerr, self.window_width)[idx] * 1e6
        #y1 = y1[idx]

        y2_old = self.y_new - y1_old[idx]
        y2_old_ave = np.nanmean(y2_old)
        #y2_old_std = np.nanstd(y2_old)
        self.y2 = (y2_old - y2_old_ave) * 1e6  #/y2_old_ave
        y2_err = np.sqrt(self.yerr_new**2 -
                         window_rms(yerr, self.window_width)[idx]**2)
        y2_err[np.isnan(y2_err)] = 0
        self.y2_err = y2_err * 1e6  #/ y2_old_ave
        #y2 = self.lc.lcf.flatten(window_length=401, return_trend=False).flux

        y_class = [self.y1, self.y2]
        yerr_class = [self.y1_err, self.y2_err]
        submodel_class = [submodel1, submodel2]
        with pm.Model() as model:
            gp_class = []
            loglikes = []
            logtaus = []
            logQs = []
            for i in range(1, 3):
                logtau, log_Q, gp, loglike = submodel_class[i - 1](
                    self.x_new, y_class[i - 1], yerr_class[i - 1], model)
                gp_class.append(gp)
                loglikes.append(loglike)
                logtaus.append(logtau)
                logQs.append(log_Q)
            #loglikes = tt.stack(loglikes)
            #pm.Potential("loglike", pm.math.logsumexp(loglikes))
            pm.Potential("loglike_rot", loglikes[0])
            pm.Potential("loglike_gra", loglikes[1])
            predrot = pm.Deterministic(
                "pred_rot", gp_class[0].predict() / 1e6 + y1_old_ave)
            predgra = pm.Deterministic(
                "pred_gra", gp_class[1].predict() / 1e6 + y2_old_ave)
            predtot = pm.Deterministic(
                "pred_tot", gp_class[0].predict() / 1e6 + y1_old_ave +
                gp_class[1].predict() / 1e6 + y2_old_ave)
            # Optimize to find the maximum a posteriori parameters

            map_soln = xo.optimize(start=model.test_point,
                                   vars=[logtaus[0], logQs[0]])
            map_soln = xo.optimize(start=map_soln,
                                   vars=[logtaus[1], logQs[1]])
            map_soln = xo.optimize(start=map_soln)
        return model, map_soln
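hybrid_model composes two named submodels onto one parent with pm.Model(name=..., model=parent), so each submodel's variables are registered on the parent under a name prefix and a single optimization sees them all. A minimal sketch of just that mechanism:

import pymc3 as pm

with pm.Model() as parent:
    with pm.Model(name="sub", model=parent):
        # Registered on the parent under a "sub_"-prefixed name
        x = pm.Normal("x", mu=0.0, sd=1.0)

print(parent.named_vars)  # includes the prefixed Normal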
Example #18
import pymc3 as pm
import exoplanet as xo
import os

import src.close.rv_astro_more.model as m

with m.model:
    map_sol = xo.optimize(vars=[m.a_ang, m.MAb, m.incl])
    print(map_sol)
    map_sol1 = xo.optimize(start=map_sol, vars=[m.incl, m.Omega])
    print(map_sol1)

with m.model:
    trace = pm.sample(
        tune=2500,
        draws=3000,
        # start=map_sol,
        chains=4,
        step=xo.get_dense_nuts_step(target_accept=0.9),
    )

chaindir = "chains/close/rv_astro_more/"

if not os.path.isdir(chaindir):
    os.makedirs(chaindir)

# save the samples as a pymc3 object
pm.save_trace(trace, directory=chaindir, overwrite=True)

# and as a CSV, just in case the model spec
# changes and we have trouble reloading things
# (completion sketch: the original is truncated here, and the CSV filename
# below is assumed rather than taken from the project)
df = pm.trace_to_dataframe(trace)
df.to_csv(os.path.join(chaindir, "samples.csv"))
Example #19
    def gp_rotation(self,
                    init_period=None,
                    tune=2000,
                    draws=2000,
                    prediction=True,
                    cores=None):
        """
        Calculate a rotation period using a Gaussian process method.

        Args:
            init_period (Optional[float]): Your initial guess for the rotation
                period. The default is the Lomb-Scargle period.
            tune (Optional[int]): The number of tuning samples. Default is
                2000.
            draws (Optional[int]): The number of samples. Default is 2000.
            prediction (Optional[Bool]): If true, a prediction will be
                calculated for each sample. This is useful for plotting the
                prediction but will slow down the whole calculation.
            cores (Optional[int]): The number of cores to use. Default is
                None (for running one process).

        Returns:
            gp_period (float): The GP rotation period in days.
            errp (float): The upper uncertainty on the rotation period.
            errm (float): The lower uncertainty on the rotation period.
            logQ (float): The Q factor.
            Qerrp (float): The upper uncertainty on the Q factor.
            Qerrm (float): The lower uncertainty on the Q factor.
        """
        self.prediction = prediction

        x = np.array(self.time, dtype=float)
        # Median of data must be zero
        y = np.array(self.flux, dtype=float) - np.median(self.flux)
        yerr = np.array(self.flux_err, dtype=float)

        if init_period is None:
            # Calculate ls period
            init_period = self.ls_rotation()

        with pm.Model() as model:

            # The mean flux of the time series
            mean = pm.Normal("mean", mu=0.0, sd=10.0)

            # A jitter term describing excess white noise
            logs2 = pm.Normal("logs2", mu=2 * np.log(np.min(yerr)), sd=5.0)

            # The parameters of the RotationTerm kernel
            logamp = pm.Normal("logamp", mu=np.log(np.var(y)), sd=5.0)
            logperiod = pm.Normal("logperiod", mu=np.log(init_period), sd=5.0)
            logQ0 = pm.Normal("logQ0", mu=1.0, sd=10.0)
            logdeltaQ = pm.Normal("logdeltaQ", mu=2.0, sd=10.0)
            mix = pm.Uniform("mix", lower=0, upper=1.0)

            # Track the period as a deterministic
            period = pm.Deterministic("period", tt.exp(logperiod))

            # Set up the Gaussian Process model
            kernel = xo.gp.terms.RotationTerm(log_amp=logamp,
                                              period=period,
                                              log_Q0=logQ0,
                                              log_deltaQ=logdeltaQ,
                                              mix=mix)
            gp = xo.gp.GP(kernel, x, yerr**2 + tt.exp(logs2), J=4)

            # Compute the Gaussian Process likelihood and add it into the
            # PyMC3 model as a "potential"
            pm.Potential("loglike", gp.log_likelihood(y - mean))

            # Compute the mean model prediction for plotting purposes
            if prediction:
                pm.Deterministic("pred", gp.predict())

            # Optimize to find the maximum a posteriori parameters
            self.map_soln = xo.optimize(start=model.test_point)
            # print(self.map_soln)
            # print(xo.utils.eval_in_model(model.logpt, self.map_soln))
            # assert 0

            # Sample from the posterior
            np.random.seed(42)
            sampler = xo.PyMC3Sampler()
            with model:
                print("sampling...")
                sampler.tune(tune=tune,
                             start=self.map_soln,
                             step_kwargs=dict(target_accept=0.9),
                             cores=cores)
                trace = sampler.sample(draws=draws, cores=cores)

            # Save samples
            samples = pm.trace_to_dataframe(trace)
            self.samples = samples

            self.period_samples = trace["period"]
            self.gp_period = np.median(self.period_samples)
            lower = np.percentile(self.period_samples, 16)
            upper = np.percentile(self.period_samples, 84)
            self.errm = self.gp_period - lower
            self.errp = upper - self.gp_period
            self.logQ = np.median(trace["logQ0"])
            upperQ = np.percentile(trace["logQ0"], 84)
            lowerQ = np.percentile(trace["logQ0"], 16)
            self.Qerrp = upperQ - self.logQ
            self.Qerrm = self.logQ - lowerQ

        self.trace = trace

        return self.gp_period, self.errp, self.errm, self.logQ, self.Qerrp, \
            self.Qerrm
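The summaries above turn posterior samples into a median with asymmetric one-sigma error bars using the 16th and 84th percentiles. The recipe in isolation, on fake samples:

import numpy as np

samples = np.random.default_rng(0).normal(10.0, 0.5, size=4000)  # e.g. trace["period"]
value = np.median(samples)
errm = value - np.percentile(samples, 16)  # lower uncertainty
errp = np.percentile(samples, 84) - value  # upper uncertainty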
Example #20
    rho_save_sky = pm.Deterministic("rhoSaveSky", rho)
    theta_save_sky = pm.Deterministic("thetaSaveSky", theta)

    rho, theta = orbit.get_relative_angles(t_data, parallax)
    rho_save_data = pm.Deterministic("rhoSaveData", rho)
    theta_save_data = pm.Deterministic("thetaSaveData", theta)

    # save RV plots
    t_dense = pm.Deterministic("tDense", xs_phase * P + jd0)
    rv1_dense = pm.Deterministic(
        "RV1Dense",
        conv * orbit.get_star_velocity(t_dense - jd0)[2] + gamma_keck)
    rv2_dense = pm.Deterministic(
        "RV2Dense",
        conv * orbit.get_planet_velocity(t_dense - jd0)[2] + gamma_keck)

with model:
    map_sol0 = xo.optimize(vars=[a_ang, phi])
    map_sol1 = xo.optimize(map_sol0, vars=[a_ang, phi, omega, Omega])
    map_sol2 = xo.optimize(map_sol1,
                           vars=[a_ang, logP, phi, omega, Omega, incl, e])
    map_sol3 = xo.optimize(map_sol2)

# now let's actually explore the posterior for real
sampler = xo.PyMC3Sampler(finish=500, chains=4)
with model:
    burnin = sampler.tune(tune=2000, step_kwargs=dict(target_accept=0.9))
    trace = sampler.sample(draws=3000)

pm.backends.ndarray.save_trace(trace, directory="current", overwrite=True)
Example #21
    # Compute the model
    uvT = tt.reshape(tt.dot(u, vT), (N * K, 1))
    f_model = tt.reshape(ts.dot(D, uvT), (M * Kobs, ))

    # Track some values for plotting later
    pm.Deterministic("f_model", f_model)

    # Save our initial guess
    f_model_guess = xo.eval_in_model(f_model)

    # The likelihood function assuming known Gaussian uncertainty
    pm.Normal("obs", mu=f_model, sd=ferr, observed=f)

# Maximum likelihood solution
with model:
    map_soln = xo.optimize()

# Plot some stuff
fig, ax = plt.subplots(1)
ax.plot(lam, I0)
ax.plot(lam, map_soln["vT"].reshape(-1), 'o')

fig, ax = plt.subplots(M, figsize=(3, 8), sharex=True, sharey=True)
F = f.reshape(M, Kobs)
F_model = map_soln["f_model"].reshape(M, Kobs)
for m in range(M):
    ax[m].plot(lam[Kpad:-Kpad], F[m] / F[m][0])
    ax[m].plot(lam[Kpad:-Kpad], F_model[m] / F[m][0])
    ax[m].axis('off')

ntheta = 12
Example #22
    def build_model(mask=None, start=None):
        ''' Build a PYMC3 model

        Parameters
        ----------
        mask : np.ndarray
            Boolean array to mask cadences. Cadences that are False will be excluded
            from the model fit
        start : dict
            MAP Solution from exoplanet

        Returns
        -------
        model : pymc3.model.Model
            A pymc3 model
        map_soln : dict
            Best fit solution
        '''

        if mask is None:
            mask = np.ones(len(time), dtype=bool)

        with pm.Model() as model:

            # Parameters for the stellar properties
            mean = pm.Normal("mean", mu=0.0, sd=10.0)
            u_star = xo.distributions.QuadLimbDark("u_star")

            m_star = pm.Normal("m_star", mu=M_star[0], sd=M_star[1])
            r_star = pm.Normal("r_star", mu=R_star[0], sd=R_star[1])
            t_star = pm.Normal("t_star", mu=T_star[0], sd=T_star[1])

            # Prior to require physical parameters
            pm.Potential("m_star_prior", tt.switch(m_star > 0, 0, -np.inf))
            pm.Potential("r_star_prior", tt.switch(r_star > 0, 0, -np.inf))

            # Orbital parameters for the planets
            logP = pm.Normal("logP",
                             mu=np.log(period_value),
                             sd=0.01,
                             shape=shape)
            t0 = pm.Normal("t0", mu=t0_value, sd=0.01, shape=shape)
            b = pm.Uniform("b", lower=0, upper=1, testval=0.5, shape=shape)
            logr = pm.Normal("logr",
                             sd=1.0,
                             mu=0.5 * np.log(np.array(depth_value)) +
                             np.log(R_star[0]),
                             shape=shape)
            r_pl = pm.Deterministic("r_pl", tt.exp(logr))
            ror = pm.Deterministic("ror", r_pl / r_star)

            # Tracking planet parameters
            period = pm.Deterministic("period", tt.exp(logP))

            # Orbit model
            orbit = xo.orbits.KeplerianOrbit(r_star=r_star,
                                             m_star=m_star,
                                             period=period,
                                             t0=t0,
                                             b=b)

            incl = pm.Deterministic('incl', orbit.incl)
            a = pm.Deterministic('a', orbit.a)
            teff = pm.Deterministic('teff', t_star * tt.sqrt(0.5 * (1 / a)))

            # Compute the model light curve using starry
            light_curves = xo.StarryLightCurve(u_star).get_light_curve(
                orbit=orbit, r=r_pl, t=time[mask], texp=texp) * 1e3
            light_curve = pm.math.sum(light_curves, axis=-1) + mean
            pm.Deterministic("light_curves", light_curves)

            # GP
            # --------
            logs2 = pm.Normal("logs2",
                              mu=np.log(1e-4 * np.var(raw_flux[mask])),
                              sd=10)
            logsigma = pm.Normal("logsigma",
                                 mu=np.log(np.std(raw_flux[mask])),
                                 sd=10)
            logrho = pm.Normal("logrho", mu=np.log(150), sd=10)
            kernel = xo.gp.terms.Matern32Term(log_rho=logrho,
                                              log_sigma=logsigma)
            gp = xo.gp.GP(kernel, time[mask],
                          tt.exp(logs2) + raw_flux_err[mask]**2)

            # Motion model
            #------------------
            A = tt.dot(X_pld[mask].T, gp.apply_inverse(X_pld[mask]))
            B = tt.dot(X_pld[mask].T, gp.apply_inverse(raw_flux[mask, None]))
            C = tt.slinalg.solve(A, B)
            motion_model = pm.Deterministic("motion_model",
                                            tt.dot(X_pld[mask], C)[:, 0])

            # Likelihood
            #------------------
            pm.Potential("obs",
                         gp.log_likelihood(raw_flux[mask] - motion_model))

            # gp predicted flux
            gp_pred = gp.predict()
            pm.Deterministic("gp_pred", gp_pred)
            pm.Deterministic("weights", C)

            # Optimize
            #------------------
            if start is None:
                start = model.test_point

            map_soln = xo.optimize(start=start, vars=[logrho, logsigma])
            map_soln = xo.optimize(start=map_soln, vars=[logr])
            map_soln = xo.optimize(start=map_soln, vars=[logs2])
            map_soln = xo.optimize(start=map_soln,
                                   vars=[logrho, logsigma, logs2, logr])
            map_soln = xo.optimize(start=map_soln, vars=[mean, logr])
            map_soln = xo.optimize(start=map_soln, vars=[logP, t0])
            map_soln = xo.optimize(start=map_soln, vars=[b])
            map_soln = xo.optimize(start=map_soln, vars=[u_star])
            map_soln = xo.optimize(start=map_soln,
                                   vars=[logrho, logsigma, logs2])
            map_soln = xo.optimize(start=map_soln)

            return model, map_soln, gp
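The "motion model" in build_model is generalized least squares with the GP covariance: gp.apply_inverse multiplies by K^-1, so C solves (X^T K^-1 X) C = X^T K^-1 y. A dense numpy sketch of the same solve, with a diagonal toy covariance standing in for the GP:

import numpy as np

rng = np.random.default_rng(1)
X = rng.normal(size=(80, 5))        # design matrix (the PLD regressors)
K = 0.01 * np.eye(80)               # noise covariance (GP + white noise)
y = X @ rng.normal(size=5) + rng.multivariate_normal(np.zeros(80), K)

A = X.T @ np.linalg.solve(K, X)     # X^T K^-1 X (cf. gp.apply_inverse(X_pld))
B = X.T @ np.linalg.solve(K, y)     # X^T K^-1 y
C = np.linalg.solve(A, B)           # regression weights
motion_model = X @ C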
Example #23
def build_model(mask=None, start=None):

    with pm.Model() as model:

        # The baseline flux
        mean = pm.Normal("mean", mu=0.0, sd=0.00001)

        # The time of a reference transit for each planet
        t0 = pm.Normal("t0", mu=t0s, sd=1.0, shape=1)

        # The log period; also tracking the period itself
        logP = pm.Normal("logP", mu=np.log(periods), sd=0.01, shape=1)

        rho_star = pm.Normal("rho_star", mu=0.14, sd=0.01, shape=1)
        r_star = pm.Normal("r_star", mu=2.7, sd=0.01, shape=1)

        period = pm.Deterministic("period", pm.math.exp(logP))

        # The Kipping (2013) parameterization for quadratic limb darkening parameters
        u = xo.distributions.QuadLimbDark("u", testval=np.array([0.3, 0.2]))

        r = pm.Uniform("r", lower=0.01, upper=0.3, shape=1, testval=0.15)

        b = xo.distributions.ImpactParameter("b", ror=r, shape=1, testval=0.5)

        # Transit jitter & GP parameters
        logs2 = pm.Normal("logs2", mu=np.log(np.var(y)), sd=10)
        logw0 = pm.Normal("logw0", mu=0, sd=10)
        logSw4 = pm.Normal("logSw4", mu=np.log(np.var(y)), sd=10)

        # Set up a Keplerian orbit for the planets
        orbit = xo.orbits.KeplerianOrbit(period=period, t0=t0, b=b,
                                         rho_star=rho_star, r_star=r_star)

        # Compute the model light curve using starry
        light_curves = xo.LimbDarkLightCurve(u).get_light_curve(
            orbit=orbit, r=r, t=t
        )
        light_curve = pm.math.sum(light_curves, axis=-1) + mean

        # Here we track the value of the model light curve for plotting
        # purposes
        pm.Deterministic("light_curves", light_curves)

        kernel = xo.gp.terms.SHOTerm(log_Sw4=logSw4, log_w0=logw0,
                                     Q=1 / np.sqrt(2))
        gp = xo.gp.GP(kernel, t, tt.exp(logs2) + tt.zeros(len(t)),
                      mean=light_curve)
        gp.marginal("gp", observed=y)
        pm.Deterministic("gp_pred", gp.predict())

        # The likelihood function assuming known Gaussian uncertainty
        pm.Normal("obs", mu=light_curve, sd=yerr, observed=y)

        # Fit for the maximum a posteriori parameters given the simulated
        # dataset
        map_soln = xo.optimize(start=model.test_point)

        return model, map_soln

model, map_soln = build_model()

gp_mod = map_soln["gp_pred"] + map_soln["mean"]
plt.clf()
plt.plot(t, y, ".k", ms=4, label="data")
plt.plot(t, gp_mod, lw=1, label="gp model")
plt.plot(t, map_soln["light_curves"], lw=1, label="transit model")
plt.xlim(t.min(), t.max())
plt.ylabel("relative flux")
plt.xlabel("time [days]")
plt.legend(fontsize=10)
_ = plt.title("map model")

np.random.seed(42)
with model:
    trace = pm.sample(
        tune=3000,
        draws=3000,
        start=map_soln,
        cores=2,
        chains=2,
        step=xo.get_dense_nuts_step(target_accept=0.9),
    )
    
    
pm.summary(trace, varnames=["period", "t0", "r", "b", "u", "mean",
                            "rho_star", "logw0", "logSw4", "logs2"])


import corner

samples = pm.trace_to_dataframe(trace, varnames=["period", "r"])
truth = np.concatenate(
    xo.eval_in_model([model.period, model.r], model.test_point, model=model)
)
_ = corner.corner(
    samples,
    truths=truth,
    labels=["period 1", "radius 1"],
)


# Compute the GP prediction
gp_mod = np.median(trace["gp_pred"] + trace["mean"][:, None], axis=0)

# Get the posterior median orbital parameters
p = np.median(trace["period"])
t0 = np.median(trace["t0"])

# Plot the folded data
x_fold = (t - t0 + 0.5 * p) % p - 0.5 * p
plt.plot(x_fold, y - gp_mod, ".k", label="data", zorder=-1000)

# Overplot the phase binned light curve
bins = np.linspace(-0.41, 0.41, 50)
denom, _ = np.histogram(x_fold, bins)
num, _ = np.histogram(x_fold, bins, weights=y)
denom[num == 0] = 1.0
plt.plot(0.5 * (bins[1:] + bins[:-1]), num / denom, "o", color="C2", label="binned")

# Plot the folded model
inds = np.argsort(x_fold)
inds = inds[np.abs(x_fold)[inds] < 0.3]
pred = trace["light_curves"][:, inds, 0]
pred = np.percentile(pred, [16, 50, 84], axis=0)
plt.plot(x_fold[inds], pred[1], color="C1", label="model")
art = plt.fill_between(
    x_fold[inds], pred[0], pred[2], color="C1", alpha=0.5, zorder=1000
)
art.set_edgecolor("none")

# Annotate the plot with the planet's period
txt = "period = {0:.5f} +/- {1:.5f} d".format(
    np.mean(trace["period"]), np.std(trace["period"])
)
plt.annotate(
    txt,
    (0, 0),
    xycoords="axes fraction",
    xytext=(5, 5),
    textcoords="offset points",
    ha="left",
    va="bottom",
    fontsize=12,
)

plt.legend(fontsize=10, loc=4)
plt.xlim(-0.5 * p, 0.5 * p)
plt.xlabel("time since transit [days]")
plt.ylabel("de-trended flux")
plt.xlim(-0.3, 0.3);
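The fold near the top of this plotting block maps each observation time to its offset from the nearest transit: x_fold = (t - t0 + p/2) mod p - p/2, which always lands in [-p/2, p/2). The arithmetic on toy numbers:

import numpy as np

p, t0 = 10.0, 2.0  # period [days] and transit epoch
t = np.array([0.0, 2.0, 7.0, 12.0, 31.5])
x_fold = (t - t0 + 0.5 * p) % p - 0.5 * p
print(x_fold)  # [-2.   0.  -5.   0.  -0.5]; times 2 and 12 sit exactly on transits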
Example #24
                    # calculate light curves
                    light_curves[j] = exoSLC.get_light_curve(orbit=orbit, r=rp[npl], t=t_[m_], oversample=oversample)
                    summed_light_curve[j] = pm.math.sum(light_curves[j], axis=-1) + flux0[j]*T.ones(len(t_[m_]))
                    model_flux[j] = pm.Deterministic('model_flux_{0}'.format(j), summed_light_curve[j])

                    # here's the GP (w/ kernel by season)
                    gp[j] = exo.gp.GP(kernel[q%4], t_[m_], T.exp(logvar[j])*T.ones(len(t_[m_])))
                
                
                    # add custom potential (log-prob fxn) with the GP likelihood
                    pm.Potential('obs_{0}'.format(j), gp[j].log_likelihood(f_[m_] - model_flux[j]))
                
                
        with hbm_model:
            hbm_map = exo.optimize(start=hbm_model.test_point, vars=[flux0, logvar])

            if local_trend_type[npl][ng-1] == "linear":
                hbm_map = exo.optimize(start=hbm_map, vars=[C0, C1]) 
            if local_trend_type[npl][ng-1] == "quadratic":
                hbm_map = exo.optimize(start=hbm_map, vars=[C0, C1, C2]) 
            if local_trend_type[npl][ng-1] == "sinusoid":
                hbm_map = exo.optimize(start=hbm_map, vars=[C0, C1, A, B])
            
                            
        # sample from the posterior
        with hbm_model:
            hbm_trace = pm.sample(tune=3000, draws=1000, start=hbm_map, chains=2,
                                  step=exo.get_dense_nuts_step(target_accept=0.9))

        # save the results
Example #25
    def _optimize(self, start=None, verbose=True):
        with self._model as model:
            if start is None:
                start = model.test_point

            map_soln = xo.optimize(start=start, vars=[model.mean], verbose=verbose)
            map_soln = xo.optimize(start=map_soln,
                                   vars=[model.logrho, model.logsigma, model.mean],
                                   verbose=verbose)
            map_soln = xo.optimize(start=map_soln,
                                   vars=[model.logrho, model.logsigma, model.logs2],
                                   verbose=verbose)
            map_soln = xo.optimize(start=map_soln, vars=[model.logP, model.rprs, model.t0], verbose=verbose)
            map_soln = xo.optimize(start=map_soln, vars=[model.u], verbose=verbose)
            map_soln = xo.optimize(start=map_soln, vars=[model.ep, model.dp], verbose=verbose)
            map_soln = xo.optimize(start=map_soln, vars=[model.inclination], verbose=verbose)
            map_soln = xo.optimize(start=map_soln, vars=[model.albedo], verbose=verbose)
            map_soln = xo.optimize(start=map_soln, vars=[model.albedo, model.dT, model.phase_shift], verbose=verbose)
            map_soln = xo.optimize(start=map_soln, vars=[model.logP, model.rprs, model.t0, model.u], verbose=verbose)
            map_soln = xo.optimize(start=map_soln,
                                   vars=[model.logrho, model.logsigma, model.logs2],
                                   verbose=verbose)
            map_soln = xo.optimize(start=map_soln, vars=[model.albedo, model.dT], verbose=verbose)
            map_soln = xo.optimize(start=map_soln, vars=[model.ep, model.dp], verbose=verbose)
            map_soln = xo.optimize(start=map_soln,
                                   vars=[model.albedo, model.inclination, model.t0,
                                         model.logP, model.u, model.ep, model.dp],
                                   verbose=verbose)
            map_soln = xo.optimize(start=map_soln,
                                   vars=[model.ep, model.dp, model.phase_shift,
                                         model.dT, model.albedo, model.inclination,
                                         model.t0, model.logP, model.rprs,
                                         model.logrho, model.logsigma, model.logs2,
                                         model.mean, model.u],
                                   verbose=verbose)

            return map_soln
Example #26
def build_model(mask=None, start=None):

    with pm.Model() as model:

        # The baseline flux
        mean = pm.Normal("mean", mu=0.0, sd=0.00001)

        # The time of a reference transit for each planet
        t0 = pm.Normal("t0", mu=t0s, sd=1.0, shape=1)

        # The log period; also tracking the period itself
        logP = pm.Normal("logP", mu=np.log(periods), sd=0.01, shape=1)

        rho_star = pm.Normal("rho_star", mu=0.14, sd=0.01, shape=1)
        r_star = pm.Normal("r_star", mu=2.7, sd=0.01, shape=1)

        period = pm.Deterministic("period", pm.math.exp(logP))

        # The Kipping (2013) parameterization for quadratic limb darkening parameters
        u = xo.distributions.QuadLimbDark("u", testval=np.array([0.3, 0.2]))

        r = pm.Uniform("r", lower=0.01, upper=0.3, shape=1, testval=0.15)

        b = xo.distributions.ImpactParameter("b", ror=r, shape=1, testval=0.5)

        # Transit jitter parameter (the GP hyperparameters are defined below)
        logs2 = pm.Normal("logs2", mu=np.log(np.var(y)), sd=10)

        # Set up a Keplerian orbit for the planets
        orbit = xo.orbits.KeplerianOrbit(period=period,
                                         t0=t0,
                                         b=b,
                                         rho_star=rho_star,
                                         r_star=r_star)

        # Compute the model light curve using starry
        light_curves = xo.LimbDarkLightCurve(u).get_light_curve(orbit=orbit,
                                                                r=r,
                                                                t=t)
        light_curve = pm.math.sum(light_curves, axis=-1) + mean

        # Here we track the value of the model light curve for plotting
        # purposes
        pm.Deterministic("light_curves", light_curves)

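        # Inverse-gamma priors keep the GP amplitudes (S1, S2) and angular
        # frequencies (w1, w2) away from zero and from implausibly large values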
        S1 = pm.InverseGamma(
            "S1", **estimate_inverse_gamma_parameters(0.5**2, 10.0**2))
        S2 = pm.InverseGamma(
            "S2", **estimate_inverse_gamma_parameters(0.25**2, 1.0**2))
        w1 = pm.InverseGamma(
            "w1", **estimate_inverse_gamma_parameters(2 * np.pi / 10.0, np.pi))
        w2 = pm.InverseGamma(
            "w2", **estimate_inverse_gamma_parameters(0.5 * np.pi, 2 * np.pi))
        log_Q = pm.Uniform("log_Q", lower=np.log(2), upper=np.log(10))

        # Set up the kernel and GP
        kernel = terms.SHOTerm(S_tot=S1, w0=w1, Q=1.0 / np.sqrt(2))
        kernel += terms.SHOTerm(S_tot=S2, w0=w2, log_Q=log_Q)
        gp = GP(kernel, t, yerr**2 + pm.math.exp(logs2))

        # Condition the GP on the residuals away from the transit model; this
        # marginal likelihood replaces a separate Gaussian likelihood on the
        # same data (the light curve model already includes the mean)
        pm.Potential("obs", gp.log_likelihood(y - light_curve))
        pm.Deterministic("gp_pred", gp.predict())

        # Fit for the maximum a posteriori parameters given the simulated
        # dataset
        map_soln = xo.optimize(start=model.test_point)

        return model, map_soln
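A model built this way is typically handed straight to the sampler; a minimal sketch, assuming t, y, yerr, t0s, and periods are defined in the enclosing scope (the tune/draw counts are illustrative):

model, map_soln = build_model()

with model:
    trace = pm.sample(
        tune=1000,
        draws=1000,
        start=map_soln,
        chains=2,
        cores=2,
        step=xo.get_dense_nuts_step(target_accept=0.9),
    )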
Example #27
File: fit_gp.py Project: lgbouma/timmy
            "w1", **estimate_inverse_gamma_parameters(2 * np.pi / 10.0, np.pi))
        w2 = pm.InverseGamma(
            "w2", **estimate_inverse_gamma_parameters(0.5 * np.pi, 2 * np.pi))
        log_Q = pm.Uniform("log_Q", lower=np.log(2), upper=np.log(10))

        # Set up the kernel and GP
        kernel = terms.SHOTerm(S_tot=S1, w0=w1, Q=1.0 / np.sqrt(2))
        kernel += terms.SHOTerm(S_tot=S2, w0=w2, log_Q=log_Q)
        gp = GP(kernel, t, yerr**2, mean=mean)

        # Condition the GP on the observations and add the marginal likelihood
        # to the model
        gp.marginal("gp", observed=y)

    with model:
        map_soln = xo.optimize(start=model.test_point)

    with model:
        mu, var = xo.eval_in_model(
            gp.predict(true_t, return_var=True, predict_mean=True), map_soln)

    # Plot the prediction and the 1-sigma uncertainty
    plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0, label="data")
    plt.plot(true_t, true_y, "k", lw=1.5, alpha=0.3, label="truth")

    sd = np.sqrt(var)
    art = plt.fill_between(true_t, mu + sd, mu - sd, color="C1", alpha=0.3)
    art.set_edgecolor("none")
    plt.plot(true_t, mu, color="C1", label="prediction")

    plt.legend(fontsize=12)
Example #28
File: AKFor.py Project: darinadaly/lmdeb
def build_model(mask):
    with pm.Model() as model:
        # Systemic parameters
        mean_lc = pm.Normal("mean_lc", mu=0.0, sd=5.0)
        mean_rv = pm.Normal("mean_rv", mu=0.0, sd=50.0)
        u1 = xo.QuadLimbDark("u1")
        u2 = xo.QuadLimbDark("u2")

        # Parameters describing the primary
        M1 = pm.Lognormal("M1", mu=0.0, sigma=10.0, testval=0.696)
        R1 = pm.Lognormal("R1", mu=0.0, sigma=10.0, testval=0.687)

        # Secondary ratios
        k = pm.Lognormal("k", mu=0.0, sigma=10.0,
                         testval=0.9403)  # radius ratio
        q = pm.Lognormal("q", mu=0.0, sigma=10.0, testval=0.9815)  # mass ratio
        s = pm.Lognormal("s", mu=np.log(0.5),
                         sigma=10.0)  # surface brightness ratio

        # Prior on flux ratio
        pm.Normal(
            "flux_prior",
            mu=lit_flux_ratio[0],
            sigma=lit_flux_ratio[1],
            observed=k**2 * s,
        )

        # Parameters describing the orbit
        b = xo.ImpactParameter("b", ror=k, testval=1.5)
        period = pm.Lognormal("period", mu=np.log(lit_period), sigma=1.0)
        t0 = pm.Normal("t0", mu=lit_t0, sigma=1.0)

        # Parameters describing the eccentricity: ecs = [e * cos(w), e * sin(w)]
        ecs = xo.UnitDisk("ecs", testval=np.array([1e-5, 0.0]))
        ecc = pm.Deterministic("ecc", tt.sqrt(tt.sum(ecs**2)))
        omega = pm.Deterministic("omega", tt.arctan2(ecs[1], ecs[0]))

        # Build the orbit
        R2 = pm.Deterministic("R2", k * R1)
        M2 = pm.Deterministic("M2", q * M1)
        orbit = xo.orbits.KeplerianOrbit(
            period=period,
            t0=t0,
            ecc=ecc,
            omega=omega,
            b=b,
            r_star=R1,
            m_star=M1,
            m_planet=M2,
        )

        # Track some other orbital elements
        pm.Deterministic("incl", orbit.incl)
        pm.Deterministic("a", orbit.a)

        # Noise model for the light curve
        sigma_lc = pm.InverseGamma("sigma_lc",
                                   testval=1.0,
                                   **xo.estimate_inverse_gamma_parameters(
                                       0.1, 2.0))
        S_tot_lc = pm.InverseGamma("S_tot_lc",
                                   testval=2.5,
                                   **xo.estimate_inverse_gamma_parameters(
                                       1.0, 5.0))
        ell_lc = pm.InverseGamma("ell_lc",
                                 testval=2.0,
                                 **xo.estimate_inverse_gamma_parameters(
                                     1.0, 5.0))
        kernel_lc = xo.gp.terms.SHOTerm(S_tot=S_tot_lc,
                                        w0=2 * np.pi / ell_lc,
                                        Q=1.0 / 3)

        # Noise model for the radial velocities
        sigma_rv1 = pm.InverseGamma("sigma_rv1",
                                    testval=1.0,
                                    **xo.estimate_inverse_gamma_parameters(
                                        0.5, 5.0))
        sigma_rv2 = pm.InverseGamma("sigma_rv2",
                                    testval=1.0,
                                    **xo.estimate_inverse_gamma_parameters(
                                        0.5, 5.0))
        S_tot_rv = pm.InverseGamma("S_tot_rv",
                                   testval=2.5,
                                   **xo.estimate_inverse_gamma_parameters(
                                       1.0, 5.0))
        ell_rv = pm.InverseGamma("ell_rv",
                                 testval=2.0,
                                 **xo.estimate_inverse_gamma_parameters(
                                     1.0, 5.0))
        kernel_rv = xo.gp.terms.SHOTerm(S_tot=S_tot_rv,
                                        w0=2 * np.pi / ell_rv,
                                        Q=1.0 / 3)

        # Set up the light curve model
        lc = xo.SecondaryEclipseLightCurve(u1, u2, s)

        def model_lc(t):
            return (
                mean_lc + 1e3 *
                lc.get_light_curve(orbit=orbit, r=R2, t=t, texp=texp)[:, 0])

        # Condition the light curve model on the data
        gp_lc = xo.gp.GP(kernel_lc,
                         x[mask],
                         tt.zeros(mask.sum())**2 + sigma_lc**2,
                         mean=model_lc)
        gp_lc.marginal("obs_lc", observed=y[mask])

        # Set up the radial velocity model
        def model_rv1(t):
            return mean_rv + 1e-3 * orbit.get_radial_velocity(t)

        def model_rv2(t):
            return mean_rv - 1e-3 * orbit.get_radial_velocity(t) / q

        # Condition the radial velocity model on the data
        gp_rv1 = xo.gp.GP(kernel_rv,
                          x_rv,
                          tt.zeros(len(x_rv))**2 + sigma_rv1**2,
                          mean=model_rv1)
        gp_rv1.marginal("obs_rv1", observed=y1_rv)
        gp_rv2 = xo.gp.GP(kernel_rv,
                          x_rv,
                          tt.zeros(len(x_rv))**2 + sigma_rv2**2,
                          mean=model_rv2)
        gp_rv2.marginal("obs_rv2", observed=y2_rv)

        # Optimize the logp
        map_soln = model.test_point

        # First the RV parameters
        map_soln = xo.optimize(map_soln, [mean_rv, q])
        map_soln = xo.optimize(
            map_soln, [mean_rv, sigma_rv1, sigma_rv2, S_tot_rv, ell_rv])

        # Then the LC parameters
        map_soln = xo.optimize(map_soln, [mean_lc, R1, k, s, b])
        map_soln = xo.optimize(map_soln, [mean_lc, R1, k, s, b, u1, u2])
        map_soln = xo.optimize(map_soln, [mean_lc, sigma_lc, S_tot_lc, ell_lc])
        map_soln = xo.optimize(map_soln, [t0, period])

        # Then all the parameters together
        map_soln = xo.optimize(map_soln)

        model.gp_lc = gp_lc
        model.model_lc = model_lc
        model.gp_rv1 = gp_rv1
        model.model_rv1 = model_rv1
        model.gp_rv2 = gp_rv2
        model.model_rv2 = model_rv2

        model.x = x[mask]
        model.y = y[mask]

    return model, map_soln
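One possible way to invoke this builder, assuming the photometry (x, y), radial velocities (x_rv, y1_rv, y2_rv), texp, and the lit_* constants are defined as in the snippet:

mask = np.ones_like(x, dtype=bool)
model, map_soln = build_model(mask)

with model:
    trace = pm.sample(tune=3000, draws=1000, start=map_soln, chains=2,
                      step=xo.get_dense_nuts_step(target_accept=0.9))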
Example #29
def build_model(mask=None):
    if mask is None:
        mask = np.ones_like(x, dtype=bool)

    with pm.Model() as model:
        # Stellar parameters
        mean = pm.Normal("mean", mu=0.0, sigma=10.0)
        u = xo.distributions.QuadLimbDark("u")

        # Gaussian process noise model
        sigma = pm.InverseGamma("sigma", alpha=3.0, beta=2 * np.median(yerr))
        S_tot = pm.InverseGamma("S_tot", alpha=3.0, beta=2 * np.median(yerr))
        ell = pm.Lognormal("ell", mu=0.0, sigma=1.0)
        Q = 1.0 / 3.0
        w0 = 2 * np.pi / ell
        S0 = S_tot / (w0 * Q)
        kernel = xo.gp.terms.SHOTerm(S0=S0, w0=w0, Q=Q)

        # Transit parameters
        t0 = pm.Bound(
            pm.Normal,
            lower=t0_guess - max_duration,
            upper=t0_guess + max_duration,
        )(
            "t0",
            mu=t0_guess,
            sigma=0.5 * duration_guess,
            testval=t0_guess,
            shape=num_toi,
        )
        depth = pm.Lognormal(
            "transit_depth",
            mu=np.log(depth_guess),
            sigma=np.log(1.2),
            shape=num_toi,
        )
        duration = pm.Bound(pm.Lognormal,
                            lower=min_duration,
                            upper=max_duration)(
                                "transit_duration",
                                mu=np.log(duration_guess),
                                sigma=np.log(1.2),
                                shape=num_toi,
                                testval=min(
                                    max(duration_guess, 2 * min_duration),
                                    0.99 * max_duration),
                            )
        b = xo.distributions.UnitUniform("b", shape=num_toi)

        # Dealing with period, treating single transits properly
        period_params = []
        period_values = []
        t_max_values = []
        for n in range(num_toi):
            if single_transit[n]:
                period = pm.Pareto(
                    f"period_{n}",
                    m=period_min[n],
                    alpha=2.0 / 3,
                    testval=period_guess[n],
                )
                period_params.append(period)
                t_max_values.append(t0[n])
            else:
                t_max = pm.Bound(
                    pm.Normal,
                    lower=t_max_guess[n] - max_duration[n],
                    upper=t_max_guess[n] + max_duration[n],
                )(
                    f"t_max_{n}",
                    mu=t_max_guess[n],
                    sigma=0.5 * duration_guess[n],
                    testval=t_max_guess[n],
                )
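                # with multiple observed transits, infer the period from the
                # spacing between the first (t0) and last (t_max) transits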
                period = (t_max - t0[n]) / num_periods[n]
                period_params.append(t_max)
                t_max_values.append(t_max)
            period_values.append(period)
        period = pm.Deterministic("period", tt.stack(period_values))
        t_max = pm.Deterministic("t_max", tt.stack(t_max_values))

        # Compute the radius ratio from the transit depth, impact parameter, and
        # limb darkening parameters making the small-planet assumption
        u1 = u[0]
        u2 = u[1]
        mu = tt.sqrt(1 - b**2)
        ror = pm.Deterministic(
            "ror",
            tt.sqrt(1e-3 * depth * (1 - u1 / 3 - u2 / 6) /
                    (1 - u1 * (1 - mu) - u2 * (1 - mu)**2)),
        )

        # Set up the orbit
        orbit = xo.orbits.KeplerianOrbit(period=period,
                                         duration=duration,
                                         t0=t0,
                                         b=b)

        # We're going to track the implied density for reasons that will become clear later
        pm.Deterministic("rho_circ", orbit.rho_star)

        # Set up the mean transit model
        star = xo.LimbDarkLightCurve(u)

        lc_model = tess_world.LightCurveModels(mean, star, orbit, ror)

        # Finally the GP observation model
        gp = xo.gp.GP(kernel, x[mask], yerr[mask]**2 + sigma**2, mean=lc_model)
        gp.marginal("obs", observed=y[mask])

        # This is a check on the transit depth constraint
        D = tt.concatenate(
            (
                lc_model.light_curves(x[mask]) / depth[None, :],
                tt.ones((mask.sum(), 1)),
            ),
            axis=-1,
        )
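        # Generalized least-squares estimate of the per-planet depth scalings
        # (plus an intercept) using the GP as the noise model:
        # w = (D^T C^-1 D)^-1 D^T C^-1 y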
        DTD = tt.dot(D.T, gp.apply_inverse(D))
        DTy = tt.dot(D.T, gp.apply_inverse(y[mask, None]))
        model.w = tt.slinalg.solve(DTD, DTy)[:, 0]
        model.sigma_w = tt.sqrt(
            tt.diag(tt.slinalg.solve(DTD, tt.eye(num_toi + 1))))

        # Double check that everything looks good - we shouldn't see any NaNs!
        print(model.check_test_point())

        # Optimize the model
        map_soln = model.test_point
        map_soln = xo.optimize(map_soln, [sigma])
        map_soln = xo.optimize(map_soln, [mean, depth, b, duration])
        map_soln = xo.optimize(map_soln, [sigma, S_tot, ell])
        map_soln = xo.optimize(map_soln, [mean, u])
        map_soln = xo.optimize(map_soln, period_params)
        map_soln = xo.optimize(map_soln)

        # Save some of the key parameters
        model.map_soln = map_soln
        model.lc_model = lc_model
        model.gp = gp
        model.mask = mask
        model.x = x[mask]
        model.y = y[mask]
        model.yerr = yerr[mask]

    return model
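Because the builder attaches the GP, light-curve model, and MAP solution to the returned model object, the MAP noise prediction can be evaluated for plotting with xo.eval_in_model; a minimal sketch using the attributes set above:

model = build_model()

with model:
    gp_pred = xo.eval_in_model(model.gp.predict(), model.map_soln)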
Example #30
def flatten_with_gp(lc,
                    break_tolerance,
                    min_period,
                    bin_factor=None,
                    return_trend=False):
    """
    Detrend the flux from an alderaan LiteCurve using a celerite RotationTerm GP kernel
    The mean function of each uninterrupted segment of flux is modeled as an exponential
    
        Fmean = F0*(1+A*exp(-t/tau))
    
    Parameters
    ----------
    lc : alderaan.LiteCurve
        must have .time, .flux and .mask attributes
    break_tolerance : int
        number of cadences considered a large gap in time
    min_period : float
        lower bound on primary period for RotationTerm kernel 
    return_trend : bool (default=False)
        if True, return the trend inferred from the GP fit
        
    Returns
    -------
    lc : alderaan.LiteCurve
        LiteCurve with trend removed from lc.flux
    gp_trend : ndarray
        trend inferred from GP fit (only returned if return_trend == True)
    """
    # find gaps/breaks/jumps in the data
    gaps = identify_gaps(lc, break_tolerance=break_tolerance)
    gaps[-1] -= 1

    # initialize data arrays and lists of segments
    gp_time = np.array(lc.time, dtype="float64")
    gp_flux = np.array(lc.flux, dtype="float64")
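    # gp_mask is True for cadences not flagged in any transit mask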
    gp_mask = np.sum(lc.mask, 0) == 0

    time_segs = []
    flux_segs = []
    mask_segs = []

    for i in range(len(gaps) - 1):
        time_segs.append(gp_time[gaps[i]:gaps[i + 1]])
        flux_segs.append(gp_flux[gaps[i]:gaps[i + 1]])
        mask_segs.append(gp_mask[gaps[i]:gaps[i + 1]])

    mean_flux = []
    approx_var = []

    for i in range(len(gaps) - 1):
        m = mask_segs[i]
        mean_flux.append(np.mean(flux_segs[i][m]))
        approx_var.append(np.var(flux_segs[i] - sig.medfilt(flux_segs[i], 13)))

    # put segments into groups of ten
    nseg = len(time_segs)
    ngroup = int(np.ceil(nseg / 10))
    seg_groups = np.array(np.arange(ngroup + 1) * np.ceil(nseg / ngroup),
                          dtype="int")
    seg_groups[-1] = len(gaps) - 1

    # identify rotation period to initialize GP
    ls_estimate = exo.estimators.lomb_scargle_estimator(
        gp_time, gp_flux, max_peaks=1,
        min_period=min_period, max_period=91.0,
        samples_per_peak=50)

    peak_per = ls_estimate["peaks"][0]["period"]

    # set up lists to hold trend info
    trend_maps = [None] * ngroup

    # optimize the GP for each group of segments
    for j in range(ngroup):
        sg0 = seg_groups[j]
        sg1 = seg_groups[j + 1]
        nuse = sg1 - sg0

        with pm.Model() as trend_model:

            log_amp = pm.Normal("log_amp", mu=np.log(np.std(gp_flux)), sd=5)
            log_per_off = pm.Normal("log_per_off",
                                    mu=0,
                                    sd=5,
                                    testval=np.log(peak_per - min_period))
            log_Q0_off = pm.Normal("log_Q0_off", mu=0, sd=10)
            log_deltaQ = pm.Normal("log_deltaQ", mu=2, sd=10)
            mix = pm.Uniform("mix", lower=0, upper=1)

            P = pm.Deterministic("P", min_period + T.exp(log_per_off))
            Q0 = pm.Deterministic("Q0", 1 / T.sqrt(2) + T.exp(log_Q0_off))

            kernel = exo.gp.terms.RotationTerm(log_amp=log_amp,
                                               period=P,
                                               Q0=Q0,
                                               log_deltaQ=log_deltaQ,
                                               mix=mix)

            # exponential trend
            logtau = pm.Normal("logtau",
                               mu=np.log(3) * np.ones(nuse),
                               sd=5 * np.ones(nuse),
                               shape=nuse)
            exp_amp = pm.Normal('exp_amp',
                                mu=np.zeros(nuse),
                                sd=np.std(gp_flux) * np.ones(nuse),
                                shape=nuse)

            # nuisance parameters per segment
            flux0 = pm.Normal('flux0',
                              mu=np.array(mean_flux[sg0:sg1]),
                              sd=np.std(gp_flux) * np.ones(nuse),
                              shape=nuse)
            logvar = pm.Normal('logvar',
                               mu=np.log(approx_var[sg0:sg1]),
                               sd=10 * np.ones(nuse),
                               shape=nuse)

            # now set up the GP
            gp = [None] * nuse
            gp_pred = [None] * nuse

            for i in range(nuse):
                m = mask_segs[sg0 + i]
                t = time_segs[sg0 + i][m] - time_segs[sg0 + i][m][0]

                ramp = 1 + exp_amp[i] * T.exp(-t / T.exp(logtau[i]))

                gp[i] = exo.gp.GP(
                    kernel, time_segs[sg0 + i][m],
                    T.exp(logvar[i]) * T.ones(len(time_segs[sg0 + i][m])))
                pm.Potential(
                    'obs_{0}'.format(i),
                    gp[i].log_likelihood(flux_segs[sg0 + i][m] -
                                         flux0[i] * ramp))

        with trend_model:
            trend_maps[j] = exo.optimize(start=trend_model.test_point)

    # set up mean and variance vectors
    gp_mean = np.ones_like(gp_flux)
    gp_var = np.ones_like(gp_flux)

    for i in range(nseg):
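        # find the group index j such that seg_groups[j] <= i < seg_groups[j+1]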
        j = np.argmin(seg_groups <= i) - 1

        g0 = gaps[i]
        g1 = gaps[i + 1]

        F0_ = trend_maps[j]["flux0"][i - seg_groups[j]]
        A_ = trend_maps[j]["exp_amp"][i - seg_groups[j]]
        tau_ = np.exp(trend_maps[j]["logtau"][i - seg_groups[j]])

        t_ = gp_time[g0:g1] - gp_time[g0]

        gp_mean[g0:g1] = F0_ * (1 + A_ * np.exp(-t_ / tau_))
        gp_var[g0:g1] = np.ones(g1 - g0) * np.exp(
            trend_maps[j]["logvar"][i - seg_groups[j]])

    # increase variance for cadences in transit (yes, this is hacky, but it works)
    # using gp.predict() inside the initial model was crashing jupyter, making debugging slow
    gp_var[~gp_mask] *= 1e12

    # now evaluate the GP to get the final trend
    gp_trend = np.zeros_like(gp_flux)

    for j in range(ngroup):
        start = gaps[seg_groups[j]]
        end = gaps[seg_groups[j + 1]]

        m = gp_mask[start:end]

        with pm.Model() as trend_model:

            log_amp = trend_maps[j]["log_amp"]
            P = trend_maps[j]["P"]
            Q0 = trend_maps[j]["Q0"]
            log_deltaQ = trend_maps[j]["log_deltaQ"]
            mix = trend_maps[j]["mix"]

            kernel = exo.gp.terms.RotationTerm(log_amp=log_amp,
                                               period=P,
                                               Q0=Q0,
                                               log_deltaQ=log_deltaQ,
                                               mix=mix)

            gp = exo.gp.GP(kernel, gp_time[start:end], gp_var[start:end])

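            # evaluating log_likelihood caches the solved residuals that the
            # no-argument gp.predict() call below reuses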
            gp.log_likelihood(gp_flux[start:end] - gp_mean[start:end])

            gp_trend[start:end] = gp.predict().eval() + gp_mean[start:end]

    # now remove the trend
    lc.flux = lc.flux - gp_trend + 1.0

    # return results
    if return_trend:
        return lc, gp_trend
    else:
        return lc
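One possible call, assuming lc is an alderaan.LiteCurve and using illustrative values for the two tuning parameters:

lc, gp_trend = flatten_with_gp(lc, break_tolerance=30, min_period=1.0,
                               return_trend=True)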