Example 1
def build_model_with_sigma_clipping(sigma=5.0, maxiter=10):
    ntot = len(x)
    mask = np.ones_like(x, dtype=bool)
    pred = np.zeros_like(y)
    for i in range(maxiter):
        print(f"Sigma clipping round {i + 1}")

        with build_model(mask) as model:
            pred[mask] = xo.eval_in_model(
                model.gp.predict() + model.lc_model(x[mask]), model.map_soln)
            if np.any(~mask):
                pred[~mask] = xo.eval_in_model(
                    model.gp.predict(x[~mask]) + model.lc_model(x[~mask]),
                    model.map_soln,
                )

        resid = y - pred
        rms = np.sqrt(np.median(resid**2))
        mask = np.abs(resid) < sigma * rms

        print(
            f"... clipping {(~mask).sum()} of {len(x)} ({100 * (~mask).sum() / len(x):.1f}%)"
        )

        if ntot == mask.sum():
            break
        ntot = mask.sum()

    return model
Example 2
def gp_fit(t, y, yerr, t_grid, integrated=False, exp_time=60.):
    # optimize kernel hyperparameters and return fit + predictions
    with pm.Model() as model:
        logS0 = pm.Normal("logS0", mu=0.4, sd=5.0, testval=np.log(np.var(y)))
        logw0 = pm.Normal("logw0", mu=-3.9, sd=0.1)
        logQ = pm.Normal("logQ", mu=3.5, sd=5.0)

        # Set up the kernel and GP
        kernel = terms.SHOTerm(log_S0=logS0, log_w0=logw0, log_Q=logQ)
        if integrated:
            kernel_int = terms.IntegratedTerm(kernel, exp_time)
            gp = GP(kernel_int, t, yerr**2)
        else:
            gp = GP(kernel, t, yerr**2)

        # Add a custom "potential" (log probability function) with the GP likelihood
        pm.Potential("gp", gp.log_likelihood(y))

    with model:
        map_soln = xo.optimize(start=model.test_point)
        mu, var = xo.eval_in_model(gp.predict(t_grid, return_var=True),
                                   map_soln)
        sd = np.sqrt(var)
        y_pred = xo.eval_in_model(gp.predict(t), map_soln)

    return map_soln, mu, sd, y_pred
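
# A minimal usage sketch for gp_fit (hypothetical synthetic data; assumes the
# same module imports the function relies on: numpy as np, pymc3 as pm,
# exoplanet as xo, and the celerite-style terms/GP classes):
t = np.sort(np.random.uniform(0, 10.0, 50))
yerr = 0.1 * np.ones_like(t)
y = np.sin(2 * np.pi * t / 3.0) + yerr * np.random.randn(len(t))
t_grid = np.linspace(0, 10.0, 200)
map_soln, mu, sd, y_pred = gp_fit(t, y, yerr, t_grid)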
Example 3
def evaluate_map_prediction(event, pm_model, map_params, t_grid, alert_time):
    """
    Evaluate the MAP model prediction in data space.
    """
    with pm_model:
        # Compute the trajectory of the lens
        trajectory = ca.trajectory.Trajectory(
            event, alert_time + T.exp(pm_model.ln_delta_t0), pm_model.u0,
            pm_model.tE)
        u_dense = trajectory.compute_trajectory(t_grid)

        # Compute the magnification
        mag_dense = (u_dense**2 + 2) / (u_dense * T.sqrt(u_dense**2 + 4))

        F_base = 10**(-(pm_model.m_b - 22.0) / 2.5)

        # Compute the mean model
        prediction = pm_model.f * F_base * mag_dense + (1 -
                                                        pm_model.f) * F_base

    # Evaluate the MAP prediction in the model context
    with pm_model:
        prediction_eval = xo.eval_in_model(prediction, map_params)

    return prediction_eval
Example 4
def plot_sky(trace, m):
    # plot sky position for a full orbit
    xs_phase = np.linspace(0, 1, num=1000)

    with m.model:
        ts_full = xs_phase * m.P + m.t_periastron
        predict_full = m.orbit.get_relative_angles(ts_full, m.parallax)

    fig, ax = plt.subplots(nrows=1, figsize=(4, 4))

    for sample in xo.get_samples_from_trace(trace, size=20):

        # we'll want to cache these functions when we evaluate many samples
        rho_full, theta_full = xo.eval_in_model(
            predict_full, point=sample, model=m.model
        )

        x_full = rho_full * np.cos(theta_full)  # X North
        y_full = rho_full * np.sin(theta_full)
        ax.plot(y_full, x_full, color="C0", lw=0.8, alpha=0.7)

    xs = d.wds[1] * np.cos(d.wds[3])  # X is north
    ys = d.wds[1] * np.sin(d.wds[3])  # Y is east

    ax.plot(ys, xs, "ko")
    ax.set_ylabel(r"$\Delta \delta$ ['']")
    ax.set_xlabel(r"$\Delta \alpha \cos \delta$ ['']")
    ax.invert_xaxis()
    ax.plot(0, 0, "k*")
    ax.set_aspect("equal", "datalim")
    fig.subplots_adjust(left=0.18, right=0.82)

    return fig
Example 5
def _plot_light_curve(map_soln, model, mask, x, y, yerr, components, gp):
    if mask is None:
        mask = np.ones(len(x), dtype=bool)

    motion = np.dot(components, map_soln['weights']).reshape(-1)
    with model:
        stellar = xo.eval_in_model(gp.predict(x), map_soln)

    if 'light_curves' in map_soln.keys():
        fig, axes = plt.subplots(4, 1, figsize=(10, 10), sharex=True)
    else:
        fig, axes = plt.subplots(3, 1, figsize=(10, 7), sharex=True)
    corrected = y - motion - stellar
    ax = axes[0]
    ax.plot(x, y, "k", label="Raw Data")
    ax.plot(x, motion, color="C1", label="Motion Model")
    ax.legend(fontsize=10)
    ax.set_ylabel("Relative Flux [ppt]", fontsize=8)

    ax = axes[1]
    ax.plot(x, y - motion, "k", label="Motion Corrected Data")
    ax.plot(x,
            stellar,
            color="C2",
            label="Stellar Variability",
            lw=3,
            alpha=0.5)
    ax.legend(fontsize=10)
    ax.set_ylabel("Relative Flux [ppt]", fontsize=8)

    ax = axes[2]
    ax.plot(x, corrected, "k", label="Fully Corrected Data")
    if 'light_curves' in map_soln.keys():
        for p in range(map_soln['light_curves'].shape[1]):
            ax.plot(x[mask],
                    map_soln['light_curves'][:, p],
                    label="Planet {}".format(p),
                    c='C{}'.format(p + 1))
        ax.set_ylim(0 + np.nanmin(map_soln['light_curves']),
                    0 - np.nanmin(map_soln['light_curves']))
    ax.legend(fontsize=10)
    ax.set_ylabel("Relative Flux [ppt]", fontsize=8)

    if 'light_curves' in map_soln.keys():
        ax = axes[3]
        ax.plot(x,
                y - motion - stellar -
                np.sum(map_soln["light_curves"], axis=-1),
                "k",
                label="Residuals")
        ax.set_ylim(0 + np.nanmin(map_soln['light_curves']),
                    0 - np.nanmin(map_soln['light_curves']))
        ax.legend(fontsize=10)
        ax.set_ylabel("Relative Flux [ppt]", fontsize=8)
    return fig
Example 6
    def __call__(self, point=None, **kwargs):
        """
        Evaluate the model with input parameters at ``point``

        Thanks x1000 to Daniel Foreman-Mackey for making this possible.
        """
        from exoplanet import eval_in_model

        with self.pymc_model:
            result = eval_in_model(self.mean_model, point=point, **kwargs)
        return result
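
# Hypothetical usage of the __call__ wrapper above (the instance and variable
# names here are illustrative, not from the source):
# model = TransitModel(...)           # object exposing pymc_model / mean_model
# flux = model()                      # evaluate at the model's test point
# flux_map = model(point=map_soln)    # or at a MAP solution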
Example 7
def gp_predict(t,
               y,
               yerr,
               t_grid,
               logS0=0.4,
               logw0=-3.9,
               logQ=3.5,
               integrated=False,
               exp_time=60.):
    # take kernel hyperparameters as fixed inputs, train + predict
    with pm.Model() as model:
        kernel = terms.SHOTerm(log_S0=logS0, log_w0=logw0, log_Q=logQ)
        if integrated:
            kernel_int = terms.IntegratedTerm(kernel, exp_time)
            gp = GP(kernel_int, t, yerr**2)
        else:
            gp = GP(kernel, t, yerr**2)
        gp.condition(y)
        mu, var = xo.eval_in_model(gp.predict(t_grid, return_var=True))
        sd = np.sqrt(var)
        y_pred = xo.eval_in_model(gp.predict(t))

    return y_pred, mu, sd
Example 8
File: lc.py Project: tagordon/k2rot
 def predict(self, t=None, return_var=True):
     if t is None:
         t = self.t
     with self.model:
         kernel = xo.gp.terms.RotationTerm(
             log_amp=self.map_soln["logamp"],
             period=self.map_soln["period"],
             log_Q0=self.map_soln["logQ0"],
             log_deltaQ=self.map_soln["logdeltaQ"],
             mix=self.map_soln["mix"])
         gp = xo.gp.GP(kernel,
                       self.t,
                       self.yerr**2 + tt.exp(self.map_soln["logs2"]),
                       J=4)
         gp.log_likelihood(self.flux)
         if return_var:
             mu, var = xo.eval_in_model(gp.predict(t, return_var=True),
                                        self.map_soln)
             return mu, var
         else:
             mu = xo.eval_in_model(gp.predict(t, return_var=False),
                                   self.map_soln)
             return mu
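
# Hypothetical usage of the predict() method above (assumes a fitted object
# `rot` whose map_soln, t, yerr, and flux attributes are already populated):
# mu, var = rot.predict()                          # predict at the training times
# mu_fine = rot.predict(t_fine, return_var=False)  # or on a custom grid t_fine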
Example 9
def multi_gp_predict(t, y, yerr, t_grid, integrated=False, exp_time=60.):
    # rough code, but in principle does gp_predict() for a full comb of modes
    a_max = 0.55  # amplitude of central mode in m/s
    nu_max = 3.1e-3  # peak frequency in Hz
    c_env = 0.331e-3  # envelope width in Hz
    delta_nu = 0.00013  # mode spacing in Hz
    gamma = 1. / (2 * 24. * 60. * 60.)  # s^-1 ; 2-day damping timescale
    dt = np.median(np.diff(t))  # sampling interval (assumed; undefined in the original)
    freq_grid = np.arange(nu_max - 0.001, nu_max + 0.001,
                          delta_nu)  # magic numbers
    omega_grid = 2 * np.pi * freq_grid  # angular frequencies (assumed; undefined in the original)
    amp_grid = a_max**2 * np.exp(-(freq_grid - nu_max)**2 /
                                 (2. * c_env**2))  # mode power in (m/s)^2
    driving_amp_grid = np.sqrt(amp_grid * gamma * dt)
    log_S0_grid = [
        np.log(d**2 / (dt * o)) for o, d in zip(omega_grid, driving_amp_grid)
    ]
    with pm.Model() as model:
        kernel = None
        for o, lS in zip(omega_grid, log_S0_grid):
            if kernel is None:
                kernel = terms.SHOTerm(log_S0=lS,
                                       log_w0=np.log(o),
                                       log_Q=np.log(o / gamma))
            else:
                kernel += terms.SHOTerm(log_S0=lS,
                                        log_w0=np.log(o),
                                        log_Q=np.log(o / gamma))
        if integrated:
            kernel_int = terms.IntegratedTerm(kernel, exp_time)
            gp = GP(kernel_int, t, yerr**2)
        else:
            gp = GP(kernel, t, yerr**2)
        gp.condition(y)
        mu, var = xo.eval_in_model(gp.predict(t_grid, return_var=True))
        sd = np.sqrt(var)
        y_pred = xo.eval_in_model(gp.predict(t))
    return y_pred, mu, sd
Example 10
    def evaluate_model(self, test_t):
        """
        Evaluate the best-fit GP offset model.

        Args:
            test_t (array): The times at which to evaluate the prediction.

        Returns:
            mu (array): The best-fit mean prediction.
            var (array): The variance of the model.
        """

        with self.model:
            mu, var = xo.eval_in_model(
                self.gp.predict(test_t, return_var=True), self.map_soln)
        return mu, var
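
# Hypothetical usage (assumes a fitted object `fit` exposing model, gp, and
# map_soln as in the class this method comes from):
# mu, var = fit.evaluate_model(np.linspace(t.min(), t.max(), 500))
# sd = np.sqrt(var)  # 1-sigma band around the prediction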
Example 11
def test_get_log_probability_function():
    # Create trivial model
    with pm.Model() as model:
        # Define priors
        x1 = pm.Normal("x1", 5.0, 6.0, shape=(2,), testval=[1.46, 2.1])
        x2 = pm.Normal("x2", 1.5, 20, testval=2.37)
        x3 = pm.Normal("x3", 0.2, 5.4, testval=3.48)

        pm.Potential("log_likelihood", T.sum(x1) * x2 + x3)

    with model:
        logp = get_log_probability_function()

    with model:
        logp1 = eval_in_model(model.logpt)

    logp2 = logp([1.46, 2.1, 2.37, 3.48])

    np.testing.assert_allclose(logp1, logp2)
Example 12
def test_get_log_likelihood_function():
    # Create trivial model
    with pm.Model() as model:
        # Define priors
        x1 = pm.HalfCauchy("x1", beta=10, shape=(2,), testval=[1.46, 2.1])
        x2 = pm.Normal("x2", 0, sigma=20, testval=2.37)
        x3 = pm.Exponential("x3", 0.2, testval=3.48)

        pm.Potential("log_likelihood", T.sum(x1) * x2 + x3)

    with model:
        loglike = get_log_likelihood_function(model.log_likelihood)

    with model:
        ll1 = eval_in_model(model.log_likelihood)

    ll2 = loglike([1.46, 2.1, 2.37, 3.48])

    np.testing.assert_allclose(ll1, ll2)
Example 13
def sigma_clip():
    mask = np.ones(len(x), dtype=bool)
    num = len(mask)

    for i in range(10):
        model, map_soln = build_model(mask)

        with model:
            mdl = xo.eval_in_model(
                model.model_lc(x[mask]) + model.gp_lc.predict(), map_soln)

        resid = y[mask] - mdl
        sigma = np.sqrt(np.median((resid - np.median(resid))**2))
        mask[mask] = np.abs(resid - np.median(resid)) < 7 * sigma
        print("Sigma clipped {0} light curve points".format(num - mask.sum()))
        if num == mask.sum():
            break
        num = mask.sum()

    return model, map_soln
Example 14
 def optimize(self, start=None, mask=True, sigma=3, verbose=True):
     # Optimize
     print('Initial Optimization')
     map_soln0 = self._optimize(start=start, verbose=verbose)
     if mask:
         print('Second Optimization')
         eb = map_soln0['eb_model']
         with self._model:
             gp = xo.eval_in_model(self._model.gp.predict(np.asarray(self.lc.time, np.float64)), map_soln0)
         motion_model = map_soln0['motion_model']
         p, t0 = map_soln0['period'], map_soln0['t0']
         f = ((self.lc - motion_model - gp).fold(p, t0) - lk.LightCurve(self.lc.time, eb).fold(p, t0).flux)
         f1 = f.bin(15, 'median')
         f -= np.interp(f.time, f1.time, f1.flux)
         bad = np.abs(f.flux - np.median(f.flux)) > sigma * f.flux.std()
         bad = np.in1d(self.lc.time, f.time_original[bad])
         self._diag[bad] += 1e12
         self.bad = bad
         self.map_soln = self._optimize(start=map_soln0, verbose=verbose)
     else:
         self.map_soln = map_soln0
Example 15
def plot_sep_pa(trace, m):
    # plot separation and PA across the observed dates
    ts_obs = np.linspace(
        np.min(d.wds[0]) - 300, np.max(d.wds[0]) + 300, num=1000
    )  # days

    with m.model:
        predict_fine = m.orbit.get_relative_angles(ts_obs, m.parallax)

    # we can plot the maximum posterior solution to see
    # pkw = {'marker':".", "color":"k", 'ls':""}
    ekw = {"color": "C1", "marker": "o", "ls": ""}

    fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(6, 4))
    ax[0].set_ylabel(r'$\rho\,$ ["]')
    ax[1].set_ylabel(r"P.A. [radians]")
    ax[1].set_xlabel("JD [days]")

    for sample in xo.get_samples_from_trace(trace, size=20):

        # we'll want to cache these functions when we evaluate many samples
        rho_fine, theta_fine = xo.eval_in_model(
            predict_fine, point=sample, model=m.model
        )

        ax[0].plot(ts_obs, rho_fine, "C0")
        ax[1].plot(ts_obs, theta_fine, "C0")

    # get map sol for tot_rho_err
    tot_rho_err = np.sqrt(d.wds[2] ** 2 + np.exp(2 * np.median(trace["logRhoS"])))
    tot_theta_err = np.sqrt(d.wds[4] ** 2 + np.exp(2 * np.median(trace["logThetaS"])))

    # ax[0].plot(d.wds[0], d.wds[1], **pkw)
    ax[0].errorbar(d.wds[0], d.wds[1], yerr=tot_rho_err, **ekw)

    # ax[1].plot(jds, theta_data, **pkw)
    ax[1].errorbar(d.wds[0], d.wds[3], yerr=tot_theta_err, **ekw)

    return fig
Example 16
def evaluate_prediction_quantiles(event, pm_model, trace, t_grid, alert_time):
    """
    Evaluate median model prediction quantiles in data space.
    """
    n_samples = 500

    samples = xo.get_samples_from_trace(trace, size=n_samples)

    with pm_model:
        # Compute the trajectory of the lens
        trajectory = ca.trajectory.Trajectory(
            event, alert_time + T.exp(pm_model.ln_delta_t0), pm_model.u0,
            pm_model.tE)
        u_dense = trajectory.compute_trajectory(t_grid)

        # Compute the magnification
        mag_dense = (u_dense**2 + 2) / (u_dense * T.sqrt(u_dense**2 + 4))

        F_base = 10**(-(pm_model.m_b - 22.0) / 2.5)

        # Compute the mean model
        prediction = pm_model.f * F_base * mag_dense + (1 -
                                                        pm_model.f) * F_base

    # Evaluate model for each sample on a fine grid
    n_pts_dense = T.shape(t_grid)[0].eval()
    n_bands = len(event.light_curves)

    prediction_eval = np.zeros((n_samples, n_bands, n_pts_dense))

    # Evaluate predictions in model context
    with pm_model:
        for i, sample in enumerate(samples):
            prediction_eval[i] = xo.eval_in_model(prediction, sample)

    quantiles = []
    for i in range(n_bands):
        quantiles.append(
            np.percentile(prediction_eval[:, i, :], [16, 50, 84], axis=0))

    return quantiles
Example 17

model, map_soln = sigma_clip()

period = map_soln["period"]
t0 = map_soln["t0"]
mean = map_soln["mean_rv"]

x_fold = (x_rv - t0 + 0.5 * period) % period - 0.5 * period
plt.plot(x_fold, y1_rv - mean, ".", label="primary")
plt.plot(x_fold, y2_rv - mean, ".", label="secondary")

x_phase = np.linspace(-0.5 * period, 0.5 * period, 500)
with model:
    y1_mod, y2_mod = xo.eval_in_model(
        [model.model_rv1(x_phase + t0),
         model.model_rv2(x_phase + t0)], map_soln)
plt.plot(x_phase, y1_mod - mean, "C0")
plt.plot(x_phase, y2_mod - mean, "C1")

plt.legend(fontsize=10)
plt.xlim(-0.5 * period, 0.5 * period)
plt.ylabel("radial velocity [km / s]")
plt.xlabel("time since primary eclipse [days]")
_ = plt.title("AK For; map model", fontsize=14)
plt.show()

with model:
    gp_pred = xo.eval_in_model(model.gp_lc.predict(),
                               map_soln) + map_soln["mean_lc"]
    lc = xo.eval_in_model(model.model_lc(model.x), map_soln)
Example 18
def build_model(mask=None, start=None):

    with pm.Model() as model:

        # The baseline flux
        mean = pm.Normal("mean", mu=0.0, sd=0.00001)

        # The time of a reference transit for each planet
        t0 = pm.Normal("t0", mu=t0s, sd=1.0, shape=1)

        # The log period; also tracking the period itself
        logP = pm.Normal("logP", mu=np.log(periods), sd=0.01, shape=1)

        rho_star = pm.Normal("rho_star", mu=0.14, sd=0.01, shape=1)
        r_star = pm.Normal("r_star", mu=2.7, sd=0.01, shape=1)

        period = pm.Deterministic("period", pm.math.exp(logP))

        # The Kipping (2013) parameterization for quadratic limb darkening parameters
        u = xo.distributions.QuadLimbDark("u", testval=np.array([0.3, 0.2]))

        r = pm.Uniform("r", lower=0.01, upper=0.3, shape=1, testval=0.15)

        b = xo.distributions.ImpactParameter("b", ror=r, shape=1, testval=0.5)

        # Transit jitter & GP parameters
        logs2 = pm.Normal("logs2", mu=np.log(np.var(y)), sd=10)
        logw0 = pm.Normal("logw0", mu=0, sd=10)
        logSw4 = pm.Normal("logSw4", mu=np.log(np.var(y)), sd=10)

        # Set up a Keplerian orbit for the planets
        orbit = xo.orbits.KeplerianOrbit(
            period=period, t0=t0, b=b, rho_star=rho_star, r_star=r_star)

        # Compute the model light curve using starry
        light_curves = xo.LimbDarkLightCurve(u).get_light_curve(
            orbit=orbit, r=r, t=t)
        light_curve = pm.math.sum(light_curves, axis=-1) + mean

        # Here we track the value of the model light curve for plotting
        # purposes
        pm.Deterministic("light_curves", light_curves)

        # GP noise model: the measurement uncertainty and the jitter term
        # logs2 both enter the diagonal, and the transit model is the mean;
        # the GP marginal likelihood is the observation term
        kernel = xo.gp.terms.SHOTerm(log_Sw4=logSw4, log_w0=logw0, Q=1 / np.sqrt(2))
        gp = xo.gp.GP(kernel, t, yerr**2 + tt.exp(logs2), mean=light_curve)
        gp.marginal("gp", observed=y)
        pm.Deterministic("gp_pred", gp.predict())

        # Fit for the maximum a posteriori parameters given the simulated
        # dataset
        if start is None:
            start = model.test_point
        map_soln = xo.optimize(start=start)

        return model, map_soln

model, map_soln = build_model()

gp_mod = map_soln["gp_pred"] + map_soln["mean"]
plt.clf()
plt.plot(t, y, ".k", ms=4, label="data")
plt.plot(t, gp_mod, lw=1,label="gp model")
plt.plot(t, map_soln["light_curves"], lw=1,label="transit model")
plt.xlim(t.min(), t.max())
plt.ylabel("relative flux")
plt.xlabel("time [days]")
plt.legend(fontsize=10)
_ = plt.title("map model")

np.random.seed(42)
with model:
    trace = pm.sample(
        tune=3000,
        draws=3000,
        start=map_soln,
        cores=2,
        chains=2,
        step=xo.get_dense_nuts_step(target_accept=0.9),
    )
    
    
pm.summary(trace, varnames=["period", "t0", "r", "b", "u", "mean", "rho_star","logw0","logSw4","logs2"])


import corner

samples = pm.trace_to_dataframe(trace, varnames=["period", "r"])
truth = np.concatenate(
    xo.eval_in_model([period, r], model.test_point, model=model)
)
_ = corner.corner(
    samples,
    truths=truth,
    labels=["period 1", "radius 1"],
)


# Compute the GP prediction
gp_mod = np.median(trace["gp_pred"] + trace["mean"][:, None], axis=0)

# Get the posterior median orbital parameters
p = np.median(trace["period"])
t0 = np.median(trace["t0"])

# Plot the folded data
x_fold = (t - t0 + 0.5 * p) % p - 0.5 * p
plt.plot(x_fold, y - gp_mod, ".k", label="data", zorder=-1000)

# Overplot the phase binned light curve
bins = np.linspace(-0.41, 0.41, 50)
denom, _ = np.histogram(x_fold, bins)
num, _ = np.histogram(x_fold, bins, weights=y)
denom[num == 0] = 1.0
plt.plot(0.5 * (bins[1:] + bins[:-1]), num / denom, "o", color="C2", label="binned")

# Plot the folded model
inds = np.argsort(x_fold)
inds = inds[np.abs(x_fold)[inds] < 0.3]
pred = trace["light_curves"][:, inds, 0]
pred = np.percentile(pred, [16, 50, 84], axis=0)
plt.plot(x_fold[inds], pred[1], color="C1", label="model")
art = plt.fill_between(
    x_fold[inds], pred[0], pred[2], color="C1", alpha=0.5, zorder=1000
)
art.set_edgecolor("none")

# Annotate the plot with the planet's period
txt = "period = {0:.5f} +/- {1:.5f} d".format(
    np.mean(trace["period"]), np.std(trace["period"])
)
plt.annotate(
    txt,
    (0, 0),
    xycoords="axes fraction",
    xytext=(5, 5),
    textcoords="offset points",
    ha="left",
    va="bottom",
    fontsize=12,
)

plt.legend(fontsize=10, loc=4)
plt.xlim(-0.5 * p, 0.5 * p)
plt.xlabel("time since transit [days]")
plt.ylabel("de-trended flux")
plt.xlim(-0.3, 0.3);
Example 19
    # evaluate the model with the current parameter settings
    offset = offset_dict[label]
    d = data[1] - offset
    resid = d - model
    err = np.sqrt(data[2]**2 + np.exp(2 * err_dict[label]))

    color = color_dict[label]
    a.errorbar(phase, d, yerr=err, label=label, **ekw, color=color)
    a_r.errorbar(phase, resid, yerr=err, **ekw, color=color)


for sample in xo.get_samples_from_trace(trace, size=1):

    # we'll want to cache these functions when we evaluate many samples
    rv1_m = xo.eval_in_model(rv1, point=sample, model=m.model)
    rv2_m = xo.eval_in_model(rv2, point=sample, model=m.model)

    ax1.plot(xs_phase, rv1_m, **pkw)
    ax2.plot(xs_phase, rv2_m, **pkw)

    err_dict = {
        "CfA": sample["logjittercfa"],
        "Keck": sample["logjitterkeck"],
        "FEROS": sample["logjitterferos"],
        "du Pont": sample["logjitterdupont"],
    }
    offset_dict = {
        "CfA": 0.0,
        "Keck": sample["offsetKeck"],
        "FEROS": sample["offsetFeros"],
Example 20
        def run_fitting():
            times = self.light_curve_data_source.data['Time (BTJD)'].astype(
                np.float32)
            flux_errors = self.light_curve_data_source.data[
                'Normalized PDCSAP flux error']
            fluxes = self.light_curve_data_source.data[
                'Normalized PDCSAP flux']
            relative_times = self.light_curve_data_source.data['Time (days)']
            nan_indexes = np.union1d(
                np.argwhere(np.isnan(fluxes)),
                np.union1d(np.argwhere(np.isnan(times)),
                           np.argwhere(np.isnan(flux_errors))))
            fluxes = np.delete(fluxes, nan_indexes)
            flux_errors = np.delete(flux_errors, nan_indexes)
            times = np.delete(times, nan_indexes)
            relative_times = np.delete(relative_times, nan_indexes)
            with pm.Model() as model:
                # Stellar parameters
                mean = pm.Normal("mean", mu=0.0, sigma=10.0 * 1e-3)
                u = xo.distributions.QuadLimbDark("u")
                star_params = [mean, u]

                # Gaussian process noise model
                sigma = pm.InverseGamma("sigma",
                                        alpha=3.0,
                                        beta=2 * np.nanmedian(flux_errors))
                log_Sw4 = pm.Normal("log_Sw4", mu=0.0, sigma=10.0)
                log_w0 = pm.Normal("log_w0",
                                   mu=np.log(2 * np.pi / 10.0),
                                   sigma=10.0)
                kernel = xo.gp.terms.SHOTerm(log_Sw4=log_Sw4,
                                             log_w0=log_w0,
                                             Q=1.0 / 3)
                noise_params = [sigma, log_Sw4, log_w0]

                # Planet parameters
                log_ror = pm.Normal("log_ror",
                                    mu=0.5 * np.log(self_.depth),
                                    sigma=10.0 * 1e-3)
                ror = pm.Deterministic("ror", tt.exp(log_ror))
                depth = pm.Deterministic('Transit depth (relative flux)',
                                         tt.square(ror))
                planet_radius = pm.Deterministic('Planet radius (solar radii)',
                                                 ror * self_.star_radius)

                # Orbital parameters
                log_period = pm.Normal("log_period",
                                       mu=np.log(self_.period),
                                       sigma=1.0)
                t0 = pm.Normal('Transit epoch (BTJD)',
                               mu=self_.transit_epoch,
                               sigma=1.0)
                log_dur = pm.Normal("log_dur", mu=np.log(0.1), sigma=10.0)
                b = xo.distributions.ImpactParameter("b", ror=ror)

                period = pm.Deterministic('Transit period (days)',
                                          tt.exp(log_period))
                dur = pm.Deterministic('Transit duration (days)',
                                       tt.exp(log_dur))

                # Set up the orbit
                orbit = xo.orbits.KeplerianOrbit(period=period,
                                                 duration=dur,
                                                 t0=t0,
                                                 b=b,
                                                 r_star=self.star_radius)

                # We're going to track the implied density for reasons that will become clear later
                pm.Deterministic("rho_circ", orbit.rho_star)

                # Set up the mean transit model
                star = xo.LimbDarkLightCurve(u)

                def lc_model(t):
                    return mean + tt.sum(star.get_light_curve(
                        orbit=orbit, r=ror * self.star_radius, t=t),
                                         axis=-1)

                # Finally the GP observation model
                gp = xo.gp.GP(kernel,
                              times, (flux_errors**2) + (sigma**2),
                              mean=lc_model)
                gp.marginal("obs", observed=fluxes)

                # Double check that everything looks good - we shouldn't see any NaNs!
                print(model.check_test_point())

                # Optimize the model
                map_soln = model.test_point
                map_soln = xo.optimize(map_soln, [sigma])
                map_soln = xo.optimize(map_soln, [log_ror, b, log_dur])
                map_soln = xo.optimize(map_soln, noise_params)
                map_soln = xo.optimize(map_soln, star_params)
                map_soln = xo.optimize(map_soln)

            with model:
                gp_pred, lc_pred = xo.eval_in_model(
                    [gp.predict(), lc_model(times)], map_soln)

            x_fold = (times - map_soln['Transit epoch (BTJD)'] +
                      0.5 * map_soln['Transit period (days)']
                      ) % map_soln['Transit period (days)'] - 0.5 * map_soln[
                          'Transit period (days)']
            inds = np.argsort(x_fold)
            bokeh_document.add_next_tick_callback(
                partial(update_initial_fit_figure, fluxes, gp_pred, inds,
                        lc_pred, map_soln, relative_times, times, x_fold))

            self.bokeh_document.add_next_tick_callback(
                partial(fit, self, map_soln, model))
Example 21
    # Also define the model on a fine grid as computed above (for plotting)
    rv_model_pred = get_rv_model(t, name="_pred")

    # Finally add in the observation model. This next line adds a new contribution
    # to the log probability of the PyMC3 model
    err = tt.sqrt(yerr**2 + tt.exp(2 * logs))
    pm.Normal("obs", mu=rv_model, sd=err, observed=y)
# -

# Now, we can plot the initial model:

# +
plt.errorbar(x, y, yerr=yerr, fmt=".k")

with model:
    plt.plot(t, xo.eval_in_model(model.vrad_pred), "--k", alpha=0.5)
    plt.plot(t, xo.eval_in_model(model.bkg_pred), ":k", alpha=0.5)
    plt.plot(t, xo.eval_in_model(model.rv_model_pred), label="model")

plt.legend(fontsize=10)
plt.xlim(t.min(), t.max())
plt.xlabel("time [days]")
plt.ylabel("radial velocity [m/s]")
_ = plt.title("initial model")
# -

# In this plot, the background is the dotted line, the individual planets are the dashed lines, and the full model is the blue line.
#
# It doesn't look amazing, so let's fit for the maximum a posteriori parameters.

with model:
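    # Snippet truncated here; a typical continuation in this tutorial style is
    # a staged MAP optimization (a sketch, not the verbatim source; `trend` is
    # the background-trend coefficient vector assumed from context):
    map_soln = xo.optimize(start=model.test_point, vars=[trend])
    map_soln = xo.optimize(start=map_soln)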
Example 22
    rho_save_data = pm.Deterministic("rhoSaveData", rho)
    theta_save_data = pm.Deterministic("thetaSaveData", theta)

    # save RV plots
    t_dense = pm.Deterministic("tDense", xs_phase * P + jd0)
    rv1_dense = pm.Deterministic(
        "RV1Dense",
        conv * orbit.get_star_velocity(t_dense - jd0)[2] + gamma_keck)
    rv2_dense = pm.Deterministic(
        "RV2Dense",
        conv * orbit.get_planet_velocity(t_dense - jd0)[2] + gamma_keck)

fig, ax = plt.subplots(nrows=1)

with model:
    rho = xo.eval_in_model(rho_model, map_sol3)
    theta = xo.eval_in_model(theta_model, map_sol3)

    # plot the data
    xs = rho_data * np.cos(theta_data)  # X is north
    ys = rho_data * np.sin(theta_data)  # Y is east
    ax.plot(ys, xs, "ko")

    # plot the orbit
    xs = rho * np.cos(theta)  # X is north
    ys = rho * np.sin(theta)  # Y is east
    ax.plot(ys, xs, ".")

    ax.set_ylabel(r"$\Delta \delta$ ['']")
    ax.set_xlabel(r"$\Delta \alpha \cos \delta$ ['']")
    ax.invert_xaxis()
Example 23
def joint_fit(tpf,
              period_value,
              t0_value,
              depth_value,
              duration_value,
              R_star,
              M_star,
              T_star,
              aperture=None,
              texp=0.0204335,
              return_quick_corrected=False,
              return_soln=False,
              trim=0):
    shape = len(period_value)

    planet_mask = np.ones(len(tpf.time), bool)
    for p, t, d in zip(period_value, t0_value, duration_value):
        planet_mask &= np.abs((tpf.time - t + 0.5 * p) % p - 0.5 * p) > d / 2

    if aperture is None:
        aperture = tpf.pipeline_mask

    time = np.asarray(tpf.time, np.float64)
    if trim > 0:
        flux = np.asarray(tpf.flux[:, trim:-trim, trim:-trim], np.float64)
        flux_err = np.asarray(tpf.flux_err[:, trim:-trim, trim:-trim],
                              np.float64)
        aper = np.asarray(aperture, bool)[trim:-trim, trim:-trim]
    else:
        flux = np.asarray(tpf.flux, np.float64)
        flux_err = np.asarray(tpf.flux_err, np.float64)
        aper = np.asarray(aperture, bool)

    raw_flux = np.asarray(np.nansum(flux[:, aper], axis=(1)), np.float64)
    raw_flux_err = np.asarray(
        np.nansum(flux_err[:, aper]**2, axis=(1))**0.5, np.float64)

    raw_flux_err /= np.median(raw_flux)
    raw_flux /= np.median(raw_flux)
    raw_flux -= 1

    # Setting to Parts Per Thousand keeps us from hitting machine precision errors...
    raw_flux *= 1e3
    raw_flux_err *= 1e3

    # Build the first order PLD basis
    #    X_pld = np.reshape(flux[:, aper], (len(flux), -1))
    saturation = (np.nanpercentile(flux, 95, axis=0) > 175000)
    X_pld = np.reshape(flux[:, aper & ~saturation], (len(tpf.flux), -1))

    extra_pld = np.zeros((len(time), np.any(saturation, axis=0).sum()))
    idx = 0
    for column in saturation.T:
        if column.any():
            extra_pld[:, idx] = np.sum(flux[:, column, :], axis=(1, 2))
            idx += 1
    X_pld = np.hstack([X_pld, extra_pld])

    # Remove NaN pixels
    X_pld = X_pld[:, ~((~np.isfinite(X_pld)).all(axis=0))]
    X_pld = X_pld / np.sum(flux[:, aper], axis=-1)[:, None]

    # Build the second order PLD basis and run PCA to reduce the number of dimensions
    X2_pld = np.reshape(X_pld[:, None, :] * X_pld[:, :, None], (len(flux), -1))
    # Remove NaN pixels
    X2_pld = X2_pld[:, ~((~np.isfinite(X2_pld)).all(axis=0))]
    U, _, _ = np.linalg.svd(X2_pld, full_matrices=False)
    X2_pld = U[:, :X_pld.shape[1]]

    ## Construct the design matrix and fit for the PLD model
    X_pld = np.concatenate((X_pld, X2_pld), axis=-1)

    def build_model(mask=None, start=None):
        ''' Build a PYMC3 model

        Parameters
        ----------
        mask : np.ndarray
            Boolean array to mask cadences. Cadences that are False will be excluded
            from the model fit
        start : dict
            MAP Solution from exoplanet

        Returns
        -------
        model : pymc3.model.Model
            A pymc3 model
        map_soln : dict
            Best fit solution
        '''

        if mask is None:
            mask = np.ones(len(time), dtype=bool)

        with pm.Model() as model:

            # Parameters for the stellar properties
            mean = pm.Normal("mean", mu=0.0, sd=10.0)
            u_star = xo.distributions.QuadLimbDark("u_star")

            m_star = pm.Normal("m_star", mu=M_star[0], sd=M_star[1])
            r_star = pm.Normal("r_star", mu=R_star[0], sd=R_star[1])
            t_star = pm.Normal("t_star", mu=T_star[0], sd=T_star[1])

            # Prior to require physical parameters
            pm.Potential("m_star_prior", tt.switch(m_star > 0, 0, -np.inf))
            pm.Potential("r_star_prior", tt.switch(r_star > 0, 0, -np.inf))

            # Orbital parameters for the planets
            logP = pm.Normal("logP",
                             mu=np.log(period_value),
                             sd=0.01,
                             shape=shape)
            t0 = pm.Normal("t0", mu=t0_value, sd=0.01, shape=shape)
            b = pm.Uniform("b", lower=0, upper=1, testval=0.5, shape=shape)
            logr = pm.Normal("logr",
                             sd=1.0,
                             mu=0.5 * np.log(np.array(depth_value)) +
                             np.log(R_star[0]),
                             shape=shape)
            r_pl = pm.Deterministic("r_pl", tt.exp(logr))
            ror = pm.Deterministic("ror", r_pl / r_star)

            # Tracking planet parameters
            period = pm.Deterministic("period", tt.exp(logP))

            # Orbit model
            orbit = xo.orbits.KeplerianOrbit(r_star=r_star,
                                             m_star=m_star,
                                             period=period,
                                             t0=t0,
                                             b=b)

            incl = pm.Deterministic('incl', orbit.incl)
            a = pm.Deterministic('a', orbit.a)
            teff = pm.Deterministic('teff', t_star * tt.sqrt(0.5 * (1 / a)))

            # Compute the model light curve using starry
            light_curves = xo.StarryLightCurve(u_star).get_light_curve(
                orbit=orbit, r=r_pl, t=time[mask], texp=texp) * 1e3
            light_curve = pm.math.sum(light_curves, axis=-1) + mean
            pm.Deterministic("light_curves", light_curves)

            # GP
            # --------
            logs2 = pm.Normal("logs2",
                              mu=np.log(1e-4 * np.var(raw_flux[mask])),
                              sd=10)
            logsigma = pm.Normal("logsigma",
                                 mu=np.log(np.std(raw_flux[mask])),
                                 sd=10)
            logrho = pm.Normal("logrho", mu=np.log(150), sd=10)
            kernel = xo.gp.terms.Matern32Term(log_rho=logrho,
                                              log_sigma=logsigma)
            gp = xo.gp.GP(kernel, time[mask],
                          tt.exp(logs2) + raw_flux_err[mask]**2)

            # Motion model
            #------------------
            A = tt.dot(X_pld[mask].T, gp.apply_inverse(X_pld[mask]))
            B = tt.dot(X_pld[mask].T, gp.apply_inverse(raw_flux[mask, None]))
            C = tt.slinalg.solve(A, B)
            motion_model = pm.Deterministic("motion_model",
                                            tt.dot(X_pld[mask], C)[:, 0])

            # Likelihood
            #------------------
            pm.Potential("obs",
                         gp.log_likelihood(raw_flux[mask] - motion_model))

            # gp predicted flux
            gp_pred = gp.predict()
            pm.Deterministic("gp_pred", gp_pred)
            pm.Deterministic("weights", C)

            # Optimize
            #------------------
            if start is None:
                start = model.test_point

            map_soln = xo.optimize(start=start, vars=[logrho, logsigma])
            map_soln = xo.optimize(start=map_soln, vars=[logr])
            map_soln = xo.optimize(start=map_soln, vars=[logs2])
            map_soln = xo.optimize(start=map_soln,
                                   vars=[logrho, logsigma, logs2, logr])
            map_soln = xo.optimize(start=map_soln, vars=[mean, logr])
            map_soln = xo.optimize(start=map_soln, vars=[logP, t0])
            map_soln = xo.optimize(start=map_soln, vars=[b])
            map_soln = xo.optimize(start=map_soln, vars=[u_star])
            map_soln = xo.optimize(start=map_soln,
                                   vars=[logrho, logsigma, logs2])
            map_soln = xo.optimize(start=map_soln)

            return model, map_soln, gp

    with silence():
        model0, map_soln0, gp = build_model()

    # Remove outliers
    with model0:
        motion = np.dot(X_pld, map_soln0['weights']).reshape(-1)
        stellar = xo.eval_in_model(gp.predict(time), map_soln0)
        corrected = raw_flux - motion - stellar
        mask = ~sigma_clip(corrected, sigma=5).mask
        mask = ~(convolve(mask, Box1DKernel(5), fill_value=1) != 1)
        mask |= (~planet_mask)

    with silence():
        model, map_soln, gp = build_model(start=map_soln0, mask=mask)

    lc_fig = _plot_light_curve(map_soln, model, mask, time, raw_flux,
                               raw_flux_err, X_pld, gp)

    if return_soln:
        motion = np.dot(X_pld, map_soln['weights']).reshape(-1)
        with model:
            stellar = xo.eval_in_model(gp.predict(time), map_soln)
        return model, map_soln, motion, stellar

    if return_quick_corrected:
        # Recompute the motion and stellar components with the final MAP solution
        motion = np.dot(X_pld, map_soln['weights']).reshape(-1)
        with model:
            stellar = xo.eval_in_model(gp.predict(time), map_soln)
        raw_lc = tpf.to_lightcurve()
        clc = lk.KeplerLightCurve(
            time=time,
            flux=(raw_flux - stellar - motion) * 1e-3 + 1,
            flux_err=(raw_flux_err) * 1e-3,
            time_format=raw_lc.time_format,
            centroid_col=tpf.estimate_centroids()[0],
            centroid_row=tpf.estimate_centroids()[1],
            quality=raw_lc.quality,
            channel=raw_lc.channel,
            campaign=raw_lc.campaign,
            quarter=raw_lc.quarter,
            mission=raw_lc.mission,
            cadenceno=raw_lc.cadenceno,
            targetid=raw_lc.targetid,
            ra=raw_lc.ra,
            dec=raw_lc.dec,
            label='{} PLD Corrected'.format(raw_lc.targetid))
        return clc

    return model, map_soln, gp, X_pld, time, raw_flux, raw_flux_err, mask
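
# Hypothetical call for joint_fit above (placeholder numbers; assumes a
# lightkurve TargetPixelFile `tpf` and literature values for the planet):
# model, map_soln, gp, X_pld, time, raw_flux, raw_flux_err, mask = joint_fit(
#     tpf, period_value=[3.5], t0_value=[2455.1], depth_value=[1e-3],
#     duration_value=[0.1], R_star=(1.0, 0.1), M_star=(1.0, 0.1),
#     T_star=(5700.0, 100.0))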
Example 24
            break
        ntot = mask.sum()

    return model


model = build_model_with_sigma_clipping()

# %% [markdown]
# Now, after building the model, clipping outliers, and optimizing to estimate the maximum a posteriori (MAP) parameters, we can visualize our initial fit.

# %%
with model:
    gp_pred, lc_pred = xo.eval_in_model(
        [model.gp.predict(),
         model.lc_model.light_curves(model.x)],
        model.map_soln,
    )

for n in range(num_toi):
    t0 = model.map_soln["t0"][n]
    period = model.map_soln["period"][n]
    x_fold = (model.x - t0 + 0.5 * period) % period - 0.5 * period

    plt.figure(figsize=(8, 4))
    plt.scatter(x_fold,
                model.y - gp_pred - model.map_soln["mean"],
                c=model.x,
                s=3)

    inds = np.argsort(x_fold)
Example 25
        log_Q = pm.Uniform("log_Q", lower=np.log(2), upper=np.log(10))

        # Set up the kernel and GP
        kernel = terms.SHOTerm(S_tot=S1, w0=w1, Q=1.0 / np.sqrt(2))
        kernel += terms.SHOTerm(S_tot=S2, w0=w2, log_Q=log_Q)
        gp = GP(kernel, t, yerr**2, mean=mean)

        # Condition the GP on the observations and add the marginal likelihood
        # to the model
        gp.marginal("gp", observed=y)

    with model:
        map_soln = xo.optimize(start=model.test_point)

    with model:
        mu, var = xo.eval_in_model(
            gp.predict(true_t, return_var=True, predict_mean=True), map_soln)

    # Plot the prediction and the 1-sigma uncertainty
    plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0, label="data")
    plt.plot(true_t, true_y, "k", lw=1.5, alpha=0.3, label="truth")

    sd = np.sqrt(var)
    art = plt.fill_between(true_t, mu + sd, mu - sd, color="C1", alpha=0.3)
    art.set_edgecolor("none")
    plt.plot(true_t, mu, color="C1", label="prediction")

    plt.legend(fontsize=12)
    plt.xlabel("t")
    plt.ylabel("y")
    plt.xlim(0, 10)
    _ = plt.ylim(-2.5, 2.5)
Example 26
    kernel += terms.SHOTerm(S_tot=S2, w0=w2, log_Q=log_Q)
    gp = GP(kernel, t, yerr ** 2, mean=mean)

    # Condition the GP on the observations and add the marginal likelihood
    # to the model
    gp.marginal("gp", observed=y)
  
pm.summary(trace, varnames=["S1", "S2", "w1", "w2"])
  
  
with model:
    map_soln = xo.optimize(start=model.test_point)
    
with model:
    mu, var = xo.eval_in_model(
        gp.predict(t, return_var=True, predict_mean=True), map_soln
    )
    
plt.ion()
plt.clf()
plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0, label="data")
# Plot the prediction and the 1-sigma uncertainty
sd = np.sqrt(var)
art = plt.fill_between(t, mu + sd, mu - sd, color="C1", alpha=0.3)
art.set_edgecolor("none")
plt.plot(t, mu, color="C1", label="prediction")
Example 27
ms = Maelstrom(LC.time, LC.flux, max_peaks=5, fmin=5, fmax=48)
# ms.first_look()

print(f"Oscillation modes are at {ms.freq}")

period_guess = ms.get_period_estimate()
print(f"The orbital period is around {period_guess:.2f} days")

ms.setup_orbit_model(period=period_guess)
opt = ms.optimize()

td_time, td_td = ms.get_time_delay()
td_average = np.average(td_td, axis=-1, weights=ms.get_weights())

plt.figure(figsize=(8, 8))
with ms:
    plt.plot(ms.time,
             xo.eval_in_model(ms.lc_model, opt),
             c='blue',
             linewidth=0.5,
             label='Maelstrom')
    plt.plot(ms.time, ms.flux, '.k', label='Data')
    plt.xlim(200, 205)
    plt.ylim(-4, 4)
    plt.xlabel('Time [day]', fontsize=16)
    plt.ylabel('Flux [ppt]', fontsize=16)
    plt.title('Optimized Light Curve', fontsize=16)
    plt.legend()
    plt.savefig('lightcurve_opt.png')
Example 28
    # Set up the RV model and save it as a deterministic
    # for plotting purposes later
    vrad = orbit.get_radial_velocity(x, K=tt.exp(logK))
    if N_pl == 1:
        vrad = vrad[:, None]

    # Define the background model
    A = np.vander(x - 0.5*(x.min() + x.max()), 3)
    bkg = tt.dot(A, trend)

    # Sum over planets and add the background to get the full model
    rv_model = tt.sum(vrad, axis=-1) + bkg

    # Simulate the data
    y_true = xo.eval_in_model(rv_model)
    y = y_true + yerr * np.random.randn(len(yerr))

    # Compute the prediction
    vrad_pred = orbit.get_radial_velocity(t, K=tt.exp(logK))
    if N_pl == 1:
        vrad_pred = vrad_pred[:, None]
    A_pred = np.vander(t - 0.5*(x.min() + x.max()), 3)
    bkg_pred = tt.dot(A_pred, trend)
    rv_model_pred = tt.sum(vrad_pred, axis=-1) + bkg_pred

    # Likelihood
    err = yerr
    # err = tt.sqrt(yerr**2 + tt.exp(2*logs))
    pm.Normal("obs", mu=rv_model, sd=err, observed=y)
Example 29
    # The spectral basis
    mu_vT = np.ones(K)
    cov_vT = 1e-2 * np.eye(K)
    vT = pm.MvNormal("vT", mu_vT, cov_vT, shape=(K, ))
    vT = tt.reshape(vT, (1, K))

    # Compute the model
    uvT = tt.reshape(tt.dot(u, vT), (N * K, 1))
    f_model = tt.reshape(ts.dot(D, uvT), (M * Kobs, ))

    # Track some values for plotting later
    pm.Deterministic("f_model", f_model)

    # Save our initial guess
    f_model_guess = xo.eval_in_model(f_model)

    # The likelihood function assuming known Gaussian uncertainty
    pm.Normal("obs", mu=f_model, sd=ferr, observed=f)

# Maximum likelihood solution
with model:
    map_soln = xo.optimize()

# Plot some stuff
fig, ax = plt.subplots(1)
ax.plot(lam, I0)
ax.plot(lam, map_soln["vT"].reshape(-1), 'o')

fig, ax = plt.subplots(M, figsize=(3, 8), sharex=True, sharey=True)
F = f.reshape(M, Kobs)
Example 30
prange
pshape
psum
make_file_path


'''
import numpy as np
import pymc3 as pm
import exoplanet as xo

# flatten lists
flatten = lambda l: [item for sublist in l for item in sublist]

# print in pymc3 model
pmprint = lambda x: print(xo.eval_in_model(x))
pmpshape = lambda x: print(np.shape(xo.eval_in_model(x)))

# useful print statements
prange = lambda _: print('min:', np.min(_), 'max:', np.max(_))
pshape = lambda _: print(np.shape(_))
psum = lambda _: print(np.sum(_))
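
# Usage sketch for the print helpers above (hypothetical; evaluates at the
# model test point, assuming only this file's imports):
#
#     with pm.Model():
#         x = pm.Normal("x", 0.0, 1.0, shape=3)
#         pmpshape(x)  # prints (3,)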


## make file path names
def make_file_path(directory, array_kwargs, extra_string=None, ext='.dat'):
    s = '_'
    string_kwargs = [str(int(i)) for i in array_kwargs]
    string_kwargs = np.array(string_kwargs, dtype='U45')
    if (extra_string is not None) and (len(extra_string) > 45):
        raise TypeError('extra_string must be at most 45 characters')