Example #1
File: k2.py  Project: pombreda/ketu
def prepare_characterization(light_curve_file, basis_file,
                             periods, time0s, rors, impacts,
                             es=None):
    # Download and process the light curves.
    pipe = K2Data(cache=False)
    pipe = K2Likelihood(pipe, cache=False)
    query = dict(
        basis_file=os.path.abspath(basis_file),
        light_curve_file=os.path.abspath(light_curve_file)
    )
    r = pipe.query(**query)

    lc = r.model_light_curves[0]

    # Set up the initial system model.
    star = transit.Central()
    s = transit.System(star)
    for i in range(len(periods)):
        planet = transit.Body(r=rors[i],
                              period=periods[i],
                              t0=time0s[i] % periods[i],
                              b=impacts[i],
                              e=0.0 if es is None else es[i])
        s.add_body(planet)

    return ProbabilisticModel(lc, s)
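
A minimal sketch of how this helper might be invoked; the file paths and single-planet parameters below are placeholders, not values from the project:

# Hypothetical invocation; paths and parameters are placeholders.
model = prepare_characterization(
    "lightcurve.fits",   # light_curve_file (placeholder path)
    "basis.h5",          # basis_file (placeholder path)
    periods=[10.0], time0s=[2.5], rors=[0.03], impacts=[0.2])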
Example #2
File: inject.py  Project: pombreda/ketu
    def get_result(self, query, parent_response):
        # Parse the arguments.
        injections = query["injections"]
        if not len(injections):
            return dict(target_datasets=parent_response.target_datasets)

        # Build the system.
        q1 = query["q1"]
        q2 = query["q2"]
        mstar = query["mstar"]
        rstar = query["rstar"]
        s = transit.System(transit.Central(q1=q1, q2=q2, mass=mstar,
                                           radius=rstar))

        # Loop over injected bodies and add them to the system.
        for inj in injections:
            body = transit.Body(r=inj["radius"], period=inj["period"],
                                t0=inj["t0"], b=inj.get("b", 0.0),
                                e=inj.get("e", 0.0),
                                pomega=inj.get("pomega", 0.0))
            s.add_body(body)
            try:
                body.ix
            except ValueError:
                logging.warning("Removing planet with invalid impact parameter")
                s.bodies.pop()

        # Inject the transit into each dataset.
        results = []
        for dataset in parent_response.target_datasets:
            lc = InjectedLightCurve(dataset)
            lc.flux[lc.m] *= s.light_curve(lc.time[lc.m])
            results.append(lc)

        return dict(target_datasets=results, injected_system=s)
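
The query consumed by get_result is a plain dict; a sketch of one possible shape, with placeholder values for the keys the method reads:

# Placeholder query matching the keys read in get_result above.
query = dict(
    q1=0.5, q2=0.5,        # limb-darkening parameters for the Central
    mstar=1.0, rstar=1.0,  # stellar mass and radius
    injections=[dict(radius=0.02, period=25.0, t0=3.1)],
)
# b, e, and pomega are optional per injection and default to 0.0.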
Example #3
    def __init__(self, fn, median=True):
        self.t, self.f, self.fe, self.truth = load_data(fn, median)
        self.ivar = 1.0 / self.fe**2
        self.central = transit.Central(q1=self.truth["q1"],
                                       q2=self.truth["q2"])
        self.system = transit.System(self.central)
        self.body = transit.Body(period=self.truth["period"],
                                 r=self.truth["r"],
                                 b=self.truth["b"],
                                 t0=self.truth["t0"])
        self.system.add_body(self.body)
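
Once built, the system is typically evaluated against the loaded time stamps; a minimal sketch, assuming m is an instance of the class above (its name is not shown in the snippet) and numpy is imported as np:

resid = m.f - m.system.light_curve(m.t)  # data minus transit model
chi2 = np.sum(resid**2 * m.ivar)         # chi-square using the stored inverse variances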
Example #4
def lightcurve(star, planet, t):
    """
    Generate a light curve for the given values.
    Star    - [mass,radius] in stellar radii
    Planet  - [radius,mass,period,t0]
    t       - the array for time points from the data
    """

    # Build the transiting system.
    star = transit.Central(star[0], star[1])

    s = transit.System(star)

    #body = transit.Body(r=0.11/0.74,m=0.002375/0.74,period=1.337,to=0.99,b=0.2,e=0.0167)
    body = transit.Body(r=planet[0],
                        mass=planet[1],
                        period=planet[2],
                        t0=planet[3],
                        b=0.029)
    s.add_body(body)

    # Compute the light curve for given values of t
    f = s.light_curve(t)

    return f


#lightcurve([0.74,0.713],[0.1,0.001,1.33712,1], np.arange(0,2,1e-4))
#
#star = transit.Central(0.74,0.713)
#s = transit.System(star)
#body = transit.Body(r=0.1,mass=0.01, period=1.33712, t0=0, b=0)
#s.add_body(body)
#
#t = np.arange(0, 2, 1e-4)
#
#f = s.light_curve(t)
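
A runnable version of the commented-out call above, assuming numpy and transit are importable; the values are the same placeholders used in the comment:

import numpy as np
import transit

t = np.arange(0, 2, 1e-4)
f = lightcurve([0.74, 0.713], [0.1, 0.001, 1.33712, 1.0], t)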
Example #5
rstar = float(cand.radius)
rstar_err1 = float(cand.radius_err1)
rstar_err2 = float(cand.radius_err2)
ln_rstar = np.log(rstar)
ln_rstar_err = np.mean([np.log(rstar + rstar_err1) - np.log(rstar),
                        np.log(rstar) - np.log(rstar + rstar_err2)])

mstar = float(cand.mass)
mstar_err1 = float(cand.mass_err1)
mstar_err2 = float(cand.mass_err2)
ln_mstar = np.log(mstar)
ln_mstar_err = np.mean([np.log(mstar + mstar_err1) - np.log(mstar),
                        np.log(mstar) - np.log(mstar + mstar_err2)])

# Set up the model.
system = transit.System(transit.Central(mass=mstar, radius=rstar))
system.add_body(transit.Body(
    period=period, r=1.3*rp, b=b, t0=t0,
))

# Load the light curves.
lcs = peerless.data.load_light_curves_for_kic(kicid, min_break=10)
texp = lcs[0].texp
t = np.concatenate([lc.time for lc in lcs])
f = np.concatenate([lc.flux for lc in lcs])
fe = np.concatenate([lc.yerr + np.zeros_like(lc.time) for lc in lcs])

# Find the minimum allowed period.
m = np.isfinite(t)
min_period = max(t[m].max() - t0, t0 - t[m].min())
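
The stellar radius and mass blocks above repeat the same symmetrized log-uncertainty construction; a sketch of a helper capturing the pattern (err2 is assumed to carry a negative sign, matching its use above):

def ln_value_and_err(value, err1, err2):
    # Symmetrize asymmetric catalog errors in log space.
    ln_value = np.log(value)
    ln_err = np.mean([np.log(value + err1) - ln_value,
                      ln_value - np.log(value + err2)])
    return ln_value, ln_err

ln_rstar, ln_rstar_err = ln_value_and_err(rstar, rstar_err1, rstar_err2)
ln_mstar, ln_mstar_err = ln_value_and_err(mstar, mstar_err1, mstar_err2)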
Example #6
File: prepare.py  Project: pombreda/ketu
def prepare_characterization(kicid, periods, time0s, rors, impacts,
                             es=None,
                             data_window_hw=3.0, min_data_window_hw=0.5):
    # Download and process the light curves.
    pipe = Download()
    pipe = Prepare(pipe)
    pipe = Discontinuity(pipe)
    r = pipe.query(kicid=kicid)

    # Find the data chunks that hit a transit.
    lcs = []
    for lc in r.light_curves:
        # Build the mask of times that hit transits.
        m = np.zeros_like(lc.time, dtype=bool)
        mmin = np.zeros_like(lc.time, dtype=bool)
        for p, t0 in zip(periods, time0s):
            hp = 0.5 * p
            t0 = t0 % p
            dt = np.abs((lc.time - t0 + hp) % p - hp)
            m += dt < data_window_hw
            mmin += dt < min_data_window_hw

        # Trim the dataset and set up the Gaussian Process model.
        if np.any(mmin) and np.sum(m) > 10:
            # Re-normalize the trimmed light curve.
            mu = np.median(lc.flux[m])
            lc.time = np.ascontiguousarray(lc.time[m])
            lc.flux = np.ascontiguousarray(lc.flux[m] / mu)
            lc.ferr = np.ascontiguousarray(lc.ferr[m] / mu)

            # Make sure that the light curve knows its integration time.
            lc.texp = kplr.EXPOSURE_TIMES[1] / 86400.0

            # Heuristically guess the Gaussian Process parameters.
            lc.factor = 1000.0
            amp = np.median((lc.factor * (lc.flux-1.0))**2)
            kernel = amp*kernels.Matern32Kernel(4.0)
            lc.gp = george.GP(kernel)

            # Run an initial computation of the GP.
            lc.gp.compute(lc.time, lc.ferr * lc.factor)

            # Save this light curve.
            lcs.append(lc)

    # Set up the initial system model.
    spars = r.star.huber
    star = transit.Central(mass=spars.M, radius=spars.R)
    s = transit.System(star)
    for i in range(len(periods)):
        planet = transit.Body(r=rors[i] * star.radius,
                              period=periods[i],
                              t0=time0s[i] % periods[i],
                              b=impacts[i],
                              e=0.0 if es is None else es[i])
        s.add_body(planet)

    # Approximate the stellar mass and radius measurements as log-normal.
    q = np.array(spars[["R", "E_R", "e_R"]], dtype=float)
    lnsr = (np.log(q[0]),
            1.0 / np.mean([np.log(q[0] + q[1]) - np.log(q[0]),
                           np.log(q[0]) - np.log(q[0] - q[2])]) ** 2)
    q = np.array(spars[["M", "E_M", "e_M"]], dtype=float)
    lnsm = (np.log(q[0]),
            1.0 / np.mean([np.log(q[0] + q[1]) - np.log(q[0]),
                           np.log(q[0]) - np.log(q[0] - q[2])]) ** 2)

    return ProbabilisticModel(lcs, s, lnsr, lnsm)
Example #7
def simulation_system(smass, srad, q1, q2, period, t0, rp, b, e, pomega):
    s = transit.System(transit.Central(mass=smass, radius=srad, q1=q1, q2=q2))
    s.add_body(
        transit.Body(period=period, t0=t0, r=rp, b=b, e=e, pomega=pomega))
    return s
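
A minimal usage sketch with placeholder parameters for a Sun-like star and a small short-period planet, assuming numpy and transit are importable:

import numpy as np

s = simulation_system(1.0, 1.0, 0.5, 0.5,
                      period=10.0, t0=1.0, rp=0.02, b=0.3, e=0.0, pomega=0.0)
t = np.arange(0.0, 20.0, 0.02)
f = s.light_curve(t)  # model flux evaluated at the requested times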
Example #8
File: fit.py  Project: mbadenas/peerless
def setup_fit(args, fit_kois=False, max_points=300):
    kicid = args["kicid"]

    # Initialize the system.
    system = transit.System(
        transit.Central(
            flux=1.0,
            radius=args["srad"],
            mass=args["smass"],
            q1=0.5,
            q2=0.5,
        ))
    system.add_body(
        transit.Body(
            radius=args["radius"],
            period=args["period"],
            t0=args["t0"],
            b=args["impact"],
            e=1.123e-7,
            omega=0.0,
        ))
    if fit_kois:
        kois = KOICatalog().df
        kois = kois[kois.kepid == kicid]
        for _, row in kois.iterrows():
            system.add_body(
                transit.Body(
                    radius=float(row.koi_ror) * args["srad"],
                    period=float(row.koi_period),
                    t0=float(row.koi_time0bk) % float(row.koi_period),
                    b=float(row.koi_impact),
                    e=1.123e-7,
                    omega=0.0,
                ))
    system.thaw_parameter("*")
    system.freeze_parameter("bodies*ln_mass")

    # Load the light curves.
    lcs, _ = load_light_curves_for_kic(kicid, remove_kois=not fit_kois)

    # Which light curves should be fit?
    fit_lcs = []
    other_lcs = []
    gps = []
    for lc in lcs:
        f = system.light_curve(lc.time, lc.texp)
        if np.any(f < 1.0):
            i = np.argmin(f)
            inds = np.arange(len(f))
            m = np.zeros(len(lc.time), dtype=bool)
            m[np.sort(np.argsort(np.abs(inds - i))[:max_points])] = True
            if np.any(f[~m] < system.central.flux):
                m = np.ones(len(lc.time), dtype=bool)
            lc.time = np.ascontiguousarray(lc.time[m])
            lc.flux = np.ascontiguousarray(lc.flux[m])
            lc.ferr = np.ascontiguousarray(lc.ferr[m])
            fit_lcs.append(lc)
            var = np.median((lc.flux - np.median(lc.flux))**2)
            kernel = 1e6 * var * kernels.Matern32Kernel(2**2)
            gp = george.GP(kernel,
                           white_noise=2 * np.log(np.mean(lc.ferr) * 1e3),
                           fit_white_noise=True)
            gp.compute(lc.time, lc.ferr * 1e3)
            gps.append(gp)
        else:
            other_lcs.append(lc)

    model = TransitModel(args, gps, system, args["smass"], args["smass_err"],
                         args["srad"], args["srad_err"], fit_lcs, other_lcs)
    return model
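
The args mapping must provide at least the keys that setup_fit reads above; a sketch with placeholder values:

args = dict(kicid=10000000,            # placeholder KIC id
            srad=1.0, srad_err=0.1,    # stellar radius and its uncertainty
            smass=1.0, smass_err=0.1,  # stellar mass and its uncertainty
            radius=0.1, period=500.0, t0=150.0, impact=0.3)
model = setup_fit(args)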
Example #9
def search(kicid_and_injection=None,
           lcs=None,
           tau=0.6,
           detrend_hw=2.0,
           remove_kois=True,
           grid_frac=0.25,
           noise_hw=15.0,
           detect_thresh=25.0,
           max_fit_data=500,
           max_peaks=3,
           min_datapoints=10,
           all_models=False,
           verbose=False):
    """
    :param tau:
        The transit duration. (default: 0.6)

    :param detrend_hw:
        Half width of running window for de-trending. (default: 2.0)

    :param remove_kois:
        Remove data points near known KOI transits. (default: True)

    :param grid_frac:
        The search grid spacing as a fraction of the duration. (default: 0.25)

    :param noise_hw:
        Half width of running window for noise estimation. (default: 15.0)

    :param detect_thresh:
        Relative S/N detection threshold. (default: 25.0)

    :param max_fit_data:
        The maximum number of data points for fitting. (default: 500)

    :param max_peaks:
        The maximum number of peaks to analyze in detail. (default: 3)

    :param min_datapoints:
        The minimum number of in-transit data points. (default: 10)

    :param verbose:
        More verbose printing. (default: False)

    """
    if kicid_and_injection is not None:
        kicid, injection = kicid_and_injection
    else:
        kicid, injection = None, None
    inject = injection is not None

    system = None
    if inject:
        system = transit.System(
            transit.Central(q1=injection["q1"], q2=injection["q2"]))
        system.add_body(
            transit.Body(
                radius=injection["ror"],
                period=injection["period"],
                b=injection["b"],
                e=injection["e"],
                omega=injection["omega"],
                t0=injection["t0"],
            ))
        injection["ncadences"] = 0
        injection["recovered"] = False

    if lcs is None and kicid is None:
        raise ValueError("you must specify 'lcs' or 'kicid'")
    if lcs is None:
        lcs, ncad = load_light_curves_for_kic(kicid,
                                              detrend_hw=detrend_hw,
                                              remove_kois=remove_kois,
                                              inject_system=system)
        if inject:
            injection["ncadences"] += ncad

    else:
        kicid = "unknown-target"

    # Loop over light curves and search each one.
    time = []
    chunk = []
    depth = []
    depth_ivar = []
    s2n = []
    for i, lc in enumerate(lcs):
        time.append(np.arange(lc.time.min(), lc.time.max(), grid_frac * tau))
        d, ivar, s = cython_search(tau, time[-1], lc.time, lc.flux - 1,
                                   1 / lc.ferr**2)
        depth.append(d)
        depth_ivar.append(ivar)
        s2n.append(s)
        chunk.append(i + np.zeros(len(time[-1]), dtype=int))
    time = np.concatenate(time)
    chunk = np.concatenate(chunk)
    depth = np.concatenate(depth)
    depth_ivar = np.concatenate(depth_ivar)
    s2n = np.concatenate(s2n)

    # Compute the depth S/N time series and smooth it to estimate a background
    # noise level.
    m = depth_ivar > 0.0
    noise = np.nan + np.zeros_like(s2n)
    noise[m] = running_median_trend(time[m], np.abs(s2n[m]), noise_hw)

    results = SearchResults(kicid,
                            lcs,
                            tau,
                            detect_thresh,
                            time,
                            depth,
                            depth_ivar,
                            s2n,
                            noise,
                            injection=injection)

    # Find peaks above the fiducial threshold.
    m = s2n > detect_thresh * noise
    peaks = []
    while np.any(m):
        i = np.argmax(s2n[m])
        t0 = time[m][i]
        peaks.append(
            dict(
                kicid=kicid,
                t0=t0 + 0.5 * tau,
                s2n=s2n[m][i],
                bkg=noise[m][i],
                depth=depth[m][i],
                depth_ivar=depth_ivar[m][i],
                chunk=chunk[m][i],
            ))
        m &= np.abs(time - t0) > 2 * tau

    if verbose:
        print("Found {0} raw peaks".format(len(peaks)))

    if not len(peaks):
        return results

    for i, peak in enumerate(peaks):
        peak["num_peaks"] = len(peaks)
        peak["peak_id"] = i

    if verbose and len(peaks) > max_peaks:
        logging.warning("truncating peak list")
    peaks = peaks[:max_peaks]

    # For each peak, plot the diagnostic plots and vet.
    for i, peak in enumerate(peaks):
        # Vetting.
        t0 = peak["t0"]
        d = peak["depth"]
        chunk = peak["chunk"]
        lc0 = lcs[chunk]
        x = lc0.raw_time
        y = lc0.raw_flux
        yerr = lc0.raw_ferr
        ndata = np.sum(np.abs(x - t0) < 0.5 * tau)
        if ndata < min_datapoints:
            if verbose:
                logging.warning(
                    "there are only {0} data points in transit".format(ndata))
            continue

        # Limit number of data points in chunk.
        inds = np.sort(np.argsort(np.abs(t0 - x))[:max_fit_data])
        x = np.ascontiguousarray(x[inds])
        y = np.ascontiguousarray(y[inds])
        yerr = np.ascontiguousarray(yerr[inds])
        cen_x = np.ascontiguousarray(lc0.mom_cen_1[inds])
        cen_y = np.ascontiguousarray(lc0.mom_cen_2[inds])

        peak["data"] = (x, y, yerr, cen_x, cen_y)

        if verbose:
            print("{0} data points in chunk".format(len(x)))

        for k in [
                "channel", "skygroup", "module", "output", "quarter", "season"
        ]:
            peak[k] = lc0.meta[k]

        peak["chunk_min_time"] = x.min()
        peak["chunk_max_time"] = x.max()

        # Mean models:
        # 1. constant
        constant = np.median(y)

        # 2. transit
        m = np.abs(x - t0) < tau
        ind = np.arange(len(x))[m][np.argmin(y[m])]
        ror = np.sqrt(max(d, 1.0 - y[ind]))
        system = transit.SimpleSystem(
            period=3000.0,
            t0=x[ind],
            ror=ror,
            duration=tau,
            impact=0.5,
        )
        system.freeze_parameter("ln_period")
        system.freeze_parameter("q1_param")
        system.freeze_parameter("q2_param")
        best = (np.inf, system.t0, 0.0, ror)
        for dur in np.linspace(0.1 * tau, 2 * tau, 50):
            system.duration = dur
            for t0 in x[ind] + np.linspace(-0.1 * tau, 0.1 * tau, 7):
                system.t0 = t0
                system.ror = ror
                mu = system.get_value(x)
                A = np.concatenate((np.vander(x, 2).T, [mu]), axis=0).T
                try:
                    w = np.linalg.solve(np.dot(A.T, A), np.dot(A.T, y))
                except np.linalg.LinAlgError:
                    continue
                d = np.sum((y - np.dot(A, w))**2)
                if d < best[0]:
                    best = (d, t0, dur, np.sqrt(w[-1]) * ror)
        system.t0 = best[1]
        system.duration = best[2]
        system.ror = best[3]

        # 3. step
        best = (np.inf, 0, 0.0, 0.0)
        n = 2
        for ind in range(1, len(y) - 2 * n):
            a = slice(ind, ind + n)
            b = slice(ind + n, ind + 2 * n)
            step = StepModel(
                value1=np.mean(y[:ind]),
                value2=np.mean(y[ind + 2 * n:]),
                height1=np.mean(y[a]) - np.mean(y[:ind]),
                height2=np.mean(y[ind + 2 * n:]) - np.mean(y[b]),
                log_width_plus=0.0,
                log_width_minus=0.0,
                t0=0.5 * (x[ind + n - 1] + x[ind + n]),
            )

            best_minus = (np.inf, 0.0)
            m = x < step.t0
            for w in np.linspace(-4, 2, 20):
                step.log_width_minus = w
                d = np.sum((y[m] - step.get_value(x[m]))**2)
                if d < best_minus[0]:
                    best_minus = (d, w)

            best_plus = (np.inf, 0.0)
            m = x >= step.t0
            for w in np.linspace(-4, 2, 20):
                step.log_width_plus = w
                d = np.sum((y[m] - step.get_value(x[m]))**2)
                if d < best_plus[0]:
                    best_plus = (d, w)

            d = best_minus[0] + best_plus[0]
            if d < best[0]:
                best = (d, ind, best_minus[1], best_plus[1])

        _, ind, wm, wp = best
        a = slice(ind, ind + n)
        b = slice(ind + n, ind + 2 * n)
        step = StepModel(
            value1=np.mean(y[:ind]),
            value2=np.mean(y[ind + 2 * n:]),
            height1=np.mean(y[a]) - np.mean(y[:ind]),
            height2=np.mean(y[ind + 2 * n:]) - np.mean(y[b]),
            log_width_plus=wp,
            log_width_minus=wm,
            t0=0.5 * (x[ind + n - 1] + x[ind + n]),
        )
        check_gradient(step, x)

        # 4. box:
        inds = np.argsort(np.diff(y))
        inds = np.sort([inds[0], inds[-1]])
        boxes = []
        for tmn, tmx in (0.5 * (x[inds] + x[inds + 1]), (t0 - 0.5 * tau,
                                                         t0 + 0.5 * tau)):
            boxes.append(BoxModel(tmn, tmx, data=(x, y)))
            check_gradient(boxes[-1], x)

        # # 5. vee
        # vee = VeeModel(depth=1., t0=system.t0, log_b=-0.5,
        #                log_a=np.log(0.1/24.))
        # check_gradient(vee, x)

        # Loop over models and compare them.
        models = OrderedDict([
            ("transit", system),
            # ("vee", vee),
            ("box1", boxes[1]),
            ("step", step),
            ("gp", constant),
            ("box2", boxes[0]),
        ])
        peak["gps"] = OrderedDict()
        peak["pred_cens"] = []
        for name, mean_model in models.items():
            kernel = np.var(y) * kernels.Matern32Kernel(2**2)
            gp = george.GP(kernel,
                           mean=mean_model,
                           fit_mean=True,
                           white_noise=2 * np.log(np.mean(yerr)),
                           fit_white_noise=True)
            gp.compute(x, yerr)

            # Set up some bounds.
            bounds = gp.get_bounds()
            n = gp.get_parameter_names()

            if name == "transit":
                bounds[n.index("mean:ln_duration")] = np.log(
                    (system.duration / 2.0, system.duration * 2.0))
                bounds[n.index("mean:t0")] = (t0 - 0.5 * tau, t0 + 0.5 * tau)
                if "mean:impact" in n:
                    bounds[n.index("mean:impact")] = (0, 1.0)
                if "mean:q1_param" in n:
                    bounds[n.index("mean:q1_param")] = (-10, 10)
                    bounds[n.index("mean:q2_param")] = (-10, 10)

            bounds[n.index("kernel:k2:ln_M_0_0")] = (np.log(0.1), None)
            bounds[n.index("white:value")] = (2 *
                                              np.log(0.5 * np.median(yerr)),
                                              None)
            bounds[n.index("kernel:k1:ln_constant")] = \
                (2*np.log(np.median(yerr)), None)

            # Optimize.
            initial_vector = np.array(gp.get_vector())
            r = minimize(gp.nll,
                         gp.get_vector(),
                         jac=gp.grad_nll,
                         args=(y, ),
                         method="L-BFGS-B",
                         bounds=bounds)
            gp.set_vector(r.x)
            peak["gps"][name] = (gp, y)

            # Compute the -0.5*BIC.
            peak["lnlike_{0}".format(name)] = -r.fun
            peak["bic_{0}".format(
                name)] = -r.fun - 0.5 * len(r.x) * np.log(len(x))

            if verbose:
                print("Peak {0}:".format(i))
                print("Model: '{0}'".format(name))
                print("Converged: {0}".format(r.success))
                if not r.success:
                    print("Message: {0}".format(r.message))
                print("Log-likelihood: {0}".format(
                    peak["lnlike_{0}".format(name)]))
                print("-0.5 * BIC: {0}".format(peak["bic_{0}".format(name)]))
                print("Parameters:")
                for k, v0, v in zip(gp.get_parameter_names(), initial_vector,
                                    gp.get_vector()):
                    print("  {0}: {1:.4f} -> {2:.4f}".format(k, v0, v))
                print()

            # Initialize one of the boxes using the transit shape.
            if name == "transit" and np.any(system.get_value(x) < 1.0):
                depth = 1.0 - float(system.get_value(system.t0))

                models["box1"].mn = system.t0 - 0.5 * system.duration
                models["box1"].mx = system.t0 + 0.5 * system.duration

                # models["vee"].t0 = system.t0
                # models["vee"].log_b = np.log(0.5*system.duration)
                # models["vee"].depth = depth

                # Fit the centroids.
                tm = (1.0 - system.get_value(x)) / depth
                A = np.vander(tm, 2)
                AT = A.T

                offset = 0.0
                offset_err = 0.0
                for ind, c in enumerate((cen_x, cen_y)):
                    err = np.median(np.abs(np.diff(c)))
                    kernel = np.var(c) * kernels.Matern32Kernel(2**2)
                    gp = george.GP(kernel,
                                   white_noise=2 * np.log(np.mean(err)),
                                   fit_white_noise=True,
                                   mean=CentroidModel(tm, a=0.0, b=0.0),
                                   fit_mean=True)
                    gp.compute(x, err)

                    r = minimize(gp.nll,
                                 gp.get_vector(),
                                 jac=gp.grad_nll,
                                 args=(c - np.mean(c), ),
                                 method="L-BFGS-B")
                    gp.set_vector(r.x)

                    C = gp.get_matrix(x)
                    C[np.diag_indices_from(C)] += err**2
                    alpha = np.linalg.solve(C, A)
                    ATA = np.dot(AT, alpha)
                    mu = np.mean(c)
                    a = np.linalg.solve(C, c - mu)
                    w = None
                    for _ in range(10):
                        try:
                            w = np.linalg.solve(ATA, np.dot(AT, a))
                        except np.linalg.LinAlgError:
                            ATA[np.diag_indices_from(ATA)] *= 1 + 1e-5
                            w = None
                        else:
                            break
                    if w is None:
                        raise np.linalg.LinAlgError("Couldn't fix ATA")

                    offset += w[0]**2
                    offset_err = np.linalg.inv(ATA)[0, 0] * w[0]**2
                    peak["pred_cens"].append(np.dot(A, w) + mu)

                offset_err = np.sqrt(offset_err / offset)
                offset = np.sqrt(offset)

                peak["centroid_offset"] = offset
                peak["centroid_offset_err"] = offset_err

            elif name == "transit":
                peak["centroid_offset"] = 0.0
                peak["centroid_offset_err"] = np.inf

            if (peak["bic_{0}".format(name)] > peak["bic_transit"]
                    and not all_models):
                break

            # Deal with outliers.
            if name != "gp":
                continue
            N = len(r.x) + 1
            peak["lnlike_outlier"] = -r.fun
            peak["bic_outlier"] = -r.fun - 0.5 * N * np.log(len(x))
            best = (-r.fun, 0)
            for j in np.arange(len(x))[np.abs(x - t0) < 0.5 * tau]:
                y0 = np.array(y)
                y0[j] = np.median(y[np.arange(len(y)) != j])
                ll = gp.lnlikelihood(y0)
                if ll > best[0]:
                    best = (ll, j)

            # Optimize the outlier model:
            m = np.arange(len(y)) != best[1]
            y0 = np.array(y)
            y0[~m] = np.median(y0[m])
            kernel = np.var(y0) * kernels.Matern32Kernel(2**2)
            gp = george.GP(kernel,
                           mean=np.median(y0),
                           fit_mean=True,
                           white_noise=2 * np.log(np.mean(yerr)),
                           fit_white_noise=True)
            gp.compute(x, yerr)

            r = minimize(gp.nll,
                         gp.get_vector(),
                         jac=gp.grad_nll,
                         args=(y0, ),
                         method="L-BFGS-B",
                         bounds=bounds)
            gp.set_vector(r.x)
            peak["lnlike_outlier"] = -r.fun
            peak["bic_outlier"] = -r.fun - 0.5 * N * np.log(len(x))
            peak["gps"]["outlier"] = (gp, y0)

            if verbose:
                print("Peak {0}:".format(i))
                print("Model: 'outlier'")
                print("Converged: {0}".format(r.success))
                print("Log-likelihood: {0}".format(peak["lnlike_outlier"]))
                print("-0.5 * BIC: {0}".format(peak["bic_outlier"]))
                print("Parameters:")
                for k, v in zip(gp.get_parameter_names(), gp.get_vector()):
                    print("  {0}: {1:.4f}".format(k, v))
                print()

        # Save the transit parameters.
        peak["transit_impact"] = system.impact
        peak["transit_duration"] = system.duration
        peak["transit_ror"] = system.ror
        peak["transit_time"] = system.t0
        peak["transit_depth"] = 1.0 - float(system.get_value(system.t0))

        # Accept the peak?
        accept_bic = all(
            peak["bic_transit"] >= peak.get("bic_{0}".format(k), -np.inf)
            for k in models) and (peak["bic_transit"] > peak["bic_outlier"])
        accept_time = ((peak["transit_time"] - 1.0 * peak["transit_duration"] >
                        peak["chunk_min_time"]) and
                       (peak["transit_time"] + 1.0 * peak["transit_duration"] <
                        peak["chunk_max_time"]))
        accept = accept_bic and accept_time
        peak["accept_bic"] = accept_bic
        peak["accept_time"] = accept_time

        # Save the injected parameters.
        if inject:
            for k in ["t0", "period", "ror", "b", "e", "omega"]:
                peak["injected_{0}".format(k)] = injection[k]

            # Check for recovery.
            p = injection["period"]
            d = (peak["transit_time"] - injection["t0"] +
                 0.5 * p) % p - 0.5 * p
            peak["is_injection"] = np.abs(d) < peak["transit_duration"]
            results.injection["recovered"] |= accept

        # Save the peak.
        results.peaks.append(peak)

    return results
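
A minimal invocation sketch; the KIC id is a placeholder and no injection is performed, so the second element of the tuple is None:

results = search(kicid_and_injection=(10000000, None), verbose=True)
for peak in results.peaks:
    print(peak["t0"], peak["s2n"], peak["accept_bic"], peak["accept_time"])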
Example #10
from __future__ import division, print_function

import time
import numpy as np
import matplotlib.pyplot as pl

import emcee

from kplr import EXPOSURE_TIMES

import transit

texp = EXPOSURE_TIMES[1] / 86400.0

s = transit.System(transit.Central(dilution=0.05))
body = transit.Body(radius=0.2, mass=0.0, period=4.0, t0=2, b=0.0, e=0.4,
                    omega=0.5*np.pi + 0.01)
s.add_body(body)

s.thaw_parameter("*")
print(s.get_parameter_names())

x = np.linspace(0, 10.0, 1000)
yerr = 5e-4 * np.ones_like(x)
y = s.light_curve(x) + yerr * np.random.randn(len(x))

pl.plot(x, y, ".k")
pl.savefig("data.png")

p0 = s.get_vector()
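
The script stops after extracting the initial parameter vector; a sketch of the emcee sampling step that might follow, assuming a Gaussian likelihood and that the System exposes set_vector alongside the get_vector used above:

def log_prob(theta):
    # Gaussian log-likelihood of the simulated data given the model.
    s.set_vector(theta)
    try:
        model = s.light_curve(x, texp=texp)
    except ValueError:
        # Reject parameter vectors the model considers invalid.
        return -np.inf
    return -0.5 * np.sum(((y - model) / yerr) ** 2)

ndim, nwalkers = len(p0), 32
walkers = p0 + 1e-5 * np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
sampler.run_mcmc(walkers, 1000)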
Example #11
def inject(kicid, rng=6):
    # Download the data.
    client = kplr.API()
    kic = client.star(kicid)
    lcs = kic.get_light_curves(short_cadence=False)
    lc = lcs[np.random.randint(len(lcs))]

    # Read the data.
    data = lc.read()
    t = data["TIME"]
    f = data["SAP_FLUX"]
    fe = data["SAP_FLUX_ERR"]
    q = data["SAP_QUALITY"]

    # Remove missing points.
    m = np.isfinite(t) * np.isfinite(f) * np.isfinite(fe) * (q == 0)
    t, f, fe = t[m], f[m], fe[m]
    t -= t.min()

    # Build the transit system.
    s = transit.System(
        transit.Central(q1=np.random.rand(), q2=np.random.rand()))
    body = transit.Body(period=365.25,
                        b=np.random.rand(),
                        r=0.04,
                        t0=np.random.uniform(0, t.max()))  # random epoch in the baseline
    s.add_body(body)

    # Compute the transit model.
    texp = kplr.EXPOSURE_TIMES[1] / 86400.0  # Long cadence exposure time.
    model = s.light_curve(t, texp=texp)
    f *= model

    # Trim the dataset to include data only near the transit.
    m = np.abs(t - body.t0) < rng
    t, f, fe = t[m], f[m], fe[m]
    t -= body.t0

    # Save the injection as a FITS light curve.
    dt = [("TIME", float), ("SAP_FLUX", float), ("SAP_FLUX_ERR", float)]
    data = np.array(list(zip(t, f, fe)), dtype=dt)  # list() needed under Python 3
    hdr = dict(b=body.b,
               period=body.period,
               r=body.r,
               t0=0.0,
               q1=s.central.q1,
               q2=s.central.q2)
    fitsio.write("{0}-injection.fits".format(kicid),
                 data,
                 header=hdr,
                 clobber=True)

    # Plot the light curve.
    ppm = (f / np.median(f) - 1) * 1e6
    fig = pl.figure(figsize=(6, 6))
    ax = fig.add_subplot(111)
    ax.plot(t, ppm, ".k")
    ax.set_xlim(-rng, rng)
    ax.set_xlabel("time since transit [days]")
    ax.set_ylabel("relative flux [ppm]")
    ax.set_title("raw light curve")
    fig.subplots_adjust(left=0.2, bottom=0.2, top=0.9, right=0.9)
    fig.savefig("{0}-raw.pdf".format(kicid))
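
A minimal invocation with a placeholder KIC id; this writes <kicid>-injection.fits and <kicid>-raw.pdf to the working directory:

inject(10000000)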
Example #12
    # Throw away the datasets without transits.
    period, t0 = 295.963, 138.91
    hp = 0.5 * period
    selection = lambda lc: np.any(
        np.abs((lc[0] - t0 + hp) % period - hp) < 1.0)
    light_curves = list(filter(selection, light_curves))  # list() needed under Python 3
    print(len(light_curves))

    # Plot the raw data.
    for lc in light_curves:
        pl.plot(lc[0], lc[1], ".", ms=3)
    pl.savefig("raw_data.png")

    # Set up the initial system.
    system = transit.System(transit.Central(radius=0.95))
    planet = transit.Body(r=2.03 * 0.01, period=period, t0=t0, b=0.9)
    system.add_body(planet)
    texp = kplr.EXPOSURE_TIMES[1] / 60. / 60. / 24.
    mean_function = partial(system.light_curve, texp=texp)

    # Set up the Gaussian processes.
    pl.clf()
    offset = 0.001
    models = []
    for i, lc in enumerate(light_curves):
        dt = np.median(np.diff(lc[0])) * integrated_time(lc[1])
        kernel = np.var(lc[1]) * kernels.Matern32Kernel(dt**2)
        gp = george.GP(kernel, mean=mean_function, solver=george.HODLRSolver)
        gp.compute(lc[0], lc[2])
        models.append((gp, lc[1]))
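
With the (gp, flux) pairs collected in models, a joint log-likelihood over all chunks can be formed; a minimal sketch using the same george API seen elsewhere on this page:

def lnlike():
    # Sum the GP marginal log-likelihoods over the light-curve chunks.
    return sum(gp.lnlikelihood(f) for gp, f in models)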