Code Example #1
def loadfile(EPIC_id):
    star = everest.Everest(EPIC_id)
    t = numpy.delete(star.time, star.badmask)
    y = numpy.delete(star.fcor, star.badmask)
    t = numpy.array(t[~numpy.isnan(y)], dtype='float32')
    y = numpy.array(y[~numpy.isnan(y)], dtype='float32')
    return cleaned_array(t, y)
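A minimal usage sketch for loadfile; the EPIC ID is borrowed from other examples on this page, and cleaned_array is assumed to come from the transitleastsquares package:

# Hypothetical usage; assumes `import everest`, `import numpy`, and
# `from transitleastsquares import cleaned_array` at the top of the module
t, y = loadfile(201367065)
print(len(t), len(y))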
Code Example #2
File: photometry.py  Project: petigura/ktwo19
def _everest():
    """
    Load and prepare everest photometry
    """
    ephem = ktwo19.io.load_table('ephem-sinukoff16')
    star = everest.Everest(201505350)

    # Mask out transits as recommended by Luger
    pad = 1.0
    star.mask_planet(ephem.tc1, ephem.per1, dur=(ephem.dur1 + pad) / 24)
    star.mask_planet(ephem.tc2, ephem.per2, dur=(ephem.dur2 + pad) / 24)
    star.mask_planet(ephem.tc3, ephem.per3, dur=(ephem.dur3 + pad) / 24)
    star.compute()

    # Clip out outliers
    mask = np.zeros_like(star.time)
    mask[star.outmask] = 1
    mask[star.badmask] = 1
    mask[star.transitmask] = 0
    fm = ma.masked_array(star.flux, mask)
    fm.data[:] = fm.data / ma.median(fm)
    fm.mask = fm.mask | (fm < 0.98)
    plot(star.time, fm, '.')

    df = dict(t=star.time, f=fm.data, mask=fm.mask)
    df = pd.DataFrame(df)
    df = LE(df.to_records(index=False))
    df = df['t f mask'.split()]
    df = pd.DataFrame(df)
    return df
Code Example #3
    def __init__(self,
                 EPIC,
                 K2name,
                 maxperiod=30,
                 window_length=301,
                 sigma_step=1,
                 sigma_max=10,
                 debug_mode=False):
        self.K2name = K2name
        self.EPIC = EPIC
        # Search K2 seasons for a light curve matching this EPIC ID
        for i in np.arange(1, 18, 1):
            try:
                self.star = everest.Everest(EPIC, quiet=True, season=i)
                break
            except Exception:
                continue

        if not hasattr(self, 'star'):
            raise NameError("EPIC ID has no corresponding star")

        self.rawlc = lk.LightCurve(time=self.star.time, flux=self.star.flux)

        sigma = self.__findBestSigma(maxperiod, window_length, sigma_step,
                                     sigma_max, debug_mode)
        self.__generateFlat(window_length, sigma)
        self.__generatePeriodogram(maxperiod)

        maxID = np.argmax(self.periodogram.power)
        self.best_fit_period = self.periodogram.period[maxID]
        self.transit_time = self.periodogram.transit_time[maxID]
        self.transit_depth = self.periodogram.depth[maxID]

        self.foldedlc = self.flat.fold(period=self.best_fit_period,
                                       t0=self.transit_time)
Code Example #4
File: standalone.py  Project: rodluger/everest
def DetrendFITS(fitsfile, raw=False, season=None, clobber=False, **kwargs):
    """
    De-trend a K2 FITS file using :py:class:`everest.detrender.rPLD`.

    :param str fitsfile: The full path to the FITS file
    :param ndarray aperture: A 2D integer array corresponding to the \
           desired photometric aperture (1 = in aperture, 0 = outside \
           aperture). Default is to interactively select an aperture.
    :param kwargs: Any kwargs accepted by :py:class:`everest.detrender.rPLD`.

    :returns: An :py:class:`everest.Everest` instance.

    """
    # Get info
    EPIC = pyfits.getheader(fitsfile, 0)['KEPLERID']
    if season is None:
        season = pyfits.getheader(fitsfile, 0)['CAMPAIGN']
        if season is None or season == "":
            season = 0
    everestfile = os.path.join(
        everest.missions.k2.TargetDirectory(EPIC, season),
        everest.missions.k2.FITSFile(EPIC, season))

    # De-trend?
    if clobber or not os.path.exists(everestfile):

        # Get raw data
        data = GetData(fitsfile, EPIC, season, clobber=clobber, **kwargs)

        # De-trend
        model = everest.rPLD(EPIC,
                             data=data,
                             season=season, debug=True,
                             clobber=clobber, **kwargs)

        # Publish it
        everest.fits.MakeFITS(model)
        shutil.copyfile(os.path.join(model.dir, model.name + '.pdf'),
                        os.path.join(model.dir,
                                     model._mission.DVSFile(model.ID,
                                                            model.season,
                                                            model.cadence)))

    # Return an Everest instance
    return everest.Everest(EPIC, season=season)
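A hedged sketch of how DetrendFITS might be invoked; the FITS path below is a placeholder, not a file shipped with the project:

# Hypothetical call; 'ktwo201367065-c01_lpd-targ.fits' is a placeholder path
star = DetrendFITS('ktwo201367065-c01_lpd-targ.fits', clobber=False)
star.compute()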
Code Example #5
def test_user():
    '''

    '''

    # Copy the sample K2 FITS file to the correct directory
    path = everest.missions.k2.TargetDirectory(201367065, 1)
    if not os.path.exists(path):
        os.makedirs(path)
    dest = os.path.join(path, everest.missions.k2.FITSFile(201367065, 1))
    orig = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'hlsp_everest_k2_llc_201367065-c01_kepler_v2.0_lc.fits')
    shutil.copy(orig, dest)

    # Load the FITS file
    star = everest.Everest(201367065)

    # Compute the model
    star.compute()
Code Example #6
def save_data(epic, campaign):
    """
    Function to save 4 numpy arrays into 2 files
    
    The first is a csv file with the following columns (NOT in time order, needs to be sorted):
        "time" : obj.time
        "flux" : obj.flux (not .fcor!)
        "flux_err" : (raw flux error ?)
    
    The second is a .npy containing
    "mask" : obj.mask (spurious cadence mask)
    
    """

    # download it if it's in the EVEREST database, else detrend manually
    try:
        lc_everest = everest.Everest(epic, season=campaign)
        print("\t Found in EVEREST Database")
    except Exception:
        print("\t Manually Detrending via nPLD")
        lc_everest = everest.detrender.nPLD(epic, season=campaign)

    # put it in a pandas dataframe
    df = pd.DataFrame(
        data={
            'cadenceno': lc_everest.cadn,
            'time': lc_everest.time,
            'flux': lc_everest.flux,
            'flux_err': lc_everest.fraw_err
        })

    # save as a csv
    df.to_csv(folder + "/%s_lc.csv" % epic, index=False)

    # save the spurious cadence mask
    np.save(folder + "/%s_scmask.npy" % epic, lc_everest.mask)

    # save the bad and nan masks (since that seems to be what they apply)
    np.save(folder + "/%s_badmask.npy" % epic, lc_everest.badmask)
    np.save(folder + "/%s_nanmask.npy" % epic, lc_everest.nanmask)
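A usage sketch for save_data, assuming `folder` is defined at module level next to the function; the EPIC ID and campaign are taken from another example on this page:

# Hypothetical setup and call; `folder` must exist before save_data runs
import os
folder = "lightcurves"
os.makedirs(folder, exist_ok=True)
save_data(211309989, 5)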
Code Example #7
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
short_cad.py
------------

'''

import everest
from everest.math import Downbin, Interpolate
import matplotlib.pyplot as pl
import numpy as np

# Get the LC data
lc = everest.Everest(201601162, cadence='lc')
offset = 100 * (lc.time - 2017.69)

# Get the SC data
sc = everest.Everest(201601162, cadence='sc')
scflux = Interpolate(sc.time, sc.mask, sc.flux)
scflux_downbin = Downbin(scflux, len(lc.flux))

# Plot
fig, ax = pl.subplots(3, figsize=(10, 9))
ax[0].plot(sc.time, sc.fraw, 'k.', alpha=0.1, ms=2, zorder=-1)
ax[0].set_rasterization_zorder(0)
ax[1].plot(sc.time, sc.flux, 'k.', alpha=0.1, ms=2, zorder=-1)
ax[1].set_rasterization_zorder(0)
ax[2].plot(lc.time, scflux_downbin, 'k.', alpha=0.3, ms=3)
ax[2].plot(lc.time, lc.flux + offset, 'r.', alpha=0.3, ms=3)
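Possible finishing touches for the figure above (a sketch; the axis labels are assumptions about what each panel shows):

# Hypothetical labels and save; panel contents inferred from the plotting calls above
ax[0].set_ylabel('SC raw flux')
ax[1].set_ylabel('SC de-trended flux')
ax[2].set_ylabel('Downbinned SC vs. LC flux')
fig.savefig('short_cad.pdf', bbox_inches='tight')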
Code Example #8
File: analysis.py  Project: rrybuttry/agn_everest
def K2_correction(epic, campaign=None, lttr='cbv', runpld=True, Ncbv=2):
    """
    performs a PLD based correction on K2 objects

    args
        epic : EPIC ID (int)
        campaign : K2 campaign (int)
        runpld(optional) : if true, nPLD will be manually run when the EPIC ID is not found in the EVEREST database
        lttr(optional) : long-timescale trend removal method
            'cbv' subtracts 2 cotrending basis vectors
            'linear' subtracts linear slope
        Ncbv(optional) : number of EVEREST co-trending basis vectors to fit/subtract (if using lttr='cbv')

    returns
        time : array of timestamps for observations (in days)
        flux_corrected : array of corrected flux for object
    """

    try:
        # get an everest light curve

        # -----------------get EVEREST PLD-----------------
        lc = everest.Everest(epic, season=campaign, mission='k2')

    except Exception:
        # there is no corrected light curve in the EVEREST database
        logging.info("No EVEREST light curve found")

        if runpld:
            logging.info("Running EVEREST nPLD (this may take a while)")

            # -----------------run EVEREST nPLD-----------------
            lc = everest.nPLD(epic, season=campaign, mission='k2')
            logging.info("Done")

        else:
            logging.info(
                "Exiting funtion returning None; run again and set 'runpld=True' to run the EVEREST correction"
            )
            return None

    if lttr == 'cbv':
        # calculate correction based on (2) cotrending basis vectors
        lc.cbv_num = Ncbv
        lc.compute()

        # put flux/cadences into an array
        # (there are 3852 cadences in a given 80 day campaign)
        cad = np.arange(len(lc.time))
        flux_pld = lc.flux

        # turn indices found to be "bad" into a boolean mask to apply
        mask = (np.isin(cad, np.concatenate([lc.nanmask, lc.badmask,
                                             lc.mask])))

        # interpolate the spurious cadences
        interped_vals = np.interp(cad[mask], cad[~mask], flux_pld[~mask])
        # replace spurious cadence values with the interpolated values
        flux_pld[mask] = interped_vals

        return lc.time, flux_pld

    elif lttr == 'linear':

        # -----------------do additional cut and slope subtraction-----------------
        # put flux/cadences into an array
        # (there are 3852 cadences in a given 80 day campaign)
        cad = np.arange(len(lc.time))
        flux_pld = lc.flux

        # turn indices found to be "bad" into a boolean mask to apply
        mask = (np.isin(cad, np.concatenate([lc.nanmask, lc.badmask,
                                             lc.mask])))

        # interpolate the spurious cadences
        interped_vals = np.interp(cad[mask], cad[~mask], flux_pld[~mask])
        # replace spurious cadence values with the interpolated values
        flux_pld[mask] = interped_vals

        # 30 minute intervals between cadences, so 3 days = 3 * 24 * 2 cadences
        cutoff_day = 3 * 24 * 2
        #cutoff = np.logical_and(cad>cutoff_day, cad<cad[-1]-5*cutoff_day)
        cutoff = cad > cutoff_day
        # finding linear fit
        m, b = np.polyfit(cad[cutoff], flux_pld[cutoff], 1)

        # subtracting it
        flux_corrected = flux_pld[cutoff] - (m * cad[cutoff])

        # time from the light curve
        time = lc.time[cutoff]

        return time, flux_corrected

    else:
        raise ValueError(
            "Invalid method in lttr arg. Use either 'cbv' or 'linear'")
Code Example #9
File: crowding.py  Project: rodluger/everest_paper
for axis in ax.flatten():
    axis.set_xticklabels([])
for axis in ax[:, 1]:
    axis.set_yticklabels([])
ax[0, 0].set_title('EVEREST 1.0', fontsize=16)
ax[0, 1].set_title('EVEREST 2.0', fontsize=16)
ax[-1, 0].set_xlabel('Time', fontsize=18)
ax[-1, 1].set_xlabel('Time', fontsize=18)
plasma = pl.get_cmap('gray_r')
plasma.set_bad(alpha=0)

for n, epic, period, t0, dur, src in zip(range(len(epics)), epics, periods,
                                         t0s, durs, sources):

    # Get the data
    e2 = everest.Everest(epic)
    e1 = k2plr.EVEREST(epic, version=1)

    # Whiten with the GP
    _, amp, tau = e2.kernel_params
    # HACK: GP for these stars is too strong, washes out eclipse
    if epic in [202072978, 218803648, 202733088]:
        amp /= 1000
    gp = george.GP(amp**2 * george.kernels.Matern32Kernel(tau**2))

    # Everest 2
    mask = []
    t0 += np.ceil((e2.time[0] - dur - t0) / period) * period
    for t in np.arange(t0, e2.time[-1] + dur, period):
        mask.extend(np.where(np.abs(e2.time - t) < dur / 2.)[0])
    e2_mask = np.array(
Code Example #10
File: test.py  Project: rodluger/L1
def GetTarget(epicid, snr=10, plot=True):
    """Get the target we're going to de-trend."""
    file = os.path.join('data', 'c6', ('%09d' % epicid)[:4] + '00000',
                        ('%09d' % epicid)[4:6] + '000',
                        "ktwo%d-c06_lpd-targ.fits.gz" % epicid)
    with pyfits.open(file) as f:

        # Grab the data
        qdata = f[1].data
        epicid = int(f[0].header["KEPLERID"])

        # Get a small aperture
        img_med = np.nanmedian(qdata.field('FLUX'), axis=0)
        mu = np.nanmedian(img_med)
        sigma = np.sqrt(np.nanmedian((img_med - mu)**2))
        m = (img_med - mu) > snr * sigma
        labels, nstar = label(m)

        # Enlarge it
        aperture = np.array(m)
        for i in range(2):
            aperture = aperture | np.vstack(
                (m[1:, :], np.zeros(m.shape[1], dtype=bool)))
            aperture = aperture | np.hstack(
                (m[:, 1:], np.zeros(m.shape[0], dtype=bool).reshape(-1, 1)))
            aperture = aperture | np.vstack(
                (np.zeros(m.shape[1], dtype=bool), m))[:-1, :]
            aperture = aperture | np.hstack(
                (np.zeros(m.shape[0], dtype=bool).reshape(-1, 1), m))[:, :-1]
            m = aperture

        # Get the arrays
        cadn = np.array(qdata.field('CADENCENO'), dtype='int32')
        time = np.array(qdata.field('TIME'), dtype='float64')
        fpix3D = np.array(qdata.field('FLUX'), dtype='float64')
        fpix3D_err = np.array(qdata.field('FLUX_ERR'), dtype='float64')
        qual = np.array(qdata.field('QUALITY'), dtype=int)

        # Get rid of NaNs in the time array by interpolating
        naninds = np.where(np.isnan(time))
        time = Interpolate(np.arange(0, len(time)), naninds, time)

        # Flatten the pixel array
        aperture[np.isnan(fpix3D[0])] = 0
        ap = np.where(aperture & 1)
        fpix = np.array([f[ap] for f in fpix3D], dtype='float64')
        fpix_err = np.array([p[ap] for p in fpix3D_err], dtype='float64')
        flux = np.sum(fpix, axis=1)
        flux_err = np.sqrt(np.sum(fpix_err**2, axis=1))

        # Interpolate over NaNs in the flux
        nanmask = np.where(np.isnan(flux) | (flux == 0))[0]
        fpix = Interpolate(time, nanmask, fpix)
        fpix_err = Interpolate(time, nanmask, fpix_err)
        flux = Interpolate(time, nanmask, flux)
        flux_err = Interpolate(time, nanmask, flux_err)

        # Interpolate over quality flags
        badmask = []
        for b in bad_bits:
            badmask += list(np.where(qual & 2**(b - 1))[0])
        fpix = Interpolate(time, badmask, fpix)
        fpix_err = Interpolate(time, badmask, fpix_err)
        flux = Interpolate(time, badmask, flux)
        flux_err = Interpolate(time, badmask, flux_err)

        # Interpolate over >10 sigma outliers
        f = SavGol(flux)
        med = np.nanmedian(f)
        MAD = 1.4826 * np.nanmedian(np.abs(f - med))
        badmask = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0]
        fpix = Interpolate(time, badmask, fpix)
        fpix_err = Interpolate(time, badmask, fpix_err)
        flux = Interpolate(time, badmask, flux)
        flux_err = Interpolate(time, badmask, flux_err)

        # Normalize everything to unit median flux
        norm = np.nanmedian(flux)
        flux /= norm
        flux_err /= norm
        fpix /= norm
        fpix_err /= norm

        # Plot the target?
        if plot:

            # Setup
            fig = pl.figure(figsize=(15, 6))
            ax_ps = [
                pl.subplot2grid((6, 5), (0, 0), rowspan=2, colspan=1),
                pl.subplot2grid((6, 5), (2, 0), rowspan=2, colspan=1),
                pl.subplot2grid((6, 5), (4, 0), rowspan=2, colspan=1)
            ]
            ax_lc = [
                pl.subplot2grid((6, 5), (0, 1), rowspan=3, colspan=4),
                pl.subplot2grid((6, 5), (3, 1), rowspan=3, colspan=4)
            ]
            ax_lc[0].set_xticklabels([])
            ax_lc[1].set_xlabel("Time", fontsize=14)

            # Plot the postage stamp
            fsap = np.nansum(qdata.field('FLUX'), axis=(1, 2))
            imed = np.nanargmin(np.abs(fsap - np.nanmedian(fsap)))
            for ax, img, title in zip(ax_ps, [
                    qdata.field('FLUX')[imed], img_med,
                    np.log10(np.clip(img_med, 0.1, None))
            ], ["med", "lin", "log"]):
                ax.axis("off")
                ax.imshow(img, origin='lower', alpha=1)
                ax.set_title(title, y=0.95, fontsize=8)
                contour = np.zeros_like(aperture)
                contour[np.where(aperture)] = 1
                contour = np.lib.pad(contour, 1, PadWithZeros)
                highres = zoom(contour, 100, order=0, mode='nearest')
                extent = np.array([-1, m.shape[1], -1, m.shape[0]])
                contour = ax.contour(highres,
                                     levels=[0.5],
                                     extent=extent,
                                     origin='lower',
                                     colors='r',
                                     linewidths=1)

            # Plot the raw flux
            ax_lc[0].plot(time,
                          flux,
                          'k.',
                          alpha=0.3,
                          ms=2,
                          label="%.3f ppm" % Scatter(flux))
            ax_lc[0].legend(loc="upper right")

            # Download and plot the EVEREST flux
            star = everest.Everest(epicid, campaign=6)
            ev_time = star.time
            ev_flux = star.flux / np.nanmedian(star.flux)
            ax_lc[1].plot(ev_time,
                          ev_flux,
                          'k.',
                          alpha=0.3,
                          ms=2,
                          label="%.3f ppm" % Scatter(ev_flux))
            ax_lc[1].legend(loc="upper right")
            ax_lc[1].set_ylim(*ax_lc[0].get_ylim())
            fig.savefig("data/c6/%d.pdf" % epicid, bbox_inches="tight")
            pl.close()

        # Return
        return time, flux, flux_err
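A sketch of how GetTarget might be called; the EPIC ID is a placeholder and must correspond to a campaign 6 target pixel file under data/c6/:

# Hypothetical call; 212394689 is a placeholder campaign 6 EPIC ID
time, flux, flux_err = GetTarget(212394689, snr=10, plot=False)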
Code Example #11
import os
import logging
import numpy as np
import pyke
import everest
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
#%matplotlib inline

logger = logging.getLogger()
logger.setLevel(logging.ERROR)  # prevents log spam

if os.path.exists("EverestFits-KepFiltered.fits"):
    os.remove("EverestFits-KepFiltered.fits")
    print("EverestFits-KepFiltered.fits clobbered")
else:
    print("The Filtered does not exist")

star = everest.Everest(201128338, season=102,
                       clobber=True)  # downloads the FITS file and loads it into `star`

fits_file = get_pkg_data_filename(
    star.fitsfile)  # star.fitsfile is the path to the downloaded FITS file

print("Extension 0:")  # header
print(repr(fits.getheader(fits_file, 0)))
print()
print("Extension 1:")  # data
print(repr(fits.getheader(fits_file, 1)))

#fits.setval(fits_file, 'TTYPE1', value='TIME', ext=1)
fits.setval(fits_file, 'TTYPE2', value='SAP_FLUX',
            ext=1)  #likely that TTYPE6 should be SAP_FLUX

#possible code edit: prompt the user to change
Code Example #12
'''

import everest
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
import numpy as np
import george

# Planet params
EPIC = 201862715
t0 = 1979.95
period = 2.65572
dur = 0.15

# Get the Everest 2 data
evr2 = everest.Everest(EPIC)
evr2.mask_planet(t0, period, dur)
evr2.compute()

# Get the K2SFF data
k2sff_time, k2sff_flux = everest.k2.pipelines.get(EPIC, 'k2sff')

# Fill in missing cadences
tol = 0.005
if not ((len(k2sff_time) == len(evr2.time)) and
        (np.abs(k2sff_time[0] - evr2.time[0]) < tol) and
        (np.abs(k2sff_time[-1] - evr2.time[-1]) < tol)):
    ftmp = np.zeros_like(evr2.time)
    j = 0
    for i, t in enumerate(evr2.time):
        if np.abs(k2sff_time[j] - t) < tol:
Code Example #13
File: overfit_test.py  Project: ruimeng-duan/everest
import numpy as np
import everest
from everest.gp import GetCovariance
from everest.transit import TransitShape, TransitModel, Get_rhos
import matplotlib.pyplot as pl
from scipy.linalg import cholesky, cho_solve
from tqdm import tqdm
import os

# The injected depth
depth = 0.001

# Load the light curve
star = everest.Everest(201367065, quiet=True)
overfit = star.overfit(plot=False)

# Compute
if not os.path.exists('overfit.npz'):

    # Pre-compute some stuff
    med = np.nanmedian(star.flux)
    mtime = star.apply_mask(star.time)
    merr = star.apply_mask(star.fraw_err)
    npts = len(mtime)
    inj = TransitShape(depth=1)
    K = GetCovariance(star.kernel, star.kernel_params, mtime, merr)
    CK = cholesky(K)
    fpix = np.array(star.fpix)

    # Unmasked Overfitting Metric (Brute force)
    UOMbf = np.zeros(npts)
Code Example #14
File: halo_everest.py  Project: zabop/halophot
mpl.rcParams['savefig.dpi'] = 200  #72
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12

fname = '../data/ktwo211309989-c05_lpd-targ.fits.gz'  # point this path to your favourite K2 target pixel file

print('Running everest')
# try:
# 	pass
# except:

star = everest.Everest(211309989,
                       clobber=True,
                       mission='k2',
                       giter=1,
                       gmaxf=3,
                       lambda_arr=[1e0, 1e5, 1e10],
                       oiter=3,
                       pld_order=2,
                       get_hires=False,
                       get_nearby=False)
# star.publish()

print('Running halophot')

tpf, ts = read_tpf(fname)

tpf, newts, weights, weightmap, pixelvector = do_lc(tpf, ts, (None, None), 1,
                                                    1)
Code Example #15
# Imports needed by this excerpt (SavGol is assumed to live in everest.mathutils)
import csv
import numpy as np
import everest
from tqdm import tqdm
from everest.mathutils import SavGol

datapath = '/Users/nks1994/Documents/Research/everest/docs/c0'
dataloc = '.csv'

# CAMPAIGN 1

tags = []
mags = []
with open((datapath + str(1) + dataloc), 'r') as f:
    data = csv.reader(f)
    for i, row in enumerate(data):
        if i == 0:
            continue
        else:
            tags.append(row[0])
            mags.append(row[1])

A = []

for t in tqdm(tags):
    t = int(t)
    star = everest.Everest(t)
    flux = star.apply_mask(star.flux)
    sgflux = SavGol(flux)
    A.append(np.mean(sgflux))

np.savez('A2.npz', A=A, mags=mags)

import pdb
pdb.set_trace()
Code Example #16
File: hd3167.py  Project: rodluger/everest_paper
import everest
from scipy.signal import savgol_filter
from everest.mathutils import Interpolate, Smooth
from everest.missions.k2 import CDPP
import matplotlib.pyplot as pl
import numpy as np

# Planet params
EPIC = 220383386

# Set up the figure
fig, ax = pl.subplots(2, sharex=True, sharey=True, figsize=(13, 9))
fig.subplots_adjust(hspace=0.05)

# Everest
star = everest.Everest(EPIC)
time = star.time
med = np.nanmedian(star.flux)
baseline = Interpolate(star.time, star.mask, star.flux)
baseline = savgol_filter(baseline, 49, 2)
flux = star.flux - baseline + med
ax[1].plot(time, flux, 'k.', alpha=0.75, ms=3)

# K2SFF
time_sff, flux_sff = everest.k2.pipelines.get(EPIC, 'k2sff')
med = np.nanmedian(flux_sff)
ys = flux_sff - Smooth(flux_sff, 50)
M = np.nanmedian(ys)
MAD = 1.4826 * np.nanmedian(np.abs(ys - M))
out = []
for i, _ in enumerate(flux_sff):
Code Example #17
File: k2.py  Project: dfm/rotate
def get_light_curve(epicid,
                    season=None,
                    mask_transits=True,
                    mask_width=3,
                    sigma_iter=10,
                    sigma_thresh=5.0,
                    sigma_window=49):
    """Get the light curve for a given EPIC ID

    Args:
        epicid (int): The ID of the target.
        mask_transits (bool): Should known candidates be masked?
        mask_width (float): The half width of the transit mask in units of the
            transit duration.
        sigma_iter (int): The maximum number of iterations of sigma clipping to
            run.
        sigma_thresh (float): The sigma clipping threshold.
        sigma_window (int): The width of the smoothing window for sigma
            clipping.

    Returns:
        t (ndarray): The array of timestamps.
        F (ndarray): The ``(ntime, npix)`` matrix of (normalized) pixel flux
            time series.
        yerr (ndarray): An estimate of the uncertainties of the SAP flux
            (``sum(F, axis=1)``).

    """
    star = everest.Everest(epicid, season=season, quiet=True)
    t = star.apply_mask(star.time)
    F = star.apply_mask(star.fpix)

    # Mask any known transits
    if mask_transits:
        k2cand = exoarch.ExoplanetArchiveCatalog("k2candidates").df
        epic = k2cand[k2cand.epic_name == "EPIC {0}".format(epicid)]
        cands = epic.groupby("epic_candname").mean()
        for _, cand in cands.iterrows():
            t0 = cand.pl_tranmid - 2454833.0
            per = cand.pl_orbper
            dur = cand.pl_trandur
            m = np.abs((t - t0 + 0.5 * per) % per -
                       0.5 * per) > mask_width * dur
            t = t[m]
            F = F[m]

    # Use 1st order PLD to do some sigma clipping
    fsap = np.sum(F, axis=1)
    A = F / fsap[:, None]
    m = np.ones_like(fsap, dtype=bool)
    for i in range(sigma_iter):
        w = np.linalg.solve(np.dot(A[m].T, A[m]), np.dot(A[m].T, fsap[m]))
        resid = fsap - np.dot(A, w)
        m_new = sigma_clip(resid, thresh=sigma_thresh, window=sigma_window)
        if m.sum() == m_new.sum():
            m = m_new
            break
        m = m_new
    t = t[m]
    fsap = fsap[m]
    F = F[m]

    # Normalize
    med = np.median(fsap)
    fsap /= med
    F /= med

    # Estimate flux uncertainty
    yerr = np.nanmedian(np.abs(np.diff(fsap)))

    return t, F, yerr
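A usage sketch following the docstring above; EPIC 201367065 is used because other examples on this page note it hosts transiting planets:

# Hypothetical call; masks known K2 candidates before the PLD-based sigma clipping
t, F, yerr = get_light_curve(201367065, season=1, mask_transits=True)
print(t.shape, F.shape, yerr)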
Code Example #18
'''
saturated_star.py
-----------------


'''

import everest
import k2plr
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
import numpy as np
from scipy.ndimage import zoom

# Everest 2.0
star = everest.Everest(202063160)
inds = np.where(star.time >= 1959)[0]
time = star.time[inds]
flux = star.flux[inds]
flux /= np.nanmedian(flux)

# Raw
fraw = star.fraw[inds]
fraw /= np.nanmedian(fraw)

# Everest 1.0
v1_time, v1_flux = star.get_pipeline('everest1')
inds = np.where(v1_time >= 1959)[0]
v1_time = v1_time[inds]
v1_flux = v1_flux[inds]
v1_flux /= np.nanmedian(v1_flux)
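One way the three light curves prepared above could be compared; the plotting style here is an assumption, not part of the original script:

# Hypothetical comparison of raw, EVEREST 1.0, and EVEREST 2.0 flux
fig, ax = pl.subplots(1, figsize=(12, 4))
ax.plot(time, fraw, 'r.', ms=2, alpha=0.3, label='Raw')
ax.plot(v1_time, v1_flux, 'b.', ms=2, alpha=0.3, label='EVEREST 1.0')
ax.plot(time, flux, 'k.', ms=2, alpha=0.3, label='EVEREST 2.0')
ax.legend(loc='upper right')
pl.show()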
Code Example #19
import everest
import numpy as np
import matplotlib.pyplot as pl
from plotmatrix import PlotMatrix

# Load everest
# 205071984, 201546283, 210957318, 211916756, 201546283
star = everest.Everest(201367065)

# Let's look at what we're dealing with
# There are three transiting planets buried in here.
fig = pl.figure(figsize=(16, 6))
pl.plot(star.time, star.fraw, 'k.', alpha=0.3, ms=3)
pl.ylim(354000, 359000)
pl.show()

# Let's remove the really bad outliers
cut = np.where(star.fraw < 355000)
time = np.delete(star.time, cut)
fpix = np.delete(star.fpix, cut, axis=0)
ntime, npix = fpix.shape

# Let's look at what we're dealing with
fig = pl.figure(figsize=(16, 8))
for n in range(npix):
    pl.plot(time, fpix[:, n])
pl.show()

# Construct our design matrix
total_flux = np.sum(fpix, axis=1).reshape(-1, 1)