Example #1
def get_merged_companion_isochrone():

    outpath = '../data/companion_isochrones/MIST_plus_Baraffe_merged.csv'

    if not os.path.exists(outpath):

        #
        # Get Teff, L for the stellar models of MIST at 5 Gyr.
        #

        # MIST isochrones, v1.2 from the website
        mistpath = os.path.join(
            datadir, 'MIST_v1.2_feh_p0.00_afe_p0.0_vvcrit0.4_basic.iso')
        iso = ISO(mistpath)

        # 10**9.7 = 5.01 Gyr. I'm OK with not interpolating further here.
        mist_age_ind = iso.age_index(9.7)
        mist_logTeff = iso.isos[mist_age_ind]['log_Teff']
        mist_logL = iso.isos[mist_age_ind]['log_L']
        mist_initial_mass = iso.isos[mist_age_ind]['initial_mass']

        mist_df = pd.DataFrame({
            'mass': mist_initial_mass,
            'lum': 10**(mist_logL),
            'teff': 10**(mist_logTeff),
        })

        #
        # 300 Mjup ~= 0.3 Msun (really 0.286), so limit our range of interest.
        # There is one point that overlaps: Mstar = 0.1Msun. For that point,
        # use the Baraffe model.
        #
        sel = (mist_df.mass < 0.9) & (mist_df.mass > 0.1)
        mist_df = mist_df[sel]

        #
        # Get Teff, L for the Baraffe+2003 (COND) models at 5 Gyr.
        #

        # Baraffe+2003 isochrones for substellar mass objects
        bar_df = pd.read_csv(os.path.join(datadir, 'COND03_5gyr.csv'),
                             delim_whitespace=True)

        bar_df = bar_df.drop(
            ['g', 'R', 'Mv', 'Mr', 'Mi', 'Mj', 'Mh', 'Mk', 'Mll', 'Mm'],
            axis=1)

        bar_df['L/Ls'] = 10**nparr(bar_df['L/Ls'])

        bar_df = bar_df.rename(columns={
            'L/Ls': 'lum',
            'Teff': 'teff',
            'M/Ms': 'mass'
        })

        #
        # merge
        #
        mdf = pd.concat((bar_df, mist_df), sort=False).reset_index()

        mdf = mdf.drop(['index'], axis=1)

        mdf.to_csv(outpath, index=False)

    return pd.read_csv(outpath)
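
A rough usage sketch (not from the source): once the merged CSV above exists, it can be interpolated in mass to look up a companion's Teff and luminosity. The only assumptions are scipy's interp1d and the column names written by the function.

# Hypothetical usage, assuming the merged CSV above has already been generated.
import pandas as pd
from scipy.interpolate import interp1d

mdf = pd.read_csv('../data/companion_isochrones/MIST_plus_Baraffe_merged.csv')

# interpolators over the combined Baraffe+MIST grid (columns written above)
fn_mass_to_teff = interp1d(mdf['mass'], mdf['teff'], bounds_error=False)
fn_mass_to_lum = interp1d(mdf['mass'], mdf['lum'], bounds_error=False)

print(fn_mass_to_teff(0.2), fn_mass_to_lum(0.2))  # e.g., a 0.2 Msun companion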
Example #2
def abs_mag_in_bandpass(lum, teff, bandpass='562'):
    """
    lum: bolometric luminosity in units of Lsun
    teff: effective temperature in units of K
    bandpass: '562' or '832' (bandpass centers in nanometers), or 'NIRC2_Kp'.
    """

    if bandpass not in ['562', '832', 'NIRC2_Kp']:
        raise ValueError

    if bandpass in ['562', '832']:

        bandpassdir = '../data/WASP4_zorro_speckle/filters/'
        bandpasspath = os.path.join(bandpassdir,
                                    'filter_EO_{}.csv'.format(bandpass))

        bpdf = pd.read_csv(bandpasspath, delim_whitespace=True)

        # The tabulated transmission values are unreliable at the long-wavelength
        # end, as expected for silicon detectors (confirmed by Howell, priv.
        # comm.), so restrict to a window around the bandpass center.

        width = 100  # nanometers, around the bandpass middle
        sel = np.abs(float(bandpass) - bpdf.nm) < width

        bpdf = bpdf[sel]

    elif bandpass == 'NIRC2_Kp':

        # NIRC2 Kp band filter from
        # http://svo2.cab.inta-csic.es/theory/fps/getdata.php?format=ascii&id=Keck/NIRC2.Kp
        bandpassdir = '../data/WASP4_NIRC2/'
        bandpasspath = os.path.join(bandpassdir, 'Keck_NIRC2.Kp.dat')

        bpdf = pd.read_csv(bandpasspath,
                           delim_whitespace=True,
                           names=['wvlen_angst', 'Transmission'])

        bpdf['nm'] = bpdf.wvlen_angst / 10

    else:
        raise NotImplementedError

    #
    # see /doc/20200121_blackbody_mag_derivn.pdf for relevant discussion of
    # units and where the equations come from.
    #

    from astropy.modeling.models import BlackBody

    M_Xs = []
    for temperature, luminosity in zip(teff * u.K, lum * u.Lsun):

        bb = BlackBody(temperature=temperature)

        wvlen = nparr(bpdf.nm) * u.nm
        B_nu_vals = bb(wvlen)
        B_lambda_vals = B_nu_vals * (const.c / wvlen**2)

        T_lambda = nparr(bpdf.Transmission)

        F_X = 4 * np.pi * u.sr * trapz(B_lambda_vals * T_lambda, wvlen)

        F = const.sigma_sb * temperature**4

        # https://nssdc.gsfc.nasa.gov/planetary/factsheet/sunfact.html
        M_bol_sun = 4.83
        M_bol_star = (-5 / 2 * np.log10(luminosity / (1 * u.Lsun)) + M_bol_sun)

        # absolute magnitude of the star in the bandpass
        M_X = M_bol_star - 5 / 2 * np.log10(F_X / F)

        M_Xs.append(M_X.value)

    return nparr(M_Xs)
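
The loop above evaluates M_X = M_bol - 2.5 log10(F_X / F_bol), where F_X is the transmission-weighted blackbody flux and F_bol = sigma_SB * Teff^4. A hedged usage sketch with made-up inputs; it assumes the function and the filter files it reads are available in the working environment.

import numpy as np

# hypothetical companions with (L, Teff) drawn from an isochrone
lum = np.array([0.01, 0.001])    # Lsun
teff = np.array([3300., 2500.])  # K

M_832 = abs_mag_in_bandpass(lum, teff, bandpass='832')
print(M_832)  # absolute magnitudes in the 832 nm Zorro bandpass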
Example #3
def get_companion_bounds(instrument):

    if instrument == 'Zorro':

        zorrostr = 'WASP-4_20190928_832'
        outpath = ('../data/WASP4_zorro_speckle/{}_companionbounds.csv'.format(
            zorrostr))

        if not os.path.exists(outpath):

            df = get_wasp4_mag_to_companion_contrasts()

            #
            # WASP-4_20190928_832.dat is the most constraining curve for basically
            # any substellar-mass companion: the blackbody curve works against us
            # at 562 nm, and the seeing was better on 20190928 than on 20190912.
            #
            datapath = '../data/WASP4_zorro_speckle/{}.dat'.format(zorrostr)
            zorro_df = pd.read_csv(datapath,
                                   comment='#',
                                   skiprows=29,
                                   names=['ang_sep', 'delta_mag'],
                                   delim_whitespace=True)

            #
            # Interpolation function to convert observed deltamags to deltamass.
            #
            fn_dmag_to_mass = interp1d(nparr(df.dmag_832),
                                       nparr(df.mass),
                                       kind='quadratic',
                                       bounds_error=False,
                                       fill_value=np.nan)

            zorro_df['m_comp/m_sun'] = fn_dmag_to_mass(zorro_df.delta_mag)

            zorro_df.to_csv(outpath, index=False)
            print('made {}'.format(outpath))

        return pd.read_csv(outpath)

    elif instrument == 'NIRC2':

        namestr = 'WASP-4_20120727_NIRC2'
        outpath = (
            '../data/WASP4_NIRC2/{}_companionbounds.csv'.format(namestr))

        if not os.path.exists(outpath):

            df = get_wasp4_mag_to_companion_contrasts()

            #
            # Keck/NIRC2 Kp-band contrast curve from 2012-07-27.
            #
            datapath = '../data/WASP4_NIRC2/WASP-4_Kp_contrast_2012_07_27_contrast_dk_full_img.txt'
            nirc2_df = pd.read_csv(
                datapath,
                skiprows=2,
                names=['ang_sep', 'delta_mag', 'completeness'],
                delim_whitespace=True)

            #
            # Interpolation function to convert observed deltamags to deltamass.
            # Note: this reuses the 832 nm deltamag-to-mass relation.
            #
            fn_dmag_to_mass = interp1d(nparr(df.dmag_832),
                                       nparr(df.mass),
                                       kind='quadratic',
                                       bounds_error=False,
                                       fill_value=np.nan)

            nirc2_df['m_comp/m_sun'] = fn_dmag_to_mass(nirc2_df.delta_mag)

            nirc2_df.to_csv(outpath, index=False)
            print('made {}'.format(outpath))

        return pd.read_csv(outpath)

    else:
        raise NotImplementedError
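
Both branches follow the same idea: map an observed contrast Delta-mag(rho) to a companion-mass upper limit through a Delta-mag-to-mass relation built elsewhere (get_wasp4_mag_to_companion_contrasts, not shown). A self-contained toy sketch of that inversion, purely illustrative:

import numpy as np
from scipy.interpolate import interp1d

# toy Delta-mag vs mass relation (more massive companion => smaller contrast)
mass = np.linspace(0.02, 0.6, 50)   # Msun
dmag = 12.0 - 14.0 * mass           # made-up, monotonically decreasing

# invert: observed contrast -> maximum companion mass still allowed
fn_dmag_to_mass = interp1d(dmag, mass, bounds_error=False, fill_value=np.nan)

observed_dmag = np.array([4.5, 6.0, 8.2])  # from a speckle contrast curve
print(fn_dmag_to_mass(observed_dmag))      # companion mass upper limits [Msun]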
Example #4
def _get_detection_estimate(df, Rp, writetype, dilkey, occ_rate,
                            southern_only=False, extended_mission=False):
        '''
        args:
        -----
        df: a DataFrame of cluster members with T<16, selected to not include
        anything in globulars (i.e. our correct cluster definition) and to only
        be in the southern ecliptic hemisphere.

        dilkey (str): key to column header of dilution, e.g., 'dilution_ap2.00'

        Rp: astropy unitful planet radius assumed.

        writetype (str): 'a' for append, 'w' for write.

        occ_rate (float): fraction of stars with planet
        -----

        This routine assumes all cluster members are dwarf stars.
        For a P=10day and P=3day planet of radius Rp, what fraction of the
        stars are detectable, at what thresholds?
        '''

        noise_1hr_in_ppm = np.array(df['noise_1hr'])
        noise_1hr_in_frac = noise_1hr_in_ppm/1e6

        dilution = df[dilkey]

        Rstar = np.array(df['rad'])*u.Rsun

        signal = ((Rp/Rstar).cgs)**2

        # assuming aperture photometry...
        SNR_1hr = nparr( (signal / noise_1hr_in_frac)*np.sqrt(dilution) )

        # # assuming difference imaging
        # cutoff_dilution = 0.1
        # SNR_1hr = nparr( (signal / noise_1hr_in_frac)*
        #                 (dilution>cutoff_dilution).astype(int) )

        if not extended_mission:
            T_obs = 28*u.day
        else:
            T_obs = 56*u.day

        P_long = 10*u.day

        # Compute transit duration, avg over impact param
        Mstar = np.array(df['mass'])*u.Msun
        vol_star = (4*np.pi/3)*Rstar**3
        rho_star = Mstar / vol_star
        vol_sun = (4*np.pi/3)*u.Rsun**3
        rho_sun = u.Msun / vol_sun

        T_dur_long = 13*u.hr * (P_long.to(u.yr).value)**(1/3) \
                             * (rho_star/rho_sun)**(-1/3)

        P_short = 3*u.day
        T_dur_short = 13*u.hr * (P_short.to(u.yr).value)**(1/3) \
                              * (rho_star/rho_sun)**(-1/3)

        T_in_transit_long = (T_obs / P_long)*T_dur_long*np.pi/4
        T_in_transit_short = (T_obs / P_short)*T_dur_short*np.pi/4

        SNR_pf_long = SNR_1hr * (T_in_transit_long.to(u.hr).value)**(1/2)
        SNR_pf_short = SNR_1hr * (T_in_transit_short.to(u.hr).value)**(1/2)

        # For how many cluster members can you get SNR > 10 in ONE HOUR?
        N_1hr = len(SNR_1hr[SNR_1hr > 10])

        # For how many cluster members can you get SNR > 10 phase folded,
        # assuming the long period?
        N_pf_long = len(SNR_pf_long[SNR_pf_long > 10])

        # For how many cluster members can you get SNR > 10 phase folded,
        # assuming the short period?
        N_pf_short = len(SNR_pf_short[SNR_pf_short > 10])

        a_long = (const.G * Mstar / (4*np.pi*np.pi) * P_long**2 )**(1/3)
        transit_prob_long = (Rstar/a_long).cgs.value
        a_short = (const.G * Mstar / (4*np.pi*np.pi) * P_short**2 )**(1/3)
        transit_prob_short = (Rstar/a_short).cgs.value

        # For how many planets do you get SNR>10 in one hour?
        N_pla_1hr_long = 0
        N_pla_1hr_short = 0
        N_pla_pf_long = 0
        N_pla_pf_short = 0

        for ix, this_transit_prob in enumerate(transit_prob_long):
            if np.random.rand() < occ_rate * this_transit_prob:
                # Congrats, you have a transiting planet that exists
                if SNR_1hr[ix] > 10:
                    # Congrats, it's detected (1hr integration)
                    N_pla_1hr_long += 1
                if SNR_pf_long[ix] > 10:
                    # Congrats, it's detected (phase-folded)
                    N_pla_pf_long += 1

        for ix, this_transit_prob in enumerate(transit_prob_short):
            if np.random.rand() < occ_rate * this_transit_prob:
                # Congrats, you have a transiting planet that exists
                if SNR_1hr[ix] > 10:
                    # Congrats, it's detected (1hr integration)
                    N_pla_1hr_short += 1
                if SNR_pf_short[ix] > 10:
                    # Congrats, it's detected (phase-folded)
                    N_pla_pf_short += 1

        if southern_only:
            southern_str = 'only count stars in southern ecliptic hemisphere!!'
        else:
            southern_str = ''

        outstr = \
        '''
        ##################################################
        {:s}

        For Rp = {:.1f}, cluster star radii and masses from TIC8,
        dilution aperture radius of {:s}

        FRACTION OF STARS WITH PLANETS IS {:s}

        MEDIAN STELLAR RADIUS IS {:s}
        MEAN DILUTION IS {:.2f}

        For how many cluster members can you get SNR > 10 in ONE HOUR?
        {:d}

        For how many cluster members can you get SNR > 10 phase folded, assuming
        the long period (10day)?
        {:d}

        For how many cluster members can you get SNR > 10 phase folded, assuming
        the short period (3day)?
        {:d}

        N_pla_1_hr_long: {:d}
        N_pla_1_hr_short: {:d}
        N_pla_pf_long: {:d}
        N_pla_pf_short: {:d}

        ##################################################
        '''.format(
        southern_str,
        Rp,
        dilkey,
        repr(occ_rate),
        repr(np.median(Rstar)),
        np.mean(dilution),
        N_1hr,
        N_pf_long,
        N_pf_short,
        N_pla_1hr_long,
        N_pla_1hr_short,
        N_pla_pf_long,
        N_pla_pf_short
        )

        if southern_only:
            outpath = '../../results/yield_calculation/planet_detection_estimate_southern_only.out'
        else:
            outpath = '../../results/yield_calculation/planet_detection_estimate_allsky.out'

        if extended_mission:
            outpath = outpath.replace('.out', '_extendedmission.out')

        with open(outpath, writetype) as f:
            f.writelines(outstr)

        print(outstr)
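
As a sanity check of the scalings used above (SNR_1hr = depth / sigma_1hr * sqrt(dilution), with the phase-folded SNR gaining a factor sqrt(hours in transit)), here is a toy single-star example with made-up numbers:

import numpy as np
from astropy import units as u

noise_1hr = 1000e-6                                        # 1-hr fractional noise (1000 ppm)
depth = ((1.0 * u.Rjup) / (1.0 * u.Rsun)).cgs.value ** 2   # transit depth, ~0.010
dilution = 0.8

SNR_1hr = depth / noise_1hr * np.sqrt(dilution)            # ~9

# ~13 hr * (P/yr)^(1/3) transit duration for a solar-density star
P = 10 * u.day
T_dur = 13 * u.hr * (P.to(u.yr).value) ** (1/3)
T_in_transit = (28 * u.day / P) * T_dur * np.pi / 4        # averaged over impact parameter

SNR_pf = SNR_1hr * np.sqrt(T_in_transit.to(u.hr).value)
print(SNR_1hr, SNR_pf)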
Example #5
import matplotlib.patheffects as pe

#
# manually added WASP-4 to the output:
# \dot{P}_{\rm RV} = -5.94 \pm 0.39~{\rm ms}\,{\rm yr}^{-1}
#

df = pd.read_csv('../results/knutson_all_pdots.csv', sep=';')
sel = (df.K14_significant) & (df.Pdot != 0)
df = df[sel]

savpath = '../results/k14_pdot.png'
fig, ax = plt.subplots(figsize=(4,3))

yval = np.arange(0, len(df), 1) + 0.1
xval = nparr(df.Pdot)
x_perr = nparr(df.Pdot_perr)
x_merr = nparr(df.Pdot_merr)
dotlabels = nparr(df.planet)

ax.errorbar(xval, yval, xerr=np.vstack([x_merr, x_perr]),
            fmt='.k', ecolor='black', zorder=2, alpha=1, mew=1,
            markersize=2.5, capsize=3)

ix = 0
for _x, _y, _l in zip(xval+x_perr+2, yval, dotlabels):

    if 'WASP-4' in _l:
        c = 'C0'
        ax.errorbar(xval[ix], yval[ix],
                    xerr=np.vstack([x_merr[ix], x_perr[ix]]),
Example #6
# datasets['tess'] = [x_obs, y_obs, y_err]

datestrs = ['20200401', '20200426', '20200521', '20200614']
for ix, d in enumerate(datestrs):
    x_obs, y_obs, y_err = get_elsauce_phot(datestr=d)
    x_obs -= 2457000 # convert to BTJD
    datasets[f'elsauce_{ix}'] = [x_obs, y_obs, y_err]

datestrs = ['20200529', '20200614', '20200623']
for ix, d in enumerate(datestrs):
    x_obs, y_obs, y_err = get_astep_phot(datestr=d)
    x_obs += 2450000 # convert to BJD_TDB
    x_obs -= 2457000 # convert to BTJD
    datasets[f'astep_{ix}'] = [x_obs, y_obs, y_err]

times, fluxs, errs, instrs = nparr([]), nparr([]), nparr([]), nparr([])
for k in datasets.keys():
    times = np.hstack((times, datasets[k][0]))
    fluxs = np.hstack((fluxs, datasets[k][1]))
    errs = np.hstack((errs, datasets[k][2]))
    instrs = np.hstack((instrs, np.repeat(k, len(datasets[k][0]))))

df = pd.DataFrame({
    'btjd_tdb': np.round(times,8),
    'flux': np.round(fluxs,6),
    'fluxerr': np.round(errs,6),
    'instr': instrs
})

outpath = '../data/phot/photometry_mrt_ready.csv'
df.to_csv(outpath, index=False)
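
The offsets above encode BTJD = BJD_TDB - 2457000, with the ASTEP files assumed to store times as BJD_TDB - 2450000. Two tiny helpers making those conventions explicit (illustrative only):

def bjd_tdb_to_btjd(bjd_tdb):
    """BTJD = BJD_TDB - 2457000 (TESS convention)."""
    return bjd_tdb - 2457000

def astep_to_btjd(t_minus_2450000):
    """Assumes the ASTEP photometry stores BJD_TDB - 2450000."""
    return t_minus_2450000 + 2450000 - 2457000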
Example #7
File: lithium.py  Project: lgbouma/earhart
def _make_Randich18_xmatch(datapath, vs_rotators=1, RADIUS=0.5):
    """
    For every Randich+18 Gaia-ESO star with a spectrum, look for a rotator
    match (either the "gold" or "autorot" samples) within RADIUS arcseconds.
    If you find it, pull its data. If there are multiple, take the closest.
    """

    rdf = get_Randich18_NGC2516()

    if vs_rotators:
        raise DeprecationWarning
        rotdir = os.path.join(DATADIR, 'rotation')
        rot_df = pd.read_csv(
            os.path.join(rotdir, 'ngc2516_rotation_periods.csv'))
        comp_df = rot_df[rot_df.Tags == 'gold']
        print(
            'Comparing vs the "gold" NGC2516 rotators sample (core + halo)...')
    else:

        from earhart.helpers import _get_fullfaint_dataframes
        nbhd_df, core_df, halo_df, full_df, target_df = _get_fullfaint_dataframes(
        )
        comp_df = full_df
        print(
            f'Comparing vs the {len(comp_df)} "fullfaint" kinematic NGC2516 rotators sample (core + halo)...'
        )

    c_comp = SkyCoord(ra=nparr(comp_df.ra) * u.deg,
                      dec=nparr(comp_df.dec) * u.deg)
    c_r18 = SkyCoord(ra=nparr(rdf._RA) * u.deg, dec=nparr(rdf._DE) * u.deg)

    cutoff_radius = RADIUS * u.arcsec
    has_matchs, match_idxs, match_rows = [], [], []
    for ix, _c in enumerate(c_r18):
        if ix % 100 == 0:
            print(f'{ix}/{len(c_r18)}')
        seps = _c.separation(c_comp)
        if min(seps.to(u.arcsec)) < cutoff_radius:
            has_matchs.append(True)
            match_idx = np.argmin(seps)
            match_idxs.append(match_idx)
            match_rows.append(comp_df.iloc[match_idx])
        else:
            has_matchs.append(False)

    has_matchs = nparr(has_matchs)

    left_df = rdf[has_matchs]

    right_df = pd.DataFrame(match_rows)

    mdf = pd.concat(
        (left_df.reset_index(drop=True), right_df.reset_index(drop=True)),
        axis=1)

    if vs_rotators:
        print(
            f'Got {len(mdf)} gold rot matches from {len(rdf)} Randich+18 shots.'
        )
    else:
        print(
            f'Got {len(mdf)} fullfaint kinematic matches from {len(rdf)} Randich+18 shots.'
        )

    # "Comparing the Gaia color and GES Teff, 15 of these (all with
    # Bp-Rp0 $>$ 2.0) are spurious matches, which we remove."
    if not vs_rotators:

        from earhart.priors import AVG_EBpmRp
        assert abs(AVG_EBpmRp - 0.1343) < 1e-4  # used by KC19

        badmatch = (((mdf['phot_bp_mean_mag'] - mdf['phot_rp_mean_mag'] -
                      AVG_EBpmRp) > 2.0)
                    & (mdf['Teff'] > 4300))
        mdf = mdf[~badmatch]
        print(
            f'Got {len(mdf)} fullfaint kinematic matches from {len(rdf)} Randich+18 shots after cleaning "BADMATCHES".'
        )
        print(f'Got {len(mdf[mdf.subcluster=="core"])} in core.')
        print(f'Got {len(mdf[mdf.subcluster=="halo"])} in halo.')

    mdf.to_csv(datapath, index=False)
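
The per-star separation loop above can also be vectorized with astropy's SkyCoord.match_to_catalog_sky. A self-contained toy sketch of the same 0.5 arcsec crossmatch (synthetic coordinates; not the code the project actually uses):

import numpy as np
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord

# toy catalogs standing in for the Randich+18 and rotator samples above
rdf = pd.DataFrame({'ra': [119.5, 119.6, 120.0], 'dec': [-60.7, -60.8, -61.0]})
comp_df = pd.DataFrame({'ra': [119.50001, 120.3], 'dec': [-60.70001, -61.2]})

c_r18 = SkyCoord(ra=np.array(rdf.ra)*u.deg, dec=np.array(rdf.dec)*u.deg)
c_comp = SkyCoord(ra=np.array(comp_df.ra)*u.deg, dec=np.array(comp_df.dec)*u.deg)

# idx[i]: index in c_comp of the closest comparison star to c_r18[i]
idx, sep2d, _ = c_r18.match_to_catalog_sky(c_comp)

has_match = sep2d < 0.5*u.arcsec
mdf = pd.concat((rdf[has_match].reset_index(drop=True),
                 comp_df.iloc[idx[has_match]].reset_index(drop=True)), axis=1)
print(mdf)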
Example #8
File: find_rvs.py  Project: lgbouma/cdips
def wrangle_eso_for_rv_availability(ra, dec):
    """
    Checks via ESO query for available RVs on:
        ['HARPS', 'ESPRESSO', 'FORS2', 'UVES', 'XSHOOTER']

    Possible future expansion: actually get the RVs. (For now, just this is
    just used as a flag to let the user know the RVs might exist!)

    Returns tuple of:
        (nan, nan, provenance)
    """
    eso = Eso()
    eso.ROW_LIMIT = 9999

    coord = SkyCoord(ra=ra, dec=dec, unit=(u.deg, u.deg), frame='icrs')
    print('begin ESO search for {}'.format(repr(coord)))

    rastr = (str(coord.ra.to_string(u.hour)).replace('h', ' ').replace(
        'm', ' ').replace('s', ' '))

    decstr = (str(coord.dec.to_string()).replace('d', ' ').replace(
        'm', ' ').replace('s', ' '))

    # search within 10 arcsec of given position
    boxsize = '00 00 10'
    res = eso.query_main(column_filters={
        'ra': rastr,
        'dec': decstr,
        'box': boxsize
    })

    if res is None:
        return np.nan, np.nan, np.nan

    # limit search to the following instruments, in order of preference
    instruments = ['HARPS', 'ESPRESSO', 'FORS2', 'UVES', 'XSHOOTER']
    sel = np.zeros((len(res))).astype(bool)
    for instrument in instruments:
        sel |= (nparr(res['Instrument']) == instrument)
    res = res[sel]

    # limit returned categories
    badcategories = ['CALIB']
    sel = np.zeros((len(res))).astype(bool)
    for badcategory in badcategories:
        sel |= (nparr(res['Category']) != badcategory)
    res = res[sel]

    if len(res) >= 1:

        # XSHOOTER doesn't seem to give archival RVs. would need to derive
        # from spectra yourself
        if np.all(nparr(res['Instrument']) == 'XSHOOTER'):
            return np.nan, np.nan, 'XSHOOTER'

        # Embargo lasts a year on all ESO observations.
        nt = Time.now()
        embargo_end = nt.mjd - 365
        if np.all(nparr(res['MJD-OBS']) > embargo_end):
            return np.nan, np.nan, np.unique(res['Instrument'])[0]

        # HARPS gives archival RVs. downloading them can be done... but for
        # s6+s7, only a few objects are viable.
        if np.all(nparr(res['Instrument']) == 'HARPS'):
            print('WARNING: SKIPPING AUTOMATION OF HARPS ARCHIVAL RV GETTING')
            return np.nan, np.nan, 'HARPS'

        # NOTE (assumed fallback, not in the original): if none of the special
        # cases above apply, report the first matching instrument so that a
        # (nan, nan, provenance) tuple is always returned, as documented.
        return np.nan, np.nan, np.unique(res['Instrument'])[0]

    else:
        return np.nan, np.nan, np.nan
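
A hedged usage sketch; the coordinates roughly correspond to the WASP-4 field, and the call assumes network access to the ESO archive:

# hypothetical call for a target near RA=353.56 deg, Dec=-42.06 deg
rv, rv_err, provenance = wrangle_eso_for_rv_availability(353.56, -42.06)
print(provenance)  # e.g. 'HARPS', 'XSHOOTER', or nan if nothing matched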
Example #9
def generate_verification_page(lcd, ls, freq, power, cutoutpaths, c_obj,
                               outvppath, outd, show_binned=True):
    """
    Make the verification page, which consists of:

    top row: entire light curve (with horiz bar showing rotation period)

    bottom row:
        lomb scargle periodogram  |  phased light curve  |  image w/ aperture

    ----------
    args:

        lcd (dict): has the light curve, aperture positions, some lomb
        scargle results.

        ls: LombScargle instance with everything passed.

        cutoutpaths (list): FFI cutout FITS paths.

        c_obj (SkyCoord): astropy sky coordinate of the target

        outvppath (str): path to save verification page to
    """
    cutout_wcs = lcd['cutout_wcss'][0]

    mpl.rcParams['xtick.direction'] = 'in'
    mpl.rcParams['ytick.direction'] = 'in'

    plt.close('all')

    fig = plt.figure(figsize=(12,12))

    #ax0 = plt.subplot2grid((3, 3), (0, 0), colspan=3)
    #ax1 = plt.subplot2grid((3, 3), (1, 0), colspan=3)
    #ax2 = plt.subplot2grid((3, 3), (2, 0))
    #ax3 = plt.subplot2grid((3, 3), (2, 1))
    #ax4 = plt.subplot2grid((3, 3), (2, 2), projection=cutout_wcs)

    ax0 = plt.subplot2grid((3, 3), (1, 0), colspan=3)
    ax1 = plt.subplot2grid((3, 3), (2, 0), colspan=3)
    ax2 = plt.subplot2grid((3, 3), (0, 0))
    ax3 = plt.subplot2grid((3, 3), (0, 1))
    ax4 = plt.subplot2grid((3, 3), (0, 2), projection=cutout_wcs)

    #
    # row 0: entire light curve, pre-detrending (with horiz bar showing
    # rotation period). plot model LC too.
    #
    try:
        ax0.scatter(lcd['predetrending_time'], lcd['predetrending_rel_flux'],
                    c='k', alpha=1.0, zorder=3, s=10, rasterized=True,
                    linewidths=0)
    except KeyError as e:
        print('ERR! {}\nReturning.'.format(e))
        return


    try:
        model_flux = nparr(lcd['predetrending_rel_flux']/lcd['rel_flux'])
    except ValueError:
        model_flux = 0

    if isinstance(model_flux, np.ndarray):
        ngroups, groups = find_lc_timegroups(lcd['predetrending_time'], mingap=0.5)
        for group in groups:
            ax0.plot(lcd['predetrending_time'][group], model_flux[group], c='C0',
                     alpha=1.0, zorder=2, rasterized=True, lw=2)

    # add the bar showing the derived period
    ymax = np.percentile(lcd['predetrending_rel_flux'], 95)
    ymin = np.percentile(lcd['predetrending_rel_flux'], 5)
    ydiff = 1.15*(ymax-ymin)

    epoch = np.nanmin(lcd['predetrending_time']) + lcd['ls_period']
    ax0.plot([epoch, epoch+lcd['ls_period']], [ymax, ymax], color='red', lw=2,
             zorder=4)

    ax0.set_ylim((ymin-ydiff,ymax+ydiff))

    #ax0.set_xlabel('Time [BJD$_{\mathrm{TDB}}$]')
    ax0.set_xticklabels('')
    ax0.set_ylabel('Raw flux')

    name = outd['name']
    group_id = outd['group_id']
    if name=='nan':
        nstr = 'Group {}'.format(group_id)
    else:
        nstr = '{}'.format(name)


    if not np.isfinite(outd['teff']):
        outd['teff'] = 0

    ax0.text(0.98, 0.97,
        'Teff={:d}K. {}'.format(int(outd['teff']), nstr),
             ha='right', va='top', fontsize='large', zorder=2,
             transform=ax0.transAxes
    )

    #
    # row 1: entire light curve (with horiz bar showing rotation period)
    #
    ax1.scatter(lcd['time'], lcd['rel_flux'], c='k', alpha=1.0, zorder=2, s=10,
                rasterized=True, linewidths=0)

    # add the bar showing the derived period
    ymax = np.percentile(lcd['rel_flux'], 95)
    ymin = np.percentile(lcd['rel_flux'], 5)
    ydiff = 1.15*(ymax-ymin)

    epoch = np.nanmin(lcd['time']) + lcd['ls_period']
    ax1.plot([epoch, epoch+lcd['ls_period']], [ymax, ymax], color='red', lw=2)

    ax1.set_ylim((ymin-ydiff,ymax+ydiff))

    ax1.set_xlabel('Time [BJD$_{\mathrm{TDB}}$]')
    ax1.set_ylabel('Detrended flux')

    #
    # row 2, col 0: lomb scargle periodogram
    #
    ax2.plot(1/freq, power, c='k')
    ax2.set_xscale('log')
    ax2.text(0.03, 0.97, 'FAP={:.1e}\nP={:.1f}d'.format(
        lcd['ls_fap'], lcd['ls_period']), ha='left', va='top',
        fontsize='large', zorder=2, transform=ax2.transAxes
    )
    ax2.set_xlabel('Period [day]', labelpad=-1)
    ax2.set_ylabel('LS power')

    #
    # row 2, col 1: phased light curve 
    #
    phzd = phase_magseries(lcd['time'], lcd['rel_flux'], lcd['ls_period'],
                           lcd['time'][np.argmin(lcd['rel_flux'])], wrap=False,
                           sort=True)

    ax3.scatter(phzd['phase'], phzd['mags'], c='k', rasterized=True, s=10,
                linewidths=0, zorder=1)

    if show_binned:
        try:
            binphasedlc = phase_bin_magseries(phzd['phase'], phzd['mags'],
                                              binsize=1e-2, minbinelems=5)
            binplotphase = binphasedlc['binnedphases']
            binplotmags = binphasedlc['binnedmags']

            ax3.scatter(binplotphase, binplotmags, s=10, c='darkorange',
                        linewidths=0, zorder=3, rasterized=True)
        except TypeError as e:
            print(e)
            pass

    xlim = ax3.get_xlim()
    ax3.hlines(1.0, xlim[0], xlim[1], colors='gray', linestyles='dotted',
               zorder=2)
    ax3.set_xlim(xlim)

    ymax = np.percentile(lcd['rel_flux'], 95)
    ymin = np.percentile(lcd['rel_flux'], 5)
    ydiff = 1.15*(ymax-ymin)
    ax3.set_ylim((ymin-ydiff,ymax+ydiff))

    ax3.set_xlabel('Phase', labelpad=-1)
    ax3.set_ylabel('Flux', labelpad=-0.5)

    #
    # row2, col2: image w/ aperture. put on the nbhr stars as dots too, to
    # ensure the wcs isn't wonky!
    #

    # acquire neighbor stars.
    radius = 2.0*u.arcminute

    nbhr_stars = Catalogs.query_region(
        "{} {}".format(float(c_obj.ra.value), float(c_obj.dec.value)),
        catalog="TIC",
        radius=radius
    )

    try:
        Tmag_cutoff = 15
        px,py = cutout_wcs.all_world2pix(
            nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['ra'],
            nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['dec'],
            0
        )
    except Exception as e:
        print('ERR! wcs all_world2pix got {}'.format(repr(e)))
        return

    tmags = nbhr_stars[nbhr_stars['Tmag'] < Tmag_cutoff]['Tmag']

    sel = (px > 0) & (px < 19) & (py > 0) & (py < 19)
    px,py = px[sel], py[sel]
    tmags = tmags[sel]

    ra, dec = float(c_obj.ra.value), float(c_obj.dec.value)
    target_x, target_y = cutout_wcs.all_world2pix(ra,dec,0)

    #
    # finally make it
    #

    img = lcd['median_imgs'][0]

    # some images come out as nans.
    if np.all(np.isnan(img)):
        img = np.ones_like(img)

    interval = vis.PercentileInterval(99.9)
    vmin,vmax = interval.get_limits(img)
    norm = vis.ImageNormalize(
        vmin=vmin, vmax=vmax, stretch=vis.LogStretch(1000))

    cset = ax4.imshow(img, cmap='YlGnBu_r', origin='lower', zorder=1,
                      norm=norm)

    ax4.scatter(px, py, marker='x', c='r', s=5, rasterized=True, zorder=2,
                linewidths=1)
    ax4.plot(target_x, target_y, mew=0.5, zorder=5, markerfacecolor='yellow',
             markersize=7, marker='*', color='k', lw=0)

    #ax4.coords.grid(True, color='white', ls='dotted', lw=1)
    lon = ax4.coords['ra']
    lat = ax4.coords['dec']

    lon.set_ticks(spacing=1*u.arcminute)
    lat.set_ticks(spacing=1*u.arcminute)

    lon.set_ticklabel(exclude_overlapping=True)
    lat.set_ticklabel(exclude_overlapping=True)

    ax4.coords.grid(True, color='white', alpha=0.3, lw=0.3, ls='dotted')

    #cb0 = fig.colorbar(cset, ax=ax4, extend='neither', fraction=0.046, pad=0.04)

    # overplot aperture
    radius_px = 3
    circle = plt.Circle((target_x, target_y), radius_px,
                         color='C1', fill=False, zorder=5)
    ax4.add_artist(circle)

    #
    # cleanup
    # 
    for ax in [ax0,ax1,ax2,ax3,ax4]:
        ax.get_yaxis().set_tick_params(which='both', direction='in',
                                       labelsize='small', top=True, right=True)
        ax.get_xaxis().set_tick_params(which='both', direction='in',
                                       labelsize='small', top=True, right=True)

    fig.tight_layout(w_pad=0.5, h_pad=0)

    #
    # save
    #
    fig.savefig(outvppath, dpi=300, bbox_inches='tight')
    print('made {}'.format(outvppath))
Example #10
def make_plots(df, compare_groups, atts):
    # for each group comparison
    results = {}
    img_locations = {}
    pvalues = {}
    idx = 1
    for g1, g2 in compare_groups:
        # prep data storage
        rootdir = '/home/nathan/Dropbox/NPPC/Domestication/Final Plots'
        directory = '{0}/{1}_{2}'.format(rootdir, g1, g2)

        # split data
        d = split_on_two_sample_types(df, g1, g2)
        bayes = {}
        pvals = {}
        tmp_gp_loc = {}
        # for each attribute
        for a in atts:
            if 'median' in a:
                directory = '{0}/{1}/{2}'.format(directory, '/_spike_medians',
                                                 a)
                dmed = aggregate_average_attribute(d, a, median=True)
                d1 = nparr(dmed[dmed['Sample Type'] == g1][a])
                d2 = nparr(dmed[dmed['Sample Type'] == g2][a])
                d1 = normalize(list(dmed[dmed['Sample Type'] == g1][a]))[0]
                d2 = normalize(list(dmed[dmed['Sample Type'] == g2][a]))[0]
            elif 'mean' in a:
                directory = '{0}/{1}/{2}'.format(directory, '/_spike_means', a)
                dmean = aggregate_average_attribute(d, a)
                d1 = nparr(dmean[dmean['Sample Type'] == g1][a])
                d2 = nparr(dmean[dmean['Sample Type'] == g2][a])
                #d1 = normalize(list(dmean[dmean['Sample Type'] == g1][a]))[0]
                #d2 = normalize(list(dmean[dmean['Sample Type'] == g2][a]))[0]
            else:
                directory = '{0}/{1}'.format(directory, a)
                d1 = nparr(d[d['Sample Type'] == g1][a])
                d2 = nparr(d[d['Sample Type'] == g2][a])
                #d1 = normalize(list(d[d['Sample Type'] == g1][a]))[0]
                #d2 = normalize(list(d[d['Sample Type'] == g2][a]))[0]

            if not os.path.exists(directory):
                os.makedirs(directory)

            _, p = stats.ttest_ind(d1, d2, equal_var=False)
            # res, summ = baysian_hypothesis_test(d1, d2, g1, g2)

            # bayes[a] = (res['difference of means'] < 0).mean() if (
            #     res['difference of means'] < 0).mean() < 0.5 else (1-(res['difference of means'] < 0).mean())

            # fp = plot_forest_plot(res, res.varnames[0], res.varnames[1])
            # plt.gcf().suptitle('{0} - 95% Credible Interval'.format(a))
            # plt.gca().set_title('')
            # plt.gcf().savefig('{0}/bayes_{1}.png'.format(directory, a))

            # dm = plot_difference_of_means(res)
            # plt.gca().set_title('')
            # plt.gcf().suptitle('{0} - Difference of Means'.format(a))
            # plt.gcf().savefig(
            #     '{0}/bayes_difference_of_means_{1}.png'.format(directory, a))

            fig, ax, p = plot_boxplot(d, a, x='Sample Type', p=p)
            fig.tight_layout()
            fig.savefig('{0}/{1}.png'.format(directory, a))
            tmp_gp_loc['{0}'.format(a)] = '{0}/{1}.png'.format(directory, a)
            directory = '{0}/{1}_{2}'.format(rootdir, g1, g2)
            pvals[a] = p
            idx = idx + 1
        pvalues['{0}+{1}'.format(g1, g2)] = pvals
        results['{0}+{1}'.format(g1, g2)] = bayes
        img_locations['{0}_{1}'.format(g1, g2)] = tmp_gp_loc

    # Now do on spike averages
    return (img_locations, results, pvalues)
Example #11
if basedata == 'fullfaint_edr3':
    nbhd_df, cg18_df, kc19_df, target_df = _get_fullfaint_edr3_dataframes()
else:
    raise NotImplementedError

c_cg18, c_cg18_j2000 = precess_gaia_coordinates(cg18_df)
c_kc19, c_kc19_j2000 = precess_gaia_coordinates(kc19_df)
c_target, c_target_j2000 = precess_gaia_coordinates(target_df)

# require parallax S/N>5 to avoid negative parallaxes
sel_nbhd = (nbhd_df.parallax / nbhd_df.parallax_error) > 5
nbhd_df = nbhd_df[sel_nbhd]
c_nbhd, c_nbhd_j2000 = precess_gaia_coordinates(nbhd_df)

# now search many DENIS cones through astroquery's vizier.
get_mag = lambda df: nparr(df.phot_g_mean_mag)
get_ids = lambda df: nparr(df.source_id)

get_merge = lambda df0, df_dxm: df0.merge(
    df_dxm, left_on='source_id', right_on='_id', how='left')

target_dxm = get_denis_xmatch(c_target_j2000,
                              _id=get_ids(target_df),
                              mag=get_mag(target_df))
out_target = get_merge(target_df, target_dxm)

cg18_dxm = get_denis_xmatch(c_cg18_j2000,
                            _id=get_ids(cg18_df),
                            mag=get_mag(cg18_df))
out_cg18 = get_merge(cg18_df, cg18_dxm)
Example #12
    # this plot is lines for each percentile in [2,25,50,75,98],
    # binned for every magnitude interval.
    markers = itertools.cycle(('o', 'v', '>', 'D', 's', 'P'))

    #1190 is nsub, 1192 is rsub
    for projid, sstr in zip([1190, 1192], ['nsub', 'rsub']):

        pctile_df = pd.read_csv(
            '../data/stats_files_1190_vs_1192/' +
            '{}_percentiles_RMS_vs_med_mag_TF2.csv'.format(projid))

        for ix, row in pctile_df.iterrows():
            pctile = row.name
            label = '{} - {}%'.format(sstr, str(pctile))

            midbins = nparr(row.index)
            vals = nparr(row)

            ax.plot(midbins, vals, label=label, marker=next(markers))

        ax.legend(loc='best', fontsize='xx-small')

        ax.set_yscale('log')
        ax.set_xlabel('{:s} median instrument magnitude'.format(apstr.upper()))
        ax.set_ylabel('{:s} {:s}'.format(apstr.upper(), yaxisval))
        if percentiles_xlim:
            ax.set_xlim(percentiles_xlim)
        if percentiles_ylim:
            ax.set_ylim(percentiles_ylim)

        savname = (os.path.join(
Example #13
import numpy as np, pandas as pd
from cdips_followup.manage_ephemerides import query_ephemeris
from cdips_followup.utils import (get_cdips_candidates,
                                  given_sourceid_get_gaiarow)
from numpy import array as nparr
from astropy.coordinates import ICRS
from astropy import units as u

from astroquery.gaia import Gaia

source_id = '2103737241426734336'

gaia_r = given_sourceid_get_gaiarow(source_id)
ra, dec = float(gaia_r['ra']), float(gaia_r['dec'])
pmra, pmdec = float(gaia_r['pmra']), float(gaia_r['pmdec'])

c = ICRS(nparr(ra) * u.deg, nparr(dec) * u.deg)

rahmsstr = c.ra.to_string(u.hour, sep=' ', pad=True)
decdmsstr = c.dec.to_string(u.degree, sep=' ', pad=True)

# Please note that the proper motion in RA is in seconds of time per year, as
# required by the Magellan telescope control software, which means: divide the
# proper motion in arcsec by 15, then divide by the cosine of the Declination.
#
# Columns are pipe-separated:
# Name | RA | Dec | Equinox | pmRA (sec/year) | pmDC (arcsec/year) | Epoch |
# Binning (1x2 or 3x3) | Speed (Normally “Normal”) | Comment including at least magnitude
pmDC = pmdec / 1e3
pmRA = (pmra / (1e3 * 15)) / np.cos(np.deg2rad(dec))

print('pmRA (sec/year) | pmDC (arcsec/year) | Epoch')
print('{:.5f} | {:.4f} | {:.1f} '.format(pmRA, pmDC, 2015.5))
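
A quick numerical check of the conversion with a made-up proper motion (not from the source): 30 mas/yr in RA at Dec = -60 deg corresponds to 0.004 seconds of time per year.

import numpy as np

pmra_masyr, dec_deg = 30.0, -60.0
pmRA_sec_per_yr = (pmra_masyr / (1e3 * 15)) / np.cos(np.deg2rad(dec_deg))
print(pmRA_sec_per_yr)  # 0.030 arcsec/yr / 15 / cos(60 deg) = 0.004 sec/yr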
Example #14
def main(
        ticid,
        pickledir='/home/luke/Dropbox/proj/tessorbitaldecay/results/tess_lightcurve_fit_parameters/',
        sampledir='/home/luke/local/emcee_chains/',
        fittype='mandelagol_and_line',
        ndim=4):

    # from Southworth+ 2009 table 8:

    w08_rp = 1.416 * u.Rjup
    w08_rstar = 0.937 * u.Rsun

    g09_rp = 1.304 * u.Rjup
    g09_rstar = 0.873 * u.Rsun

    w09_rp = 1.365 * u.Rjup
    w09_rstar = 0.912 * u.Rsun

    s09_rp = 1.371 * u.Rjup
    s09_rstar = 0.914 * u.Rsun

    for paper, rp, rstar in zip(
        ['Wilson+08', 'Gillon+09', 'Winn+09', 'Southworth+09'],
        [w08_rp, g09_rp, w09_rp, s09_rp],
        [w08_rstar, g09_rstar, w09_rstar, s09_rstar]):

        print('{:s}: Rp/Rstar = {:.4g}'.format(paper, (rp / rstar).cgs))

    # NOW GET UR MEASURED RP/RSTAR VALUES, COMPARE
    pickledir += str(ticid)
    fpattern = ('{:s}_{:s}_fit_empiricalerrs_t???.pickle'.format(
        str(ticid), fittype))
    fnames = np.sort(glob(os.path.join(pickledir, fpattern)))
    samplepattern = (
        '{:s}_{:s}_fit_samples_{:d}d_t???_empiricalerrs.h5'.format(
            str(ticid), fittype, ndim))
    samplenames = np.sort(glob(sampledir + samplepattern))

    rp_list, rp_bigerrs = ([], [])

    transit_ix = 0
    for fname, samplename in zip(fnames, samplenames):
        transit_ix += 1

        try:
            d = pickle.load(open(fname, 'rb'))

            fitparams = d['fitinfo']['finalparams']
            fiterrs = d['fitinfo']['finalparamerrs']

            rp_list.append(fitparams['rp'])
            rp_merrs = fiterrs['std_merrs']['rp']
            rp_perrs = fiterrs['std_perrs']['rp']
            rp_bigerrs.append(max((rp_merrs, rp_perrs)))

        except Exception as e:
            print(e)
            print('transit {:d} failed, continue'.format(transit_ix))
            continue

    rp, rp_bigerr = (nparr(rp_list), nparr(rp_bigerrs))

    print('measured values from TESS:')
    for _rp, _rperr in zip(rp, rp_bigerr):
        print('{:.4f} +/- {:.4f}'.format(_rp, _rperr))

    print('average: {:.4f}'.format(np.mean(rp[rp_bigerr < 0.01])))
    print('std: {:.4f}'.format(np.std(rp[rp_bigerr < 0.01])))
Example #15
def main(is_dayspecific_exofop_upload=1,
         cdipssource_vnum=0.4,
         uploadnamestr='sectors_12_thru_13_clear_threshold'):
    """
    Put together a few useful CSV candidate summaries:

    * bulk uploads to exofop/tess

    * observer info sparse (focus on TICIDs, gaia mags, positions on sky, etc)

    * observer info full (stellar rvs for membership assessment; ephemeris
    information)

    * merge of everything (exoFOP upload, + the subset of gaia information
    useful to observers)

    ----------
    Args:

        is_dayspecific_exofop_upload: if True, reads in the manually-written (from
        google spreadsheet) comments and source_ids, and writes those to a
        special "TO_EXOFOP" csv file.

        uploadnamestr: used as unique identifying string in file names
    """

    #
    # Read in the results from the fits
    #
    paramglob = os.path.join(
        fitdir,
        "sector-*_CLEAR_THRESHOLD/fitresults/hlsp_*gaiatwo*_llc/*fitparameters.csv"
    )
    parampaths = glob(paramglob)
    statusglob = os.path.join(
        fitdir,
        "sector-*_CLEAR_THRESHOLD/fitresults/hlsp_*gaiatwo*_llc/*.stat")
    statuspaths = glob(statusglob)

    statuses = [
        dict(load_status(f)['fivetransitparam_fit']) for f in statuspaths
    ]

    param_df = pd.concat((pd.read_csv(f, sep='|') for f in parampaths))

    outpath = os.path.join(
        fitdir, "{}_{}_mergedfitparams.csv".format(today_YYYYMMDD(),
                                                   uploadnamestr))
    param_df['param_path'] = parampaths
    param_df.to_csv(outpath, index=False, sep='|')
    print('made {}'.format(outpath))

    status_df = pd.DataFrame(statuses)

    status_df['statuspath'] = statuspaths

    status_gaiaids = list(
        map(
            lambda x: int(
                os.path.dirname(x).split('gaiatwo')[1].split('-')[0].lstrip(
                    '0')), statuspaths))

    status_df['source_id'] = status_gaiaids

    if is_dayspecific_exofop_upload:

        #
        # Manually commented candidates are the only ones we're uploading.
        #
        manual_comment_df = pd.read_csv(
            '/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/data/exoFOP_uploads/{}_cdips_candidate_upload.csv'
            .format(today_YYYYMMDD()),
            sep=",")
        common = status_df.merge(manual_comment_df,
                                 on='source_id',
                                 how='inner')
        sel_status_df = status_df[status_df.source_id.isin(common.source_id)]

        #
        # WARN: the MCMC fits should have converged before uploading.
        # (20190918 had two exceptions, where the fit looked fine.)
        #
        if len(sel_status_df[sel_status_df['is_converged'] == 'False']) > 0:

            print('\nWRN! THE FOLLOWING CANDIDATES ARE NOT CONVERGED')
            print(sel_status_df[sel_status_df['is_converged'] == 'False'])

        param_gaiaids = list(
            map(
                lambda x: int(
                    os.path.basename(x).split('gaiatwo')[1].split('-')[0].
                    lstrip('0')), parampaths))
        param_df['source_id'] = param_gaiaids

        #
        # Require that you actually have a parameter file (...).
        #
        _df = sel_status_df.merge(param_df, on='source_id', how='inner')

        to_exofop_df = param_df[param_df.source_id.isin(_df.source_id)]

        if len(to_exofop_df) != len(manual_comment_df):

            print('\nWRN! {} CANDIDATES DID NOT HAVE PARAMETERS'.format(
                len(manual_comment_df) - len(to_exofop_df)))
            print('They are...')
            print(manual_comment_df[~manual_comment_df.source_id.
                                    isin(to_exofop_df.source_id)])
            print('\n')

        #
        # Duplicate entries in "to_exofop_df" are multi-sector. Average their
        # parameters (really will end up just being durations) across sectors,
        # and then remove the duplicate multi-sector rows using the "groupby"
        # aggregator. This removes the string-based columns, which we can
        # reclaim by a "drop_duplicates" call, since they don't have
        # sector-specific information.  Then, assign comments and format as
        # appropriate for ExoFop-TESS. Unique tag for the entire upload.
        #

        to_exofop_df['source_id'] = to_exofop_df['source_id'].astype(str)

        mean_val_to_exofop_df = to_exofop_df.groupby(
            'target').mean().reset_index()

        string_cols = [
            'target', 'flag', 'disp', 'tag', 'group', 'notes', 'source_id'
        ]
        dup_dropped_str_df = (to_exofop_df.drop_duplicates(
            subset=['target'], keep='first', inplace=False)[string_cols])

        out_df = mean_val_to_exofop_df.merge(dup_dropped_str_df,
                                             how='left',
                                             on='target')

        #
        # The above procedure got the epochs on multisector planets wrong.
        # Determine (t0,P) by fitting a line to entries with >=3 sectors
        # instead. For the two-sector case, due to bad covariance matrices,
        # just use the newest ephemeris.
        #
        multisector_df = (to_exofop_df[to_exofop_df.target.groupby(
            to_exofop_df.target).transform('value_counts') > 1])
        u_multisector_df = out_df[out_df.target.isin(multisector_df.target)]

        # temporarily drop the multisector rows from out_df (they will be
        # re-merged)
        out_df = out_df.drop(np.argwhere(
            out_df.target.isin(multisector_df.target)).flatten(),
                             axis=0)

        ephem_d = {}
        for ix, t in enumerate(np.unique(multisector_df.target)):
            sel = (multisector_df.target == t)
            tmid = nparr(multisector_df[sel].epoch)
            tmid_err = nparr(multisector_df[sel].epoch_unc)
            init_period = nparr(multisector_df[sel].period.mean())

            E, init_t0 = get_epochs_given_midtimes_and_period(tmid,
                                                              init_period,
                                                              verbose=False)

            popt, pcov = curve_fit(linear_model,
                                   E,
                                   tmid,
                                   p0=(init_period, init_t0),
                                   sigma=tmid_err)

            if np.all(np.isinf(pcov)):
                # if least-squares doesn't give good error (i.e., just two
                # epochs), take the most recent epoch.
                s = np.argmax(tmid)
                use_t0 = tmid[s]
                use_t0_err = tmid_err[s]
                use_period = nparr(multisector_df[sel].period)[s]
                use_period_err = nparr(multisector_df[sel].period_unc)[s]

            else:
                use_t0 = popt[1]
                use_t0_err = pcov[1, 1]**0.5
                use_period = popt[0]
                use_period_err = pcov[0, 0]**0.5

            if DEBUG:
                print(
                    'init tmid {}, tmiderr {}\nperiod {}, perioderr {}'.format(
                        tmid, tmid_err, nparr(multisector_df[sel].period),
                        nparr(multisector_df[sel].period_unc)))
                print(
                    'use tmid {}, tmiderr {}\nperiod {}, perioderr {}'.format(
                        use_t0, use_t0_err, use_period, use_period_err))
                print(10 * '-')

            ephem_d[ix] = {
                'target': t,
                'epoch': use_t0,
                'epoch_unc': use_t0_err,
                'period': use_period,
                'period_unc': use_period_err
            }

        ephem_df = pd.DataFrame(ephem_d).T

        mdf = ephem_df.merge(u_multisector_df,
                             how='left',
                             on='target',
                             suffixes=('', '_DEPRECATED'))
        mdf = mdf.drop([c for c in mdf.columns if 'DEPRECATED' in c],
                       axis=1,
                       inplace=False)

        temp_df = out_df.append(mdf, ignore_index=True, sort=False)
        out_df = temp_df

        to_exofop_df = out_df[COLUMN_ORDER]

        # to_exofop_df = mdf[COLUMN_ORDER] # special behavior for 2020/02/07 fix
        # to_exofop_df['flag'] = 'newparams'

        _df = manual_comment_df[manual_comment_df.source_id.isin(
            to_exofop_df.source_id)]

        comments = list(_df['comment'])
        # comments = 'Fixed ephemeris bug. (Old epoch was erroneous).' # #2020/02/07

        for c in comments:
            assert len(c) <= 119

        to_exofop_df = to_exofop_df.sort_values(by="source_id")
        _df = _df.sort_values(by="source_id")

        to_exofop_df['notes'] = comments
        to_exofop_df['tag'] = ('{}_bouma_cdips-v01_00001'.format(
            today_YYYYMMDD()))

        istoi = ~to_exofop_df['target'].astype(str).str.startswith('TIC')
        if np.any(istoi):
            newtargetname = 'TOI' + to_exofop_df[istoi].target.astype(str)
            to_exofop_df.loc[istoi, 'target'] = newtargetname

        outpath = os.path.join(
            exofopdir, "{}_{}_w_sourceid.csv".format(today_YYYYMMDD(),
                                                     uploadnamestr))
        to_exofop_df.to_csv(outpath, index=False, sep='|')
        print('made {}'.format(outpath))

        to_exofop_df = to_exofop_df.drop(['source_id'], axis=1)

        outpath = os.path.join(
            exofopdir, "params_planet_{}_001.txt".format(today_YYYYMMDD()))
        for c in ['epoch', 'epoch_unc', 'period', 'period_unc']:
            to_exofop_df[c] = to_exofop_df[c].astype(float)
        to_exofop_df = to_exofop_df.round(FORMATDICT)
        to_exofop_df['depth'] = to_exofop_df['depth'].astype(int)
        to_exofop_df['depth_unc'] = to_exofop_df['depth_unc'].astype(int)
        to_exofop_df.to_csv(outpath, index=False, sep='|', header=False)
        print('made {}'.format(outpath))

        # manually check these...
        print('\n' + 42 * '=' + '\n')
        print('\nPeriod uncertainties [minutes]')
        print(to_exofop_df['period_unc'] * 24 * 60)
        print('\nEpoch uncertainties [minutes]')
        print(to_exofop_df['epoch_unc'] * 24 * 60)
        print('\nPlanet radii [Rearth]')
        print(to_exofop_df[['radius', 'radius_unc', 'notes']])
        print('\n' + 42 * '=' + '\n')

    #
    # above is the format exofop-TESS wants. however it's not particularly
    # useful for followup. for that, we want: gaia IDs, magnitudes, ra, dec.
    #
    gaiaids = list(
        map(
            lambda x: int(
                os.path.basename(x).split('gaiatwo')[1].split('-')[0].lstrip(
                    '0')), parampaths))

    lcnames = list(
        map(
            lambda x: os.path.basename(x).replace('_fitparameters.csv', '.fits'
                                                  ), parampaths))

    lcdir = '/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-*/cam?_ccd?/'
    lcpaths = [glob(os.path.join(lcdir, lcn))[0] for lcn in lcnames]

    # now get the header values
    kwlist = [
        'RA_OBJ', 'DEC_OBJ', 'CDIPSREF', 'CDCLSTER', 'phot_g_mean_mag',
        'phot_bp_mean_mag', 'phot_rp_mean_mag', 'TESSMAG', 'Gaia-ID', 'TICID',
        'TICTEFF', 'TICRAD', 'TICMASS'
    ]

    for k in kwlist:
        thislist = []
        for l in lcpaths:
            thislist.append(iu.get_header_keyword(l, k, ext=0))
        param_df[k] = np.array(thislist)

    # now search for stellar RV xmatch
    res = [
        fr.get_rv_xmatch(ra, dec, G_mag=gmag, dr2_sourceid=s)
        for ra, dec, gmag, s in zip(
            list(param_df['RA_OBJ']), list(param_df['DEC_OBJ']),
            list(param_df['phot_g_mean_mag']), list(param_df['Gaia-ID']))
    ]

    res = np.array(res)
    param_df['stellar_rv'] = res[:, 0]
    param_df['stellar_rv_unc'] = res[:, 1]
    param_df['stellar_rv_provenance'] = res[:, 2]

    # make column showing whether there are ESO spectra available
    res = [
        fr.wrangle_eso_for_rv_availability(ra, dec)
        for ra, dec in zip(list(param_df['RA_OBJ']), list(param_df['DEC_OBJ']))
    ]
    param_df['eso_rv_availability'] = nparr(res)[:, 2]

    #
    # try to get cluster RV. first from Soubiran, then from Kharchenko.
    # to do this, load in CDIPS target catalog. merging the CDCLSTER name
    # (comma-delimited string) against the target catalog on source identifiers
    # allows unique cluster name identification, since I already did that,
    # earlier.
    #
    cdips_df = ccl.get_cdips_pub_catalog(ver=cdipssource_vnum)
    dcols = 'cluster;reference;source_id;unique_cluster_name'
    ccdf = cdips_df[dcols.split(';')]
    ccdf['source_id'] = ccdf['source_id'].astype(np.int64)
    mdf = param_df.merge(ccdf,
                         how='left',
                         left_on='source_id',
                         right_on='source_id')
    param_df['unique_cluster_name'] = nparr(mdf['unique_cluster_name'])

    s19 = gvc.get_soubiran_19_rv_table()
    k13_param = gvc.get_k13_param_table()

    c_rvs, c_err_rvs, c_rv_nstar, c_rv_prov = [], [], [], []
    for ix, row in param_df.iterrows():

        if row['unique_cluster_name'] in nparr(s19['ID']):
            sel = (s19['ID'] == row['unique_cluster_name'])
            c_rvs.append(float(s19[sel]['RV'].iloc[0]))
            c_err_rvs.append(float(s19[sel]['e_RV'].iloc[0]))
            c_rv_nstar.append(int(s19[sel]['Nsele'].iloc[0]))
            c_rv_prov.append('Soubiran+19')
            continue

        elif row['unique_cluster_name'] in nparr(k13_param['Name']):
            sel = (k13_param['Name'] == row['unique_cluster_name'])
            c_rvs.append(float(k13_param[sel]['RV'].iloc[0]))
            c_err_rvs.append(float(k13_param[sel]['e_RV'].iloc[0]))
            c_rv_nstar.append(int(k13_param[sel]['o_RV'].iloc[0]))
            c_rv_prov.append('Kharchenko+13')
            continue

        else:
            c_rvs.append(np.nan)
            c_err_rvs.append(np.nan)
            c_rv_nstar.append(np.nan)
            c_rv_prov.append('')

    param_df['cluster_rv'] = c_rvs
    param_df['cluster_err_rv'] = c_err_rvs
    param_df['cluster_rv_nstar'] = c_rv_nstar
    param_df['cluster_rv_provenance'] = c_rv_prov

    #
    # finally, begin writing the output
    #

    outpath = ("/home/lbouma/proj/cdips/results/fit_gold/"
               "{}_{}_fitparams_plus_observer_info.csv".format(
                   today_YYYYMMDD(), uploadnamestr))
    param_df.to_csv(outpath, index=False, sep='|')
    print('made {}'.format(outpath))

    #
    # sparse observer info cut
    #
    scols = [
        'target', 'flag', 'disp', 'tag', 'group', 'RA_OBJ', 'DEC_OBJ',
        'CDIPSREF', 'CDCLSTER', 'phot_g_mean_mag', 'phot_bp_mean_mag',
        'phot_rp_mean_mag', 'TICID', 'TESSMAG', 'TICTEFF', 'TICRAD', 'TICMASS',
        'Gaia-ID'
    ]
    sparam_df = param_df[scols]

    outpath = ("/home/lbouma/proj/cdips/results/fit_gold/"
               "{}_{}_observer_info_sparse.csv".format(today_YYYYMMDD(),
                                                       uploadnamestr))
    sparam_df.to_csv(outpath, index=False, sep='|')
    print('made {}'.format(outpath))

    #
    # full observer info cut
    #
    scols = [
        'target', 'flag', 'disp', 'tag', 'group', 'RA_OBJ', 'DEC_OBJ',
        'CDIPSREF', 'CDCLSTER', 'phot_g_mean_mag', 'phot_bp_mean_mag',
        'phot_rp_mean_mag', 'TICID', 'TESSMAG', 'TICTEFF', 'TICRAD', 'TICMASS',
        'Gaia-ID', 'period', 'period_unc', 'epoch', 'epoch_unc', 'depth',
        'depth_unc', 'duration', 'duration_unc', 'radius', 'radius_unc',
        'stellar_rv', 'stellar_rv_unc', 'stellar_rv_provenance',
        'eso_rv_availability', 'cluster_rv', 'cluster_err_rv',
        'cluster_rv_nstar', 'cluster_rv_provenance'
    ]
    sparam_df = param_df[scols]

    outpath = ("/home/lbouma/proj/cdips/results/fit_gold/"
               "{}_{}_observer_info_full.csv".format(today_YYYYMMDD(),
                                                     uploadnamestr))
    sparam_df.to_csv(outpath, index=False, sep='|')
    print('made {}'.format(outpath))
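
The multi-sector ephemeris refit above fits t_mid = t0 + P*E to per-sector mid-times with curve_fit. A minimal standalone sketch of that step with synthetic mid-times; the form of linear_model is an assumption, chosen to be consistent with the p0=(init_period, init_t0) ordering used above.

import numpy as np
from scipy.optimize import curve_fit

def linear_model(E, period, t0):
    # assumed functional form: mid-time as a linear function of epoch number
    return t0 + period * E

# synthetic mid-times from three sectors: P = 3.21 d, t0 = 1500.0 (BTJD)
E = np.array([0, 8, 17])
tmid = 1500.0 + 3.21 * E + np.random.normal(0, 2e-4, size=E.size)
tmid_err = 2e-4 * np.ones_like(tmid)

popt, pcov = curve_fit(linear_model, E, tmid, p0=(3.2, 1500.), sigma=tmid_err)
print(popt, np.sqrt(np.diag(pcov)))  # refined (P, t0) and their uncertainties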
Example #16
        print(outstr)


if __name__ == "__main__":

    calc_dilution = 0
    get_merge_dilution = 0
    merge_tic = 0
    merge_xmatch = 0
    do_yield_calc = 1

    cg18_df = get_cg18_stars_above_cutoff_T_mag()

    if calc_dilution:
        compute_dilution_fractions(
            nparr(cg18_df.source_id.astype(str))
        )

    if get_merge_dilution:
        cg18_df = get_merge_dilution_fractions(
            cg18_df, nparr(cg18_df.source_id.astype(str))
        )

    if merge_tic:
        df = get_star_info(cg18_df)

    if merge_xmatch:
        _merge_tic8_xmatch()

    if do_yield_calc:
        for a,b in product([True,False],[True,False]):
Example #17
def plot_rms_vs_mag(stats,
                    yaxisval='RMS',
                    percentiles_xlim=[5.8, 16.2],
                    percentiles_ylim=[1e-5, 1e-1],
                    percentiles=[25, 50, 75],
                    outdir='../results/rms_vs_mag/',
                    outprefix='',
                    catmagstr=None):
    """
    catmagstr (str): None, 'Tmag', or 'cat_mag' (where the latter is Gaia Rp)
    """

    if isinstance(catmagstr, str):
        if catmagstr == 'Tmag':
            df = pd.read_csv(
                '../data/rms_vs_mag/projid_1301_gaia_2mass_tic_all_matches.csv'
            )

            matched_gaia_ids = np.array(df['source_id']).astype(int)
            stats_ids = stats['lcobj'].astype(int)

            int1d, stats_ind, df_ind = np.intersect1d(stats_ids,
                                                      matched_gaia_ids,
                                                      return_indices=True)

            print('starting with {} LCs w/ Gaia IDs'.format(len(stats)))
            stats = stats[stats_ind]
            print('post xmatch {} LCs w/ Gaia IDs & 2mass ids & Tmag'.format(
                len(stats)))

            # NOTE: unresolved -- it is unclear why so few sources survive this
            # intersection.
            df = df.iloc[df_ind]

            assert np.array_equal(stats['lcobj'].astype(int),
                                  df['source_id'].astype(int))

            # if above is true, then can do this
            mags = df['tmag']

    apstr = 'tf2'

    medstr = 'med_' + apstr if not isinstance(catmagstr, str) else catmagstr
    yvalstr = 'stdev_' + apstr

    if catmagstr != 'Tmag':
        mags = stats[medstr]
    rms = stats[yvalstr]

    minmag = np.floor(np.nanmin(mags)).astype(int)
    maxmag = np.ceil(np.nanmax(mags)).astype(int)
    magdiff = 0.25
    mag_bins = [(me, me + magdiff)
                for me in np.arange(minmag, maxmag, magdiff)]

    percentile_dict = {}
    for mag_bin in mag_bins:

        thismagmean = np.round(np.mean(mag_bin), 2)
        percentile_dict[thismagmean] = {}

        thismin, thismax = min(mag_bin), max(mag_bin)
        sel = (mags > thismin) & (mags <= thismax)

        for percentile in percentiles:
            val = np.nanpercentile(stats[sel][yvalstr], percentile)
            percentile_dict[thismagmean][percentile] = np.round(val, 7)

    pctile_df = pd.DataFrame(percentile_dict)

    plt.close('all')
    fig, ax = plt.subplots(figsize=(0.8 * 4, 0.8 * 3))

    # One curve per requested percentile (default 25/50/75); the actual
    # plotting of these curves is currently disabled below.
    for ix, row in pctile_df.iterrows():
        pctile = row.name
        label = '{}%'.format(str(pctile))
        if label == '50%':
            label = "Median"
        midbins, vals = nparr(row.index), nparr(row)

        # NOTE: not showing
        #ax.plot(midbins, vals, label=label, marker='o', ms=0, zorder=3, lw=0.5)

    ax.scatter(mags,
               rms,
               c='k',
               alpha=0.12,
               zorder=-5,
               s=0.5,
               rasterized=True,
               linewidths=0)

    if yaxisval == 'RMS':
        Tmag = np.linspace(6, 16, num=200)

        # lnA = 3.29685004771
        # B = 0.8500214657
        # C = -0.2850416324
        # D = 0.039590832137
        # E = -0.00223080159
        # F = 4.73508403525e-5
        # ln_sigma_1hr = (lnA + B*Tmag + C*Tmag**2 + D*Tmag**3 + E*Tmag**4 +
        #                 F*Tmag**5)
        # sigma_1hr = np.exp(ln_sigma_1hr)
        # sigma_30min = sigma_1hr * np.sqrt(2)

        # (RA, Dec) = (90, -66) deg is near the southern ecliptic pole. These
        # are favorable coordinates, but the exact choice does not matter for
        # this noise estimate.
        coords = np.array([90 * np.ones_like(Tmag),
                           -66 * np.ones_like(Tmag)]).T
        out = noise_model(Tmag, coords=coords, exptime=1800)

        noise_star = out[2, :]
        noise_ro = out[4, :]
        noise_star_plus_ro = np.sqrt(noise_star**2 + noise_ro**2)

        ax.plot(Tmag,
                noise_star_plus_ro,
                ls='-',
                zorder=-2,
                lw=1,
                color='C1',
                label='Photon + read')
        ax.plot(Tmag,
                noise_star,
                ls='--',
                zorder=-3,
                lw=1,
                color='C3',
                label='Photon')
        ax.plot(Tmag,
                noise_ro,
                ls='--',
                zorder=-4,
                lw=1,
                color='C4',
                label='Read')

    ax.legend(loc='upper left', fontsize='xx-small')
    ax.set_yscale('log')
    xlabel = 'Instrument magnitude'
    if catmagstr == 'cat_mag':
        # Strictly this is the Gaia Rp magnitude, but we checked that it tracks
        # the TESS magnitude closely; a full cross-match is not worth the
        # trouble here.
        xlabel = 'TESS magnitude'
    if catmagstr == 'Tmag':
        xlabel = 'TESS magnitude'
    ax.set_xlabel(xlabel, labelpad=0.8)
    ax.set_ylabel('RMS (30 minutes)', labelpad=0.8)
    ax.set_xlim(percentiles_xlim)
    ax.set_ylim(percentiles_ylim)

    ax.yaxis.set_ticks_position('both')
    ax.xaxis.set_ticks_position('both')
    ax.get_yaxis().set_tick_params(which='both', direction='in')
    ax.get_xaxis().set_tick_params(which='both', direction='in')
    ax.tick_params(axis='both', which='major', labelsize='small')

    if not isinstance(catmagstr, str):
        savname = os.path.join(
            outdir, 'percentiles_{:s}_vs_med_mag_{:s}.png'.format(
                yaxisval, apstr.upper()))
    else:
        if catmagstr == 'cat_mag':
            savname = os.path.join(
                outdir, 'percentiles_{:s}_vs_GaiaRp_mag_{:s}.png'.format(
                    yaxisval, apstr.upper()))
        if catmagstr == 'Tmag':
            savname = os.path.join(
                outdir, 'percentiles_{:s}_vs_TESS_mag_{:s}.png'.format(
                    yaxisval, apstr.upper()))
    fig.tight_layout(pad=0.2)
    fig.savefig(savname, dpi=400)
    print('%sZ: made plot: %s' % (datetime.utcnow().isoformat(), savname))
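
# Minimal usage sketch (not from the original source): build a toy `stats`
# structured array carrying the fields plot_rms_vs_mag reads ('lcobj',
# 'med_tf2', 'stdev_tf2'). Assumes the module-level imports used above
# (numpy as np, matplotlib, noise_model, ...) are available.
_rng = np.random.default_rng(42)
_n = 2000
_stats = np.zeros(_n, dtype=[('lcobj', 'i8'), ('med_tf2', 'f8'),
                             ('stdev_tf2', 'f8')])
_stats['lcobj'] = np.arange(_n)
_stats['med_tf2'] = _rng.uniform(6.5, 16.0, _n)                    # toy instrument mags
_stats['stdev_tf2'] = 10**(-4.2 + 0.28*(_stats['med_tf2'] - 6.5))  # toy RMS trend
plot_rms_vs_mag(_stats, yaxisval='RMS', outdir='.', catmagstr=None)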
Example #18
def compute_dilution_fraction(
    source_id,
    aperture_radii=[0.75,1.,1.25,1.5,1.75,2.,2.25,2.5],
    ):

    assert isinstance(source_id, str)

    outpath = (
        '../../data/dilution_fractions/dilutionvalues/{}.csv'.
        format(source_id)
    )

    if os.path.exists(outpath):
        print('found {}, skip'.format(outpath))
        return
    #
    # Cone query down to G_Rp = 19, within ~6 pixels = 120 arcsec = 0.0333 deg.
    # Use gaia2read, because it is faster than internet-based queries.
    #

    ra, dec, target_Tmag = get_ra_dec_Tmag_given_sourceid(source_id)
    df = conequery_given_radec_sourceid(ra, dec, source_id)

    #
    # Calculate separations and Tmags.
    #
    c_obj = SkyCoord(ra, dec, unit=(u.deg), frame='icrs')

    c_cone = SkyCoord(nparr(df['RA[deg][2]']), nparr(df['Dec[deg][3]']),
                      unit=(u.deg), frame='icrs')

    cone_seps = c_cone.separation(c_obj).to(u.arcsec).value

    df['sep_arcsec'] = cone_seps

    px_scale = 20.25 # arcsec/px
    df['sep_px'] = cone_seps / px_scale

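    # Predict the TESS magnitude from Gaia photometry; the coefficients below
    # follow the TIC-style polynomial conversion of G and (Bp - Rp) to T.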
    Tmag_pred = (df['phot_g_mean_mag[20]']
                - 0.00522555 * (df['phot_bp_mean_mag[25]'] - df['phot_rp_mean_mag[30]'])**3
                + 0.0891337 * (df['phot_bp_mean_mag[25]'] - df['phot_rp_mean_mag[30]'])**2
                - 0.633923 * (df['phot_bp_mean_mag[25]'] - df['phot_rp_mean_mag[30]'])
                + 0.0324473)

    df['Tmag_pred'] = Tmag_pred

    #
    # Compute dilution fraction for each aperture radius.
    #

    dilutions = []
    nstars = []

    for ap_radius in aperture_radii:

        sdf = df[df.sep_px < ap_radius]

        numerator = 10**(-0.4 * target_Tmag)
        valid_Tmags = nparr(sdf.loc[~pd.isnull(sdf.Tmag_pred), 'Tmag_pred'])
        denominator = np.sum(10**(-0.4 * valid_Tmags))
        dilution = numerator / denominator

        dilutions.append(dilution)
        nstars.append(len(sdf))

    #
    # Save dataframe of dilutions and nstars for each target star.
    #
    outdf = pd.DataFrame({
        'ap_radius': aperture_radii,
        'dilution': dilutions,
        'nstars': nstars
    })

    outdf.to_csv(outpath, index=False)
    print('made {}'.format(outpath))
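
# Worked toy example of the dilution fraction computed above (not from the
# original source): a T=10 target with a single T=12 neighbor in the aperture.
_target_Tmag = 10.0
_in_aperture_Tmags = np.array([10.0, 12.0])  # the target itself plus one neighbor
_dilution = 10**(-0.4 * _target_Tmag) / np.sum(10**(-0.4 * _in_aperture_Tmags))
# _dilution ~= 0.86: the target contributes ~86% of the in-aperture flux.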
Example #19
def test_detrending(source_id=None):

    df = pd.read_csv('data/example_data_{}.csv'.format(source_id))

    outpng = '{}_detrend_test_from_tfa.png'.format(source_id)
    flat_flux, trend_flux = dtr.detrend_flux(nparr(df.tfatime),
                                             nparr(df.tfaflux),
                                             break_tolerance=0.5)
    plot_detrending_from_tfa(nparr(df.time),
                             nparr(df.tfatime),
                             nparr(df.rawflux),
                             nparr(df.tfaflux),
                             flat_flux,
                             trend_flux,
                             ap_index=2,
                             returnfig=False,
                             savpath=outpng)

    outpng = '{}_detrend_test_from_raw.png'.format(source_id)
    flat_flux, trend_flux = dtr.detrend_flux(nparr(df.time),
                                             nparr(df.rawflux),
                                             break_tolerance=0.5)
    plot_detrending_from_raw(nparr(df.time),
                             nparr(df.tfatime),
                             nparr(df.rawflux),
                             nparr(df.tfaflux),
                             flat_flux,
                             trend_flux,
                             ap_index=2,
                             returnfig=False,
                             savpath=outpng)
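
# Usage sketch (hypothetical Gaia source id; assumes
# data/example_data_<source_id>.csv exists with columns time, tfatime,
# rawflux, tfaflux):
#
#   test_detrending(source_id='1234567890123456789')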
Example #20
def get_merged_companion_isochrone(age=5e9, mstar=1):

    outpath = f'../data/companion_isochrones/MIST_plus_Baraffe_merged_{age:.1e}.csv'

    if not os.path.exists(outpath):

        #
        # Get Teff, L for the MIST stellar models at the requested age.
        #

        if age == 5e9:
            # MIST isochrones, v1.2 from the website
            mistpath = os.path.join(
                datadir, 'MIST_v1.2_feh_p0.00_afe_p0.0_vvcrit0.4_basic.iso')
            iso = ISO(mistpath)

            # 10**9.7 = 5.01 Gyr. I'm OK with not interpolating further here.
            mist_age_ind = iso.age_index(9.7)
            mist_logTeff = iso.isos[mist_age_ind]['log_Teff']
            mist_logL = iso.isos[mist_age_ind]['log_L']
            mist_initial_mass = iso.isos[mist_age_ind]['initial_mass']

        elif age == 3.5e7:
            # MIST isochrones, v1.2, interpolated on website
            mistpath = os.path.join(datadir, 'MIST_iso_5ecfe9b1811ee.iso')
            iso = ISO(mistpath)

            assert len(iso.isos) == 1

            mist_logTeff = iso.isos[0]['log_Teff']
            mist_logL = iso.isos[0]['log_L']
            mist_initial_mass = iso.isos[0]['initial_mass']

        else:
            # Guard against ages with no MIST isochrone wired in (mirrors the
            # Baraffe branch below).
            raise NotImplementedError(
                'no MIST isochrone available for age={:.1e} yr'.format(age)
            )

        mist_df = pd.DataFrame({
            'mass': mist_initial_mass,
            'lum': 10**(mist_logL),
            'teff': 10**(mist_logTeff),
        })

        #
        # Keep MIST masses between 0.1 Msun and mstar. The one overlapping
        # grid point (Mstar = 0.1 Msun) is taken from the Baraffe model.
        #
        sel = (mist_df.mass < mstar) & (mist_df.mass > 0.1)
        mist_df = mist_df[sel]

        #
        # Get Teff, L for the Baraffe03 models at the requested age.
        #

        if age == 5e9:
            agestr = '5gyr'
        elif age == 3.5e7:
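            # 3.5e7 yr = 35 Myr; the 50 Myr table is presumably the closest
            # available COND03 grid age.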
            agestr = '50myr'
        else:
            raise NotImplementedError

        # Baraffe+2003 isochrones for substellar mass objects
        bar_df = pd.read_csv(os.path.join(datadir, f'COND03_{agestr}.csv'),
                             delim_whitespace=True)

        bar_df = bar_df.drop(
            ['g', 'R', 'Mv', 'Mr', 'Mi', 'Mj', 'Mh', 'Mk', 'Mll', 'Mm'],
            axis=1)

        bar_df['L/Ls'] = 10**nparr(bar_df['L/Ls'])

        bar_df = bar_df.rename(columns={
            'L/Ls': 'lum',
            'Teff': 'teff',
            'M/Ms': 'mass'
        })

        #
        # merge
        #
        mdf = pd.concat((bar_df, mist_df), sort=False).reset_index()

        mdf = mdf.drop(['index'], axis=1)

        mdf.to_csv(outpath, index=False)

    return pd.read_csv(outpath)
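
# Usage sketch (not from the original source; requires the MIST and COND03
# data files referenced above): interpolate the merged isochrone to estimate
# the luminosity and Teff of a 0.2 Msun companion at 5 Gyr.
if __name__ == "__main__":
    mdf = get_merged_companion_isochrone(age=5e9, mstar=1).sort_values('mass')
    m_comp = 0.2  # Msun
    lum = np.interp(m_comp, nparr(mdf['mass']), nparr(mdf['lum']))    # Lsun
    teff = np.interp(m_comp, nparr(mdf['mass']), nparr(mdf['teff']))  # K
    print('M={} Msun: L~{:.4f} Lsun, Teff~{:.0f} K'.format(m_comp, lum, teff))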