def given_ticid_get_quinn_row(ticid, disposition='STAR'):

    dr2_source_id = tic_to_gaiadr2(ticid)

    tic_res = tic_objectsearch(ticid)

    res_path = tic_res['cachefname']
    with open(res_path) as f:
        _d = json.load(f)

    assert len(_d['data']) == 1
    tic8_data = _d['data'][0]

    ra = tic8_data['ra']
    dec = tic8_data['dec']
    vmagstr = str(tic8_data['Vmag'])

    c = ICRS(nparr(ra)*u.deg, nparr(dec)*u.deg)
    rahmsstr = c.ra.to_string(u.hour, sep=' ', pad=True)
    decdmsstr = c.dec.to_string(u.degree, sep=' ', pad=True)

    quinnrow = (
        f'{ticid}\t{disposition}\t' +
        f'T{str(ticid).zfill(10)}\t' +
        f'{rahmsstr}\t{decdmsstr}\t' +
        f'{vmagstr}'
    )

    return quinnrow
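# Minimal usage sketch (hedged): the call below assumes this module's helpers
# (tic_to_gaiadr2, tic_objectsearch) and their network/cache access behave as
# usual; the TIC ID is the one used in the test function below, for
# illustration only.
#
#   row = given_ticid_get_quinn_row('402026209', disposition='STAR')
#   print(row)
#
# The result is one tab-separated line: TIC ID, disposition, zero-padded
# "T"-identifier, sexagesimal RA / dec, and V magnitude.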
def insert_ephemeris(ephemeris_type=None, targetid=None, ephemsourcefile=None):
    """
    ephemeris_type: type of ephemeris source file. These could be Hartman's
        text files, cdips-pipeline files, ExoFOP-TESS TOI/CTOI CSV files, SG1
        update files, or the MIT QLP toi-plus catalog:
        ['cdipspipeline', 'exofoptess_toi', 'exofoptess_ctoi',
         'hartmanupdate', 'toiplus', 'sg1_update']

    targetid: if type is MIT QLP toi-plus, then e.g., 'TIC308538095.01' or
        '451.01'.

    ephemsourcefile: if type is Hartman's text files, then path to the source
        file containing the ephemeris information.
    """

    valid_types = ['cdipspipeline', 'exofoptess_toi', 'exofoptess_ctoi',
                   'hartmanupdate', 'toiplus', 'sg1_update']

    if ephemeris_type not in valid_types:
        errmsg = (
            'got {} type. expected one of valid_types.'.
            format(ephemeris_type)
        )
        raise ValueError(errmsg)

    if ephemeris_type == 'hartmanupdate':
        assert isinstance(ephemsourcefile, str)
    if ephemeris_type == 'toiplus':
        assert isinstance(targetid, str)

    #
    # Retrieve dict with period, epoch, duration (and uncertainties).
    #
    if ephemeris_type == 'hartmanupdate':
        ephem_dict = read_hartman_updateephem_file(ephemsourcefile)

    elif ephemeris_type == 'exofoptess_toi':
        wrnmsg = (
            'WRN! EPHEMERIS FROM EXOFOPTESS TOI CATALOG. ENSURE LATEST '
            'VERSION HAS BEEN DOWNLOADED'
        )
        print(wrnmsg)
        ephem_dict = read_exofoptess_toi_ephem(targetid)

    elif ephemeris_type == 'exofoptess_ctoi':
        wrnmsg = (
            'WRN! EPHEMERIS FROM EXOFOPTESS CTOI CATALOG. ENSURE LATEST '
            'VERSION HAS BEEN DOWNLOADED'
        )
        print(wrnmsg)
        ephem_dict = read_exofoptess_ctoi_ephem(targetid)

    elif ephemeris_type == 'cdipspipeline':
        # cdips-pipeline source files are pipe-separated.
        df = pd.read_csv(ephemsourcefile, sep="|")
        sdf = df[df.target == targetid]
        selcol = ['period', 'epoch', 'duration', 'period_unc', 'epoch_unc',
                  'duration_unc', 'depth', 'depth_unc']
        e_dict = sdf[selcol].to_dict('index')
        keys = ['period_val', 'epoch_val', 'duration_val', 'period_unc',
                'epoch_unc', 'duration_unc', 'depth_val', 'depth_unc']
        ephem_dict = {}
        for k, c in zip(keys, selcol):
            ephem_dict[k] = e_dict[list(e_dict.keys())[0]][c]

    elif ephemeris_type == 'sg1_update':
        # SG1 update files are semicolon-separated.
        df = pd.read_csv(ephemsourcefile, sep=';')
        sdf = df[df.target == targetid]
        selcol = ['period', 'epoch', 'duration', 'period_unc', 'epoch_unc',
                  'duration_unc', 'depth', 'depth_unc']
        e_dict = sdf[selcol].to_dict('index')
        keys = ['period_val', 'epoch_val', 'duration_val', 'period_unc',
                'epoch_unc', 'duration_unc', 'depth_val', 'depth_unc']
        ephem_dict = {}
        for k, c in zip(keys, selcol):
            ephem_dict[k] = e_dict[list(e_dict.keys())[0]][c]

    else:
        raise NotImplementedError

    #
    # Construct and insert the new row.
    #
    if ephemeris_type == 'hartmanupdate':
        if 'TIC' not in ephemsourcefile:
            errmsg = (
                'assumed {} had TIC*.updateephem.txt'.
                format(ephemsourcefile)
            )
            raise ValueError(errmsg)
        # assumes e.g., ../data/updated_ephemerides/20191030/TIC308538095.updateephem.txt
        ticid = os.path.basename(ephemsourcefile).split('.')[0].lstrip('TIC')
        source_id = tic_to_gaiadr2(ticid)
        targetid = 'TIC{}.01'.format(ticid)
        ephemeris_origin = os.path.abspath(ephemsourcefile)

    elif ephemeris_type == 'exofoptess_toi':
        toidf = get_exofop_toi_catalog()
        sel = toidf['TOI'].astype(str) == targetid
        targetrow = toidf[sel]
        ticid = str(targetrow['TIC ID'].iloc[0])
        source_id = tic_to_gaiadr2(ticid)
        targetid = targetid
        ephemeris_origin = get_exofop_toi_catalog(returnpath=True)

    elif ephemeris_type == 'exofoptess_ctoi':
        ctoidf = get_exofop_ctoi_catalog()
        sel = ctoidf['CTOI'].astype(str) == targetid.replace('TIC', '')
        targetrow = ctoidf[sel]
        ticid = str(targetrow['TIC ID'].iloc[0])
        source_id = tic_to_gaiadr2(ticid)
        targetid = targetid
        ephemeris_origin = get_exofop_ctoi_catalog(returnpath=True)

    elif ephemeris_type == 'cdipspipeline':
        if not targetid.startswith('TIC'):
            raise NotImplementedError
        ticid = targetid.replace('TIC', '').replace('.01', '')
        source_id = str(sdf.source_id.iloc[0])
        targetid = targetid
        ephemeris_origin = os.path.abspath(ephemsourcefile)

    elif ephemeris_type == 'sg1_update':
        toidf = get_exofop_toi_catalog()
        if targetid.startswith('TIC'):
            # CTOI
            ticid = targetid.replace('TIC', '').replace('.01', '')
        else:
            # TOI
            ticid = str(
                toidf[toidf.TOI.astype(str) == targetid]['TIC ID'].iloc[0]
            )
        source_id = tic_to_gaiadr2(str(ticid))
        targetid = targetid
        ephemeris_origin = os.path.abspath(ephemsourcefile)

    else:
        raise NotImplementedError

    new_row = pd.DataFrame({
        'source_id': source_id,
        'ticid': ticid,
        'targetid': targetid,
        'insert_time': pd.Timestamp.now(),
        'period': ephem_dict['period_val'],
        'period_unc': ephem_dict['period_unc'],
        'epoch': ephem_dict['epoch_val'],
        'epoch_unc': ephem_dict['epoch_unc'],
        'duration': ephem_dict['duration_val'],
        'duration_unc': ephem_dict['duration_unc'],
        'depth': ephem_dict['depth_val'],
        'depth_unc': ephem_dict['depth_unc'],
        'ephemeris_origin': ephemeris_origin
    }, index=[0])

    if np.any([
        new_row.period.isnull().bool(),
        new_row.epoch.isnull().bool(),
        new_row.duration.isnull().bool()
    ]):
        print(42*'-')
        print(f'WRN! Skipping {ticid} because got null period / epoch / durn.')
        print(42*'-')
        return

    ephem_df = pd.read_csv(EPHEM_PATH)

    new_ephem_df = pd.concat((ephem_df, new_row), sort=False)

    save_ephemerides_csv_file(new_ephem_df)
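# Usage sketches (hedged): the file path mirrors the example in the comment
# above and the TOI string comes from the docstring; both are illustrative,
# not guaranteed to exist on disk or in the cached catalogs.
#
#   # Hartman-style update file (the TIC ID is parsed from the filename):
#   insert_ephemeris(
#       ephemeris_type='hartmanupdate',
#       ephemsourcefile='../data/updated_ephemerides/20191030/TIC308538095.updateephem.txt'
#   )
#
#   # Cached ExoFOP-TESS TOI catalog, keyed on the TOI string:
#   insert_ephemeris(ephemeris_type='exofoptess_toi', targetid='451.01')
#
# Each successful call appends one row (period, epoch, duration, their
# uncertainties, depth, and the ephemeris origin) to the CSV at EPHEM_PATH.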
def insert_candidate(source_id=None, ticid=None, manual_dict=None,
                     raise_error_if_duplicate=True):
    """
    Insert a candidate to the candidates.csv database by passing either Gaia
    DR2 source_id or otherwise ticid (string).

    Optional Arguments:
    ----------
    manual_dict: dict
        With keys:
        nbhd_rating (0-2, or -1 for null),
        init_priority (0-2),
        current_priority (0-2),
        pending_spectroscopic_observations (str, '--' if null),
        pending_photometry_observations (str, '--' if null),
        comment (str, '--' if null),
        candidate_provenance (str),
        isretired (0 or 1).

    raise_error_if_duplicate: boolean
        If attempting an insert on a source_id that already exists, an error
        will be raised, and the insert will not be performed. If false, a
        warning is raised, and the insert will not be performed.
    """

    #
    # Get identifiers (source_id, ticid, toi, targetid).
    #
    validate_source_id_ticid(source_id, ticid)

    if isinstance(source_id, str):
        ticid = gaiadr2_to_tic(source_id)
    elif isinstance(ticid, str):
        source_id = tic_to_gaiadr2(ticid)

    toiid = ticid_to_toiid(ticid)
    if isinstance(toiid, str):
        toiid = toiid.replace('.01', '')

    targetid = ticid_and_toiid_to_targetid(ticid, toiid)

    #
    # Get CDIPS & GaiaDR2 catalog information, or else assign nans.
    #
    cdips_r = get_cdips_pub_catalog_entry(source_id)

    iscdipstarget = 1 if isinstance(cdips_r, pd.DataFrame) else 0

    if isinstance(cdips_r, pd.DataFrame):
        assert len(cdips_r) == 1
        cdips_r = cdips_r.iloc[0]

        for col in cdips_r.index:
            if pd.isnull(cdips_r[col]):
                cdips_r[col] = -1

    else:
        cdips_cols = [
            'cluster', 'reference', 'ext_catalog_name', 'ra', 'dec', 'pmra',
            'pmdec', 'parallax', 'phot_g_mean_mag', 'phot_bp_mean_mag',
            'phot_rp_mean_mag', 'k13_name_match', 'unique_cluster_name',
            'how_match', 'not_in_k13', 'comment', 'logt', 'e_logt',
            'logt_provenance'
        ]

        cdips_r = pd.Series({'source_id': source_id})

        for col in cdips_cols:
            cdips_r[col] = '-1'

    #
    # Get TIC information, or else assign nans
    #
    ticcols = ['ID', 'GAIA', 'Bmag', 'Vmag', 'Jmag', 'Hmag', 'Kmag', 'Tmag',
               'Teff', 'logg', 'rad', 'mass']

    tic_r = get_tic_star_information(ticid, desiredcols=ticcols)

    if isinstance(tic_r, pd.DataFrame):
        assert len(tic_r) == 1
        tic_r = tic_r.iloc[0]

        if not tic_r.GAIA == source_id:
            errmsg = (
                'expected tic GAIA ID ({}) to match my GAIA ID ({})'.format(
                    tic_r.GAIA, source_id)
            )
            raise AssertionError(errmsg)

        for col in ticcols:
            if pd.isnull(tic_r[col]):
                tic_r[col] = -1

    else:
        tic_r = pd.Series({'source_id': source_id})

        for col in ticcols:
            tic_r[col] = -1

    #
    # Get the fit information as uploaded to ExoFOP-TESS. By default, use the
    # TOI table information. Otherwise, use the CTOI table.
    #
    plproperties_r = get_exofop_toi_catalog_entry(ticid)
    plkeyd = {
        'rp': 'Planet Radius (R_Earth)',
        'rp_unc': 'Planet Radius (R_Earth) err',
        'period': 'Period (days)',
        'depth': 'Depth (mmag)'
    }
    if plproperties_r is None:
        plproperties_r = get_exofop_ctoi_catalog_entry(ticid)
        plkeyd = {
            'rp': 'Radius (R_Earth)',
            'rp_unc': 'Radius (R_Earth) Error',
            'period': 'Period (days)',
            'depth': 'Depth mmag'
        }

    if isinstance(plproperties_r, pd.DataFrame):
        assert len(plproperties_r) != 0
        if len(plproperties_r) > 1:
            print(42 * '-')
            print(f'WRN! Got multiple catalog entries for TIC{ticid}')
            print(42 * '-')
        plproperties_r = plproperties_r.iloc[0]

        for col in plproperties_r.index:
            if pd.isnull(plproperties_r[col]):
                plproperties_r[col] = -1

    else:
        plproperties_r = pd.DataFrame({v: -1 for v in plkeyd.values()},
                                      index=[0])

    #
    # Get: nbhd_rating, init_priority, current_priority,
    # pending_spectroscopic_observations, pending_photometry_observations,
    # comment, candidate_provenance, isretired.
    #
    if isinstance(manual_dict, dict):

        # A dictionary was passed containing the entries that SHOULD be
        # manually written.
        d = manual_dict
        nbhd_rating = d['nbhd_rating']
        init_priority = d['init_priority']
        current_priority = d['current_priority']
        pending_spectroscopic_observations = d['pending_spectroscopic_observations']
        pending_photometry_observations = d['pending_photometry_observations']
        comment = d['comment']
        candidate_provenance = d['candidate_provenance']
        isretired = d['isretired']

    else:

        # Set reasonable defaults. Raise warning.
        init_priority = 1
        nbhd_rating = init_priority
        current_priority = init_priority
        pending_spectroscopic_observations = ''
        pending_photometry_observations = ''
        comment = ''
        candidate_provenance = 'insert_candidate (w/out manual entries)'
        isretired = 0
        print(
            'WRN! For {}, did not get manual entries for PRIORITY, or COMMENT'.
            format(source_id)
        )

    #
    # Construct and insert the new row.
    #
    refkey = 'reference' if 'reference' in cdips_r else 'reference_id'
    agekey = 'logt' if 'logt' in cdips_r else 'mean_age'

    new_row = pd.DataFrame({
        'source_id': str(source_id),
        'ticid': str(ticid),
        'toi': str(toiid),
        'targetid': str(targetid),
        'iscdipstarget': iscdipstarget,
        'reference': cdips_r[refkey],
        'name': cdips_r.cluster,
        'age': cdips_r[agekey],
        'nbhd_rating': nbhd_rating if not pd.isnull(nbhd_rating) else '--',
        'init_priority': init_priority,
        'current_priority': current_priority,
        'pending_spectroscopic_observations': pending_spectroscopic_observations,
        'pending_photometry_observations': pending_photometry_observations,
        'comment': comment,
        'rp': plproperties_r[plkeyd['rp']],
        'rp_unc': plproperties_r[plkeyd['rp_unc']],
        'period': plproperties_r[plkeyd['period']],
        'depth': plproperties_r[plkeyd['depth']],
        'gaia_ra': cdips_r.ra,
        'gaia_dec': cdips_r.dec,
        'gaia_plx': cdips_r.parallax,
        'gaia_Gmag': cdips_r.phot_g_mean_mag,
        'gaia_Bmag': cdips_r.phot_bp_mean_mag,
        'gaia_Rmag': cdips_r.phot_rp_mean_mag,
        'tic_Bmag': tic_r.Bmag,
        'tic_Vmag': tic_r.Vmag,
        'tic_Jmag': tic_r.Jmag,
        'tic_Hmag': tic_r.Hmag,
        'tic_Kmag': tic_r.Kmag,
        'tic_Tmag': tic_r.Tmag,
        'tic_teff': tic_r.Teff if not pd.isnull(tic_r.Teff) else -1,
        'tic_logg': tic_r.logg,
        'tic_rstar': tic_r.rad,
        'tic_mstar': tic_r.mass,
        'candidate_provenance': candidate_provenance,
        'insert_time': pd.Timestamp.now(),
        'last_update_time': pd.Timestamp.now(),
        'isretired': isretired,
        'disposition': 'PC',
        'rot_quality': '--',
        'Prot': '--',
        'vsini': '--',
        'rot_amp': '--',
        'sig_Prot': '--',
        'Tdur': '--',
        'sig_Tdur': '--',
        'Mp_pred': '--',
        'K_orb': '--',
        'K_RM': '--',
        'K_orb/sig_Prot': '--',
        'K_RM/sig_Tdur': '--'
    }, index=[0])

    cand_df = pd.read_csv(CAND_PATH, sep='|')

    if np.any(cand_df.source_id.astype(str).str.contains(str(source_id))):
        msg = 'Found existing candidates.csv entry for {}'.format(source_id)
        if raise_error_if_duplicate:
            raise AssertionError('ERR! : ' + msg)
        else:
            print('WRN! : ' + msg)
            print('WRN! Not doing the insert.')
            return None

    new_cand_df = pd.concat((cand_df, new_row), sort=False)

    new_cand_df = format_candidates_file(new_cand_df)
    save_candidates_csv_file(new_cand_df)
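# Usage sketch (hedged): the TIC ID is the one from the test function below,
# and the manual_dict values are placeholders following the docstring's
# conventions ('--' for null strings, 0-2 ratings/priorities).
#
#   insert_candidate(
#       ticid='402026209',
#       manual_dict={
#           'nbhd_rating': 1,
#           'init_priority': 1,
#           'current_priority': 1,
#           'pending_spectroscopic_observations': '--',
#           'pending_photometry_observations': '--',
#           'comment': '--',
#           'candidate_provenance': 'manual insert',
#           'isretired': 0
#       }
#   )
#
# Omitting manual_dict falls back to the warned-about defaults; a duplicate
# source_id either raises (raise_error_if_duplicate=True) or skips the insert.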
                      f'{savstr}_TOI_x_CDIPStargets')
if not os.path.exists(outdir):
    os.mkdir(outdir)

outpath = os.path.join(outdir, 'exofop_CTOI_cache.csv')

if not os.path.exists(outpath):

    df = get_exofop_toi_catalog(ver=verstr)

    # ... this runs ~15 TICID/minute. 4 hours. annoying.
    # alternative ideas:
    # 1. run a smart MAST query, uploading a table of TIC IDs, xmatching on
    #    the Gaia ID column of the TIC8
    # 2. bulk download all of TIC8.
    source_ids = []
    for ix, r in df.iterrows():
        print(ix, r['TOI'])
        try:
            source_ids.append(tic_to_gaiadr2(str(r['TIC ID'])))
        except Exception:
            source_ids.append(np.nan)

    df['source_id'] = source_ids
    df.to_csv(outpath, index=False)

df = pd.read_csv(outpath)
df['source_id'] = df.source_id.astype(str)

df_cdips = get_cdips_catalog(ver=CDIPSVER)
df_cdips['source_id'] = df_cdips.source_id.astype(str)

mdf = df.merge(df_cdips, how='left', on='source_id')

outdf = mdf[~pd.isnull(mdf.reference_id)]
print(f'N_inner: {len(mdf)}')

# PATHOS object ids that I don't already have a classification on
# (see 20200610_pathos_merge_notes.txt)
objs = np.array([3, 8, 9, 15, 23, 31])

n_int_path = '../../data/Nardiello_2020_PATHOS-II_interesting.csv'
if not os.path.exists(n_int_path):
    sdf = pathos_df[pathos_df.PATHOSID.isin(objs)]
    gaia_ids = []
    for t in sdf.TICID:
        g = tic_to_gaiadr2(t)
        gaia_ids.append(g)
    sdf['GAIA'] = gaia_ids
    sdf.to_csv(n_int_path, index=False)
else:
    sdf = pd.read_csv(n_int_path)

classfxdir = '/Users/luke/Dropbox/proj/cdips/results/vetting_classifications'
classpaths = glob(os.path.join(classfxdir, '*LGB*classifications.csv'))

cdf = pd.concat([pd.read_csv(f) for f in classpaths])

gaia_ids = [
    n.split('_')[4].split('-')[0].replace('gaiatwo', '').lstrip('0')
    for n in cdf.Name
def test_tic2gaiadrtwo():
    assert tic_to_gaiadr2('402026209') == '6535499658122055552'
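# This check can be run in isolation (assuming pytest is available and this
# file is on the test path), e.g.:
#
#   pytest -k test_tic2gaiadrtwo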
def main():

    ##########################################
    # CHANGE BELOW
    savstr = '20211211_v1096tau'  # e.g., 20191207_TOI1098_request_2m_tc_secondary. "ephemupdate" if it is one. (this cancels pending observations)
    overwrite = 1
    validate = 1
    submit = 1

    tic_id = '56551765'  # '120105470'
    source_id = None  # '6113920619134019456' # can use instead of TIC

    filtermode = 'rp'  # 'zs', 'gp', 'ip'
    telescope_class = '1m0'  # '1m0', '2m0', 'special'
    #telescope_class = 'special' # '1m0', '2m0', 'special'
    ipp_value = 1  # usually 1
    #max_search_time = Time('2022-12-31 23:59:00')
    max_search_time = Time('2022-01-31 23:59:00')

    verify_ephemeris_uncertainty = 1  # require t_tra uncertainty < 2 hours
    inflate_duration = 1  # if t_tra uncertainty > 1 hour, inflate transit duration by +/- 45 minutes per side

    transit_type = 'totals'  # see above
    max_n_events = 99  # else None. n_events is per eventclass.

    raise_error = False  # raise an error if max_duration_error flag raised.
    max_duration_error = 30  # the submitted LCOGT request must match requested durn to within this difference [minutes]

    sites = None  # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory']
    #sites = ['Keck Observatory']
    force_acceptability = 50  # None or int.

    # CHANGE ABOVE
    ##########################################

    manual_ephemeris = False
    create_eventclasses = TRANSITTYPEDICT[transit_type]
    submit_eventclasses = TRANSITTYPEDICT[transit_type]

    if source_id is None:
        assert isinstance(tic_id, str)
        source_id = tic_to_gaiadr2(tic_id)

    if manual_ephemeris:
        period = 42
        epoch = 2458660.00000
        duration = 2.00000

    else:
        # get ephemeris from ephemerides.csv
        d = query_ephemeris(source_id=source_id)
        period, epoch, duration = (
            d['period'], d['epoch'], d['duration']
        )
        period_unc, epoch_unc, duration_unc = (
            d['period_unc'], d['epoch_unc'], d['duration_unc']
        )

    if verify_ephemeris_uncertainty:
        delta_t_tra_today = get_ephemeris_uncertainty(
            epoch, epoch_unc, period, period_unc, epoch_obs='today'
        )
        if delta_t_tra_today * 24 < 0:
            msg = (
                f'ERR! Got negative ephem unc of {delta_t_tra_today*24:.1f} hr. '
                'Need to give a believable ephem unc..'
            )
            raise ValueError(msg)
        if delta_t_tra_today * 24 > 2:
            msg = f'ERR! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is too high.'
            raise ValueError(msg)
        if delta_t_tra_today * 24 > 1:
            msg = f'WRN! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is risky.'
            print(msg)

    if inflate_duration:
        assert verify_ephemeris_uncertainty
        if delta_t_tra_today * 24 > 1:
            msg = '... inflating transit duration for scheduling purposes by 1.5 hours.'
            print(msg)
            duration += 1.5  # add

    # "requests" is a list of lists. Higher level is each eventclass. Level
    # below is each event, in that eventclass.
    requests = get_dedicated_request(
        savstr, source_id, period, epoch, duration, create_eventclasses,
        overwrite=overwrite, max_search_time=max_search_time,
        filtermode=filtermode, telescope_class=telescope_class,
        ipp_value=ipp_value, sites=sites,
        force_acceptability=force_acceptability
    )

    # if a maximum number of events is set, impose it!
    if isinstance(max_n_events, int):

        _requests = []

        for ix in range(len(create_eventclasses)):
            print('starting with {} {} events.'.format(
                len(requests[ix]), create_eventclasses[ix]
            ))

        for eventclass in requests:
            _eventclass = []
            starttimes = []
            for req in eventclass:
                starttimes.append(req['requests'][0]['windows'][0]['start'])

            # sort by start time, cut to get the closest ones.
            sort_times = np.sort(starttimes)
            sel_times = sort_times[:max_n_events]

            for req in eventclass:
                starttime = req['requests'][0]['windows'][0]['start']
                if starttime in sel_times:
                    _eventclass.append(req)

            if len(_eventclass) > 0:
                _requests.append(_eventclass)

        if len(_requests) == 0:
            print('WRN!: got no times')
            return

        assert len(_requests[0]) <= max_n_events
        requests = _requests

        print('WRN!: trimmed to {} events.'.format(len(requests[0])))
        if len(sel_times) > 0:
            print('WRN!: max time: \n{}'.format(repr(sel_times[-1])))
            print('\nWRN!: selected times: \n{}'.format(repr(sel_times)))
        else:
            print('WRN!: got no times')

    given_dedicated_requests_validate_submit(
        requests, submit_eventclasses, validate=validate, submit=submit,
        max_duration_error=max_duration_error, raise_error=raise_error
    )
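# This driver is meant to be run as a script: edit the "CHANGE BELOW" block
# (target, filter mode, telescope class, search window, event limits), then
# execute the module directly, assuming the usual entry-point guard, e.g.:
#
#   if __name__ == "__main__":
#       main()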