def _fetch_tesscut_path(self, target, sector, download_dir, cutout_size):
    """Downloads TESS FFI cutout and returns path to local file.

    Parameters
    ----------
    target : str
        Target identifier to be resolved by MAST into sky coordinates.
    sector : int
        TESS sector from which the cutout is downloaded.
    download_dir : str
        Path to location of `.lightkurve-cache` directory where downloaded
        cutouts are stored
    cutout_size : int, float or tuple
        Side length of cutout in pixels. Tuples should have dimensions (y, x).
        Default size is (5, 5)

    Returns
    -------
    path : str
        Path to locally downloaded cutout file
    """
    from astroquery.mast import TesscutClass
    from astroquery.mast.core import MastClass

    # Set cutout_size defaults
    if cutout_size is None:
        cutout_size = 5

    # Check existence of `~/.lightkurve-cache/tesscut`
    tesscut_dir = os.path.join(download_dir, 'tesscut')
    if not os.path.isdir(tesscut_dir):
        # if it doesn't exist, make a new cache directory
        try:
            os.mkdir(tesscut_dir)
        # downloads into default cache if OSError occurs
        except OSError:
            tesscut_dir = download_dir

    # Resolve SkyCoord of given target exactly once; this is a remote MAST
    # call, so the duplicate resolution the original performed was wasted work.
    coords = MastClass()._resolve_object(target)
    cutout_path = TesscutClass().download_cutouts(coords, size=cutout_size,
                                                  sector=sector, path=tesscut_dir)
    # NOTE(review): the cutout is downloaded into `tesscut_dir` but the joined
    # path uses `download_dir` — preserved as-is; confirm against astroquery's
    # returned table contents.
    path = os.path.join(download_dir, cutout_path[0][0])
    return path
def _fetch_tesscut_path(self, target, sector, download_dir, cutout_size):
    """Downloads TESS FFI cutout and returns path to local file.

    If a matching cutout was downloaded before, the cached file is returned
    instead of downloading it again.

    Parameters
    ----------
    target : str
        Target identifier to be resolved by MAST into sky coordinates.
    sector : int
        TESS sector from which the cutout is downloaded.
    download_dir : str
        Path to location of `.lightkurve-cache` directory where downloaded
        cutouts are stored
    cutout_size : int, float or tuple
        Side length of cutout in pixels. Tuples should have dimensions (y, x).
        Default size is (5, 5)

    Returns
    -------
    path : str
        Path to locally downloaded cutout file
    """
    from astroquery.mast import TesscutClass

    # Set cutout_size defaults
    if cutout_size is None:
        cutout_size = 5

    # Check existence of `~/.lightkurve-cache/tesscut`
    tesscut_dir = os.path.join(download_dir, 'tesscut')
    if not os.path.isdir(tesscut_dir):
        # if it doesn't exist, make a new cache directory
        try:
            os.mkdir(tesscut_dir)
        # downloads into default cache if OSError occurs
        except OSError:
            tesscut_dir = download_dir

    # Resolve SkyCoord of given target exactly once; this is a remote MAST
    # call, so the duplicate resolution the original performed was wasted work.
    coords = _resolve_object(target)

    # build path string name and check if it exists
    # this is necessary to ensure cutouts are not downloaded multiple times
    sec = TesscutClass().get_sectors(coords)
    sector_name = sec[sec['sector'] == sector]['sectorName'][0]
    # Bug fix: float sizes previously matched neither branch, leaving
    # `size_str` undefined and raising NameError; treat floats like ints,
    # mirroring the int() truncation astrocut applies to the filename.
    if isinstance(cutout_size, (int, float)):
        size_str = str(int(cutout_size)) + 'x' + str(int(cutout_size))
    elif isinstance(cutout_size, (tuple, list)):
        # filenames are written as "<x>x<y>" while the tuple is (y, x)
        size_str = str(int(cutout_size[1])) + 'x' + str(int(cutout_size[0]))

    # search cache for file with matching ra, dec, and cutout size
    # ra and dec are searched within 0.001 degrees of input target
    ra_string = str(coords.ra.value)
    dec_string = str(coords.dec.value)
    matchstring = r"{}_{}*_{}*_{}_astrocut.fits".format(
        sector_name,
        ra_string[:ra_string.find('.') + 4],
        dec_string[:dec_string.find('.') + 4],
        size_str)
    cached_files = glob.glob(os.path.join(tesscut_dir, matchstring))

    # if any files exist, return the path to them instead of downloading
    if len(cached_files) > 0:
        path = cached_files[0]
        log.debug("Cached file found.")
    # otherwise the file will be downloaded
    else:
        cutout_path = TesscutClass().download_cutouts(coords, size=cutout_size,
                                                      sector=sector, path=tesscut_dir)
        path = os.path.join(download_dir, cutout_path[0][0])
        log.debug("Finished downloading.")
    return path
def execute_triceratops(cpus, indir, object_id, sectors, lc_file, transit_depth,
                        period, t0, transit_duration, rp_rstar, a_rstar, bins,
                        scenarios, sigma_mode, contrast_curve_file):
    """
    Calculates probabilities of the signal being caused by any of the following
    astrophysical sources:

    TP      No unresolved companion. Transiting planet with Porb around target star. (i, Rp)
    EB      No unresolved companion. Eclipsing binary with Porb around target star. (i, qshort)
    EBx2P   No unresolved companion. Eclipsing binary with 2 x Porb around target star. (i, qshort)
    PTP     Unresolved bound companion. Transiting planet with Porb around primary star. (i, Rp, qlong)
    PEB     Unresolved bound companion. Eclipsing binary with Porb around primary star. (i, qshort, qlong)
    PEBx2P  Unresolved bound companion. Eclipsing binary with 2 x Porb around primary star. (i, qshort, qlong)
    STP     Unresolved bound companion. Transiting planet with Porb around secondary star. (i, Rp, qlong)
    SEB     Unresolved bound companion. Eclipsing binary with Porb around secondary star. (i, qshort, qlong)
    SEBx2P  Unresolved bound companion. Eclipsing binary with 2 x Porb around secondary star. (i, qshort, qlong)
    DTP     Unresolved background star. Transiting planet with Porb around target star. (i, Rp, simulated star)
    DEB     Unresolved background star. Eclipsing binary with Porb around target star. (i, qshort, simulated star)
    DEBx2P  Unresolved background star. Eclipsing binary with 2 x Porb around target star. (i, qshort, simulated star)
    BTP     Unresolved background star. Transiting planet with Porb around background star. (i, Rp, simulated star)
    BEB     Unresolved background star. Eclipsing binary with Porb around background star. (i, qshort, simulated star)
    BEBx2P  Unresolved background star. Eclipsing binary with 2 x Porb around background star. (i, qshort, simulated star)
    NTP     No unresolved companion. Transiting planet with Porb around nearby star. (i, Rp)
    NEB     No unresolved companion. Eclipsing binary with Porb around nearby star. (i, qshort)
    NEBx2P  No unresolved companion. Eclipsing binary with 2 x Porb around nearby star. (i, qshort)

    FPP = 1 - (TP + PTP + DTP)
    NFPP = NTP + NEB + NEBx2P

    Giacalone & Dressing (2020) define validated planets as TOIs with
    NFPP < 10^-3 and FPP < 0.015 (or FPP <= 0.01, when rounding to the nearest
    percent).

    @param cpus: number of cpus to be used
    @param indir: root directory to store the results
    @param object_id: the object id for which the analysis will be run
    @param sectors: the sectors of the tic
    @param lc_file: the light curve source file
    @param transit_depth: the depth of the transit signal (ppts)
    @param period: the period of the transit signal (days)
    @param t0: the t0 of the transit signal (days)
    @param transit_duration: the duration of the transit signal (minutes)
    @param rp_rstar: radius of planet divided by radius of star
    @param a_rstar: semimajor axis divided by radius of star
    @param bins: the number of bins to average the folded curve
    @param scenarios: the number of scenarios to validate
    @param sigma_mode: the way to calculate the sigma for the validation
        ['flux_err' | 'binning']
    @param contrast_curve_file: the auxiliary contrast curve file to give more
        information to the validation engine.
    @return: the output directory. NOTE(review): the early-exit path below
        returns a 3-tuple ``(save_dir, None, None)`` while the normal path
        returns only ``save_dir`` — callers must handle both; confirm intent.
    """
    # Recreate a clean output directory for this run.
    save_dir = indir + "/triceratops"
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir, ignore_errors=True)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # Convert the duration from minutes to days (units used downstream).
    duration = transit_duration / 60 / 24
    logging.info("----------------------")
    logging.info("Validation procedures")
    logging.info("----------------------")
    logging.info("Pre-processing sectors")
    mission, mission_prefix, id_int = LcBuilder().parse_object_info(
        object_id)
    if mission == "TESS":
        # Keep only the sectors that TESSCUT actually has data for.
        sectors = np.array(sectors)
        sectors_cut = TesscutClass().get_sectors("TIC " + str(id_int))
        sectors_cut = np.array(
            [sector_row["sector"] for sector_row in sectors_cut])
        if len(sectors) != len(sectors_cut):
            logging.warning("WARN: Some sectors were not found in TESSCUT")
            logging.warning("WARN: Sherlock sectors were: " + str(sectors))
            logging.warning("WARN: TESSCUT sectors were: " + str(sectors_cut))
        sectors = np.intersect1d(sectors, sectors_cut)
        if len(sectors) == 0:
            logging.warning(
                "There are no available sectors to be validated, skipping TRICERATOPS."
            )
            return save_dir, None, None
    logging.info("Will execute validation for sectors: " + str(sectors))
    logging.info("Acquiring triceratops target")
    target = tr.target(ID=id_int, mission=mission, sectors=sectors)
    # TODO allow user input apertures
    logging.info("Reading apertures from directory")
    # NOTE(review): `object_dir` is not a parameter or local of this function —
    # presumably a module-level/global path; confirm it is defined at call time.
    apertures = yaml.load(open(object_dir + "/apertures.yaml"),
                          yaml.SafeLoader)
    apertures = apertures["sectors"]
    valid_apertures = {}
    # Keep apertures only for the sectors being validated and plot each field.
    for sector, aperture in apertures.items():
        if sector in sectors:
            valid_apertures[sector] = aperture
            target.plot_field(save=True,
                              fname=save_dir + "/field_S" + str(sector),
                              sector=sector,
                              ap_pixels=aperture)
    apertures = np.array(
        [aperture for sector, aperture in apertures.items()])
    valid_apertures = np.array(
        [aperture for sector, aperture in valid_apertures.items()])
    # transit_depth arrives in ppts; triceratops expects a fraction.
    depth = transit_depth / 1000
    if contrast_curve_file is not None:
        # Plot the user-provided contrast curve (separation vs delta-magnitude).
        logging.info("Reading contrast curve %s", contrast_curve_file)
        plt.clf()
        cc = pd.read_csv(contrast_curve_file, header=None)
        sep, dmag = cc[0].values, cc[1].values
        plt.plot(sep, dmag, 'k-')
        plt.ylim(9, 0)
        plt.ylabel("$\\Delta K_s$", fontsize=20)
        plt.xlabel("separation ('')", fontsize=20)
        plt.savefig(save_dir + "/contrast_curve.png")
        plt.clf()
    logging.info("Calculating validation closest stars depths")
    target.calc_depths(depth, valid_apertures)
    target.stars.to_csv(save_dir + "/stars.csv", index=False)
    # Load the detrended light curve produced earlier in the pipeline.
    lc = pd.read_csv(lc_file, header=0)
    time, flux, flux_err = lc["#time"].values, lc["flux"].values, lc[
        "flux_err"].values
    lc_len = len(time)
    zeros_lc = np.zeros(lc_len)
    logging.info("Preparing validation light curve for target")
    if mission == "TESS":
        lc = TessLightCurve(time=time,
                            flux=flux,
                            flux_err=flux_err,
                            quality=zeros_lc)
    else:
        lc = KeplerLightCurve(time=time,
                              flux=flux,
                              flux_err=flux_err,
                              quality=zeros_lc)
    lc.extra_columns = []
    # Fold and bin the curve; the binned points feed the validator.
    fig, axs = plt.subplots(1, 1, figsize=(8, 4), constrained_layout=True)
    axs, bin_centers, bin_means, bin_errs = Watson.compute_phased_values_and_fill_plot(
        object_id, axs, lc, period, t0 + period / 2, depth, duration,
        rp_rstar, a_rstar, bins=bins)
    plt.savefig(save_dir + "/folded_curve.png")
    plt.clf()
    # Convert phase back into days relative to the transit epoch.
    bin_centers = (bin_centers - 0.5) * period
    logging.info("Sigma mode is %s", sigma_mode)
    sigma = np.nanmean(
        bin_errs) if sigma_mode == 'binning' else np.nanmean(flux_err)
    logging.info("Computed folded curve sigma = %s", sigma)
    logging.info("Preparing validation processes inputs")
    # One independent input (deep-copied target) per scenario run.
    input_n_times = [
        ValidatorInput(save_dir, copy.deepcopy(target), bin_centers,
                       bin_means, sigma, period, depth, valid_apertures,
                       value, contrast_curve_file)
        for value in range(0, scenarios)
    ]
    logging.info("Start validation processes")
    #TODO fix usage of cpus returning same value for all executions
    with Pool(processes=1) as pool:
        validation_results = pool.map(TriceratopsThreadValidator.validate,
                                      input_n_times)
    logging.info("Finished validation processes")
    # Accumulators for the per-run FPP/NFPP values and star parameters.
    fpp_sum = 0
    fpp2_sum = 0
    fpp3_sum = 0
    nfpp_sum = 0
    probs_total_df = None
    scenarios_num = len(validation_results[0][4])
    # NOTE(review): first dimension is hard-coded to 5 — this assumes at most
    # 5 validation runs (i.e. `scenarios` <= 5); confirm against callers.
    star_num = np.zeros((5, scenarios_num))
    u1 = np.zeros((5, scenarios_num))
    u2 = np.zeros((5, scenarios_num))
    fluxratio_EB = np.zeros((5, scenarios_num))
    fluxratio_comp = np.zeros((5, scenarios_num))
    target = input_n_times[0].target
    target.star_num = np.zeros(scenarios_num)
    target.u1 = np.zeros(scenarios_num)
    target.u2 = np.zeros(scenarios_num)
    target.fluxratio_EB = np.zeros(scenarios_num)
    target.fluxratio_comp = np.zeros(scenarios_num)
    logging.info("Computing final probabilities from the %s scenarios",
                 scenarios)
    i = 0
    with open(save_dir + "/validation.csv", 'w') as the_file:
        the_file.write("scenario,FPP,NFPP,FPP2,FPP3+\n")
        # Each validator run yields its probabilities plus per-scenario arrays.
        for fpp, nfpp, fpp2, fpp3, probs_df, star_num_arr, u1_arr, u2_arr, fluxratio_EB_arr, fluxratio_comp_arr \
                in validation_results:
            if probs_total_df is None:
                probs_total_df = probs_df
            else:
                probs_total_df = pd.concat((probs_total_df, probs_df))
            fpp_sum = fpp_sum + fpp
            fpp2_sum = fpp2_sum + fpp2
            fpp3_sum = fpp3_sum + fpp3
            nfpp_sum = nfpp_sum + nfpp
            star_num[i] = star_num_arr
            u1[i] = u1_arr
            u2[i] = u2_arr
            fluxratio_EB[i] = fluxratio_EB_arr
            fluxratio_comp[i] = fluxratio_comp_arr
            the_file.write(
                str(i) + "," + str(fpp) + "," + str(nfpp) + "," +
                str(fpp2) + "," + str(fpp3) + "\n")
            i = i + 1
        # Average the per-run star parameters into the shared target object.
        for i in range(0, scenarios_num):
            target.star_num[i] = np.mean(star_num[:, i])
            target.u1[i] = np.mean(u1[:, i])
            target.u2[i] = np.mean(u2[:, i])
            target.fluxratio_EB[i] = np.mean(fluxratio_EB[:, i])
            target.fluxratio_comp[i] = np.mean(fluxratio_comp[:, i])
        # Mean probabilities across all validation runs.
        fpp_sum = fpp_sum / scenarios
        nfpp_sum = nfpp_sum / scenarios
        fpp2_sum = fpp2_sum / scenarios
        fpp3_sum = fpp3_sum / scenarios
        logging.info("---------------------------------")
        logging.info("Final probabilities computed")
        logging.info("---------------------------------")
        logging.info("FPP=%s", fpp_sum)
        logging.info("NFPP=%s", nfpp_sum)
        logging.info("FPP2(Lissauer et al, 2012)=%s", fpp2_sum)
        logging.info("FPP3+(Lissauer et al, 2012)=%s", fpp3_sum)
        the_file.write("MEAN" + "," + str(fpp_sum) + "," + str(nfpp_sum) +
                       "," + str(fpp2_sum) + "," + str(fpp3_sum))
    # Average per-scenario probabilities and order rows in canonical
    # triceratops scenario order before persisting.
    probs_total_df = probs_total_df.groupby("scenario",
                                            as_index=False).mean()
    probs_total_df["scenario"] = pd.Categorical(
        probs_total_df["scenario"], [
            "TP", "EB", "EBx2P", "PTP", "PEB", "PEBx2P", "STP", "SEB",
            "SEBx2P", "DTP", "DEB", "DEBx2P", "BTP", "BEB", "BEBx2P", "NTP",
            "NEB", "NEBx2P"
        ])
    probs_total_df = probs_total_df.sort_values("scenario")
    probs_total_df.to_csv(save_dir + "/validation_scenarios.csv",
                          index=False)
    target.probs = probs_total_df
    # target.plot_fits(save=True, fname=save_dir + "/scenario_fits", time=lc.time.value, flux_0=lc.flux.value,
    #                  flux_err_0=sigma)
    return save_dir