Example #1
def load_file(file: str,
              clip: float = 4,
              it: int = 1,
              apply_file_correction: bool = False) -> LightCurve:
    """
    Loads and normalizes target content
    :param file: Name of target including path
    :return: LightCurve object
    """
    if not os.path.exists(file):
        raise IOError(ctext(f"File {file} doesn't exist!", error))

    mprint(f"Reading data from {file} ...", log)
    try:
        data = np.loadtxt(file)
    except ValueError:
        data = read_csv(file)
        data = np.array((data.time, data.flux))
    if data.shape[0] > data.shape[1]:
        data = data.T

    if data.shape[0] == 2:
        lc = LightCurve(time=data[0], flux=data[1])
    else:
        lc = LightCurve(time=data[0], flux=data[1], flux_err=data[2])

    lc = lc.remove_nans()
    if apply_file_correction:
        # Shift the flux so it is strictly positive before converting to magnitudes
        lc.flux = lc.flux - float(np.amin(lc.flux)) + 10
        lc = lc.remove_outliers(clip, maxiters=it)
        lc = mag(lc)
        lc = lc.remove_nans()
    else:
        if np.amax(np.abs(lc.flux)) > 10:
            mprint("It seems as if your flux isn't in magnitudes. Be aware that SMURFS expects "
                   "the flux in magnitudes. Continuing ...", warn)
        if np.abs(np.median(lc.flux)) > 1:
            mprint(f"The median of your flux is {np.median(lc.flux):.2f}. For a proper analysis, the median "
                   f"should be close to 0. Be aware that this might cause issues. Continuing ...", warn)
    mprint(f"Total observation length: {lc.time[-1] - lc.time[0]:.2f} days.", log)
    mprint("Extracted data from target!", info)
    return lc
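
# Usage sketch (illustrative only, not part of the library): load a two- or
# three-column ASCII file (time, flux[, flux_err]). The file name is hypothetical.
lc = load_file("my_target.txt", clip=4, it=1, apply_file_correction=True)
print(f"Loaded {len(lc.time)} points spanning {lc.time[-1] - lc.time[0]:.2f} days.")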
Example #2
    def run(self, snr: float = 4, window_size: float = 2, f_min: float = None, f_max: float = None,
            skip_similar: bool = False, similar_chancel: bool = True, extend_frequencies: int = 0,
            improve_fit: bool = True, mode: str = 'lmfit', frequency_detection: float = None,
            fit_fun: Union[Tuple[callable, callable], callable] = None):
        """
        Starts the frequency analysis by instantiating a *FrequencyFinder* object and running it. After finishing the
        run, combinations are computed. See *FrequencyFinder.run* for an explanation of the algorithm.

        :param snr: Signal-to-noise ratio that defines the lower end of the analysis.
        :param window_size: Window size with which the SNR is computed.
        :param f_min: Minimum frequency that is considered in the analysis.
        :param f_max: Maximum frequency that is considered in the analysis.
        :param skip_similar: Flag that skips a certain range if too many similar frequencies are found there in a row.
        :param similar_chancel: Flag that cancels the run after 10 consecutive frequencies that are too similar.
        :param extend_frequencies: Extends the analysis by this number of insignificant frequencies.
        :param improve_fit: If this flag is set, all combined frequencies are re-fitted after every newly found frequency.
        :param mode: Fitting mode. You can choose between 'scipy' and 'lmfit'.
        :param frequency_detection: If this value is not None and the ratio between the amplitude of a found frequency and the amplitude at that frequency in the original spectrum exceeds it, the frequency is ignored.
        :param fit_fun: You can pass a function to SMURFS to replace its default fit function. SMURFS will pass this function a kwargs object.
        """

        if fit_fun is not None and not (callable(fit_fun) or (isinstance(fit_fun, tuple) and len(fit_fun) == 2)):
            raise AttributeError("fit_fun must be either a function or a tuple of two functions")

        self.snr = snr
        self.window_size = window_size
        self.f_min = f_min
        self.f_max = f_max
        self.skip_similar = skip_similar
        self.similar_chancel = similar_chancel
        self.extend_frequencies = extend_frequencies

        self._ff = FFinder(self, f_min, f_max)
        self._result = self._ff.run(snr=snr, window_size=window_size, skip_similar=skip_similar,
                                    similar_chancel=similar_chancel, extend_frequencies=extend_frequencies,
                                    improve_fit=improve_fit, mode=mode,
                                    frequency_detection=frequency_detection, fit_fun=fit_fun)

        significant = self._result[self._result.significant]
        self._combinations = get_combinations((significant.index + 1).tolist(),
                                              unp.nominal_values(significant.frequency.tolist()),
                                              unp.nominal_values(significant.amp.tolist()))

        self.res_lc = self._ff.res_lc

        mprint(f"{self.label} Analysis done!",info)
Example #3
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    parser = get_parser()
    args = parser.parse_args(args)

    if "," in args.target:
        targets = args.target.split(",")
    else:
        targets = [args.target]

    f_range = args.frequencyRange.split(",")
    f_min = None if f_range[0] == 'None' else float(f_range[0])
    f_max = None if f_range[1] == 'None' else float(f_range[1])

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if len(targets) == 1:
            target = targets[0]

            if os.path.exists(target) and os.path.isfile(target):
                s = Smurfs(file=target, flux_type=args.fluxType, mission=args.mission, sigma_clip=args.sigmaClip,
                           iters=args.iters, do_pca=args.do_pca, do_psf=args.do_psf,
                           apply_file_correction=args.apply_corrections)
            else:
                s = Smurfs(target_name=target, flux_type=args.fluxType, mission=args.mission, sigma_clip=args.sigmaClip,
                           iters=args.iters, do_pca=args.do_pca, do_psf=args.do_psf)

            improve_fit = args.improveFitMode == 'all'

            s.run(snr=args.snr, window_size=args.windowSize, f_min=f_min, f_max=f_max,
                  skip_similar=args.skipSimilarFrequencies, similar_chancel=not args.skipCutoff,
                  extend_frequencies=args.extendFrequencies, improve_fit=improve_fit,
                  mode=args.fitMethod, frequency_detection=args.frequencyDetection)

            if args.improveFitMode == 'end':
                mprint("Improving fit ...", state)
                s.improve_result()

            s.save(args.savePath, args.storeObject)

            # start an interactive shell by loading the Smurfs object directly into it
            if args.interactive:
                mprint("Starting interactive shell. Use the 'star' object to interact with the result!", state)
                pickle.dump(s, open("i_obj.smurfs", "wb"))
                cmd = ["ipython", "-i", "-c",
                       "from smurfs import Smurfs;import pickle;star : Smurfs = pickle.load(open('i_obj.smurfs', 'rb'));import os;os.remove('i_obj.smurfs');import matplotlib.pyplot as pl;pl.ion()"]
                subprocess.call(cmd)
                mprint("Done", info)
        else:
            if os.path.splitext(targets[0])[1] in ['.txt', '.dat']:
                s = MultiSmurfs(file_list=targets)
            else:
                s = MultiSmurfs(target_list=targets, flux_types=args.fluxType)

            s.run(snr=args.snr, window_size=args.windowSize, f_min=f_min, f_max=f_max,
                  skip_similar=args.skipSimilarFrequencies, similar_chancel=not args.skipCutoff,
                  extend_frequencies=args.extendFrequencies,
                  improve_fit=not args.disableImproveFrequencies,  # flag disables the fit improvement
                  mode=args.fitMethod)
            s.save(args.savePath, args.storeObject)
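
# Usage sketch (illustrative only): call the CLI entry point directly.
# The target name is hypothetical; all other options fall back to the
# defaults defined in get_parser().
main(["TIC 1234567"])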
Example #4
def cut_ffi(tic_id: int, clip: float = 4, iter: int = 1, do_pca: bool = False, do_psf: bool = False,
            flux_type: str = 'PDCSAP') -> Tuple[LightCurve, List[Figure], List[eleanor.TargetData]]:
    """
    Extracts light curves from FFIs using TESScut and Eleanor. This function automatically combines all available
    sectors for a given target.

    :param tic_id: TIC ID of the target
    :param clip: Sigma clip range of the target.
    :param iter: Iterations of the sigma clipping
    :param do_pca: Perform pca analysis with eleanor
    :param do_psf: Perform psf analysis with eleanor
    :param flux_type: Flux type that is returned. Choose between 'PDCSAP','SAP','PSF'
    :return: LightCurve object, list of validation figures and list of eleanor.TargetData objects
    """
    flux_types = ['SAP','PDCSAP', 'PSF']
    if flux_type not in flux_types:
        raise ValueError(ctext(f"Flux type {flux_type} not recognized. Possible values are: {flux_types}", error))
    f = io.StringIO()
    with redirect_stdout(f):
        stars = eleanor.multi_sectors(tic=tic_id, sectors='all',tc=True)
    mprint(f.getvalue().strip(), log)

    lc_list = []
    data_list = []
    q_list = []

    pca_flag = do_pca or flux_type == 'PDCSAP'
    psf_flag = do_psf or flux_type == 'PSF'

    for star in stars:

        f = io.StringIO()
        with redirect_stdout(f):  # capture eleanor's output so it can be logged
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                data = eleanor.TargetData(star, height=15, width=15, bkg_size=31, do_pca=pca_flag, do_psf=psf_flag)
        mprint(f.getvalue().strip(), log)
        q = data.quality == 0
        if flux_type == 'SAP':
            lc_list.append(LightCurve(lk.TessLightCurve(time=data.time[q], flux=data.corr_flux[q], targetid=tic_id)))
        elif flux_type == 'PDCSAP':
            lc_list.append(LightCurve(lk.TessLightCurve(time=data.time[q], flux=data.pca_flux[q], targetid=tic_id)))
        else:
            lc_list.append(LightCurve(lk.TessLightCurve(time=data.time[q], flux=data.psf_flux[q], targetid=tic_id)))

        data_list.append(data)
        q_list.append(q)

    fig = create_validation_page(data_list, q_list, f'TIC {tic_id}', do_pca=pca_flag, do_psf=psf_flag,
                                 flux_type=flux_type)

    lc: LightCurve = combine_light_curves(lc_list, clip, iter)
    mprint(f"Extracted light curve for TIC {tic_id}!", info)
    return lc, fig, data_list
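
# Usage sketch (illustrative only): extract and combine all FFI sectors for a
# hypothetical TIC ID.
lc, figures, target_data = cut_ffi(1234567, clip=4, iter=1, flux_type='PDCSAP')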
Example #5
def download_lc(target_name: str, flux_type: str = 'PDCSAP', mission: str = 'TESS', sigma_clip: float = 4,
                iters: int = 1, do_pca: bool = False,
                do_psf: bool = False) -> Tuple[LightCurve, Union[List[Figure], None]]:
    """
    Downloads a light curve using the TESS mission. If the star has been observed in the SC mode, it
    will download the original light curve from MAST. You can also choose the flux type you want to use.

    If it wasn't observed in SC mode, it will try to extract a light curve from the FFIs if the target has
    been observed by TESS.

    You can also download light curves of stars that are observed by the K2 or Kepler mission, by setting
    the mission parameter.

    :param target_name: Name of the target. You can either provide the TIC ID (TIC ...), Kepler ID (KIC ...), K2 ID (EPIC ...) or a name that is resolvable by Simbad.
    :param flux_type: Type of flux. Can be PDCSAP or SAP; PSF is only available for long cadence (FFI) data.
    :param mission: Mission from which the light curves are extracted. By default, only TESS is used. You can consider all missions by passing 'all' (TESS, Kepler, K2).
    :param sigma_clip: Sigma clip parameter. Defines the number of standard deviations that are clipped.
    :param iters: Iterations for the sigma clipping
    :param do_pca: Perform PCA analysis with eleanor (only used for FFI extraction)
    :param do_psf: Perform PSF analysis with eleanor (only used for FFI extraction)
    :return: lightkurve.LightCurve object and validation page if extracted from FFI
    """
    chosen_mission = [mission] if mission != 'all' else ('Kepler', 'K2', 'TESS')
    mprint(f"Searching processed light curves for {target_name} on mission(s) {','.join(chosen_mission)} ... ", log)

    if chosen_mission == ['TESS']:
        if target_name.startswith('TIC'):
            tic_id = re.findall(r'\d+', target_name)
            if len(tic_id) == 0:
                raise ValueError(ctext("A Tic ID needs to consist of TIC and a number!", error))
            tic_id = int(tic_id[0])
        else:
            mprint(f"Resolving {target_name} to TIC using MAST ...",log)
            try:
                tic_id = Catalogs.query_object(target_name,catalog='TIC',radius=0.003)[0]['ID']
            except KeyError:
                raise ValueError(ctext(f"No TESS observations available for {target_name}", error))

            mprint(f"TIC ID for {target_name}: TIC {tic_id}",log)

        o = Observations.query_criteria(objectname=target_name, radius=str(0 * u.deg), project='TESS',
                                        obs_collection='TESS').to_pandas()

        if len(o) > 0 and len(o[o.target_name != 'TESS FFI']) > 0:
            mprint(f"Short cadence observations available for {target_name}. Downloading ...",info)
            res = search_lightcurvefile(target_name, mission=chosen_mission)
        else:  # only FFI data available
            mprint(f"No short cadence data available for {target_name}, extracting from FFI ...", info)
            lc, fig, _ = cut_ffi(tic_id, sigma_clip, iters, do_pca, do_psf, flux_type)
            mprint(f"Total observation length: {lc.time[-1] - lc.time[0]:.2f} days.", log)
            return lc, fig
    else:
        res = search_lightcurvefile(target_name, mission=chosen_mission)

    if len(res) != 0:
        fig = None
        mprint(f"Found processed light curve for {target_name}!", info)
        res = res.download_all()
        types = []

        for d in res.data:
            obs_type = 'TESS' if isinstance(d, lk.TessLightCurveFile) else 'Kepler'
            if obs_type not in types:
                types.append(obs_type)

        mprint(f"Using {','.join(types)} observations! Combining sectors ...", log)

        if flux_type == 'PSF':
            mprint("PSF flux is not available for short cadence data. Reverting to PDCSAP.", warn)
            flux_type = 'PDCSAP'

        if flux_type == 'PDCSAP':
            lc_set: List[Union[lk.TessLightCurve, lk.KeplerLightCurve]] = list(res.PDCSAP_FLUX.data)
        elif flux_type == 'SAP':
            lc_set: List[Union[lk.TessLightCurve, lk.KeplerLightCurve]] = list(res.SAP_FLUX.data)
        else:
            raise ValueError(ctext("Flux type needs to be either PDCSAP or SAP", error))
        lc = combine_light_curves(lc_set, sigma_clip=sigma_clip, iters=iters)
    else:
        raise ValueError(ctext(f"No light curve available for {target_name} on mission(s) {chosen_mission}",error))

    mprint(f"Total observation length: {'%.2f' % (lc.time[-1] - lc.time[0])} days.", log)
    return lc, fig
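
# Usage sketch (illustrative only): download and combine all available TESS
# sectors for a hypothetical target.
lc, validation = download_lc("TIC 1234567", flux_type='PDCSAP', mission='TESS')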
Example #6
    def __init__(self, file=None, time=None, flux=None, target_name=None, flux_type='PDCSAP', label=None,
                 quiet_flag=False, mission='TESS', sigma_clip: float = 4, iters: int = 1, do_pca: bool = False,
                 do_psf: bool = False, apply_file_correction=False):
        Settings.quiet = quiet_flag
        self.validation_page: Figure = None
        if target_name is not None:
            self.lc, self.validation_page = download_lc(target_name, flux_type, mission, sigma_clip, iters,
                                                        do_pca, do_psf)
            if label is None:
                self.label = target_name
            else:
                self.label = label            
        elif time is None and flux is None and file is None:
            raise AttributeError(
                ctext("You need to either pass a target path or time and flux of the lightcurve object", error))

        elif time is not None and flux is not None:
            mprint("Creating light curve object from time and flux input.", log)
            self.lc: LightCurve = LightCurve(lk.LightCurve(time=time, flux=flux))
            if label is None:
                self.label = 'LC'
            else:
                self.label = label

            if not -0.1 < np.mean(flux) < 0.1:
                mprint("Be aware that the mean of the flux is not 0! This might lead to unintended consequences!",
                       warn)

        elif file is not None:
            # load_file already applies the file correction (flux shift, magnitude conversion,
            # outlier clipping) when apply_file_correction is set, so it is not repeated here.
            self.lc: LightCurve = load_file(file, sigma_clip, iters, apply_file_correction)
            if label is None:
                self.label = os.path.basename(file).split(".")[0]
            else:
                self.label = label

        else:
            raise AttributeError(
                ctext("You need to either pass a target path or time and flux of the lightcurve object", error))

        self.pdg: Periodogram = Periodogram.from_lightcurve(self.lc)
        self._result = df([], columns=['f_obj', 'frequency', 'amp', 'phase', 'snr', 'res_noise', 'significant'])
        self._combinations = df([],
                                columns=["Name", "ID", "Frequency", "Amplitude", "Solution", "Residual", "Independent",
                                         "Other_Solutions"])
        self._ff: FFinder = None
        self._spectral_window = None

        # Original light curve, kept to perform some processing on it later
        self.original_lc = self.lc.copy()

        # Target settings
        self.target_name = target_name
        self.flux_type = flux_type

        # Frequency settings
        self.snr = np.nan
        self.window_size = np.nan
        self.f_min = np.nan
        self.f_max = np.nan
        self.skip_similar = None
        self.similar_chancel = None
        self.extend_frequencies = np.nan
        self._notes = None

        mprint(f"Duty cycle for {self.label}: {'%.2f' % (self.duty_cycle * 100)}%", info)
Example #7
    def save(self, path: str, store_obj=False):
        """
        Saves the result of the analysis to a given folder.

        :param path: Path where the result is stored
        :param store_obj: If this is set, the Smurfs object is stored and can later be reloaded.
        """
        if not os.path.exists(path):
            raise IOError(ctext(f"'{path}' does not exist!", error))

        mprint("Saving results, this may take a bit ...", log)
        proj_path = os.path.join(path, self.label.replace(" ", "_"))
        index = 1
        while os.path.exists(proj_path):
            proj_path = os.path.join(path, self.label.replace(" ", "_") + f"_{index}")
            index += 1

        os.makedirs(proj_path)
        with cd(proj_path):
            os.makedirs("data")
            os.makedirs("plots")

            with cd("data"):
                if self._result is not None:
                    frame: df = self._result.drop(columns=['f_obj'])
                    frame.index.name = 'f_nr'
                    df_list = [(self.settings, '#Settings'),
                               (self.statistics, '#Statistics'),
                               (frame, '#Result')]

                    with open('result.csv', 'w') as f:
                        for fr, comment in df_list:
                            f.write(f"{comment}\n")
                            fr.to_csv(f)
                            f.write("\n\n")

                    self._combinations.to_csv('combinations.csv')
                self.lc.to_csv("LC.txt",**{'index':False})
                self.pdg.to_csv("PS.txt")
                if self._ff is not None:
                    self._ff.res_lc.to_csv("LC_residual.txt",**{'index':False})
                    self._ff.res_pdg.to_csv("PS_residual.txt")

                if self._notes is not None:
                    with open("notes.txt", 'w') as f:
                        f.writelines(self._notes)

                if store_obj:
                    pickle.dump(self, open("obj.smurfs", "wb"))

            with cd("plots"):
                images = [(self.lc, "LC.pdf"),
                          (self.pdg, "PS.pdf"),
                          ]

                if self._ff is not None:
                    images += [(self._ff.res_lc, "LC_residual.pdf"),
                               (self._ff.res_pdg, "PS_residual.pdf"),
                               (self._ff, "PS_result.pdf")
                               ]

                for obj, name in images:
                    fig, ax = pl.subplots(figsize=(16, 10))
                    if isinstance(obj, LightCurve):
                        obj.scatter(ax=ax)
                    else:
                        obj.plot(ax=ax, markersize=2)
                    pl.tight_layout()
                    fig.savefig(name)
                    pl.close()

                if self.validation_page is not None:
                    pdf = matplotlib.backends.backend_pdf.PdfPages("Validation_page.pdf")
                    for fig in self.validation_page:  # will open an empty extra figure
                        pdf.savefig(fig)
                    pdf.close()

                    for fig in self.validation_page:
                        pl.close(fig)

        mprint(f"{self.label} Data saved!",info)