Example #1
def import_schedule(ctx, fname, url, tempo_dir, parfile_dir, reference):
    """Parse a 26m schedule file and invoke `update_polyco` for each observation."""

    raise NotImplementedError("Not implemented yet. Aborting.")

    # WIP: everything below is unreachable until the raise above is removed
    if reference is None:
        cur_t = time.time()
    else:
        cur_t = ephem.ensure_unix(reference)
    sched = parse_26m_sched(fname)
    if len(sched) == 0:
        raise ValueError(
            "Found no observations in schedule file {}.".format(fname))
    for obs in sched:
        pfile = path.join(parfile_dir, obs["name"])
        if not path.isfile(pfile):
            print(
                "Could not find parfile for {} in directory {}. Skipping observation."
                .format(obs["name"], parfile_dir))
            continue
        # Update time from end of previous observation
        cur_t = ephem.lsa_to_unix(obs["lst"] / 24.0 * 360.0, cur_t)
        end = cur_t + obs["duration"] * 3600.0
        ctx.invoke(
            update_polyco,
            pfile,
            unix2mjd(Timespec(cur_t)),
            url=url,
            tempo_dir=tempo_dir,
            end_time=unix2mjd(Timespec(end)),
            name=obs["name"],
            schedule=True,
        )
        cur_t = end
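
The key step above is turning a scheduled LST into concrete unix start/end times. A minimal standalone sketch of that conversion, assuming `ephem` is the same ephemeris module used above (with a ch_util-style `lsa_to_unix(lsa_deg, t0)` that returns the next occurrence of the given local stellar angle after `t0`) and a hypothetical schedule entry:

import time

# Hypothetical entry: start at LST 6.5 h, observe for 2 hours
obs = {"lst": 6.5, "duration": 2.0}

# lsa_to_unix expects degrees, so convert LST hours -> degrees first
start = ephem.lsa_to_unix(obs["lst"] / 24.0 * 360.0, time.time())
end = start + obs["duration"] * 3600.0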
Example #2
    def _available_tags(self):
        self._tags = {}
        # Divide observations by source and in groups
        start_t = ephem.ensure_unix(self._revparams["start_time"])
        end_t = ephem.ensure_unix(self._revparams["end_time"])
        connect_db()
        for src in self._revparams["src_db"]:
            # query database for observations within time range and sort by time
            db_src = holo.HolographySource.get(
                holo.HolographySource.name == src)
            db_obs = (
                holo.HolographyObservation.select()
                .where(holo.HolographyObservation.source == db_src)
                .where((holo.HolographyObservation.start_time > start_t)
                       & (holo.HolographyObservation.finish_time < end_t))
                .where((holo.HolographyObservation.quality_flag == 0)
                       | (holo.HolographyObservation.quality_flag == None))
                .order_by(holo.HolographyObservation.start_time))

            # divide into groups to process together
            n_per = self._revparams["transits_per_run"]
            n_groups = len(db_obs) // n_per + (len(db_obs) % n_per != 0)
            for i in range(n_groups):
                tag = "{}_run{:0>3d}".format(src, i)
                # set up time range for these transits, with 1h padding
                lo = i * n_per
                hi = min(lo + n_per - 1, len(db_obs) - 1)
                bnds = (db_obs[lo].start_time - 3600,
                        db_obs[hi].finish_time + 3600)
                self._tags[tag] = {
                    "start": bnds[0],
                    "end": bnds[1],
                    "src_db": src
                }

        return self._tags.keys()
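
The grouping above is plain ceiling division over the sorted observations. The same arithmetic in isolation, with hypothetical numbers:

n_obs = 7    # e.g. seven observations returned by the query
n_per = 3    # transits_per_run
n_groups = n_obs // n_per + (n_obs % n_per != 0)   # ceil(7 / 3) == 3
for i in range(n_groups):
    lo = i * n_per
    hi = min(lo + n_per - 1, n_obs - 1)
    print("run{:0>3d}: observations {}..{}".format(i, lo, hi))
# run000: observations 0..2
# run001: observations 3..5
# run002: observations 6..6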
Example #3
def csds_in_range(start, end, step=1):
    """Get the CSDs within a time range.

    The start and end parameters must either be strings of the form "CSD\d+"
    (i.e. CSD followed by an int), which specifies an exact CSD start, or a
    form that `ephemeris.ensure_unix` understands.

    Parameters
    ----------
    start : str or parseable to datetime
        Start of interval.
    end : str or parseable to datetime or None
        End of interval. If `None`, use now. Note that for CSD intervals the
        end is *inclusive* (unlike a `range`).
    step : int, optional
        Step size in CSDs. Default is 1.

    Returns
    -------
    csds : list of ints
    """

    if end is None:
        end = datetime.datetime.utcnow()

    if isinstance(start, str) and start.startswith("CSD"):
        start_csd = int(start[3:])
    else:
        start_csd = ephemeris.unix_to_csd(ephemeris.ensure_unix(start))
        start_csd = math.floor(start_csd)

    # `end` may be a datetime here (e.g. after defaulting to now), so only
    # check the "CSD" prefix on strings
    if isinstance(end, str) and end.startswith("CSD"):
        end_csd = int(end[3:])
    else:
        end_csd = ephemeris.unix_to_csd(ephemeris.ensure_unix(end))
        end_csd = math.ceil(end_csd)

    return list(range(start_csd, end_csd + 1, step))
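
A short usage sketch (the CSD numbers are hypothetical; date strings go through `ephemeris.ensure_unix`, so the CSDs they map to depend on the CHIME epoch):

# Exact "CSD<int>" bounds are used as-is, and the end is inclusive
print(csds_in_range("CSD3000", "CSD3002"))        # [3000, 3001, 3002]

# The end may also be None, meaning "up to now"
print(csds_in_range("CSD3000", None, step=2))     # every other CSD up to now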
Example #4
def sun_coord(unix_time, deg=True):
    """Return the sun's apparent position as seen from CHIME.

    For each input time, the output row holds [hour angle, declination,
    altitude, azimuth], in degrees if `deg` is True, else in radians.
    """

    date = ephemeris.ensure_unix(np.atleast_1d(unix_time))
    skyfield_time = ephemeris.unix_to_skyfield_time(date)
    ntime = date.size

    coord = np.zeros((ntime, 4), dtype=np.float32)

    # Note: this loads the JPL ephemeris file from disk on every call
    planets = skyfield.api.load('de421.bsp')
    sun = planets['sun']

    observer = ephemeris._get_chime().skyfield_obs()

    apparent = observer.at(skyfield_time).observe(sun).apparent()
    radec = apparent.radec(epoch=skyfield_time)

    coord[:, 0] = radec[0].radians
    coord[:, 1] = radec[1].radians

    altaz = apparent.altaz()
    coord[:, 2] = altaz[0].radians
    coord[:, 3] = altaz[1].radians

    # Correct RA from equinox to CIRS coords using
    # the equation of the origins
    era = np.radians(ctime.unix_to_era(date))
    gast = 2 * np.pi * skyfield_time.gast / 24.0
    coord[:, 0] = coord[:, 0] + (era - gast)

    # Convert to hour angle
    coord[:, 0] = _correct_phase_wrap(coord[:, 0] -
                                      np.radians(ephemeris.lsa(date)))

    if deg:
        coord = np.degrees(coord)

    return coord
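
A hedged usage sketch (requires the module's imports plus a local copy of `de421.bsp` for Skyfield to load):

import time

# One row per input time: [hour angle, declination, altitude, azimuth]
pos = sun_coord(time.time(), deg=True)
print("HA=%.2f  Dec=%.2f  Alt=%.2f  Az=%.2f (degrees)" % tuple(pos[0]))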
Example #5
def clean(node_name, days, force, now, target, acq):
    """Clean up NODE by marking older files as potentially removable.

    If --target is specified we will only remove files already available in the
    TARGET_GROUP. This is useful for cleaning out intermediate locations such as
    transport disks.

    Using the --days flag will only clean correlator and housekeeping
    files which have a timestamp associated with them. It will not
    touch other types. If no --days flag is given, all files will be
    considered for removal.
    """

    import peewee as pw

    di.connect_database(read_write=True)

    try:
        this_node = di.StorageNode.get(di.StorageNode.name == node_name)
    except pw.DoesNotExist:
        print("Specified node does not exist.")
        return

    # Check to see if we are on an archive node
    if this_node.storage_type == "A":
        if force or click.confirm("DANGER: run clean on archive node?"):
            print("%s is an archive node. Forcing clean." % node_name)
        else:
            print("Cannot clean archive node %s without forcing." % node_name)
            return

    # Select FileCopys on this node.
    files = di.ArchiveFileCopy.select(di.ArchiveFileCopy.id).where(
        di.ArchiveFileCopy.node == this_node,
        di.ArchiveFileCopy.wants_file == "Y")

    # Limit to acquisition
    if acq is not None:
        try:
            acq = di.ArchiveAcq.get(name=acq)
        except pw.DoesNotExist:
            raise RuntimeError("Specified acquisition %s does not exist" % acq)

        files_in_acq = di.ArchiveFile.select().where(di.ArchiveFile.acq == acq)

        files = files.where(di.ArchiveFileCopy.file << files_in_acq)

    # If the target option has been specified, only clean files also available there...
    if target is not None:

        # Fetch a reference to the target group
        try:
            target_group = di.StorageGroup.get(name=target)
        except pw.DoesNotExist:
            raise RuntimeError('Target group "%s" does not exist in the DB.' %
                               target)

        # First get the nodes at the destination...
        nodes_at_target = di.StorageNode.select().where(
            di.StorageNode.group == target_group)

        # Then use this to get a list of all files at the destination...
        files_at_target = (di.ArchiveFile.select().join(
            di.ArchiveFileCopy).where(
                di.ArchiveFileCopy.node << nodes_at_target,
                di.ArchiveFileCopy.has_file == "Y",
            ))

        # Only match files that are also available at the target
        files = files.where(di.ArchiveFileCopy.file << files_at_target)

    # If --days has been set we need to restrict to files older than the given
    # time. This only works for a few particular file types
    if days is not None and days > 0:

        # Get the time for the oldest files to keep
        oldest = datetime.datetime.now() - datetime.timedelta(days)
        oldest_unix = ephemeris.ensure_unix(oldest)

        # List of filetypes we want to update, needs a human readable name and a
        # FileInfo table.
        filetypes = [["correlation", di.CorrFileInfo],
                     ["housekeeping", di.HKFileInfo]]

        file_ids = []

        # Iterate over file types for cleaning
        for name, infotable in filetypes:

            # Filter to fetch only ones with a start time older than `oldest`
            oldfiles = (files.join(di.ArchiveFile).join(infotable).where(
                infotable.start_time < oldest_unix))

            local_file_ids = list(oldfiles)

            # Get number of correlation files
            count = oldfiles.count()

            if count > 0:
                size_bytes = (di.ArchiveFileCopy.select().where(
                    di.ArchiveFileCopy.id << local_file_ids).join(
                        di.ArchiveFile).aggregate(
                            pw.fn.Sum(di.ArchiveFile.size_b)))

                size_gb = int(size_bytes) / 2**30.0

                print("Cleaning up %i %s files (%.1f GB) from %s " %
                      (count, name, size_gb, node_name))

                file_ids += local_file_ids

    # If days is not set, then just select all files that meet the requirements so far
    else:

        file_ids = list(files)
        count = files.count()

        if count > 0:
            size_bytes = (di.ArchiveFileCopy.select().where(
                di.ArchiveFileCopy.id << file_ids).join(
                    di.ArchiveFile).aggregate(pw.fn.Sum(
                        di.ArchiveFile.size_b)))

            size_gb = int(size_bytes) / 2**30.0

            print("Cleaning up %i files (%.1f GB) from %s " %
                  (count, size_gb, node_name))

    # If there are any files to clean, ask for confirmation and then mark them
    # in the database for removal
    if len(file_ids) > 0:
        if force or click.confirm("  Are you sure?"):
            print("  Marking files for cleaning.")

            state = "N" if now else "M"

            update = di.ArchiveFileCopy.update(
                wants_file=state).where(di.ArchiveFileCopy.id << file_ids)

            n = update.execute()

            print("Marked %i files for cleaning" % n)

        else:
            print("  Cancelled")
    else:
        print("No files selected for cleaning on %s." % node_name)
Example #6
    def parse_ant_logs(cls, logs, return_post_report_params=False):
        """
        Unzip and parse .ANT log file output by nsched for John Galt Telescope
        observations

        Parameters
        ----------
        logs : list of strings
            .ZIP filenames. Each .ZIP archive should include a .ANT file and
            a .POST_REPORT file. This method unzips the archive, uses
            `parse_post_report` to read the .POST_REPORT file and extract
            the CHIME sidereal day corresponding to the DRAO sidereal day,
            and then reads the lines in the .ANT file to obtain the pointing
            history of the Galt Telescope during this observation.

            (The DRAO sidereal day is days since the clock in Ev Sheehan's
            office at DRAO was reset. This clock is typically only reset every
            few years, but it does not correspond to any defined date, so the
            date must be figured out from the .POST_REPORT file, which reports
            both the DRAO sidereal day and the UTC date and time.

            Known reset dates: 2017-11-21, 2019-3-10)

        Returns
        -------

        if return_post_report_params == False:
            ant_data_list: list of dictionaries (one per observation), each
                containing the LST, hour angle, RA, and dec (all as Skyfield
                Angle objects), CHIME sidereal day, and DRAO sidereal day.

        if return_post_report_params == True:
            post_report_list: list of dictionaries returned by
                `parse_post_report`, and
            ant_data_list: described above

        Files
        -----
        The .ANT and .POST_REPORT files in the input .zip archive are
        extracted into /tmp/26mlog/<loginname>/.
        """

        from skyfield.positionlib import Angle
        from caput import time as ctime

        DRAO_lon = ephemeris.CHIMELONGITUDE * 24.0 / 360.0

        def sidlst_to_csd(sid, lst, sid_ref, t_ref):
            """
            Convert an integer DRAO sidereal day and LST to a float
            CHIME sidereal day

            Parameters
            ----------
            sid : int
                DRAO sidereal day
            lst : float, in hours
                local sidereal time
            sid_ref : int
                DRAO sidereal day at the reference time t_ref
            t_ref : skyfield time object, Julian days
                Reference time

            Returns
            -------
            output : float
                CHIME sidereal day
            """
            csd_ref = int(
                ephemeris.csd(ephemeris.datetime_to_unix(
                    t_ref.utc_datetime())))
            csd = sid - sid_ref + csd_ref
            return csd + lst / ephemeris.SIDEREAL_S / 24.0

        ant_data_list = []
        post_report_list = []

        for log in logs:
            doobs = True

            filename = log.split("/")[-1]
            basedir = "/tmp/26mlog/{}/".format(os.getlogin())
            basename, extension = filename.rsplit(".", 1)
            post_report_file = basename + ".POST_REPORT"
            ant_file = basename + ".ANT"

            if extension == "zip":
                try:
                    zipfile.ZipFile(log).extract(post_report_file,
                                                 path=basedir)
                except Exception:
                    print(
                        "Failed to extract {} into {}. Moving right along...".
                        format(post_report_file, basedir))
                    doobs = False
                try:
                    zipfile.ZipFile(log).extract(ant_file, path=basedir)
                except Exception:
                    print(
                        "Failed to extract {} into {}. Moving right along...".
                        format(ant_file, basedir))
                    doobs = False

            if doobs:
                try:
                    post_report_params = cls.parse_post_report(
                        basedir + post_report_file)

                    with open(os.path.join(basedir, ant_file), "r") as f:
                        lines = [line for line in f]
                        ant_data = {"sid": np.array([])}
                        lsth = []
                        lstm = []
                        lsts = []

                        hah = []
                        ham = []
                        has = []

                        decd = []
                        decm = []
                        decs = []

                        for l in lines:
                            arr = l.split()

                            try:
                                lst_hms = [float(x) for x in arr[2].split(":")]

                                # do last element first: if this is going to
                                # crash because a line in the log is incomplete,
                                # we don't want it to append to any of the lists

                                decs.append(float(arr[8].replace('"', "")))
                                decm.append(float(arr[7].replace("'", "")))
                                decd.append(float(arr[6].replace("D", "")))

                                has.append(float(arr[5].replace("S", "")))
                                ham.append(float(arr[4].replace("M", "")))
                                hah.append(float(arr[3].replace("H", "")))

                                lsts.append(float(lst_hms[2]))
                                lstm.append(float(lst_hms[1]))
                                lsth.append(float(lst_hms[0]))

                                ant_data["sid"] = np.append(
                                    ant_data["sid"], int(arr[1]))
                            except Exception:
                                print("Failed in file {} for line \n{}".format(
                                    ant_file, l))
                                if len(ant_data["sid"]) != len(decs):
                                    print("WARNING: mismatch in list lengths.")

                        ant_data["lst"] = Angle(hours=(lsth, lstm, lsts))

                        ha = Angle(hours=(hah, ham, has))
                        dec = Angle(degrees=(decd, decm, decs))

                        ant_data["ha"] = Angle(
                            radians=ha.radians -
                            ephemeris.galt_pointing_model_ha(ha, dec).radians,
                            preference="hours",
                        )

                        ant_data["dec_cirs"] = Angle(
                            radians=dec.radians -
                            ephemeris.galt_pointing_model_dec(ha, dec).radians,
                            preference="degrees",
                        )

                        ant_data["csd"] = sidlst_to_csd(
                            np.array(ant_data["sid"]),
                            ant_data["lst"].hours,
                            post_report_params["SID"],
                            post_report_params["start_time"],
                        )

                    ant_data["t"] = ephemeris.unix_to_skyfield_time(
                        ephemeris.csd_to_unix(ant_data["csd"]))

                    # Correct RA from equinox to CIRS coords (both in radians)
                    era = np.radians(
                        ctime.unix_to_era(ephemeris.ensure_unix(
                            ant_data["t"])))
                    gast = ant_data["t"].gast * 2 * np.pi / 24.0

                    ant_data["ra_cirs"] = Angle(
                        radians=ant_data["lst"].radians -
                        ant_data["ha"].radians + (era - gast),
                        preference="hours",
                    )

                    obs = ephemeris.Star_cirs(
                        ra=ant_data["ra_cirs"],
                        dec=ant_data["dec_cirs"],
                        epoch=ant_data["t"],
                    )

                    ant_data["ra"] = obs.ra
                    ant_data["dec"] = obs.dec

                    ant_data_list.append(ant_data)
                    post_report_list.append(post_report_params)
                except Exception:
                    print("Parsing {} failed".format(post_report_file))

        if return_post_report_params:
            return post_report_list, ant_data_list
        return ant_data_list
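
The `sidlst_to_csd` helper is simple offset arithmetic between the two sidereal day counters. The same computation with hypothetical reference values:

# Suppose the .POST_REPORT ties DRAO sidereal day 1234 to CHIME sidereal
# day 2345 (both numbers hypothetical)
sid_ref, csd_ref = 1234, 2345

sid, lst = 1240, 6.0   # a later DRAO day, at LST 6 h
csd = (sid - sid_ref + csd_ref) + lst / ephemeris.SIDEREAL_S / 24.0
print(csd)   # ~2351.25: the day offset carries over, the LST sets the fraction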
Example #7
        def check_for_duplicates(t, src, start_tol, ignore_src_mismatch=False):
            """
            Check for duplicate holography observations, comparing the given
            observation to the existing database

            Parameters
            ----------
            t: Skyfield Time object
                beginning time of observation
            src: HolographySource
                target source
            start_tol: float
                Tolerance in seconds within which to search for duplicates
            ignore_src_mismatch: bool (default: False)
                If True, consider observations a match if the time matches
                but the source does not

            Returns
            -------
            If a duplicate is found: :py:class:`HolographyObservation` object for the
            existing entry in the database

            If no duplicate is found: None
            """
            ts = ephemeris.skyfield_wrapper.timescale

            unixt = ephemeris.ensure_unix(t)

            dup_found = False

            existing_db_entry = cls.select().where(
                cls.start_time.between(unixt - start_tol, unixt + start_tol))
            if len(existing_db_entry) > 0:
                if len(existing_db_entry) > 1:
                    print("Multiple entries found.")
                for entry in existing_db_entry:
                    tt = ts.utc(ephemeris.unix_to_datetime(entry.start_time))
                    # LST = GST + east longitude
                    ttlst = np.mod(tt.gmst + DRAO_lon, 24.0)

                    # Check if source name matches. If not, print a warning
                    # but proceed anyway.
                    if src.name.upper() == entry.source.name.upper():
                        dup_found = True
                        if verbose:
                            print("Observation is already in database.")
                    else:
                        if ignore_src_mismatch:
                            dup_found = True
                        print(
                            "** Observation at same time but with different " +
                            "sources in database: ",
                            src.name,
                            entry.source.name,
                            tt.utc_datetime().isoformat(),
                        )
                        # if the observations match in start time and source,
                        # call them the same observation. Not the most strict
                        # check possible.

                    if dup_found:
                        tf = ts.utc(
                            ephemeris.unix_to_datetime(entry.finish_time))
                        print("Tried to add  :  {} {}; LST={:.3f}".format(
                            src.name,
                            t.utc_datetime().strftime(DATE_FMT_STR), ttlst))
                        print("Existing entry:  {} {}; LST={:.3f}".format(
                            entry.source.name,
                            tt.utc_datetime().strftime(DATE_FMT_STR),
                            ttlst,
                        ))
            if dup_found:
                return existing_db_entry
            else:
                return None
Example #8
    def create_from_dict(
        cls,
        dict,
        notes=None,
        start_tol=60.0,
        dryrun=True,
        replace_dup=False,
        verbose=False,
    ):
        """
        Create a holography database entry from a dictionary

        This routine checks for duplicates and overwrites duplicates if and
        only if `replace_dup = True`

        Parameters
        ----------
        dict : dict
            src : :py:class:`HolographySource`
                A HolographySource object for the source
            start_time
                Start time as a Skyfield Time object
            finish_time
                Finish time as a Skyfield Time object
            quality_flag
                Quality flag to store with the entry
        notes : str, optional
            Notes to attach to the database entry.
        start_tol : float, optional
            Tolerance in seconds within which to search for duplicates.
        dryrun : bool, optional
            If True, do not write anything to the database.
        replace_dup : bool, optional
            If True, delete any duplicate entries found and replace them.
        verbose : bool, optional
            Print extra diagnostic information.
        """
        DATE_FMT_STR = "%Y-%m-%d %H:%M:%S %Z"

        def check_for_duplicates(t, src, start_tol, ignore_src_mismatch=False):
            """
            Check for duplicate holography observations, comparing the given
            observation to the existing database

            Parameters
            ----------
            t: Skyfield Time object
                beginning time of observation
            src: HolographySource
                target source
            start_tol: float
                Tolerance in seconds within which to search for duplicates
            ignore_src_mismatch: bool (default: False)
                If True, consider observations a match if the time matches
                but the source does not

            Returns
            -------
            If a duplicate is found: :py:class:`HolographyObservation` object for the
            existing entry in the database

            If no duplicate is found: None
            """
            ts = ephemeris.skyfield_wrapper.timescale

            unixt = ephemeris.ensure_unix(t)

            dup_found = False

            existing_db_entry = cls.select().where(
                cls.start_time.between(unixt - start_tol, unixt + start_tol))
            if len(existing_db_entry) > 0:
                if len(existing_db_entry) > 1:
                    print("Multiple entries found.")
                for entry in existing_db_entry:
                    tt = ts.utc(ephemeris.unix_to_datetime(entry.start_time))
                    # LST = GST + east longitude
                    ttlst = np.mod(tt.gmst + DRAO_lon, 24.0)

                    # Check if source name matches. If not, print a warning
                    # but proceed anyway.
                    if src.name.upper() == entry.source.name.upper():
                        dup_found = True
                        if verbose:
                            print("Observation is already in database.")
                    else:
                        if ignore_src_mismatch:
                            dup_found = True
                        print(
                            "** Observation at same time but with different " +
                            "sources in database: ",
                            src.name,
                            entry.source.name,
                            tt.utc_datetime().isoformat(),
                        )
                        # if the observations match in start time and source,
                        # call them the same observation. Not the most strict
                        # check possible.

                    if dup_found:
                        tf = ts.utc(
                            ephemeris.unix_to_datetime(entry.finish_time))
                        print("Tried to add  :  {} {}; LST={:.3f}".format(
                            src.name,
                            t.utc_datetime().strftime(DATE_FMT_STR), ttlst))
                        print("Existing entry:  {} {}; LST={:.3f}".format(
                            entry.source.name,
                            tt.utc_datetime().strftime(DATE_FMT_STR),
                            ttlst,
                        ))
            if dup_found:
                return existing_db_entry
            else:
                return None

        # DRAO longitude in hours
        DRAO_lon = ephemeris.chime.longitude * 24.0 / 360.0

        if verbose:
            print(" ")
        addtodb = True

        dup_entries = check_for_duplicates(dict["start_time"], dict["src"],
                                           start_tol)

        if dup_entries is not None:
            if replace_dup:
                if not dryrun:
                    for entry in dup_entries:
                        entry.delete_instance()
                        if verbose:
                            print(
                                "Deleted existing observation from database; replacing it."
                            )
                elif verbose:
                    print(
                        "Would have deleted observation and replaced (dry run)."
                    )
                addtodb = True
            else:
                addtodb = False
                for entry in dup_entries:
                    print("Not replacing duplicate {} observation {}".format(
                        entry.source.name,
                        ephemeris.unix_to_datetime(
                            entry.start_time).strftime(DATE_FMT_STR),
                    ))

        # Duplicate handling is done. Now add to the database, if we're
        # supposed to.
        if addtodb:
            string = "Adding to database: {} {} to {}"
            print(
                string.format(
                    dict["src"].name,
                    dict["start_time"].utc_datetime().strftime(DATE_FMT_STR),
                    dict["finish_time"].utc_datetime().strftime(DATE_FMT_STR),
                ))
            if dryrun:
                print("Dry run; doing nothing")
            else:
                cls.create(
                    source=dict["src"],
                    start_time=ephemeris.ensure_unix(dict["start_time"]),
                    finish_time=ephemeris.ensure_unix(dict["finish_time"]),
                    quality_flag=dict["quality_flag"],
                    notes=notes,
                )
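
A hedged usage sketch, assuming this classmethod lives on the `HolographyObservation` model and that the named source already exists in the database (source name and times are hypothetical):

ts = ephemeris.skyfield_wrapper.timescale

obs = {
    "src": HolographySource.get(name="CAS_A"),    # hypothetical source
    "start_time": ts.utc(2019, 1, 1, 12, 0, 0),
    "finish_time": ts.utc(2019, 1, 1, 12, 30, 0),
    "quality_flag": 0,
}

# dryrun=True only prints what would be inserted; pass dryrun=False to commit
HolographyObservation.create_from_dict(obs, notes="test entry", dryrun=True)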
Example #9
    def parse_post_report(cls, post_report_file):
        """
        read a .POST_REPORT file from the nsched program which controls the
        John Galt Telescope and extract the source name, estimated start time,
        DRAO sidereal day, commanded duration, and estimated finish time

        Parameters
        ----------
        post_report_file : str
            path to the .POST_REPORT file to read

        Returns
        -------
        output_params : dictionary
            output_params['src'] : HolographySource object or string
                If the source is a known source in the holography database,
                return the HolographySource object. If not, return the name
                of the source as a string
            output_params['SID'] : int
                DRAO sidereal day at the beginning of the observation
            output_params['start_time'] : skyfield time object
                UTC time at the beginning of the observation
            output_params['DURATION'] : float
                Commanded duration of the observation in sidereal hours
            output_params['finish_time'] : skyfield time object
                Calculated UTC time at the end of the observation
                Calculated as start_time + duration * ephemeris.SIDEREAL_S

        """
        import re

        ts = ephemeris.skyfield_wrapper.timescale

        output_params = {}

        with open(post_report_file, "r") as f:
            lines = [line for line in f]
            for l in lines:
                if (l.find("Source")) != -1:
                    srcnm = re.search(r"Source:\s+(.*?)\s+", l).group(1)
                    if srcnm in cls.source_alias:
                        srcnm = cls.source_alias[srcnm]
                if (l.find("DURATION")) != -1:
                    output_params["DURATION"] = float(
                        re.search("DURATION:\s+(.*?)\s+", l).group(1))

                # convert Julian Date to Skyfield time object
                if (l.find("JULIAN DATE")) != -1:
                    output_params["start_time"] = ts.ut1(jd=float(
                        re.search("JULIAN DATE:\s+(.*?)\s+", l).group(1)))

                if l.find("SID:") != -1:
                    output_params["SID"] = int(
                        re.search("SID:\s(.*?)\s+", l).group(1))
            try:
                output_params["src"] = HolographySource.get(name=srcnm)
            except pw.DoesNotExist:
                print("Missing", srcnm)
                output_params["src"] = srcnm

            output_params["finish_time"] = ephemeris.unix_to_skyfield_time(
                ephemeris.ensure_unix(output_params["start_time"]) +
                output_params["DURATION"] * 3600.0 * ephemeris.SIDEREAL_S)

            output_params["quality_flag"] = QUALITY_GOOD

            return output_params
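
The parsing above is plain regex matching on labelled fields. A sketch of the same idea on a hypothetical report line (the real file spreads these fields over several lines):

import re

line = "Source: CAS_A    JULIAN DATE: 2458500.5    "
src = re.search(r"Source:\s+(.*?)\s+", line).group(1)
jd = float(re.search(r"JULIAN DATE:\s+(.*?)\s+", line).group(1))
print(src, jd)   # CAS_A 2458500.5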
Example #10
def rfi_zeroing():

    global InitialKotekanConnection

    # Downtime of RFI zeroing
    downtime_m = app.config["solar_transit_downtime_m"]
    downtime_s = downtime_m * 60
    half_window_s = 0.5 * downtime_s

    logger.info("RFI Solar Transit Toggle: Starting thread")
    while not InitialKotekanConnection:
        time.sleep(1)
    while True:
        # Wait until the correct UTC time of the solar transit at DRAO (deals with daylight savings time)
        time_now = ephemeris.ensure_unix(datetime.datetime.utcnow())

        # Get the *next* transit in the future
        time_to_next_transit = ephemeris.solar_transit(time_now) - time_now

        # Get the *nearest* transit which we need to determine if we are still in the window
        time_to_nearest_transit = (
            ephemeris.solar_transit(time_now - 12 * 3600) - time_now)

        logger.info(
            "RFI Solar Transit Toggle: Time of next transit: {}".format(
                datetime.datetime.fromtimestamp(time_to_next_transit +
                                                time_now)))
        logger.info(
            "RFI Solar Transit Toggle: Time of nearest transit: {}".format(
                datetime.datetime.fromtimestamp(time_to_nearest_transit +
                                                time_now)))

        # Check if we are within the current transit window and wait until the end of it
        if abs(time_to_nearest_transit) < half_window_s:
            new_zeroing_state = False
            sleep_s = half_window_s + time_to_nearest_transit
            logger.info(
                "RFI Solar Transit Toggle: Within solar transit window, disabling zeroing and sleeping for {} seconds until end of window."
                .format(sleep_s))
        # Otherwise, we wait until the start of the next transit window
        else:
            new_zeroing_state = True
            sleep_s = time_to_next_transit - half_window_s
            logger.info(
                "RFI Solar Transit Toggle: Outside solar transit window, enabling zeroing and sleeping for {} seconds until next window."
                .format(sleep_s))

        # Set new RFI zeroing state
        success = set_rfi_zeroing(new_zeroing_state)

        # If we failed to set new RFI zeroing state sleep for a few seconds
        if not success:

            logger.info(
                "RFI Solar Transit Toggle: Failed to set new RFI zeroing state. Will wait for a few seconds and try again."
            )

            time.sleep(5)
            continue

        # Sleep until the end of the current transit window, or until the next one opens
        time.sleep(sleep_s)
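
The window logic above reduces to a comparison against half the configured downtime. A standalone sketch with hypothetical numbers (30 minutes of downtime, a transit 10 minutes ago):

half_window_s = 0.5 * 30 * 60          # 30 min downtime -> 900 s half-window
time_to_nearest_transit = -600.0       # hypothetical: transit was 10 min ago
time_to_next_transit = 86000.0         # hypothetical: next transit tomorrow

if abs(time_to_nearest_transit) < half_window_s:
    # Inside the window: zeroing off; sleep out the remainder of the window
    zeroing, sleep_s = False, half_window_s + time_to_nearest_transit
else:
    # Outside the window: zeroing on; sleep until the next window opens
    zeroing, sleep_s = True, time_to_next_transit - half_window_s
print(zeroing, sleep_s)   # False 300.0 for these numbers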