Example #1
def rinex3_obs(dset):
    """Write RINEX observations in Rinex format 3.03

    Args:
        dset:       Dataset, a dataset containing the data.
    """

    # Initialize variables
    meta = dset.meta
    version = "3.03"
    program = "Where v{}".format(where.__version__)
    run_by = "NMA"
    date = datetime.utcnow()
    time_sys = "GPS"  # TODO: So far only GPS time system can be handled by Where.
    file_created = "{:15s} {:3s}".format(date.strftime("%Y%m%d %H%M%S"), "UTC")
    pos_x = dset.site_pos.itrs[0][0]
    pos_y = dset.site_pos.itrs[0][1]
    pos_z = dset.site_pos.itrs[0][2]

    cfg_sampling_rate = config.tech.sampling_rate.float
    num_satellites = len(dset.unique("satellite"))

    if meta["file_type"] == "O":
        file_type = "OBSERVATION DATA"

    if meta["interval"] <= float(cfg_sampling_rate):
        sampling_rate = cfg_sampling_rate
    else:
        sampling_rate = meta["interval"]
    dset.vars["sampling_rate"] = str(
        int(sampling_rate
            ))  # Used as placeholder for determination of output file name

    with files.open("output_rinex3_obs", file_vars=dset.vars,
                    mode="wt") as fid:

        # ================================
        #  Write RINEX observation header
        # ================================
        #
        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        #      3.02           OBSERVATION DATA    M (MIXED)           RINEX VERSION / TYPE
        fid.write("{:>9s}{:11s}{:20s}{:20s}RINEX VERSION / TYPE\n".format(
            version, "", file_type, meta["sat_sys"]))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # MAKERINEX 2.0.20023 BKG/GOWETTZELL      2016-03-02 00:20    PGM / RUN BY / DATE
        fid.write("{:20s}{:20s}{:20s}PGM / RUN BY / DATE\n".format(
            program, run_by, file_created))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # G = GPS R = GLONASS E = GALILEO S = GEO M = MIXED           COMMENT
        if "comment" in meta:
            for line in meta["comment"]:
                fid.write("{:60s}COMMENT\n".format(line))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # stas                                                        MARKER NAME
        fid.write("{:60s}MARKER NAME\n".format(meta["marker_name"]))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # 66008M005                                                   MARKER NUMBER
        if "marker_number" in meta:
            fid.write("{:60s}MARKER NUMBER\n".format(meta["marker_number"]))

        if "marker_type" in meta:
            fid.write("{:60s}MARKER TYPE\n".format(meta["marker_type"]))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # SATREF              Norwegian Mapping Authority             OBSERVER / AGENCY
        fid.write("{:20s}{:40s}OBSERVER / AGENCY\n".format(
            meta["observer"], meta["agency"]))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # 3008040             SEPT POLARX4        2.9.0               REC # / TYPE / VERS
        fid.write("{:20s}{:20s}{:20s}REC # / TYPE / VERS\n"
                  "".format(meta["receiver_number"], meta["receiver_type"],
                            meta["receiver_version"]))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # CR620012101         ASH701945C_M    SCIS                    ANT # / TYPE
        fid.write("{:20s}{:40s}ANT # / TYPE\n".format(meta["antenna_number"],
                                                      meta["antenna_type"]))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        #   3275756.7623   321111.1395  5445046.6477                  APPROX POSITION XYZ
        fid.write(
            "{:>14.4f}{:>14.4f}{:>14.4f}{:18s}APPROX POSITION XYZ\n".format(
                pos_x, pos_y, pos_z, ""))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        #         0.0000        0.0000        0.0000                  ANTENNA: DELTA H/E/N
        fid.write("{:>14.4f}{:>14.4f}{:>14.4f}{:18s}ANTENNA: DELTA H/E/N\n"
                  "".format(meta["antenna_height"], meta["antenna_east"],
                            meta["antenna_north"], ""))

        if "ant_vehicle_x" in meta:
            fid.write("{:>14.4f}{:>14.4f}{:>14.4f}{:18s}ANTENNA: DELTA X/Y/Z\n"
                      "".format(meta["ant_vehicle_x"], meta["ant_vehicle_y"],
                                meta["ant_vehicle_z"], ""))

        # TODO: ANTENNA:PHASECENTER
        # TODO: ANTENNA:B.SIGHT XYZ
        # TODO: ANTENNA:ZERODIR AZI
        # TODO: ANTENNA:ZERODIR XYZ
        # TODO: CENTER OF MASS: XYZ

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # G   26 C1C C1P L1C L1P D1C D1P S1C S1P C2P C2W C2S C2L C2X  SYS / # / OBS TYPES
        #        L2P L2W L2S L2L L2X D2P D2W D2S D2L D2X S2P S2W S2S  SYS / # / OBS TYPES
        # R   16 C1C C1P L1C L1P D1C D1P S1C S1P C2C C2P L2C L2P D2C  SYS / # / OBS TYPES
        #        D2P S2C S2P                                          SYS / # / OBS TYPES
        for sys in sorted(meta["obstypes"]):
            obstypes = meta["obstypes"][sys].copy()
            num_lines = int(len(obstypes) / 13) + 1
            for line in range(0, num_lines):
                num_obstypes = len(obstypes)
                num_obstypes_str = str(num_obstypes) if line == 0 else ""
                spaces = "  " if meta["version"].startswith("2") else " "
                if num_obstypes <= 13:
                    fid.write("{:1s}{:>5s} {:53s}SYS / # / OBS TYPES\n".format(
                        sys, num_obstypes_str, spaces.join(obstypes)))
                    if num_obstypes == 13:
                        break
                else:
                    fid.write("{:1s}{:>5s} {:53s}SYS / # / OBS TYPES\n".format(
                        sys, num_obstypes_str, spaces.join(obstypes[0:13])))
                    del obstypes[0:13]

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # DBHZ                                                        SIGNAL STRENGTH UNIT
        if "signal_strength_unit" in meta:
            fid.write("{:60s}SIGNAL STRENGTH UNIT\n".format(
                meta["signal_strength_unit"]))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        #      1.000                                                  INTERVAL
        if "interval" in meta:
            fid.write("{:>10.3f}{:50s}INTERVAL\n".format(sampling_rate, ""))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        #   2016    03    01    00    00   00.0000000     GPS         TIME OF FIRST OBS
        if not meta["time_sys"] == "GPS":
            log.fatal("Time system '{}' is not implemented so far in Where.",
                      meta["time_sys"])
        d = dset.time.gps.datetime[0]
        fid.write(
            "{:>6d}{:>6d}{:>6d}{:>6d}{:>6d}{:>13.7f}{:>8s}{:9s}TIME OF FIRST OBS\n"
            "".format(d.year, d.month, d.day, d.hour, d.minute, d.second,
                      time_sys, ""))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        #   2016    03    01    23    59   59.0000000     GPS         TIME OF LAST OBS
        if "time_last_obs" in meta:
            d = dset.time.gps.datetime[-1]
            fid.write(
                "{:>6d}{:>6d}{:>6d}{:>6d}{:>6d}{:>13.7f}{:>8s}{:9s}TIME OF LAST OBS\n"
                "".format(d.year, d.month, d.day, d.hour, d.minute, d.second,
                          time_sys, ""))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        #      0                                                      RCV CLOCK OFFS APPL
        if "rcv_clk_offset_flag" in meta:
            fid.write("{:>6s}{:54s}RCV CLOCK OFFS APPL\n".format(
                meta["rcv_clk_offset_flag"], ""))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # G APPL_DCB          xyz.uvw.abc//pub/dcb_gps.dat            SYS / DCBS APPLIED
        if "dcbs_applied" in meta:
            for sys in sorted(meta["dcbs_applied"]):
                if sys in meta["obstypes"]:
                    fid.write("{:1s} {:17s} {:40s}SYS / DCBS APPLIED\n"
                              "".format(sys, meta["dcbs_applied"][sys]["prg"],
                                        meta["dcbs_applied"][sys]["url"]))

        if "pcvs_applied" in meta:
            for sys in sorted(meta["pcvs_applied"]):
                if sys in meta["obstypes"]:
                    fid.write("{:1s} {:17s} {:40s}SYS / PCVS APPLIED\n"
                              "".format(sys, meta["pcvs_applied"][sys]["prg"],
                                        meta["pcvs_applied"][sys]["url"]))
        # TODO: SYS / SCALE FACTOR

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        # G L1C  0.00000  12 G01 G02 G03 G04 G05 G06 G07 G08 G09 G10  SYS / PHASE SHIFT
        #                    G11 G12                                  SYS / PHASE SHIFT
        # G L1W  0.00000                                              SYS / PHASE SHIFT
        if "phase_shift" in meta:
            num_sat_limit = 10
            for sys, obstypes in sorted(meta["phase_shift"].items()):

                if sys not in meta["obstypes"]:
                    continue

                if not obstypes:
                    # Note: Phase corrections are unknown.
                    fid.write("{:1s}{:59s}SYS / PHASE SHIFT\n".format(sys, ""))
                    continue

                for type_ in obstypes:
                    if type_ in meta["obstypes"][sys]:
                        # TODO: Remove unused satellites
                        sats = meta["phase_shift"][sys][type_]["sat"].copy()
                        num_lines = int(len(sats) / num_sat_limit) + 1
                        for line in range(0, num_lines):
                            num_sats = len(sats)
                            if line == 0:
                                num_sats_str = str(num_sats) if num_sats > 0 else ""
                                phase_shift_str = "{:1s} {:>3s} {:>8.5f}{:>4s}".format(
                                    sys, type_, float(meta["phase_shift"][sys][type_]["corr"]), num_sats_str
                                )
                            else:
                                phase_shift_str = ""

                            if num_sats <= num_sat_limit:
                                fid.write("{:18s} {:41s}SYS / PHASE SHIFT\n".format(phase_shift_str, " ".join(sats)))
                            else:
                                fid.write(
                                    "{:18s} {:41s}SYS / PHASE SHIFT\n".format(
                                        phase_shift_str, " ".join(sats[0:num_sat_limit])
                                    )
                                )
                                del sats[0:num_sat_limit]

        # TODO: WAVELENGTH FACT L1/2  -> given only for RINEX 2.11, but could be of interest in RINEX file

        if "R" in meta["obstypes"]:
            # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
            #  22 R01  1 R02 -4 R03  5 R04  6 R05  1 R06 -4 R07  5 R08  6 GLONASS SLOT / FRQ #
            #     R09 -6 R10 -7 R11  0 R13 -2 R14 -7 R15  0 R17  4 R18 -3 GLONASS SLOT / FRQ #
            #     R19  3 R20  2 R21  4 R22 -3 R23  3 R24  2               GLONASS SLOT / FRQ #
            # TODO: Remove unused satellites from 'GLONASS SLOT / FRQ #'
            if "glonass_slot" in meta:
                num_sat = len(meta["glonass_slot"])
                glonass_slots = dict(meta["glonass_slot"])
                num_lines = int(num_sat / 8) + 1
                for idx in range(0, num_lines):
                    line = "{:>3d}".format(num_sat) if idx == 0 else "   "
                    for num, (slot, bias) in enumerate(
                            sorted(glonass_slots.items())):
                        if num == 8:
                            break
                        line = line + " {:3s} {:>2d}".format(slot, bias)
                        del glonass_slots[slot]
                    fid.write(line.ljust(60) + "GLONASS SLOT / FRQ #\n")

            # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
            #  C1C  -10.000 C1P  -10.123 C2C  -10.432 C2P  -10.634        GLONASS COD/PHS/BIS
            line = ""
            if "glonass_bias" in meta:
                for type_, bias in sorted(meta["glonass_bias"].items()):
                    if type_ in meta["obstypes"]["R"]:
                        line = line + " {:3s} {:8.3f}".format(
                            type_, float(bias))
            fid.write(line.ljust(60) + "GLONASS COD/PHS/BIS\n")

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        #     16    17  1851     3                                    LEAP SECONDS
        #
        # NOTE: Entries 'future_past_leap_seconds', 'week', 'week_day' and 'time_sys' are not given in RINEX version
        #       2.11.
        if "leap_seconds" in meta:
            if meta["version"].startswith("2"):
                fid.write("{:>6d}{:54s}LEAP SECONDS\n".format(
                    int(meta["leap_seconds"]["leap_seconds"]), ""))
            else:
                fid.write("{:>6d}{:>6s}{:>6s}{:>6s}{:3s}{:33s}LEAP SECONDS\n"
                          "".format(
                              int(meta["leap_seconds"]["leap_seconds"]),
                              meta["leap_seconds"]["future_past_leap_seconds"],
                              meta["leap_seconds"]["week"],
                              meta["leap_seconds"]["week_day"],
                              meta["leap_seconds"]["time_sys"],
                              "",
                          ))

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        #     71                                                      # OF SATELLITES
        fid.write("{:>6d}{:54s}# OF SATELLITES\n".format(num_satellites, ""))
        # TODO: PRN / # OF OBS

        # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
        #                                                             END OF HEADER
        fid.write("{:60s}END OF HEADER\n".format(""))

        # ================================
        #  Write RINEX observation data
        # ================================
        #
        epoch_prev = dset.time.gps.datetime[0]
        first_obs_in_epoch = True
        obs_epoch_cache = dict()

        # Loop over all observations
        for idx in range(0, dset.num_obs):

            # Write epoch (reading of observations from epoch 'd_prev' in 'obs_epoch_cache' is finished)
            epoch = dset.time.gps.datetime[idx]
            if epoch_prev != epoch:
                num_sat = idx - idx_epoch_start  # idx_epoch_start is set in the first iteration (see below), before this branch is entered
                _write_epoch(dset, fid, obs_epoch_cache, idx, num_sat,
                             epoch_prev)
                first_obs_in_epoch = True

            if first_obs_in_epoch is True:
                obs_epoch_cache = dict()
                idx_epoch_start = idx
                first_obs_in_epoch = False

            # Save observations for a given epoch in obs_epoch_cache
            #
            # NOTE: The caching is mainly necessary to determine the number of satellites for an epoch and to be
            #       flexible in what kind of order the observation types should be written. The order of the
            #       observation types for a given GNSS is defined via dset.meta['obstypes'] variable.
            if dset.satellite[idx] in obs_epoch_cache:
                log.fatal("Satellite {} occurs twice in epoch {}.",
                          dset.satellite[idx], dset.time.gps.datetime[idx])

            for type_ in dset.meta["obstypes"][dset.system[idx]]:
                lli = " " if dset[type_ + "_lli"][idx] == 0.0 else str(
                    int(dset[type_ + "_lli"][idx]))
                snr = " " if dset[type_ + "_snr"][idx] == 0.0 else str(
                    int(dset[type_ + "_snr"][idx]))
                obs_epoch_cache.setdefault(dset.satellite[idx],
                                           list()).append({
                                               "obs":
                                               dset[type_][idx],
                                               "lli":
                                               lli,
                                               "snr":
                                               snr
                                           })
            epoch_prev = epoch

        # Write last epoch
        num_sat = (idx + 1) - idx_epoch_start
        _write_epoch(dset, fid, obs_epoch_cache, idx, num_sat, epoch_prev)
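
The helper `_write_epoch` is not shown in this example. Below is a minimal sketch of how such a helper might emit the epoch record followed by the cached observations; the record layout is based on the RINEX 3.03 observation format, but the helper body itself is an assumption, not the original Where implementation.

# Hedged sketch of a possible _write_epoch helper (assumed, not the original Where code)
def _write_epoch(dset, fid, obs_epoch_cache, idx, num_sat, epoch):
    """Sketch: write one observation epoch (epoch flag 0 assumed) and its cached observations"""
    # Epoch record: '> YYYY MM DD HH MM SS.SSSSSSS  flag num_sat'
    fid.write(
        "> {:4d} {:02d} {:02d} {:02d} {:02d}{:11.7f}  {:1d}{:3d}\n".format(
            epoch.year, epoch.month, epoch.day, epoch.hour, epoch.minute,
            epoch.second + epoch.microsecond * 1e-6, 0, num_sat,
        )
    )
    # One line per satellite: 3-character satellite id followed by 16-character observation
    # fields (F14.3 value + loss-of-lock indicator + signal strength indicator)
    for sat, observations in sorted(obs_epoch_cache.items()):
        line = "{:3s}".format(sat)
        for obs in observations:
            line += "{:>14.3f}{:1s}{:1s}".format(obs["obs"], obs["lli"], obs["snr"])
        fid.write(line.rstrip() + "\n")
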
Example #2
def sisre_comparison_report(dset):
    """Compare SISRE datasets

    Args:
        dset (list):       List with different SISRE datasets. The datasets contain the data.
    """
    dsets = dset
    df_merged = pd.DataFrame()

    for name, dset in dsets.items():

        if dset.num_obs == 0:
            log.warn(f"Dataset '{name}' is empty.")
            continue

        user_type_name = _get_user_type_name(name)
        df = dset.as_dataframe(
            fields=["satellite", "system", "sisre",
                    "time.gps"])  # , index="time.gps")
        df = df.rename(columns={"sisre": user_type_name})

        if df_merged.empty:
            df_merged = df
            continue
        df_merged = df_merged.merge(df,
                                    on=["satellite", "system", "time.gps"],
                                    how="outer")

    if df_merged.empty:
        log.fatal(f"All given datasets are empty [{', '.join(dsets.keys())}].")

    with files.open(file_key="output_sisre_comparison_report",
                    file_vars=dsets[next(iter(dsets))].vars,
                    mode="wt") as fid:
        _header(fid)
        fid.write("#Comparison of SISRE analyses\n")

        # Generate figure directory to save figures generated for SISRE report
        figure_dir = files.path("output_sisre_comparison_report_figure",
                                file_vars=dset.vars)
        figure_dir.mkdir(parents=True, exist_ok=True)

        _plot_bar_sisre_satellite_percentile(df_merged,
                                             fid,
                                             figure_dir,
                                             threshold=False)
        _plot_bar_sisre_satellite_percentile(df_merged,
                                             fid,
                                             figure_dir,
                                             threshold=True)
        _plot_bar_sisre_signal_combination_percentile(df_merged,
                                                      fid,
                                                      figure_dir,
                                                      threshold=False)
        _plot_bar_sisre_signal_combination_percentile(df_merged,
                                                      fid,
                                                      figure_dir,
                                                      threshold=True)
        _plot_bar_sisre_signal_combination_rms(df_merged, fid, figure_dir)

    # Generate PDF from Markdown file
    _markdown_to_pdf(dset)
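
For illustration, after the outer merges `df_merged` contains the shared key columns plus one SISRE column per dataset (named via `_get_user_type_name`). A small standalone pandas example of the same merge pattern, with made-up column names `os_user` and `cs_user`:

# Standalone illustration of the merge pattern used above (example data, not Where code)
import pandas as pd

df_a = pd.DataFrame({"satellite": ["E01", "E02"], "system": ["E", "E"],
                     "time.gps": [0.0, 0.0], "os_user": [0.12, 0.20]})
df_b = pd.DataFrame({"satellite": ["E01", "E03"], "system": ["E", "E"],
                     "time.gps": [0.0, 0.0], "cs_user": [0.15, 0.31]})

merged = df_a.merge(df_b, on=["satellite", "system", "time.gps"], how="outer")
print(merged)  # One row per satellite/epoch, NaN where a dataset has no matching observation
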
Example #3
def _ignore_epochs_exceeding_validity_and_unhealthy_satellites(dset: "Dataset") -> np.ndarray:
    """Remove GNSS observations which exceeds the validity length of a broadcast navigation record and unhealthy
       satellites

    How long a broadcast ephemeris block is valid depends on the GNSS:

        - BeiDou:   Not defined. Fit interval of 1 hour is used. This is an assumption due to update rate of ephemeris 
                    of 1 hours.
        - Galileo:  See appendix C.4.4.1 in :cite:`galileo-os-sdd`.
        - GPS:      Indicates the curve-fit interval used by the GPS Control Segment in determining the ephemeris
                    parameters, which is given in HOURS (see section 6.6 in :cite:`rinex2`).
        - IRNSS:    Not defined. Fit interval of 2 hour is used. This is an assumption due to update rate of ephemeris
                    of 2 hours.
        - QZSS:     Fit interval is given as flag (see section 4.1.2.7 in :cite:`is-qzss-pnt-001`):
                         0 - 2 hours
                         1 - more than 2 hours
                         blank - not known

    Method 'brdc.unhealthy_satellites' declares satellite as unhealhty, if at least one satellite ephemeris epoch is set
    to unhealthy. But it can happen that a satellite is only unhealthy for a certain period of the day. This is not 
    handled via 'brdc.unhealthy_satellites' method. Therefore this routine checks every epoch, if a satellite ephemeris
    epoch is healthy or not. If the option 'ignore_unhealthy_satellites' is set to 'True', than satellite observation
    are removed, which are flagged as unhealthy.

    Args:
        dset:   A Dataset containing model data.
    """
    brdc = apriori.get(
        "orbit",
        rundate=dset.rundate,
        station=dset.vars["station"],
        apriori_orbit="broadcast",
    )

    brdc_block_idx = brdc._get_brdc_block_idx(dset)
    keep_idx = np.ones(dset.num_obs, dtype=bool)

    ignore_unhealthy_satellites = config.tech.get("ignore_unhealthy_satellites", default=True).bool
    log.info(f"The following satellites are unhealthy: {', '.join(brdc.unhealthy_satellites())}")

    # Loop over subset of Dataset, which includes only observations from available satellites
    for obs, (idx, time) in enumerate(zip(brdc_block_idx, dset.time.gps.mjd)):

        # Remove unhealthy satellites epochwise
        if ignore_unhealthy_satellites:
            if brdc.dset_edit.sv_health[idx] > 0:
                keep_idx[obs] = False
                continue

        tk = (time - brdc.dset_edit.toe.gps.mjd[idx]) * Unit.day2second
        sys = np.array(dset.system)[obs]

        if sys == "C":
            # TODO: :cite:`bds-sis-icd-2.1` does not define validity length of navigation record
            fit_interval = 1.0  # Assumption due to update rate of ephemeris of 1 hours
            toe_limit = fit_interval * 3600.0

        elif sys == "E":
            # Galileo navigation data record is valid for 4 hours after time of ephemeris due to Appendix C.4.4.1 in
            # Galileo-OS-SDD (2016).
            fit_interval = 4.0
            toe_limit = fit_interval * 3600.0

            # Only observation epochs after time of ephemeris should be used for Galileo. Therefore epochs with negative
            # tk has to be removed.
            if tk < 0:
                keep_idx[obs] = False
                continue

        elif sys == "G":

            # TODO: According to :cite:`rinex3`, the implementation of the fit interval field from the GPS navigation
            #       message was an issue for RINEX 3.02. Some implementations wrote the flag and others wrote a time
            #       interval. The RINEX 3.03 release specifies that the fit interval should be a time period for GPS
            #       and a flag for QZSS. TPP navigation files write a flag instead of a time interval for GPS, whereby
            #       0 = 4h and 1 > 4h. Should this be handled in the RINEX parser?

            # GPS navigation data record is valid for (TOE - fit_interval/2 <= epoch < TOE + fit_interval/2)
            fit_interval = 4.0 if brdc.dset_edit.fit_interval[idx] == 0.0 else brdc.dset_edit.fit_interval[idx]
            toe_limit = fit_interval * 1800.0  # toe_limit = fit_interval/2 * 3600 = fit_interval * 1800

        elif sys == "I":
            # TODO: :cite:`irnss-icd-sps` does not define validity length of navigation record
            fit_interval = 2.0  # Assumption based on the ephemeris update rate of 2 hours
            toe_limit = fit_interval * 3600.0

        elif sys == "J":
            # TODO: According to :cite:`rinex3`, the implementation of the fit interval field from the GPS navigation
            #       message was an issue for RINEX 3.02. Some implementations wrote the flag and others wrote a time
            #       interval. The RINEX 3.03 release specifies that the fit interval should be a time period for GPS
            #       and a flag for QZSS. TPP navigation files write a flag instead of a time interval for GPS, whereby
            #       0 = 4h and 1 > 4h. Should this be handled in the RINEX parser?

            # QZSS navigation data record is valid for (TOE - fit_interval/2 <= epoch < TOE + fit_interval/2) due to
            # section 4.1.2.7 in :cite:`is-qzss-pnt-001`
            fit_interval = 2.0 if brdc.dset_edit.fit_interval[idx] == 0.0 else brdc.dset_edit.fit_interval[idx]
            toe_limit = fit_interval * 1800.0  # toe_limit = fit_interval/2 * 3600 = fit_interval * 1800

        else:
            log.fatal(f"Broadcast ephemeris validity length interval is not defined for GNSS {sys!r}.")

        # Remove observations, if they exceed fit interval limit 'toe_limit'
        if abs(tk) > toe_limit:
            keep_idx[obs] = False

        ##+DEBUG
        #    print('DEBUG: {:6s} {:8d} {:4s} {}  TOE({})  abs({:7.0f}) > {:6.0f}'.format('REJECT', obs,
        #          np.array(dset.satellite)[obs], dset.time.gps.datetime[obs],
        #          brdc.dset_edit.toe.gps.datetime[idx], tk, toe_limit))
        # else:
        #    print('DEBUG: {:6s} {:8d} {:4s} {}  TOE({})  abs({:7.0f}) <={:6.0f}'.format('KEEP', obs,
        #          np.array(dset.satellite)[obs], dset.time.gps.datetime[obs],
        #          brdc.dset_edit.toe.gps.datetime[idx], tk, toe_limit))
        ##-DEBUG

    num_removed_obs = dset.num_obs - np.count_nonzero(keep_idx)
    log.info(
        f"Removing {num_removed_obs} observations based on _ignore_epochs_exceeding_validity_and_unhealthy_satellites"
    )

    # log.debug('Following entries are removed: {}\n', 'DEBUG:  \n'.join([s+t.strftime('  %Y-%m-%d %H:%M:%S (GPS)')
    #                                                              for s, t in zip(np.array(dset.satellite)[keep_idx],
    #                                                                              dset.time.gps.datetime[keep_idx])]))

    return keep_idx
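
As a small numeric illustration of the time-of-ephemeris window used above (arbitrary example values): for GPS with fit interval flag 0 the fit interval becomes 4 hours, so an observation is kept only if it lies within two hours of the time of ephemeris.

# Illustration of the GPS validity window check above (arbitrary example values)
fit_interval = 4.0                    # hours, from fit interval flag 0
toe_limit = fit_interval * 1800.0     # fit_interval / 2 * 3600 = 7200 seconds
tk = (58000.10 - 58000.05) * 86400.0  # observation epoch minus TOE in seconds = 4320 s
keep = abs(tk) <= toe_limit           # True: 4320 s lies within +/- 2 hours of TOE
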
Example #4
def linear_combination(type_: str,
                       dset: "Dataset") -> Dict[str, Dict[str, Any]]:
    """Calculate linear combination of observations for given linear combination type and same observation type
    (code, phase, doppler, snr)

    Args:
        dset:    Dataset
        type_:   Type of linear combination, which can be 'geometry_free', 'ionosphere_free', 'narrow_lane' or 
                 'wide_lane'.

    Returns:
        Dictionary with observation type as key (code, phase, doppler and/or snr) and dictionary with array with 
        linear combination values as values in [m] and name of combined observations.
    """
    func = {
        "geometry_free": geometry_free_linear_combination,
        "ionosphere_free": ionosphere_free_linear_combination,
        "narrow_lane": narrowlane_linear_combination,
        "wide_lane": widelane_linear_combination,
    }

    cfg_obs_code = config.tech.gnss_select_obs.obs_code.list
    linear_comb = dict()
    for obs_code in cfg_obs_code:
        linear_comb[obs_code] = dict(val=np.zeros(dset.num_obs))

    for sys in dset.unique("system"):
        idx = dset.filter(system=sys)

        # Get observations for the 1st and 2nd frequency
        #
        # NOTE: The GNSS observation types defined in meta variable 'obstypes' has a defined order, which is determined
        #       by the given observation types for each GNSS and the priority list.
        #
        obs_num = 0
        for obs_code in cfg_obs_code:

            obs_1 = dset.meta["obstypes"][sys][obs_num]
            obs_2 = dset.meta["obstypes"][sys][obs_num + 1]
            linear_comb[obs_code].setdefault("sys_obs", dict()).update(
                {sys: [obs_1, obs_2]})

            log.debug(
                f"Generate {type_} combination for GNSS '{sys}' and {obs_code} observations {obs_1} and {obs_2}."
            )

            if type_ == "geometry_free":
                linear_comb[obs_code]["val"][idx] = func[type_](
                    dset.obs[obs_1][idx], dset.obs[obs_2][idx])
            else:
                f1 = getattr(enums, "gnss_freq_" +
                             sys)["f" + obs_1[1]]  # Frequency of 1st band
                f2 = getattr(enums, "gnss_freq_" +
                             sys)["f" + obs_2[1]]  # Frequency of 2nd band
                log.debug(
                    f"Frequencies for {type_} combination: f1 = {f1} Hz ({obs_1}), f2 = {f2} Hz ({obs_2})."
                )

                try:
                    linear_comb[obs_code]["val"][idx] = func[type_](
                        dset.obs[obs_1][idx], dset.obs[obs_2][idx], f1, f2)
                except KeyError:
                    log.fatal(f"Linear combination 'type_' is not defined.")

            obs_num += 2

    return linear_comb
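
The combination functions referenced in the `func` dictionary are defined elsewhere in the module. The sketches below follow the standard dual-frequency formulas; the names match the dictionary above, but the bodies are assumptions and may differ from the actual Where implementations.

# Hedged sketches of the combination functions, based on the standard dual-frequency formulas
import numpy as np

def geometry_free_linear_combination(obs_1: np.ndarray, obs_2: np.ndarray) -> np.ndarray:
    """Geometry-free combination: removes geometry and clocks, keeps the ionospheric delay"""
    return obs_1 - obs_2

def ionosphere_free_linear_combination(obs_1, obs_2, f1: float, f2: float) -> np.ndarray:
    """Ionosphere-free combination: eliminates the first-order ionospheric delay"""
    return (f1 ** 2 * obs_1 - f2 ** 2 * obs_2) / (f1 ** 2 - f2 ** 2)

def widelane_linear_combination(obs_1, obs_2, f1: float, f2: float) -> np.ndarray:
    """Wide-lane combination: long effective wavelength, useful for ambiguity resolution"""
    return (f1 * obs_1 - f2 * obs_2) / (f1 - f2)

def narrowlane_linear_combination(obs_1, obs_2, f1: float, f2: float) -> np.ndarray:
    """Narrow-lane combination: reduces noise compared to the single observations"""
    return (f1 * obs_1 + f2 * obs_2) / (f1 + f2)
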
Example #5
    def parse_observation_epoch(self, line, cache):
        """Parse observation epoch information of RINEX observation record

        Only the last two digits of the year are given in the observation epoch, therefore it is necessary to
        determine the complete 4-digit year based on the `TIME OF FIRST OBS` and `TIME OF LAST OBS` RINEX header
        entries.

        In addition, the RINEX observations are decimated based on the given sampling rate.
        """
        # Reject empty lines
        line["year"] = line["year"].strip()
        if (not line["year"].isnumeric()) and (not line["sat_list"]):
            return

        # Reject comment lines
        if line["sat_list"][28:29].isalpha():
            return

        # Read observation epoch entry
        if line["year"]:

            # Get correct 4-digit year (in observation epoch only 2-digit year is given)
            first_obs_year = self.meta["time_first_obs"][0:4]
            year = int(first_obs_year[0:2] + line["year"].zfill(2))

            # Check if 'year' is unique in the complete RINEX file
            if "time_last_obs" in self.meta:
                last_obs_year = self.meta["time_last_obs"][0:4]

                if first_obs_year != last_obs_year:
                    log.fatal(
                        "Different year for first and last observation is given in RINEX  with ({}) and ({}). "
                        "RINEX routine has to be improved.",
                        first_obs_year,
                        last_obs_year,
                    )

            cache["sat_list"] = list()
            cache["obs_time"] = "{year}-{month:02d}-{day:02d}T{hour:02d}:{minute:02d}:{second:010.7f}".format(
                year=year,
                month=int(line["month"]),
                day=int(line["day"]),
                hour=int(line["hour"]),
                minute=int(line["minute"]),
                second=float(line["second"]),
            )
            cache["obs_sec"] = (int(line["hour"]) * unit.hour2second +
                                int(line["minute"]) * unit.minute2second +
                                float(line["second"]))
            cache["epoch_flag"] = int(line["epoch_flag"])
            cache["rcv_clk_offset"] = _float(line["rcv_clk_offset"])

            # Decimate RINEX observation defined by sampling rate [seconds]
            if cache["obs_sec"] % self.sampling_rate != 0:
                cache["obs_sec"] = None  # Ignore epoch

            cache["num_sat"] = int(line["num_sat"])

        if (line["epoch_flag"].strip() != "0") and line["epoch_flag"].strip():
            log.fatal(
                "Epoch {} is not ok, which is indicated by epoch flag {}. How it should be handled in Where?",
                cache["obs_time"],
                line["epoch_flag"],
            )  # TODO: Handle flagged epochs

        # Generate satellite list for given epoch
        for i in range(0, len(line["sat_list"]), 3):
            sat = line["sat_list"][i:i + 3].rstrip()
            if sat:
                sat = sat[0].replace(" ", "G") + sat[1].replace(
                    " ", "0") + sat[2]  # Blank satellite system
                cache["sat_list"].append(sat)  # identifier indicates GPS ('G')

        cache["len_sat_list"] = len(cache["sat_list"])
Example #6
def main(date: "datedoy", pipeline: "pipeline", items: "option", specifier: "option"):
    log.init(log_level="info")
    dsets = dict()

    # Additional options
    stage = util.read_option_value("--stage")
    writer_names = util.read_option_value("--writers").replace(",", " ").split()
    items_ = [s.strip() for s in items.split(",")]

    # Get optional options
    label = util.read_option_value("--label", default="None")
    # TODO label = "last" if label == "last" else label
    station = util.read_option_value("--station", default="")
    id_ = util.read_option_value("--id", default="")

    # Get dataset variables
    dset_vars = config.create_file_vars(rundate=date, pipeline=pipeline)

    # Read datasets for given specifier
    if specifier == "id":
        for id_ in items_:
            try:
                dset = dataset.Dataset().read(
                    rundate=date, pipeline=pipeline, stage=stage, label=label, id=id_, station=station
                )
            except OSError:
                log.warn(f"No data to read for Dataset id '{id_}'.")
                continue

            dset.vars.update(dset_vars)
            dset.vars["id"] = id_
            dsets.update({id_: dset})

    elif specifier == "station":
        for station in items_:

            try:
                dset = dataset.Dataset().read(
                    rundate=date, pipeline=pipeline, stage=stage, label=label, id=id_, station=station
                )
            except OSError:
                log.warn(f"No data to read for Dataset station '{station}'.")
                continue

            dset.vars.update(dset_vars)
            dset.vars["station"] = station
            dsets.update({station: dset})

    elif specifier == "stage":
        for stage in items_:

            try:
                dset = dataset.Dataset().read(
                    rundate=date, pipeline=pipeline, stage=stage, label=label, id=id_, station=station
                )
            except OSError:
                log.warn(f"No data to read for Dataset stage '{stage}'.")
                continue
            dset.vars.update(dset_vars)
            dset.vars["stage"] = stage
            dsets.update({stage: dset})
    else:
        log.fatal(f"Specifier {specifier} is not defined. It should be either 'id', 'station' or 'stage'.")

    if len(dsets) == 0:
        log.fatal(f"All given datasets are empty [{', '.join(dsets.keys())}].")
    elif len(dsets) == 1:
        log.warn(f"Nothing to compare. Only dataset '{list(dsets.keys())[0]}' is available.")

    # Loop over writers
    for writer in writer_names:
        write(writer, dset=dsets)
Example #7
def gpt2w_wrapper(mjd, lat, lon, hell):
    """Calculates meteorological data and mapping function coefficients based on GPT2w model

    The functions calls the GPT2w library routine ``gpt2w_1w.f`` (see
    http://ggosatm.hg.tuwien.ac.at/DELAY/SOURCE/GPT2w). The Fortran routine ``gpt2w_1w.f`` reads the grid file
    ``gpt2_1wA.grd``, which should be available in the same folder, where the Fortran programs runs. Therefore we
    change the current directory to the GPT2w source directory, so that ``gpt2w_1w.f`` can read the grid file.

    Due to performance reasons the GPT2w values are not determined for each observation. The call of the Fortran
    routine ``gpt2w_1w.f`` takes time, because the grid file ``gpt2_1wA.grd`` has to be read for each
    observation. Instead the GPT2w values are calculated only once for each unique day (modified Julian date rounded to
    integer) and saved in the cache _GPT2W. The final GPT2W values are computed by a linear interpolation of the daily
    determined GPT2W values.  The difference between the use of routine ``gpt2w_1w.f`` for each observation and the
    linear interpolation between daily solution is on the submillimeter level and can therefore be neglected.

    Args:
        mjd (numpy.float64):  Modified Julian date.
        lat (list):           Array with latitude for each station in [rad].
        lon (list):           Array with longitude for each station in [rad].
        hell (list):          Array with height for each station in [m].

    Returns:
        numpy.ndarray:  Array with following entries:

    =======  ===========  =======================================================
     Index    Unit         Description
    =======  ===========  =======================================================
     [0]      hPa          Pressure value
     [1]      Celsius      Temperature values
     [2]      degree/km    Temperature lapse rate
     [3]      K            Mean temperature of the water vapor
     [4]      hPa          Water vapor pressure
     [5]                   Hydrostatic mapping function coefficient ah
     [6]                   Wet mapping function coefficient aw
     [7]                   Water vapor decrease factor
     [8]      m            Geoid undulation (based on 9x9 EGM model)
    =======  ===========  =======================================================
    """
    nstat = len(lat)  # Number of stations
    it = 0  # Use of time variations (annual and semiannual terms)

    # TODO: Case not handled if several stations are included in Dateset. Is that the case for VLBI?

    if not (len(lat) == len(lon) == len(hell)):
        log.fatal(
            "Length of latitude, longitute and ellipsoidal height array is not equal."
        )

    # Change directory so that gpt2w_1w.f can read the gpt2_1wA.grd file in the GPT2w source directory
    current_dir = os.getcwd()
    os.chdir(files.path(ext_gpt2w.__name__))

    # Loop over all unique dates (rounded to integer value)
    for date in _rounded_dates(mjd):

        # Check if date is already included in cache
        if date not in _GPT2W:
            _GPT2W[date] = np.array(
                ext_gpt2w.gpt2_1w(date, lat, lon, hell, nstat, it)).reshape(-1)
    os.chdir(current_dir)

    # Linear interpolation between two daily GPT2W solutions
    mjd_int, mjd_frac = divmod(mjd, 1)
    output = _GPT2W[mjd_int] + mjd_frac * (_GPT2W[mjd_int + 1] -
                                           _GPT2W[mjd_int])

    return output
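
The cache `_GPT2W` and the helper `_rounded_dates` are defined elsewhere in the module. A hedged sketch of what `_rounded_dates` could look like is given below: it has to yield the integer MJD of each observation day plus the following day, so that `_GPT2W[mjd_int]` and `_GPT2W[mjd_int + 1]` both exist for the linear interpolation. This is an assumed implementation, not the original one.

# Hedged sketch of the _rounded_dates helper (assumed behavior, not the original code)
import numpy as np

def _rounded_dates(mjd):
    """Return unique integer MJDs covering the given epochs and the following day"""
    days = np.floor(np.atleast_1d(mjd))
    return np.unique(np.concatenate((days, days + 1)))
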
Example #8
    def write_to_dataset(self, dset):
        """Write data based on GNSS SP3 orbit file

        TODO:
            Add 'vel' and 'sat_clock_rate' entries to Dataset.

        Args:
            dset (Dataset): Dataset with following fields:

        ====================  ===============  =======  ========================================================
         Field                 Type             Unit     Description
        ====================  ===============  =======  ========================================================
         sat_clock_bias        numpy.ndarray     m       Satellite clock offset from GPS time
         sat_pos               PositionTable     m       Satellite position
         satellite             numpy.ndarray             Satellite PRN number
         system                numpy.ndarray             GNSS identifier
         time                  TimeTable                 Observation epoch in GPS time
        ====================  ===============  =======  ========================================================

            and following Dataset `meta` data:

        ====================  ===========================================================================
         Entry                 Description
        ====================  ===========================================================================
        agency                 Agency responsible for generating the SP3 file
        base_clkrate           Base number used for computing standard deviation of clock bias and clock rate
        base_posvel            Base number used for computing standard deviation of position and velocity
        coord_sys              Coordinate system
        data_used              Data used
        day                    Day of Gregorian date of first orbit epoch
        epoch_interval         Epoch interval between observation entries
        file_type              File type (G - GPS only, M - mixed files, R - GLONASS only, L - LEO, E - GALILEO)
        gpssec                 GPS seconds (seconds of GPS week) at the first orbit epoch
        gpsweek                GPS week at the first orbit epoch
        hour                   Hour of Gregorian date of first orbit epoch
        minute                 Minute of Gregorian date of first orbit epoch
        mjd_frac               Fractional part of Modified Julian Day at the first orbit epoch
        mjd_int                Integer part of Modified Julian Day at the first orbit epoch
        month                  Month of Gregorian date of first orbit epoch
        num_epoch              Number of epochs in the ephemeris file
        orb_type               Orbit type (F - fitted, E - extrapolated or predicted, B - broadcast, HLM - Helmert ...)
        pv_flag                Position (P) or velocity (V) flag
        second                 Seconds of Gregorian date of first orbit epoch
        time_sys               Time system (GPS, GLO, GAL, TAI, UTC)
        version                Format version (e.g. a (GPS only), b (GPS, GLONASS), c, d)
        year                   Year of Gregorian date of first orbit epoch
        ====================  ===========================================================================

        """
        dset.num_obs = len(self.data["time"])
        dset.meta.update(self.meta)
        if dset.meta["time_sys"] == "GPS":
            dset.add_time("time", val=self.data["time"], scale="gps")
        elif dset.meta["time_sys"] == "UTC":
            dset.add_time("time", val=self.data["time"], scale="utc")
        else:
            log.fatal(
                f"Time system {dset.meta['time_sys']} is not handled so far in Where"
            )

        dset.add_text("satellite", val=self.data["satellite"])
        dset.add_text("system", val=self.data["system"])
        dset.add_position("sat_pos",
                          time="time",
                          itrs=np.array(self.data["sat_pos"]),
                          unit="meter")
        dset.add_float("sat_clock_bias",
                       val=np.array(self.data["sat_clock_bias"]),
                       unit="meter")
Example #9
    def write_to_dataset(self, dset):
        """Write data based on GNSS SP3 orbit file

        TODO:
            Add 'vel' and 'sat_clock_rate' entries to Dataset.

        Args:
            dset (Dataset): Dataset with following fields:

        ====================  ===============  =======  ========================================================
         Field                 Type             Unit     Description
        ====================  ===============  =======  ========================================================
         sat_clock_bias        numpy.ndarray     m       Satellite clock offset from GPS time
         sat_pos               PositionTable     m       Satellite position
         satellite             numpy.ndarray             Satellite PRN number
         system                numpy.ndarray             GNSS identifier
         time                  TimeTable                 Observation epoch in GPS time
        ====================  ===============  =======  ========================================================

            and following Dataset `meta` data:

        ====================  ===========================================================================
         Entry                 Description
        ====================  ===========================================================================
        agency                 Agency responsible for generating the SP3 file
        base_clkrate           Base number used for computing standard deviation of clock bias and clock rate
        base_posvel            Base number used for computing standard deviation of position and velocity
        coord_sys              Coordinate system
        data_used              Data used
        day                    Day of Gregorian date of first orbit epoch
        epoch_interval         Epoch interval between observation entries
        file_type              File type (G - GPS only, M - mixed files, R - GLONASS only, L - LEO, E - GALILEO)
        gpssec                 GPS seconds (seconds of GPS week) at the first orbit epoch
        gpsweek                GPS week at the first orbit epoch
        hour                   Hour of Gregorian date of first orbit epoch
        minute                 Minute of Gregorian date of first orbit epoch
        mjd_frac               Fractional part of Modified Julian Day at the first orbit epoch
        mjd_int                Integer part of Modified Julian Day at the first orbit epoch
        month                  Month of Gregorian date of first orbit epoch
        num_epoch              Number of epochs in the ephemeris file
        orb_type               Orbit type (F - fitted, E - extrapolated or predicted, B - broadcast, HLM - Helmert ...)
        pv_flag                Position (P) or velocity (V) flag
        second                 Seconds of Gregorian date of first orbit epoch
        time_sys               Time system (GPS, GLO, GAL, TAI, UTC)
        version                Format version (e.g. a (GPS only), b (GPS, GLONASS), c, d)
        year                   Year of Gregorian date of first orbit epoch
        ====================  ===========================================================================

        """
        dset.num_obs = len(self.data["time"])
        dset.meta.update(self.meta)

        # TODO workaround: "isot" does not work for initialization of time field (only 5 decimals for seconds are
        #                  allowed). Therefore self.data["time"] is converted to datetime object.
        from datetime import datetime, timedelta

        date = []
        millisec = []
        for v in self.data["time"]:
            val, val2 = v.split(".")
            date.append(datetime.strptime(val, "%Y-%m-%dT%H:%M:%S"))
            millisec.append(timedelta(milliseconds=int(val2)))

        if dset.meta["time_sys"] == "GPS":
            dset.add_time("time",
                          val=date,
                          val2=millisec,
                          scale="gps",
                          fmt="datetime")
        elif dset.meta["time_sys"] == "UTC":
            dset.add_time("time",
                          val=date,
                          val2=millisec,
                          scale="utc",
                          fmt="datetime")
        else:
            log.fatal(
                f"Time system {dset.meta['time_sys']} is not handled so far in Where."
            )

        dset.add_text("satellite", val=self.data["satellite"])
        dset.add_text("system", val=self.data["system"])
        dset.add_position("sat_pos",
                          time=dset.time,
                          system="trs",
                          val=np.array(self.data["sat_pos"]))
        dset.add_float("sat_clock_bias",
                       val=np.array(self.data["sat_clock_bias"]))
Example #10
def data_handling(dset):
    """Edits data based on SLR handling file

    Args:
        dset:     A Dataset containing model data.

    Returns:
        None: The dataset is edited in place (bias and estimation flag fields are updated).
    """
    handling = apriori.get("slr_handling_file", time=dset.time)

    for station in dset.unique("station"):
        # Estimate range bias E
        intervals = handling.get(station, {}).get("E", [])
        for interval, info in intervals:
            start_x, end_x = interval
            int_idx = dset.filter(station=station) & (dset.time >= start_x) & (
                dset.time <= end_x)
            if np.any(int_idx):
                log.info(
                    f"ILRS handling: Estimating range bias for station {station} in interval {start_x}-{end_x}"
                )
                log.dev(
                    "ILRS Data Handling: What if there is a break in the middle of a pass?"
                )
                dset.estimate_range[:] = np.logical_or(int_idx,
                                                       dset.estimate_range)
        # Apply range bias R
        intervals = handling.get(station, {}).get("R", [])
        for interval, info in intervals:
            start_x, end_x = interval
            int_idx = dset.filter(station=station) & (dset.time >= start_x) & (
                dset.time <= end_x)
            if np.any(int_idx):
                log.info(
                    f"ILRS handling: Applying range bias for station {station} in interval {start_x}-{end_x}"
                )
                RB = info["e_value"]
                if info["unit"] == "mm":
                    dset.range_bias[:] += int_idx * RB * Unit.mm2m
                elif info["unit"] == "ms":
                    dset.range_bias[:] += int_idx * RB * Unit.millisec2seconds * constant.c
                else:
                    log.fatal(
                        "Unknown unit on ILRS Data handling file for range bias applied"
                    )
        # Estimate time bias U
        intervals = handling.get(station, {}).get("U", [])
        for interval, info in intervals:
            start_x, end_x = interval
            int_idx = dset.filter(station=station) & (dset.time >= start_x) & (
                dset.time <= end_x)
            if np.any(int_idx):
                log.warn(
                    f"ILRS handling: Estimating time bias for station {station} in interval {start_x}-{end_x}"
                )
                dset.estimate_time |= int_idx
        # Apply time bias T
        intervals = handling.get(station, {}).get("T", [])
        for interval, info in intervals:
            start_x, end_x = interval
            int_idx = dset.filter(station=station) & (dset.time >= start_x) & (
                dset.time <= end_x)
            if np.any(int_idx):
                log.info(
                    f"ILRS handling: Applying time bias for station {station} in interval {start_x}-{end_x}"
                )
                t_midInterval = Time(start_x + 1 / 2 * (end_x - start_x),
                                     format="datetime")
                TB = info["e_value"]
                drift = info["e_rate"]
                if info["unit"] == "us":
                    time_drifted = (dset.time - t_midInterval).jd * drift
                    dset.time_bias[:] += int_idx * (
                        -np.repeat(TB, dset.num_obs) -
                        time_drifted) * Unit.microsec2sec
                else:
                    log.fatal(
                        "Unknown unit on ILRS Data handling file for time bias applied"
                    )
        # Apply pressure bias P
        intervals = handling.get(station, {}).get("P", [])
        for interval, info in intervals:
            start_x, end_x = interval
            int_idx = dset.filter(station=station) & (dset.time >= start_x) & (
                dset.time <= end_x)
            if np.any(int_idx):
                log.fatal(f"ILRS handling: TODO: Implement pressure bias!")
        # Target signature bias C
        intervals = handling.get(station, {}).get("P", [])
        for interval, info in intervals:
            start_x, end_x = interval
            int_idx = dset.filter(station=station) & (dset.time >= start_x) & (
                dset.time <= end_x)
            if np.any(int_idx):
                log.fatal(
                    f"ILRS handling: TODO: Implement target signature bias!")
    return
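
As a small numeric check of the range bias unit handling above: a bias given in millimeters is scaled by Unit.mm2m, while a bias given in milliseconds is first converted to seconds and then multiplied by the speed of light.

# Numeric illustration of the range bias unit conversions (example values only)
c = 299792458.0                # speed of light in m/s
rb_mm = 12.0                   # range bias given in mm
rb_ms = 0.001                  # range bias given in ms

rb_from_mm = rb_mm * 1e-3      # 0.012 m
rb_from_ms = rb_ms * 1e-3 * c  # 0.001 ms = 1e-6 s -> about 299.8 m
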
Example #11
File: __init__.py Project: mfkiwl/where
def run(rundate, pipeline, *args, **kwargs):
    """Run a Where pipeline for a given date and session

    Args:
        rundate:   Rundate of analysis.
        pipeline:  Pipeline used for analysis.
        session:   Session in analysis.
    """

    if not setup.has_config(rundate, pipeline, *args, **kwargs):
        log.fatal(
            f"No configuration found for {pipeline.upper()} {rundate.strftime(config.FMT_date)}"
        )

    # Set up config
    config.init(rundate, pipeline, **kwargs)

    # Register filekey suffix
    filekey_suffix = config.tech.filekey_suffix.list
    if filekey_suffix:
        config.files.profiles = filekey_suffix

    # Validate input arguments
    try:
        prefix = plugins.call(package_name=__name__,
                              plugin_name=pipeline,
                              part="validate_args",
                              rundate=rundate,
                              **kwargs)
    except mg_exceptions.UnknownPluginError:
        log.warn(
            f"Pipeline {pipeline} has not defined function 'validate_args'")
    except exceptions.InvalidArgsError as err:

        from where.tools import delete

        # Clean up {placeholder} directories created by config
        delete.delete_analysis(rundate, pipeline, **kwargs)
        log.fatal(err)

    # Set up console logger and start file logger
    try:
        prefix = plugins.call(package_name=__name__,
                              plugin_name=pipeline,
                              part="log_prefix",
                              rundate=rundate,
                              **kwargs)
    except mg_exceptions.UnknownPluginError:
        log.warn(f"Pipeline {pipeline} has not defined function 'log_prefix'")
        prefix = ""

    log_cfg = config.where.log
    log.init(log_level=log_cfg.default_level.str, prefix=prefix)
    if log_cfg.log_to_file.bool:
        log.file_init(
            file_path=config.files.path("log"),
            log_level=log_cfg.default_level.str,
            prefix=prefix,
            rotation=log_cfg.number_of_log_backups.int,
        )

    # Update analysis config and file variables
    config.set_analysis(rundate, pipeline=pipeline, **kwargs)
    config.set_file_vars(file_vars())

    log.blank()  # Empty line for visual clarity

    # Read which stages should be executed once for each iterable
    skip_stages = config.tech.skip_stages.list
    stage_iterate = config.tech.stage_iterate.list
    dset_list = []
    dset = None

    if stage_iterate:
        # Read which list should be iterated over and the placeholder name of each entry
        iterate_over, _, var_name = config.tech.stage_iterate_over.str.partition(
            ":")
        var_name = var_name.strip()

        # Iterate
        for item in config.tech[iterate_over].list:
            kwargs[var_name] = item
            log.blank()
            log.info(f"***** Running {item} *****")

            for prev_stage, stage in zip([None] + stage_iterate,
                                         stage_iterate):
                if stage not in skip_stages:
                    dset = run_stage(rundate, pipeline, dset, stage,
                                     prev_stage, **kwargs)

            if dset is not None:
                dset_list.append(dset)
                dset = None
        kwargs[var_name] = "combined"

    if dset_list:
        dset_list[0].merge_with(*dset_list[1:], sort_by="time")
        dset = dset_list[0]
        if len(dset_list) > 1:
            log.info(f"Combining dataset for {len(dset_list)} {iterate_over}")
            dset.write_as(stage=stage_iterate[-1], label=2, **kwargs)

    # Read which stages should be executed once
    stage_once = config.tech.stage_once.list
    # Find which stages we will run analysis for
    if not stage_once and not stage_iterate:
        stage_list = [s for s in stages(pipeline)]
        prev_stage_start = None
    else:
        stage_list = [s for s in stage_once]
        prev_stage_start = stage_iterate[-1] if stage_iterate else None

    for prev_stage, stage in zip([prev_stage_start] + stage_list, stage_list):
        if stage not in skip_stages:
            dset = run_stage(rundate, pipeline, dset, stage, prev_stage,
                             **kwargs)
            log.blank()

        if dset is not None and dset.num_obs == 0:
            log.warn(f"No observations in dataset after {stage} stage.")
            break

    # Store configuration to library
    setup.store_config_to_library(rundate, pipeline, **kwargs)

    # Write requirements to file for reproducibility
    util.write_requirements()
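The stage loops above pair each stage with the stage that ran before it by zipping the stage list against a shifted copy. A tiny standalone illustration of that pairing (the stage names are hypothetical, not Where's actual stage list):

# Sketch of the prev_stage/stage pairing used in the stage loops above.
# The stage names are hypothetical examples only.
stage_list = ["read", "edit", "calculate", "estimate", "write"]

for prev_stage, stage in zip([None] + stage_list, stage_list):
    # First iteration: prev_stage is None; afterwards it is the stage that just finished
    print(f"{prev_stage!r:>12} -> {stage!r}")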
Example #12
    def satellite_clock_correction(self):
        """Determine satellite clock correction based on precise satellite clock product

        The GNSS satellite clock bias is read from RINEX clock files. Afterwards, the satellite clock bias at the
        observation time is determined via cubic interpolation.

        TODO:
            * Beware of the extrapolation (bounds_error=False in interpolate).
            * Check for satellite clock interpolation in:
              "Hesselbarth, A.: Statische und kinematische GNSS-Auswertung mittels PPP, 2011"

        Returns:
            numpy.ndarray:    GNSS satellite clock corrections for each observation
        """
        correction = np.zeros(self.dset.num_obs)
        sat_transmission_time = self.dset.time.gps.gpssec

        # Get precise GNSS satellite clock values
        clock_product = config.tech.get("clock_product", default="clk").str
        if clock_product == "sp3":
            all_sat_clk = self.dset_edit
        elif clock_product == "clk":

            # TODO: File path information has to be improved, because 3 consecutive days are read.
            log.info(
                "Calculating satellite clock correction (precise) based on RINEX clock file {}.",
                files.path(file_key="gnss_rinex_clk"),
            )

            all_sat_clk = data.Dataset(rundate=self.dset.rundate,
                                       tech=None,
                                       stage=None,
                                       dataset_name="gnss_sat_clk",
                                       dataset_id=0,
                                       empty=True)
            parser = parsers.parse("rinex_clk", rundate=self.dset.rundate)
            parser.write_to_dataset(
                all_sat_clk
            )  # TODO Read RINEX clock file, from day before and day after.
            #     Needed for interpolation. Add handling if these clk-files
            #     are not available. Remove first and last observations?
            #     If precise clock products are not available broadcast
            #     ephemeris should be used.
        else:
            log.fatal(
                "Unknown clock product '{}'. Configuration option 'clock_product' can only be 'sp3' or 'clk'.",
                clock_product,
            )

        # Loop over all satellites given in configuration file
        for sat in self.dset.unique("satellite"):

            # Skip satellites, which are not given in RINEX clock file
            if sat not in all_sat_clk.unique("satellite"):
                # TODO: Maybe satellite is available in SP3 file, which includes also
                #      satellite clock bias, but only for every 15 min instead of
                #      every 5 min (or 30 s by use of igs<wwwwd>.clk_30s).
                continue

            idx = self.dset.filter(satellite=sat)
            clk_idx = all_sat_clk.filter(satellite=sat)

            # Interpolation of GNSS precise satellite clock values
            # TODO: Check if interpolation method is ok.
            sat_clock_bias_ip = interpolate.interp1d(
                all_sat_clk.time.gps.gpssec[clk_idx],
                all_sat_clk.sat_clock_bias[clk_idx],
                axis=0,
                kind="cubic",
                bounds_error=False,
                fill_value=all_sat_clk.sat_clock_bias[clk_idx][-1],
            )
            correction[idx] = sat_clock_bias_ip(sat_transmission_time[idx])

        return correction
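A minimal standalone sketch of the interpolation step above, assuming synthetic clock biases instead of values read from a RINEX clock file; it only illustrates how scipy.interpolate.interp1d is used with cubic interpolation and a constant fill value outside the sample range:

import numpy as np
from scipy import interpolate

# Synthetic satellite clock biases sampled every 300 s (5 min), in seconds
clk_epochs = np.arange(0.0, 3600.0, 300.0)   # GPS seconds of day
clk_bias = 1e-4 + 1e-9 * clk_epochs          # slowly drifting clock

# Cubic interpolator; epochs outside the sample range get the last bias value,
# mirroring bounds_error=False and the fill_value used above
clk_ip = interpolate.interp1d(
    clk_epochs, clk_bias, kind="cubic", bounds_error=False, fill_value=clk_bias[-1]
)

obs_epochs = np.array([10.0, 450.0, 1234.5, 5000.0])   # last epoch lies outside the samples
print(clk_ip(obs_epochs))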
Example #13
    def _calculate(self, dset):
        """Calculate precise orbits and satellite clock correction for given observation epochs

        The satellite position is determined for each observation epoch by interpolating within the given SP3 orbit time
        entries. The satellite velocities are calculated based on satellite positions 0.5 second before and after the
        observation epoch.

        Args:
            dset (Dataset): Dataset representing calculated precise orbits with the following fields:

        ========================  ===============  =======  ========================================================
         Field                     Type             Unit     Description
        ========================  ===============  =======  ========================================================
         gnss_satellite_clock     numpy.ndarray     m       Satellite clock correction
         gnss_relativistic_clock  numpy.ndarray     m       Relativistic clock correction due to orbit eccentricity
         sat_posvel               PosVelTable       m       Satellite position and velocity
         satellite                numpy.ndarray             Satellite numbers
         system                   numpy.ndarray             GNSS identifiers
         time                     TimeTable                 Observation epochs
        ========================  ===============  =======  ========================================================
        """
        # Check if satellites are given in SP3 file
        # TODO: Another solution has to be found for satellites not given in SP3 file, e.g. use of broadcast
        #       ephemeris.
        not_available_sat = set(self.satellite) - set(self.dset_edit.satellite)
        if not_available_sat:
            log.fatal(
                "Satellites {} are not given in precise orbit file {}.",
                ", ".join(sorted(not_available_sat)),
                self.dset_edit.meta["parser"]["file_path"],
            )

        log.info(
            "Calculating satellite position/velocity (precise) based on SP3 precise orbit file {}.",
            ", ".join(self.dset_edit.meta["parser"]["file_path"]),
        )

        dset.vars["orbit"] = self.name
        dset.num_obs = len(self.time)
        dset.add_time("time", val=self.time, scale=self.time.scale)
        dset.add_text("satellite", val=self.satellite)
        dset.add_text("system", val=self.system)

        sat_pos = np.zeros((dset.num_obs, 3))
        sat_vel = np.zeros((dset.num_obs, 3))
        ref_time = dset.time[0]  # Reference epoch used for interpolation

        # Loop over all given satellites
        for sat in set(self.satellite):

            log.debug("Interpolation for satellite: {}", sat)

            # Get boolean array indicating for which observations the given satellite is available (indicated by
            # True)
            idx = dset.filter(satellite=sat)
            orb_idx = self.dset_edit.filter(satellite=sat)

            if np.min(dset.time[idx].gps.mjd) < np.min(
                    self.dset_edit.time[orb_idx].mjd):
                log.fatal(
                    "Interpolation range is exceeded by satellite {} ( {} [epoch] < {} [precise orbit])."
                    "".format(
                        sat, np.max(dset.time[idx].gps.datetime),
                        np.max(self.dset_edit.time[orb_idx].gps.datetime)))

            if np.max(dset.time[idx].gps.mjd) > np.max(
                    self.dset_edit.time[orb_idx].mjd):
                log.fatal(
                    "Interpolation range is exceeded by satellite {} ({} [epoch] > {} [precise orbit])."
                    "".format(
                        sat, np.max(dset.time[idx].gps.datetime),
                        np.max(self.dset_edit.time[orb_idx].gps.datetime)))

            # Interpolation for given observation epochs (transmission time)
            sat_pos[idx], sat_vel[
                idx] = interpolation.interpolate_with_derivative(
                    self.dset_edit.time[orb_idx].gps.sec_to_reference(
                        ref_time),
                    self.dset_edit.sat_pos.itrs[orb_idx],
                    dset.time[idx].gps.sec_to_reference(ref_time),
                    kind="lagrange",
                    window=10,
                    dx=0.5,
                )

            if np.isnan(np.sum(sat_pos[idx])) or np.isnan(np.sum(
                    sat_vel[idx])):
                log.fatal(
                    "NaN occurred by determination of precise satellite position and velocity for satellite {}.",
                    sat)

        # Add satellite clock correction to Dataset
        dset.add_float("gnss_satellite_clock",
                       val=self.satellite_clock_correction(),
                       unit="meter")

        # Add satellite position and velocity to Dataset
        dset.add_posvel("sat_posvel",
                        time="time",
                        itrs=np.hstack((sat_pos, sat_vel)))

        # Add relativistic clock correction to Dataset
        dset.add_float("gnss_relativistic_clock",
                       val=self.relativistic_clock_correction(),
                       unit="meter")

        # +DEBUG
        # for num_obs  in range(0, dset.num_obs):
        #    print('DEBUG: ', dset.satellite[num_obs],
        #                     dset.time.gps.datetime[num_obs],
        #                     dset.time.gps.mjd_frac[num_obs]*24*3600,
        #                     ' '.join([str(v) for v in dset.sat_posvel.itrs_pos[num_obs]]),
        #                     dset.gnss_satellite_clock[num_obs],
        #                     dset.gnss_relativistic_clock[num_obs])
        # -DEBUG

        return dset
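A standalone sketch of the velocity determination described in the docstring: the orbit is interpolated and the velocity is taken as a central difference of positions 0.5 seconds before and after the epoch. It uses a cubic spline on synthetic one-dimensional data instead of Where's Lagrange interpolator:

import numpy as np
from scipy.interpolate import CubicSpline

# Synthetic one-dimensional "orbit": position sampled every 300 s
t_orb = np.arange(0.0, 3600.0, 300.0)                     # seconds since reference epoch
pos_orb = 26_560e3 * np.sin(2 * np.pi * t_orb / 43200.0)  # toy coordinate in meters

spline = CubicSpline(t_orb, pos_orb)

t_obs = np.array([100.0, 1250.0, 2222.2])   # observation epochs
dx = 0.5                                    # seconds, as in the interpolation call above

pos = spline(t_obs)
vel = (spline(t_obs + dx) - spline(t_obs - dx)) / (2 * dx)   # central difference [m/s]
print(pos, vel)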
Example #14
    def parse_scale_factor(self, line, _):
        """Parse entries of RINEX header `SYS / SCALE FACTOR` to instance variable `meta`.
        """
        log.fatal(
            "Reading and applying of RINEX header entry 'SYS / SCALE FACTOR' is not implemented"
        )
Example #15
def sisre_comparison_report(dset):
    """Compare SISRE datasets

    The first step is to generate a merged dataframe based on given datasets by selecting certain fields. In the
    following an example is shown:

                           time.gps satellite system        E1    E1/E5b    E1/E5a
        0       2019-01-01 00:00:00       E01      E  0.173793  0.123220  0.171849
        1       2019-01-01 00:00:00       E02      E  0.048395  0.127028  0.108108
        2       2019-01-01 00:00:00       E03      E  0.089328  0.121884  0.079576
        3       2019-01-01 00:00:00       E04      E  0.110866  0.088446  0.092292
        4       2019-01-01 00:00:00       E05      E  0.348935  0.305333  0.258733
            
    Args:
        dset (dict):       Dictionary with SISRE solution names as keys and the corresponding Datasets as values.
    """
    dsets = dset
    df_merged = pd.DataFrame()

    for name, dset in dsets.items():

        if dset.num_obs == 0:
            log.warn(f"Dataset '{name}' is empty.")
            continue

        signal_type = _get_signal_type(dset.meta)
        df = dset.as_dataframe(
            fields=["satellite", "system", "sisre",
                    "time.gps"])  # , index="time.gps")
        df = df.rename(columns={"sisre": signal_type})

        if df_merged.empty:
            df_merged = df
            continue
        df_merged = df_merged.merge(df,
                                    on=["satellite", "system", "time.gps"],
                                    how="outer")

    if df_merged.empty:
        log.fatal(f"All given datasets are empty [{', '.join(dsets.keys())}].")

    with config.files.open(file_key="output_sisre_comparison_report",
                           file_vars=dsets[next(iter(dsets))].vars,
                           create_dirs=True,
                           mode="wt") as fid:
        _header(fid)
        fid.write("#Comparison of SISE analyses\n")
        fid.write(
            "In the following SISE analyses results are compared for:\n\n")
        fid.write("* Monthly 95th percentile SISE for satellites\n")
        fid.write(
            "* Monthly 95th percentile and RMS SISE for signal combinations (users)\n"
        )
        fid.write("\\newpage\n")

        # Generate figure directory to save figures generated for SISRE report
        figure_dir = config.files.path("output_sisre_comparison_report_figure",
                                       file_vars=dset.vars)
        figure_dir.mkdir(parents=True, exist_ok=True)

        fid.write(f"\n\n##Monthly 95th percentile SISE for satellites\n")
        # Produce plot with the same yrange as for the _plot_bar_sisre_signal_combination_percentile threshold plot
        _plot_bar_sisre_satellite_percentile(df_merged,
                                             fid,
                                             figure_dir,
                                             threshold=False,
                                             write_table=True,
                                             yrange=[0, 2])
        _plot_bar_sisre_satellite_percentile(df_merged,
                                             fid,
                                             figure_dir,
                                             threshold=True,
                                             write_table=False)

        fid.write(
            f"\n\n##Monthly 95th percentile and RMS SISE for signal combinations (users)\n"
        )
        _plot_bar_sisre_signal_combination_percentile(df_merged,
                                                      fid,
                                                      figure_dir,
                                                      threshold=False,
                                                      write_table=True)
        _plot_bar_sisre_signal_combination_percentile(df_merged,
                                                      fid,
                                                      figure_dir,
                                                      threshold=True,
                                                      write_table=False)
        _plot_bar_sisre_signal_combination_rms(df_merged,
                                               fid,
                                               figure_dir,
                                               write_table=True)

    # Generate PDF from Markdown file
    _markdown_to_pdf(dset)
Example #16
File: gnss.py  Project: yxw027/where
def linear_combination(type_: str, dset: "Dataset") -> Tuple[np.ndarray, np.ndarray]:
    """Calculate linear combination of observations for given linear combination type

    Args:
        dset:    Dataset
        type_:   Type of linear combination: 'ionosphere-free'

    Returns:
        Tuple  with following `numpy.ndarray` arrays

    ===================  ============================================================================================
     Elements             Description
    ===================  ============================================================================================
     code_obs_combined    Array with combined code observations in [m].
     phase_obs_combined   Array with combined carrier phase observations in [m].
    ===================  ============================================================================================
    """
    code_obs_combined = np.zeros(dset.num_obs)
    phase_obs_combined = np.zeros(dset.num_obs)

    for sys in dset.unique("system"):
        idx = dset.filter(system=sys)

        # Get pseudorange and carrier phase observations for the 1st and 2nd frequency
        #
        # NOTE: The GNSS observation types defined in the meta variable 'obstypes' have a defined order, which is
        #       determined by the given observation types for each GNSS and the priority list.
        #
        observation_code = config.tech.gnss_select_obs.obs_code.str
        if observation_code == "code":
            C1 = dset.meta["obstypes"][sys][
                0]  # Pseudorange observation for 1st frequency
            C2 = dset.meta["obstypes"][sys][
                1]  # Pseudorange observation for 2nd frequency
        elif observation_code == "phase":
            L1 = dset.meta["obstypes"][sys][
                0]  # Carrier phase observation for 1st frequency
            L2 = dset.meta["obstypes"][sys][
                1]  # Carrier phase observation for 2nd frequency
        elif observation_code == "code:phase":
            C1 = dset.meta["obstypes"][sys][
                0]  # Pseudorange observation for 1st frequency
            L1 = dset.meta["obstypes"][sys][
                1]  # Carrier phase observation for 1st frequency
            C2 = dset.meta["obstypes"][sys][
                2]  # Pseudorange observation for 2nd frequency
            L2 = dset.meta["obstypes"][sys][
                3]  # Carrier phase observation for 2nd frequency
        else:
            log.fatal(
                f"Linear combination determination is not defined for observation code {observation_code}."
            )

        if type_ == "ionosphere-free":
            if "code" in observation_code:
                f1 = getattr(enums, "gnss_freq_" +
                             sys)["f" + C1[1]]  # Frequency of 1st band
                f2 = getattr(enums, "gnss_freq_" +
                             sys)["f" + C2[1]]  # Frequency of 2nd band
                code_obs_combined[idx] = ionosphere_free_linear_combination(
                    dset[C1][idx], dset[C2][idx], f1, f2)
            elif "phase" in observation_code:
                f1 = getattr(enums, "gnss_freq_" +
                             sys)["f" + L1[1]]  # Frequency of 1st band
                f2 = getattr(enums, "gnss_freq_" +
                             sys)["f" + L2[1]]  # Frequency of 2nd band
                phase_obs_combined[idx] = ionosphere_free_linear_combination(
                    dset[L1][idx], dset[L2][idx], f1, f2)
        else:
            log.fatal(f"Linear combination type '{type_}' is not defined.")

    return code_obs_combined, phase_obs_combined
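The helper ionosphere_free_linear_combination is not shown in this example. A minimal sketch of the standard dual-frequency ionosphere-free combination it presumably implements (the function name and the sample values here are illustrative only):

import numpy as np

def ionosphere_free_sketch(obs1: np.ndarray, obs2: np.ndarray, f1: float, f2: float) -> np.ndarray:
    """Standard ionosphere-free combination of two observations given in meters:

        OBS_IF = (f1**2 * OBS_1 - f2**2 * OBS_2) / (f1**2 - f2**2)

    The first-order ionospheric delay scales with 1/f**2 and therefore cancels in this
    combination, at the cost of amplified observation noise.
    """
    return (f1**2 * obs1 - f2**2 * obs2) / (f1**2 - f2**2)

# Example with the GPS L1/L2 frequencies [Hz] and synthetic pseudoranges [m]
f1, f2 = 1575.42e6, 1227.60e6
p1 = np.array([22_000_000.0, 23_500_000.0])
p2 = p1 + 5.0   # pretend 5 m of additional ionospheric delay on the second frequency
print(ionosphere_free_sketch(p1, p2, f1, f2))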
Example #17
def estimate_solution(dset: "Dataset") -> None:
    """Write estimate solution results

    Args:
        dset:  A dataset containing the data.
    """
    file_path = config.files.path("output_estimate_solution",
                                  file_vars={
                                      **dset.vars,
                                      **dset.analysis
                                  })

    # Add date field to dataset
    if "date" not in dset.fields:
        dset.add_text(
            "date",
            val=[d.strftime("%Y/%m/%d %H:%M:%S") for d in dset.time.datetime],
            write_level="detail")

    # Add states to WriterField depending on used pipeline
    fields_def = list(FIELDS)

    if dset.vars["pipeline"] == "gnss":

        fields_def.append(
            WriterField(
                "param_name",
                "param_name",
                (),
                object,
                "%-20s",
                20,
                "PARAM_NAME",
                "",
                f"Parameter name: \n"
                f"""
{'': >38}gnss_rcv_clock   - GNSS receiver clock
{'': >38}gnss_site_pos-x  - X-coordinate of site position
{'': >38}gnss_site_pos-y  - Y-coordinate of site position
{'': >38}gnss_site_pos-z  - Z-coordinate of site position
""",
            ))

    elif dset.vars["pipeline"] == "gnss_vel":

        fields_def.append(
            WriterField(
                "param_name",
                "param_name",
                (),
                object,
                "%-20s",
                20,
                "PARAM_NAME",
                "",
                f"Parameter name: \n"
                f"""
{'': >38}gnss_rcv_clock_drift   - GNSS receiver clock drift
{'': >38}gnss_site_vel-x        - X-coordinate of site velocity
{'': >38}gnss_site_vel-y        - Y-coordinate of site velocity
{'': >38}gnss_site_vel-z        - Z-coordinate of site velocity
""",
            ))

    else:
        log.fatal(
            "Estimate solution writer is implemented only for 'gnss' and 'gnss_vel' pipeline."
        )

    # Epochwise estimation or over whole time period
    if config.tech.estimate_epochwise.bool:

        output_array = np.array([])
        for epoch in sorted(set(dset.time.gps.mjd)):
            idx = dset.time.gps.mjd == epoch

            # Append current epoch solution to final output solution for each estimated parameter
            epoch_array = _get_epoch(dset, idx, fields_def)
            output_array = np.concatenate(
                (output_array,
                 epoch_array), axis=0) if output_array.size else epoch_array
    else:
        # Get solution for first observation
        idx = np.squeeze(np.array(np.nonzero(dset.time.gps.mjd)) ==
                         0)  # first observation -> TODO: Better solution?
        output_array = _get_epoch(dset, idx, fields_def)

    # Write to disk
    header = get_header(
        fields_def,
        pgm_version=f"where {where.__version__}",
        run_by=util.get_user_info()["inst_abbreviation"]
        if "inst_abbreviation" in util.get_user_info() else "",
        summary="Estimate solutions results",
    )
    np.savetxt(
        file_path,
        output_array,
        fmt=tuple(f.format for f in fields_def),
        header=header,
        delimiter="",
        encoding="utf8",
    )
Example #18
def eccentricity_vector_station(ecc, dset):
    """Calculate the eccentricity vector for a station.

    Corrections are returned in meters in the Geocentric
    Celestial Reference System for each observation.

    Args:
        ecc:         Dictionary with eccentricity vector information for each station key
        dset:        A Dataset containing model data

    Returns:
        Numpy array: GCRS corrections in meters.
    """
    if position.is_position(dset.site_pos):
        ecc_vector = position.PositionDelta(np.zeros((dset.num_obs, 3)),
                                            system="enu",
                                            ref_pos=dset.site_pos,
                                            time=dset.time)
    elif position.is_posvel(dset.site_pos):
        ecc_vector = position.PosVelDelta(np.zeros((dset.num_obs, 6)),
                                          system="enu",
                                          ref_pos=dset.site_pos,
                                          time=dset.time)
    else:
        log.fatal(
            f"dset.site_pos{dset.default_field_suffix} is not a PositionArray or PosVelArray."
        )

    fieldnames = config.tech.eccentricity.identifier.list
    fielddata = [dset[field] for field in fieldnames]
    if len(fieldnames) > 1:
        keys = set(tuple(zip(*fielddata)))
    else:
        keys = fielddata[0].tolist()

    for key in keys:
        if len(fieldnames) > 1:
            filters = dict(zip(fieldnames, key))
        else:
            filters = dict(zip(fieldnames, [key]))

        if key not in ecc:
            ecc_vector[dset.filter(**filters), 0:3] = np.zeros(3)
            if key in _WARNED_MISSING:
                continue
            log.warn(
                f"Missing eccentricity data for {key}. Vector set to zero.")
            _WARNED_MISSING.add(key)
            continue

        if ecc[key]["coord_type"] == "ENU":
            ecc_vector[dset.filter(**filters), 0:3] = ecc[key]["vector"]

    ecc_vector = ecc_vector.trs
    for key in keys:
        if len(fieldnames) > 1:
            filters = dict(zip(fieldnames, key))
        else:
            filters = dict(zip(fieldnames, [key]))

        if key not in ecc:
            ecc_vector[dset.filter(**filters), 0:3] = np.zeros(3)
            continue

        if ecc[key]["coord_type"] == "XYZ":
            ecc_vector[dset.filter(**filters), 0:3] = ecc[key]["vector"]

    return ecc_vector.gcrs
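The PositionDelta/PosVelDelta objects take care of transforming the local east/north/up eccentricity vector to the terrestrial and celestial frames. As a minimal standalone sketch (not Where's position classes), the ENU-to-ECEF rotation for a single eccentricity vector looks like this:

import numpy as np

def enu_to_ecef(denu: np.ndarray, lat: float, lon: float) -> np.ndarray:
    """Rotate a local east/north/up offset to an Earth-fixed (TRS) offset.

    lat and lon are the geodetic latitude and longitude of the reference position in radians.
    """
    sin_lat, cos_lat = np.sin(lat), np.cos(lat)
    sin_lon, cos_lon = np.sin(lon), np.cos(lon)
    rotation = np.array([
        [-sin_lon, -sin_lat * cos_lon, cos_lat * cos_lon],
        [ cos_lon, -sin_lat * sin_lon, cos_lat * sin_lon],
        [     0.0,            cos_lat,           sin_lat],
    ])  # columns are the east, north and up unit vectors expressed in ECEF
    return rotation @ denu

# Hypothetical eccentricity of 1 mm east and 2 cm up at 60 deg north, 10 deg east
print(enu_to_ecef(np.array([0.001, 0.0, 0.02]), np.radians(60.0), np.radians(10.0)))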
Example #19
def meteorological_data(dset):
    """Determine meteorological data (atmospheric pressure, ...) based on configuration file definition.

    Args:
        dset (Dataset): Model data.

    Returns:
        tuple of Numpy Arrays: Includes the following elements, each with entries for each observation

    ============  ===========  =====================================================================================
     Element       Unit         Description
    ============  ===========  =====================================================================================
     pressure      hPa          Atmospheric pressure value
     temperature   Celsius      Temperature values. Marked with 'None', if unknown.
     e             hPa          Water vapor pressure. Marked with 'None', if unknown.
     tm            K            Mean temperature of the water vapor. Marked with 'None', if unknown.
     lambd                      Water vapor decrease factor for each observation. Marked with 'None', if unknown.
    ============  ===========  =====================================================================================
    """
    pressure = None
    temperature = None
    e = None
    tm = None
    lambd = None

    model = config.tech.get("meteorological_data", section=MODEL,
                            default="").str
    mapping_function = config.tech[MODEL].mapping_function.str

    # Use default meteorological models, if no model is defined in configuration file
    if not model:
        try:
            model = MAPPING_METEO_RELATION[mapping_function]
        except KeyError:
            log.fatal(
                "Unknown mapping function '{}'. Available mapping functions are {}",
                mapping_function,
                ", ".join(MAPPING_FUNCTIONS),
            )

    log.debug("Meteorological data model: {}", model)

    if model == "vmf1_gridded":
        pressure = vmf1_gridded_pressure(dset)

    elif model == "gpt":
        pressure, temperature, _ = gpt(dset)

    elif model == "gpt2":
        pressure, temperature, _, e, _, _, _ = gpt2(dset)

    elif model == "gpt2w":
        pressure, temperature, _, tm, e, _, _, lambd, _ = gpt2w(dset)

    elif model == "site_pressure":
        pressure = site_pressure(dset)

    else:
        log.fatal(
            "Unknown meteorological data model {}. Available models are {}",
            model, ", ".join(METEOROLOGICAL_MODELS))

    return pressure, temperature, e, tm, lambd
Example #20
def clock_correction(dset):
    """Estimate clock polynomial
    """
    # Take previous clock corrections into account
    try:
        output = dset.vlbi_clock
    except AttributeError:
        output = np.zeros(dset.num_obs)

    # Read order of clock polynomial from config file
    terms = 1 + config.tech.get("order_of_polynomial", section=MODEL, default=2).int

    # Read clock breaks from session config, only split on commas (and commas followed by whitespace)
    clock_breaks = config.tech.get("clock_breaks", section=MODEL, default="").as_list(split_re=", *")
    stations, time_intervals = parse_clock_breaks(dset, clock_breaks)

    # Read reference clock from edit file and store in dataset
    ref_clock_str = config.tech.get("reference_clock", section=MODEL, default="").str
    ref_clock = parse_reference_clock(stations, ref_clock_str)
    dset.meta["ref_clock"] = ref_clock

    # Remove reference clock from list of clocks to be estimated
    idx = stations.index(ref_clock)
    del stations[idx]
    del time_intervals[idx]

    # Number of clock polynomial coefficients
    num_coefficients = len(stations) * terms
    param_names = [
        sta + " clk_a" + str(t) + " " + time_intervals[i][0].utc.iso + " " + time_intervals[i][1].utc.iso
        for i, sta in enumerate(stations)
        for t in range(terms)
    ]
    dset.meta["num_clock_coeff"] = num_coefficients

    # Set up matrices for estimation
    A = np.zeros((dset.num_obs, num_coefficients, 1))

    # Time coefficients, used when setting up A
    t = dset.time.utc.mjd - dset.time.utc[0].mjd
    poly = np.array([t ** n for n in range(terms)]).T

    # Set up the A matrix with time coefficients
    for idx, (station, (t_start, t_end)) in enumerate(zip(stations, time_intervals)):
        filter_time = np.logical_and(t_start.utc.mjd <= dset.time.utc.mjd, dset.time.utc.mjd < t_end.utc.mjd)
        filter_1 = np.logical_and(dset.filter(station_1=station), filter_time)
        A[filter_1, idx * terms : (idx + 1) * terms, 0] = poly[filter_1]
        filter_2 = np.logical_and(dset.filter(station_2=station), filter_time)
        A[filter_2, idx * terms : (idx + 1) * terms, 0] = -poly[filter_2]

    # Calculate normal matrix N and the moment vector U
    U = np.sum(A @ dset.residual[:, None, None], axis=0)
    N = np.sum(A @ A.transpose(0, 2, 1), axis=0)

    # Invert the normal matrix to find corrections, only the non-zero part of the matrix is inverted
    idx = np.logical_not(U == 0)[:, 0]
    X = np.zeros((num_coefficients, 1))
    det = np.linalg.det(N[idx, :][:, idx])
    threshold = 1e-12
    if np.abs(det) < threshold:
        # TODO: what is a good threshold value?
        rank = np.linalg.matrix_rank(N[idx, :][:, idx])
        log.warn(f"Determinant of normal matrix in clock correction is close to zero ({det})")
        log.info(f"Normal matrix shape = {N.shape}, normal matrix rank = {rank}")
        _, R = np.linalg.qr(N[idx, :][:, idx])
        for i, row in enumerate(R):
            if np.max(np.abs(row)) < threshold * 10 ** 3:
                log.error(f"{param_names[i]} linearly dependent (max_row = {np.max(np.abs(row))})")
    try:
        X[idx] = np.linalg.inv(N[idx, :][:, idx]) @ U[idx]
    except np.linalg.LinAlgError:
        log.fatal(f"Singular matrix in {MODEL}")

    # Calculate final corrections
    output += (A.transpose(0, 2, 1) @ X)[:, 0, 0]
    return output
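The estimation above accumulates the normal equations observation by observation: N as the sum of the per-observation outer products and U as the sum of the per-observation design vectors scaled by the residual, followed by X = N⁻¹ U. A small self-contained sketch of that pattern on a synthetic quadratic clock drift:

import numpy as np

# Synthetic residuals: quadratic clock drift plus noise, observed at 100 epochs
rng = np.random.default_rng(0)
t = np.linspace(0.0, 1.0, 100)
residual = 3.0 + 2.0 * t - 5.0 * t**2 + rng.normal(scale=0.01, size=t.size)

terms = 3   # constant, rate and quadratic term (order_of_polynomial = 2)
A = np.stack([t**n for n in range(terms)], axis=1)[:, :, None]   # shape (num_obs, terms, 1)

# Normal matrix N and moment vector U, accumulated observation by observation
U = np.sum(A @ residual[:, None, None], axis=0)    # shape (terms, 1)
N = np.sum(A @ A.transpose(0, 2, 1), axis=0)       # shape (terms, terms)

X = np.linalg.inv(N) @ U
print(X.ravel())   # approximately [3, 2, -5]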
Example #21
File: vlbi.py  Project: uasau/where
def file_vars():
    """File variables that will be available during the running of this technique

    In addition, date and analysis variables are available.

    Returns:
        Dict:  File variables special for this technique.
    """
    _file_vars = dict()

    # Add obs_version for ngs
    if config.tech.get("obs_format").str == "ngs":
        versions = config.files.glob_variable("vlbi_obs_ngs", "obs_version",
                                              r"\d{3}")
        if versions:
            _file_vars["obs_version"] = max(versions)
        elif config.where.files.download_missing.bool:
            # Look online for a candidate
            log.info(
                "No NGS observation file found on disk: Looking for one online."
            )
            obs_versions = [f"{v:03d}" for v in reversed(range(4, 10))]
            for obs_version in obs_versions:
                url = config.files.url("vlbi_obs_ngs",
                                       file_vars=dict(obs_version=obs_version),
                                       is_zipped=True,
                                       use_aliases=False)
                log.info(f"Looking for {url} ...")
                if url.exists():
                    _file_vars["obs_version"] = obs_version
                    break

        if not _file_vars:
            log.fatal("No NGS observation file found")

    # Add obs_version for vgosdb
    if config.tech.get("obs_format").str == "vgosdb":
        versions = config.files.glob_variable("vlbi_obs_vgosdb", "obs_version",
                                              r"\d{3}")
        if versions:
            _file_vars["obs_version"] = max(versions)
        agencies = config.files.glob_variable("vlbi_obs_vgosdb",
                                              "agency",
                                              r"[\w]+",
                                              file_vars=_file_vars)
        if agencies:
            _file_vars[
                "agency"] = "IVS" if "IVS" in agencies else agencies.pop()
            if len(agencies) > 1:
                log.warn(
                    f"Multiple agencies found ({', '.join(agencies)}) for file key vlbi_obs_vgosdb. Using {_file_vars['agency']}"
                )

        if not "obs_version" in _file_vars and not "acengy" in _file_vars:
            log.fatal(
                f"No VGOSDB wrapper file found ({config.files.path('vlbi_obs_vgosdb')})."
            )

    # Sinex file vars
    if "sinex" in config.tech.section_names:
        _file_vars["solution"] = config.tech.sinex.solution.str
        _file_vars["file_agency"] = config.tech.sinex.file_agency.str.lower()

    return _file_vars
Example #22
def _select_observations(obstypes_all, obstypes):
    """Select observations based on GNSS observation priority list

    NOTE: The order in which the observation types are saved in 'use_obstypes' is relevant for further processing,
          e.g. for selecting the code observation type used to determine the satellite transmission time or for
          generating linear combinations from the observations.

    Args:
        obstypes_all (list):    All observation types defined in RINEX header
        obstypes (dict):        Observation types defined in RINEX header given for each GNSS

    Returns:
        tuple:  with following elements

    =================  ======  ==================================================================================
     Elements           Type    Description
    =================  ======  ==================================================================================
     use_obstypes       dict    Selected observation types for each GNSS related to priority list
     remove_obstypes    set     Set with observation types, which can be removed
    =================  ======  ==================================================================================
    """
    use_obstypes = dict()
    keep_obstypes = list()
    remove_obstypes = set(obstypes_all)
    cfg_freq_type = config.tech.freq_type.str
    cfg_obs_code = config.tech[_SECTION].obs_code.list

    # Convert frequency type to frequency numbers
    try:
        freq_numbers = FREQ_NUMBER_DEF[cfg_freq_type]
    except KeyError:
        log.fatal(
            f"Configuration option 'freq_type = {cfg_freq_type}' is not valid."
        )

    # Loop over GNSSs
    for sys in obstypes:
        use_obstypes.update({sys: list()})

        # Loop over observation code
        # TODO: order obs codes after an ordered pattern, e.g. code, phase, snr and doppler.
        for obs_code in cfg_obs_code:
            if obs_code not in OBS_CODE_DEF:
                log.fatal(
                    f"Configuration option 'obs_code= {obs_code}' is not valid."
                )

            for freq_num in freq_numbers:
                type_ = OBS_CODE_DEF[obs_code] + freq_num
                selected_obstypes = _select_obstype(sys, type_, obstypes[sys])
                if selected_obstypes:
                    use_obstypes[sys].append(selected_obstypes)
                    keep_obstypes.append(selected_obstypes)
                else:
                    log.warn(
                        f"No {obs_code.upper()} observations available for GNSS '{sys}' and frequency "
                        f"'{freq_num}'.")

        log.info(
            f"Selected observation types for GNSS {sys!r}: {', '.join(use_obstypes[sys])}"
        )

    remove_obstypes.difference_update(keep_obstypes)

    return use_obstypes, remove_obstypes
Example #23
def _generate_dataframe(dsets: Dict[str, "Dataset"]) -> Tuple[pd.core.frame.DataFrame]:
    """Generate dataframes based on SISRE datasets

    The dataframe "df" has following columns:

        time_gps:       Time in GPS time scale given as datetime objects
        satellite:      Satellite identifiers
        system:         GNSS identifier
        <solution_1>:   First SISRE solution (e.g. E1)
        <solution_2>:   Second SISRE solution (e.g. E1/E5b)
        <solution_3>:   Third SISRE solution (e.g. E1/E5a)

    Example for "df" dictionary:
     
                           time_gps satellite system        E1    E1/E5b    E1/E5a
        0       2019-01-01 00:00:00       E01      E  0.173793  0.123220  0.171849
        1       2019-01-01 00:00:00       E02      E  0.048395  0.127028  0.108108
        2       2019-01-01 00:00:00       E03      E  0.089328  0.121884  0.079576
        3       2019-01-01 00:00:00       E04      E  0.110866  0.088446  0.092292
        4       2019-01-01 00:00:00       E05      E  0.348935  0.305333  0.258733


    "df_month_perc" is a dataframe with month as indices and SISRE 95% percentile values for each signal combination
     as columns.

    Example for "df_month_perc" dictionary:

                        E1    E1/E5b    E1/E5a
        Jan-2019  0.335688  0.297593  0.326859
        Feb-2019  0.380575  0.330701  0.352535
        Mar-2019  0.353586  0.314817  0.344597

    Example for "df_month_rms" dictionary:
        TODO

    Args:
        dsets: Dictionary with SISRE solution name as keys (e.g. cnes_inav_e1, cnes_inav_e1e5b, cnes_fnav_e1e5a) and
               the belonging Dataset as value

    Returns:
        Tuple with following entries:

        | Element              | Description                                                                          |
        |----------------------|--------------------------------------------------------------------------------------|
        | df                   | Given DAILY SISRE solutions are merged into one dataframe                            |
        | df_month_perc        | Dataframe with MONTHLY samples of 95th percentile SISRE (based on Galileo SDD v1.0   |
        |                      | version)                                                                             |
        | df_month_perc_rms    | Dataframe with MONTHLY samples of 95th percentile SISRE, which are based on epochwise|
        |                      | RMS SISRE solutions (based on Galileo SDD v1.1 version)                              |
        | df_month_rms         | Dataframe with MONTHLY samples of RMS SISRE                                          |

    """
    df = pd.DataFrame()
    signal_types = list()
    df_month_perc_rms = None
    df_month_rms = None

    for name, dset in dsets.items():

        if dset.num_obs == 0:
            log.warn(f"Dataset '{name}' is empty.")
            continue

        signal_type = _get_signal_type(dset.meta)
        signal_types.append(signal_type)
        df_tmp = dset.as_dataframe(fields=["satellite", "system", "sisre", "time.gps"])  # , index="time.gps")
        df_tmp = df_tmp.rename(columns={"sisre": signal_type})

        if df.empty:
            df = df_tmp
            continue
        df = df.merge(df_tmp, on=["satellite", "system", "time_gps"], how="outer")

    if df.empty:
        log.fatal(f"All given datasets are empty [{', '.join(dsets.keys())}].")

    # Generate monthly samples of 95th percentile SISRE (after SDD v1.0 version)
    df_month_perc = df.drop(columns=["satellite", "system"]).set_index("time_gps").resample("M").apply(lambda x: np.nanpercentile(x, q=95))
    df_month_perc.index = df_month_perc.index.strftime("%b-%Y")

    # Generate monthly samples of RMS SISRE
    df_month_rms = df.drop(columns=["satellite", "system"]).set_index("time_gps").resample("M").apply(lambda x: np.sqrt(np.nanmean(np.square(x))))
    df_month_rms.index = df_month_rms.index.strftime("%b-%Y")

    # Generate monthly samples of 95th percentile SISRE based on epochwise SISRE RMS solutions(after SDD v1.1 version)
    #
    # NOTE: Following solutions assumes that SISRE solution in dataframe 'df' is only given for one GNSS
    if len(set(df["system"])) == 1:
        epochs = sorted(set(df["time_gps"]))
        df_tmp = pd.DataFrame(index=epochs, columns=signal_types)

        ## Loop over observation epochs
        for epoch in epochs:
            idx = df["time_gps"] == epoch
            row = dict()

            # Determine RMS for each signal type over all given SISRE satellite solutions in each epoch
            for signal_type in signal_types:
                row[signal_type] = np.sqrt(np.nanmean(np.square(df[signal_type][idx])))
            df_tmp.loc[epoch] = pd.Series(row)

        df_month_perc_rms = df_tmp.resample("M").apply(lambda x: np.nanpercentile(list(x), q=95))
        df_month_perc_rms.index = df_month_perc_rms.index.strftime("%b-%Y")
        df_month_perc_rms = df_month_perc_rms.transpose()
 
    else:
        log.warn(
            f"Determination of 95th percentile SISRE based on epochwise SISRE RMS solutions can only be applied "
            f"for one given GNSS and not for {set(df['system'])} together."
        )

    return df, df_month_perc.transpose(), df_month_perc_rms, df_month_rms.transpose()
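A minimal standalone sketch of the monthly 95th percentile resampling used above, with synthetic SISRE values instead of results from an actual analysis:

import numpy as np
import pandas as pd

# Synthetic SISRE time series for one signal combination: one value per hour over two months
rng = np.random.default_rng(1)
time_gps = pd.date_range("2019-01-01", "2019-02-28 23:00", freq="H")
df = pd.DataFrame({"E1": 0.4 * rng.random(time_gps.size)}, index=time_gps)

# Monthly samples of the 95th percentile, with indices like 'Jan-2019', 'Feb-2019'
df_month_perc = df.resample("M").apply(lambda x: np.nanpercentile(x, q=95))
df_month_perc.index = df_month_perc.index.strftime("%b-%Y")
print(df_month_perc)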
Example #24
def gnss_select_obs(dset: "Dataset") -> np.ndarray:
    """Select GNSS observations used in Where processing

    Args:
        dset (where.data.dataset.Dataset):  A Dataset containing model data.

    Returns:
        Array containing False for observations to throw away
    """
    remove_obstypes = set()
    keep_idx = np.full(dset.num_obs, True, dtype=bool)
    reject_nan_all_sys = None
    obstypes_all = dset.obs.fields

    cfg_obs_code = config.tech[_SECTION].obs_code.list
    cfg_obstypes = config.tech[_SECTION].obs_types.list
    cfg_systems = config.tech.systems.list

    # Remove GNSS, which are not defined in configuration file
    for sys in list(dset.meta["obstypes"]):
        if sys not in cfg_systems:
            del dset.meta["obstypes"][sys]

    for obs, sys in enumerate(dset.system):
        if sys not in cfg_systems:
            keep_idx[obs] = False

    if not np.any(keep_idx):
        log.fatal(
            f"No observations available for selected system(s): {' '.join(cfg_systems)}."
        )

    # Remove observation types, which are not given in configuration file. If no observation types are defined in
    # configuration file keep all observation types.
    if cfg_obstypes:
        for type_ in cfg_obstypes:
            if type_ not in dset.obs.fields:
                log.warn(
                    f"Selected observation type {type_} is not included in GNSS observation data."
                )
        log.debug(
            f"Remove undefined observation types in configuration file: {' '.join(set(obstypes_all) - set(cfg_obstypes))}."
        )
        remove_obstypes = set(obstypes_all) - set(cfg_obstypes)

    # Remove undefined observation codes related to given configuration
    keep_obs_code = list()
    for obs_code in sorted(cfg_obs_code):
        if obs_code not in OBS_CODE_DEF:
            log.fatal(
                f"Observation code '{obs_code}' is not valid in option 'obs_code='."
            )
        keep_obs_code.append(OBS_CODE_DEF[obs_code])

    remove_obs_code = set(OBS_CODE_DEF.values()) - set(keep_obs_code)
    if remove_obs_code:
        log.debug(
            f"Remove undefined observation codes: {' '.join(set(OBS_CODE_DEF.values()) - set(keep_obs_code))}."
        )
        remove_obs_pattern = f"^{'|^'.join(remove_obs_code)}"

        for type_ in obstypes_all:
            search_obj = re.search(remove_obs_pattern, type_)
            if search_obj is not None:
                remove_obstypes.add(search_obj.string)

    # Select observations based on priority list
    #   -> 1st step remove already unused observation types from Dataset to determine the basis for the priority list
    #      selection

    # Note: The order of the selected observations is important for selection of GNSS code observation type to
    #       determine satellite transmission time.
    if remove_obstypes:
        _remove_obstype_from_dset(dset, remove_obstypes)

    selected_obstypes, add_remove_obstypes = _select_observations(
        obstypes_all, dset.meta["obstypes"])

    if add_remove_obstypes:
        remove_obstypes.update(add_remove_obstypes)
        log.debug(
            f"Remove observation types after selection: {' '.join(add_remove_obstypes)}."
        )
        _remove_obstype_from_dset(dset, remove_obstypes)

    dset.meta["obstypes"] = selected_obstypes.copy()

    # Remove NaN values of selected observation types
    if config.tech[_SECTION].remove_nan.bool:

        # Note: An array 'reject_nan_all_sys' is created for all GNSS observation types. This array shows, if some
        #       elements are set to NaN for a GNSS observation type. At the end only NaN observations are removed, if
        #       these observations are NaN for all GNSS observation types (see np.bitwise_and.reduce(reject_nan_all_sys, 1)).
        #       An exception is if only one GNSS is selected, then all NaN values are removed (see
        #       np.bitwise_or.reduce(reject_nan_all_sys, 1)).
        for sys in dset.meta["obstypes"]:

            # Loop over selected observation types
            for type_ in dset.meta["obstypes"][sys]:

                reject_nan = np.full(dset.num_obs, False,
                                     dtype=bool)  # Initialize reject_nan
                reject_nan[keep_idx] = np.isnan(
                    dset.obs[type_][keep_idx])  # Determine NaN values

                if reject_nan_all_sys is None:
                    reject_nan_all_sys = reject_nan
                    continue

                if reject_nan_all_sys.ndim == 1:
                    reject_nan_all_sys = np.hstack(
                        (reject_nan_all_sys[:, None], reject_nan[:, None]))
                else:
                    reject_nan_all_sys = np.hstack(
                        (reject_nan_all_sys, reject_nan[:, None]))

        if reject_nan_all_sys.ndim > 1:
            if len(cfg_systems) == 1:  # only one GNSS is selected
                reject_nan_all_sys = np.bitwise_or.reduce(
                    reject_nan_all_sys, 1)
            else:
                reject_nan_all_sys = np.bitwise_and.reduce(
                    reject_nan_all_sys, 1)
        if np.any(reject_nan_all_sys):
            keep_idx[keep_idx] = np.logical_not(reject_nan_all_sys)[keep_idx]
            log.debug(f"Remove {np.sum(reject_nan_all_sys)} NaN values.")

    return keep_idx
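A tiny illustration of the NaN-rejection logic described in the note above: with several GNSS an epoch is removed only if all observation types are NaN (bitwise_and over the columns), while for a single GNSS any NaN removes it (bitwise_or). The flag matrix below is purely synthetic:

import numpy as np

# Each row is an epoch, each column flags NaN values for one observation type
reject_nan_all_sys = np.array([
    [True,  True,  True],    # NaN in every observation type
    [True,  False, False],   # NaN in one observation type only
    [False, False, False],   # no NaN at all
])

print(np.bitwise_and.reduce(reject_nan_all_sys, 1))   # [ True False False] -> several GNSS
print(np.bitwise_or.reduce(reject_nan_all_sys, 1))    # [ True  True False] -> single GNSS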
Example #25
    def parse_observation(self, line, cache):
        """Parse observation record of RINEX file
        """
        # Ignore epochs based on sampling rate
        # TODO: Sampling of data should be done in 'edit' step!!!
        sec = cache["obs_sec"]
        if sec is None:
            return

        if cache["num_sat"] != cache["len_sat_list"]:
            log.fatal(
                "Number of satellites ({}) does not agree with number of satellites in satellite PRN list ({}) "
                "in observation epoch {}.",
                cache["num_sat"],
                cache["len_sat_list"],
                cache["obs_time"],
            )

        # Read line with maximal 5 observations
        for field in sorted([f for f in line if f.startswith("obs_")]):

            # Fit length of observation (should always be 16 characters long)
            #
            # NOTE: This is necessary, because missing observations are written as 0.0 or BLANK in RINEX format and loss
            #       of lock indicator (LLI) and signal strength can be blank. In this case the length of observation
            #       field is fitted to 16 characters as defined in the RINEX 2.11 format description
            #
            #       Each observation type is saved in a Dataset field. The observation type fields have the same length
            #       to be consistent with the time, system or satellite Dataset field. The problem is that some
            #       observation types are not observed for a certain satellite system, but these observations are
            #       included with zero values in the observation type field.
            line[field] = line[field].ljust(16)

            cache.setdefault("obs_values",
                             list()).append(_float(line[field][0:14]))
            cache.setdefault("cycle_slip",
                             list()).append(_int(line[field][14:15]))
            cache.setdefault("signal_strength",
                             list()).append(_int(line[field][15:16]))

        # Save all observation type entries for given satellite (all observation for a given epoch and satellite are
        # read)
        if len(cache["obs_values"]) >= self.meta["num_obstypes"]:

            sat = cache["sat_list"].pop(0)
            sys = sat[0]
            sat_num = int(sat[1:])
            for obs_type, obs, cycle_slip, signal_strength in zip(
                    self.meta["obstypes"], cache["obs_values"],
                    cache["cycle_slip"], cache["signal_strength"]):
                self.data["obs"][obs_type].append(obs)
                self.data["cycle_slip"][obs_type].append(cycle_slip)
                self.data["signal_strength"][obs_type].append(signal_strength)
            del cache["obs_values"]
            del cache["cycle_slip"]
            del cache["signal_strength"]

            self.data.setdefault("time", list()).append(cache["obs_time"])
            self.data.setdefault("epoch_flag",
                                 list()).append(cache["epoch_flag"])
            self.data.setdefault("rcv_clk_offset",
                                 list()).append(cache["rcv_clk_offset"])

            obs = {
                "station":
                self.meta["marker_name"].lower(),  # vars['station'],
                "site_id": self.meta["marker_name"].upper(),
                "system": sys,
                "satellite": sat,
                "satnum": sat_num,
            }
            for field, value in obs.items():
                self.data.setdefault("text",
                                     dict()).setdefault(field,
                                                        list()).append(value)
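A small standalone example of the fixed-width split described in the comments above: each RINEX 2.11 observation field is padded to 16 characters, with the observation value in columns 1-14, the loss-of-lock indicator in column 15 and the signal strength in column 16. The _float and _int helpers of the parser module are not shown in this example, so simplified sketches are used here:

def _float(text: str) -> float:
    """Convert text to float; blank fields become 0.0 (simplified sketch of the parser helper)."""
    return float(text) if text.strip() else 0.0

def _int(text: str) -> int:
    """Convert text to int; blank fields become 0 (simplified sketch of the parser helper)."""
    return int(text) if text.strip() else 0

# One field with signal strength only and one field where LLI and signal strength are missing
for field in ["  23619095.450 7", "  23619098.238"]:
    field = field.ljust(16)             # pad to 16 characters as in parse_observation
    value = _float(field[0:14])         # observation value
    lli = _int(field[14:15])            # loss of lock indicator
    strength = _int(field[15:16])       # signal strength
    print(value, lli, strength)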
Example #26
def _plot_position_error(
    dfs_day: Dict[str, pd.core.frame.DataFrame],
    dfs_month: Dict[str, pd.core.frame.DataFrame],
    figure_dir: PosixPath,
    file_vars: Dict[str, Any],
) -> None:
    """Plot horizontal and vertical position error plots 

    Args:
        dfs_day:    Dictionary with function type as keys ('mean', 'percentile', 'rms', 'std') and a
                    dictionary as values. The dictionary has fields as keys (e.g. hpe, vpe) and the 
                    belonging dataframe as value with DAILY samples of 95th percentile and stations as
                    columns.
        dfs_month:  Dictionary with function type as keys ('mean', 'percentile', 'rms', 'std') and a
                    dictionary as values. The dictionary has fields as keys (e.g. hpe, vpe) and the
                    belonging dataframe as value with MONTHLY samples of 95th percentile and stations as
                    columns.
        figure_dir: Figure directory
        file_vars:  File variables (e.g. date, solution) used in the figure file names
    """

    ylabel_def = {
        "mean": "MEAN",
        "percentile": "95%",
        "rms": "RMS",
        "std": "STD",
    }

    opt_args = {
        "colormap": "tab20",
        "figsize": (7, 3),
        # "grid": True,
        "marker": "o",
        "markersize": "4",
        "linestyle": "solid",
        "plot_to": "file",
        "plot_type": "plot",
        # "statistic": ["rms", "mean", "std", "min", "max", "percentile"], #TODO: Is only shown for data, which are plotted at last.
        "title": config.tech.gnss_comparison_report.title.str.upper(),
    }

    colors = (config.tech.gnss_comparison_report.colors.list
              if config.tech.gnss_comparison_report.colors.list else
              ["orange", "red", "violet", "blue", "green"])

    # Loop over statistical solutions
    for type_ in dfs_day.keys():

        # Get used samples
        samples = dict()
        for sample in config.tech.gnss_comparison_report.samples.list:
            if "daily" == sample:
                samples["daily"] = dfs_day[type_]
            elif "monthly" == sample:
                samples["monthly"] = dfs_month[type_]
            else:
                log.fatal(
                    f"Sample '{sample}' is not defined. Only 'daily' and/or 'monthly' can be chosen as sample."
                )

        # Loop over sampled data
        for sample, sample_data in samples.items():

            # Loop over fields to plot
            for field in [
                    "east", "north", "up", "hpe", "vpe", "pos_3d", "pdop",
                    "hdop", "vdop"
            ]:

                # Get y-range limits
                if field == "hpe":
                    ylim = config.tech.gnss_comparison_report.ylim_hpe.list
                elif field == "vpe":
                    ylim = config.tech.gnss_comparison_report.ylim_vpe.list
                elif field == "pos_3d":
                    ylim = config.tech.gnss_comparison_report.ylim_pos_3d.list
                else:
                    ylim = config.tech.gnss_comparison_report.ylim.list

                opt_args["ylim"] = [float(ylim[0]),
                                    float(ylim[1])] if ylim else ylim

                # Generate x- and y-arrays for plotting
                x_arrays = []
                y_arrays = []
                labels = []

                for station in sample_data[field].columns:
                    #if sample == "monthly":
                    #    opt_args.update({"xlim": "auto", "ylim": "auto"})
                    x_arrays.append(list(sample_data[field].index))
                    y_arrays.append(list(sample_data[field][station]))
                    labels.append(station.upper())

                # Generate plot
                plot(
                    x_arrays=x_arrays,
                    y_arrays=y_arrays,
                    xlabel="Time [GPS]",
                    ylabel=f"3D {ylabel_def[type_]}" if field == "pos_3d" else
                    f"{field.upper()} {ylabel_def[type_]}",
                    y_unit="m",
                    labels=labels,
                    colors=colors,
                    figure_path=figure_dir /
                    f"plot_{type_}_{field}_{sample}_{file_vars['date']}_{file_vars['solution'].lower()}.{FIGURE_FORMAT}",
                    opt_args=opt_args,
                )
Example #27
File: atm_tides.py  Project: uasau/where
def atmospheric_tides_station(dset):
    """Calculate the atmospheric tides corrections for a station

    Atmospheric tides corrections are returned in meters in the Geocentric Celestial Reference System for each
    observation.

    Args:
        dset:        A Dataset containing model data

    Returns:
        Numpy array with atmospheric tide corrections in meters.

    """
    coeff = apriori.get("atmospheric_tides")
    use_cmc = config.tech.atmospheric_tides_cmc.bool

    # S1 has a frequency of 1 cycle/day, S2 has a frequency of 2 cycles/day
    omega_1 = 2 * np.pi
    omega_2 = 4 * np.pi

    # Time argument is fraction of UT1 day, see [2].
    t = dset.time.ut1.jd_frac
    lat, lon, _ = dset.site_pos.pos.llh.T

    # Equation 7.19a and 7.19b from IERS Conventions 2010
    de = (coeff["A_d1_e"](lon, lat, grid=False) * np.cos(omega_1 * t) +
          coeff["B_d1_e"](lon, lat, grid=False) * np.sin(omega_1 * t) +
          coeff["A_d2_e"](lon, lat, grid=False) * np.cos(omega_2 * t) +
          coeff["B_d2_e"](lon, lat, grid=False) * np.sin(omega_2 * t))
    dn = (coeff["A_d1_n"](lon, lat, grid=False) * np.cos(omega_1 * t) +
          coeff["B_d1_n"](lon, lat, grid=False) * np.sin(omega_1 * t) +
          coeff["A_d2_n"](lon, lat, grid=False) * np.cos(omega_2 * t) +
          coeff["B_d2_n"](lon, lat, grid=False) * np.sin(omega_2 * t))
    du = (coeff["A_d1_u"](lon, lat, grid=False) * np.cos(omega_1 * t) +
          coeff["B_d1_u"](lon, lat, grid=False) * np.sin(omega_1 * t) +
          coeff["A_d2_u"](lon, lat, grid=False) * np.cos(omega_2 * t) +
          coeff["B_d2_u"](lon, lat, grid=False) * np.sin(omega_2 * t))
    denu = np.vstack([de, dn, du]).T * Unit.mm2m

    if position.is_position(dset.site_pos):
        pos_correction = position.PositionDelta(denu,
                                                system="enu",
                                                ref_pos=dset.site_pos,
                                                time=dset.time)
    elif position.is_posvel(dset.site_pos):
        # set velocity to zero
        denu = np.concatenate((denu, np.zeros(denu.shape)), axis=1)
        pos_correction = position.PosVelDelta(denu,
                                              system="enu",
                                              ref_pos=dset.site_pos,
                                              time=dset.time)
    else:
        log.fatal(
            f"dset.site_pos{dset.default_field_suffix} is not a PositionArray or PosVelArray."
        )

    # Add center of mass corrections
    if use_cmc:
        # Equation (7.20) in [1]
        coeff_cmc = apriori.get("atmospheric_tides_cmc")
        cmc = (coeff_cmc["A1"][None, :] * np.cos(omega_1 * t)[:, None] +
               coeff_cmc["B1"][None, :] * np.sin(omega_1 * t)[:, None] +
               coeff_cmc["A2"][None, :] * np.cos(omega_2 * t)[:, None] +
               coeff_cmc["B2"][None, :] * np.sin(omega_2 * t)[:, None])
        if position.is_position(dset.site_pos):
            cmc_correction = position.PositionDelta(cmc,
                                                    system="trs",
                                                    ref_pos=dset.site_pos,
                                                    time=dset.time)
        elif position.is_posvel(dset.site_pos):
            # set velocity to zero
            cmc = np.concatenate((cmc, np.zeros(cmc.shape)), axis=1)
            cmc_correction = position.PosVelDelta(cmc,
                                                  system="trs",
                                                  ref_pos=dset.site_pos,
                                                  time=dset.time)
        pos_correction = pos_correction.trs + cmc_correction.trs

    return pos_correction.gcrs
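The east, north and up corrections above are sums of a diurnal (S1) and a semidiurnal (S2) harmonic, Equation (7.19) of the IERS Conventions 2010. A standalone sketch for a single component, using made-up A/B coefficients in millimetres instead of the interpolated apriori grid values:

# Standalone sketch of IERS 2010 Eq. (7.19) for one displacement component.
# The A/B coefficients are made-up numbers in millimetres; in the function
# above they are interpolated from the apriori atmospheric tide grids.
import numpy as np

omega_1, omega_2 = 2 * np.pi, 4 * np.pi        # S1 and S2 angular frequencies [rad/day]
t = np.array([0.0, 0.25, 0.5, 0.75])           # fraction of the UT1 day

A_d1, B_d1, A_d2, B_d2 = 0.4, -0.2, 0.1, 0.05  # hypothetical coefficients [mm]
d_up = (A_d1 * np.cos(omega_1 * t) + B_d1 * np.sin(omega_1 * t) +
        A_d2 * np.cos(omega_2 * t) + B_d2 * np.sin(omega_2 * t))
d_up_m = d_up * 1e-3                           # millimetres to metres (Unit.mm2m)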
Example #28
0
def _plot_bar_dataframe_columns(fid,
                                figure_dir,
                                df,
                                field,
                                extra_row_names=None,
                                column="rms"):
    """Generate bar plot of given dataframe columns (colored and ordered by satellite type)

    Args:
       fid (_io.TextIOWrapper): File object.
       figure_dir (PosixPath):  Figure directory.
       df (DataFrame):          Dataframe with data to plot.
       field (str):             Dataset field to plot.
       extra_row_names (list):  List of extra row names to be removed from the dataframe before plotting.
       column (str):            Dataframe column to plot.
    """
    fontsize = 12

    if extra_row_names:
        df_reduced = df.drop(extra_row_names)  # Remove extra rows
    else:
        df_reduced = df

    # Assign to each satellite type a color
    colors = dict()
    # TODO: Better handling of color definition?
    # color_def = ['cornflowerblue', 'firebrick', 'violet', 'gold', 'limegreen', 'deepskyblue', 'orangered']
    color_def = [
        "red",
        "tomato",
        "lightsalmon",
        "navy",
        "mediumblue",
        "blue",
        "royalblue",
        "deepskyblue",
        "paleturquoise",
    ]
    # color_def = ['C'+str(idx) for idx in range(0,10)]
    if len(color_def) < len(set(df_reduced.type)):
        log.fatal(
            f"Not enough colours defined for number of satellite types (#num: {len(set(df_reduced.type))})."
        )
    for type_ in sorted(set(df_reduced.type)):
        colors.update({type_: color_def.pop()})

    # Generate bar plot
    df_color = df_reduced["type"].apply(lambda x: colors[x])
    fig_width = len(df_reduced.index) / 4 if len(
        df_reduced.index) > 30 else 6.4
    ax = df_reduced[column].plot(kind="bar",
                                 color=df_color,
                                 width=0.8,
                                 figsize=(fig_width, fig_width / 1.33))
    ax.set_xlabel("Satellite", fontsize=fontsize)
    ax.set_ylabel(f"{field.upper()} {column.upper()} [m]", fontsize=fontsize)

    # Make legend
    satellite_type_patch = [
        mpatches.Patch(color=v, label=k) for k, v in sorted(colors.items())
    ]
    ax.legend(handles=satellite_type_patch,
              bbox_to_anchor=(1.04, 1),
              loc=2,
              borderaxespad=0.,
              ncol=1)

    plt.tight_layout()
    plt.savefig(figure_dir / f"plot_bar_{field}_{column}.{FIGURE_FORMAT}",
                dpi=FIGURE_DPI)
    plt.clf()  # clear the current figure

    fid.write(
        f"![{field.upper()} {column.upper()} for all satellites sorted by satellite type]({figure_dir}/plot_bar_{field}_{column}.{FIGURE_FORMAT})\n"
    )
    fid.write("\n\\clearpage\n\n")
Example #29
0
def run(rundate, pipeline, session=""):
    """Run a Where pipeline for a given date and session

    Args:
        rundate:   Rundate of analysis.
        pipeline:  Pipeline used for analysis.
        session:   Session in analysis.
    """
    if not setup.has_config(rundate, pipeline, session):
        log.fatal(
            f"No configuration found for {pipeline.upper()} {session} {rundate.strftime(config.FMT_date)}"
        )

    # Set up session config
    config.init(rundate=rundate, tech_name=pipeline, session=session)

    # Set up prefix for console logger and start file logger
    log_cfg = config.where.log
    prefix = f"{pipeline.upper()} {session} {rundate:%Y-%m-%d}"
    log.init(log_level=log_cfg.default_level.str, prefix=prefix)
    if log_cfg.log_to_file.bool:
        log.file_init(
            file_path=files.path("log"),
            log_level=log_cfg.default_level.str,
            prefix=prefix,
            rotation=log_cfg.number_of_log_backups.int,
        )

    # Read which stages to skip from technique configuration file.
    skip_stages = config.tech.get("skip_stages", default="").list

    # Register filekey suffix
    filekey_suffix = config.tech.filekey_suffix.list
    if filekey_suffix:
        config.files.profiles = filekey_suffix

    # Find which stages we will run analysis for
    # TODO: Specify stage_list in config
    stage_list = [s for s in stages(pipeline) if s not in skip_stages]

    # Start file logging and reporting
    reports.report.init(sessions=[session])
    reports.report.start_session(session)
    reports.report.text("header", session.replace("_", " ").title())

    # Update analysis config and file variables
    config.set_analysis(rundate=rundate,
                        tech=pipeline,
                        analysis=pipeline,
                        session=session)
    config.set_file_vars(file_vars())

    # Log the name of the session
    log.blank()  # Empty line for visual clarity
    log.info(f"Start session {session}")
    session_timer = timer(f"Finish session {session} in")
    session_timer.start()

    # Run stages, keep track of previous stage
    dset = None
    dep_fast = config.where.files.dependencies_fast.bool
    for prev_stage, stage in zip([None] + stage_list, stage_list):

        # Skip stages where no dependencies have changed
        dep_path = files.path("depends", file_vars=dict(stage=stage))
        if not (dependencies.changed(dep_path, fast_check=dep_fast)
                or util.check_options("-F", "--force")):
            log.info(
                f"Not necessary to run {stage} for {pipeline.upper()} {rundate.strftime(config.FMT_date)}"
            )
            continue
        elif dset is None:
            # Create or read dataset
            empty = stage == stage_list[0]
            dset = dataset.Dataset(rundate,
                                   tech=pipeline,
                                   stage=prev_stage,
                                   dataset_name=session,
                                   dataset_id="last",
                                   empty=empty)

        # Report on the stage
        reports.report.start_section(stage)
        reports.report.text("header", stage.replace("_", " ").title())
        if prev_stage:
            log.blank()  # Empty line for visual clarity

        # Set up dependencies. Add dependencies to previous stage and config file
        dependencies.init(dep_path, fast_check=dep_fast)
        dependencies.add(files.path("depends",
                                    file_vars=dict(stage=prev_stage)),
                         label="depends")
        dependencies.add(*config.tech.sources, label="config")

        # Delete old datasets for this stage
        dset.delete_from_file(stage=stage, dataset_id="all")

        # Call the current stage. If the stage leaves the dataset without observations, the remaining stages are
        # skipped (see the check on dset.num_obs below)
        plugins.call(package_name=__name__,
                     plugin_name=pipeline,
                     part=stage,
                     stage=stage,
                     dset=dset,
                     plugin_logger=log.info)
        dependencies.write()
        if dset.num_obs == 0:
            log.warn(
                f"No observations in dataset after {stage} stage. Exiting pipeline"
            )
            break
    else:  # Only done if loop does not break (all stages finish normally)
        # Publish files for session
        files.publish_files()

    session_timer.end()

    # Store configuration to library
    setup.store_config_to_library(rundate, pipeline, session)

    # Write reports specified in config
    reports.write(rundate, pipeline)

    # Write requirements to file for reproducibility
    util.write_requirements()
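A minimal usage sketch, assuming a Where configuration for the given date, pipeline and session has already been set up; the date and session name are just examples:

# Hypothetical call of run() (sketch only): presumes setup.has_config finds a
# configuration created beforehand for this date, pipeline and session.
from datetime import date

run(date(2019, 2, 1), pipeline="gnss", session="krss")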
Example #30
0
File: compare.py Project: uasau/where
def main(date: "datedoy", tech: "pipeline", items: "option",
         specifier: "option"):
    log.init(log_level="info")
    dsets = dict()

    # Additional options
    stage = util.read_option_value("--stage")
    writer_names = util.read_option_value("--writers").replace(",",
                                                               " ").split()
    items_ = [s.strip() for s in items.split(",")]

    # Get optional options
    dataset_id = util.read_option_value("--dset_id", default="last")
    dataset_id = "last" if dataset_id == "last" else int(dataset_id)
    dataset_name = util.read_option_value("--dset_name", default="")
    session = util.read_option_value("--session", default="")
    id_ = "-" + util.read_option_value(
        "--id", default="") if util.read_option_value("--id",
                                                      default="") else ""

    # Read datasets for given specifier
    if specifier == "id":
        for id_ in items_:
            dset = data.Dataset(rundate=date,
                                tech=tech,
                                stage=stage,
                                dataset_name=dataset_name,
                                dataset_id=dataset_id,
                                id="-" + id_)
            if dset.num_obs == 0:
                log.warn(f"Dataset '{id_}' is empty.")
                continue
            dsets.update({id_: dset})

    elif specifier == "session":
        for session in items_:
            dset = data.Dataset(rundate=date,
                                tech=tech,
                                stage=stage,
                                dataset_name=session,
                                dataset_id=dataset_id,
                                id=id_)
            if dset.num_obs == 0:
                log.warn(f"Dataset '{session}' is empty.")
                continue
            dsets.update({session: dset})

    elif specifier == "stage":
        for stage in items_:
            dset = data.Dataset(rundate=date,
                                tech=tech,
                                stage=stage,
                                dataset_name=dataset_name,
                                dataset_id=dataset_id,
                                id=id_)
            if dset.num_obs == 0:
                log.warn(f"Dataset '{stage}' is empty.")
                continue
            dsets.update({stage: dset})
    else:
        log.fatal(
            f"Specifier {specifier} is not defined. It should be either 'id', 'session' or 'stage'."
        )

    if len(dsets) == 0:
        log.fatal(f"All given datasets are empty [{', '.join(dsets.keys())}].")
    elif len(dsets) == 1:
        log.warn(
            f"Nothing to compare. Only dataset '{list(dsets.keys())[0]}' is available."
        )

    # Loop over writers
    for writer in writer_names:
        write(writer, dset=dsets)
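A hedged sketch of how this entry point could be driven directly from Python; the extra options are normally given on the command line and read via util.read_option_value, so they are injected into sys.argv here. The date, station names and writer name are made-up examples:

# Hypothetical direct call of main() (sketch only).  The writer name
# "gnss_comparison_report" and the station names are assumptions.
import sys
from datetime import date

sys.argv += ["--stage=estimate", "--writers=gnss_comparison_report"]
main(date=date(2019, 2, 1), tech="gnss", items="krss, vegs", specifier="session")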