Example #1
    def _read_history(
            self) -> Dict[Tuple[datetime, datetime], "ReceiverSinex"]:
        """Read receiver site history from SINEX file

        Returns:
            Dictionary with (date_from, date_to) tuple as key. The values are ReceiverSinex objects.
        """
        if self.source_path is None:
            log.fatal("No SINEX file path is defined.")

        # Find site_id and read receiver history
        p = parsers.parse_file("gnss_sinex_igs", file_path=self.source_path)
        data = p.as_dict()
        if self.station in data:
            raw_info = data[self.station]["site_receiver"]
        elif self.station.upper() in data:
            raw_info = data[self.station.upper()]["site_receiver"]
        else:
            raise ValueError(
                f"Station {self.station!r} unknown in source '{self.source_path}'."
            )

        # Create list of receiver history
        history = dict()
        for receiver_info in raw_info:
            receiver = ReceiverSinex(self.station, receiver_info)
            interval = (receiver.date_installed, receiver.date_removed)
            history[interval] = receiver

        return history
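
A minimal usage sketch (not part of the source; the helper name is hypothetical): given the history dictionary returned above, the receiver active at a given epoch can be found by scanning the (date_from, date_to) keys.

from datetime import datetime

def active_receiver(history, epoch: datetime):
    """Return the ReceiverSinex object whose installation interval covers epoch, else None"""
    for (date_from, date_to), receiver in history.items():
        if date_from <= epoch < date_to:  # assumption: intervals are half-open
            return receiver
    return None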
Example #2
    def _read_history(self) -> Dict[Tuple[datetime, datetime], "AntennaSinex"]:
        """Read antenna site history from SINEX file

        Returns:
            Dictionary with (date_from, date_to) tuple as key. The values are AntennaSinex objects.
        """
        if self.source_path is None:
            log.fatal("No SINEX file path is defined.")

        # Find site_id and read antenna history
        p = parsers.parse_file("gnss_sinex_igs", file_path=self.source_path)
        data = p.as_dict()
        if self.station in data:
            raw_info = data[self.station]["site_antenna"]
        elif self.station.upper() in data:
            raw_info = data[self.station.upper()]["site_antenna"]
        else:
            raise ValueError(
                f"Station {self.station!r} unknown in source '{self.source_path}'."
            )

        # Create list of antenna history
        history = dict()
        for antenna_info in raw_info:
            antenna = AntennaSinex(self.station, antenna_info)
            interval = (antenna.date_installed, antenna.date_removed)
            history[interval] = antenna

        return history
Example #3
def not_implemented() -> None:
    """A placeholder for functions that are not implemented yet

    A note about the missing implementation is written to the log.
    """
    caller = sys._getframe(1)
    funcname = caller.f_code.co_name
    args = ", ".join([k + "=" + str(v) for k, v in caller.f_locals.items()])
    filename = caller.f_code.co_filename
    lineno = caller.f_lineno
    log.fatal(
        f"Function {funcname}({args}) is not implemented in {filename}, line {lineno}"
    )
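
A hypothetical caller, to illustrate what the placeholder reports (the function name and argument are made up):

def compute_azimuth(station: str = "zimm") -> None:
    # Logs a message like:
    # "Function compute_azimuth(station=zimm) is not implemented in <filename>, line <lineno>"
    # (log.fatal in Where also ends processing)
    not_implemented()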
Example #4
    def as_dict(self) -> Dict[str, Any]:
        """Return the parsed data as a dictionary

        Returns:
            Dictionary with the following entries:

           | Key        | Type              | Description                                                  |
           |------------|-------------------|--------------------------------------------------------------|
           | data       | numpy.ndarray     | Grid data of dimension (latitude x longitude)                |
           | latitude   | numpy.ndarray     | Latitude values of grid in degree                            |
           | longitude  | numpy.ndarray     | Longitude values of grid in degree                           |

           If no data are available, an empty dictionary is returned.
        """
        if not self.data:
            return dict()

        num_grid_lon = int(
            round((self.meta["grid_lon_max"] - self.meta["grid_lon_min"]) /
                  self.meta["grid_increment_lon"], 1) + 1)
        num_grid_lat = int(
            round((self.meta["grid_lat_max"] - self.meta["grid_lat_min"]) /
                  self.meta["grid_increment_lat"], 1) + 1)

        lon = np.linspace(
            self.meta["grid_lon_min"],
            self.meta["grid_lon_max"],
            num_grid_lon,
            endpoint=True,
        )
        lat = np.linspace(
            self.meta["grid_lat_max"],
            self.meta["grid_lat_min"],
            num_grid_lat,
            endpoint=True,
        )

        if len(self.data["griddata"]) != num_grid_lon * num_grid_lat:
            log.fatal("Wrong dimensions.")

        data = np.array(self.data["griddata"]).astype(float)
        #import IPython; IPython.embed()
        data = data.reshape(num_grid_lat, num_grid_lon)

        return dict(
            longitude=lon,
            latitude=lat,
            data=data,
        )
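
A standalone sketch of the grid dimensioning arithmetic above, using hypothetical metadata for a global 0.5-degree grid:

import numpy as np

meta = dict(grid_lon_min=-180.0, grid_lon_max=180.0, grid_increment_lon=0.5,
            grid_lat_min=-90.0, grid_lat_max=90.0, grid_increment_lat=0.5)

num_grid_lon = int(round((meta["grid_lon_max"] - meta["grid_lon_min"]) / meta["grid_increment_lon"], 1) + 1)  # 721
num_grid_lat = int(round((meta["grid_lat_max"] - meta["grid_lat_min"]) / meta["grid_increment_lat"], 1) + 1)  # 361

lon = np.linspace(meta["grid_lon_min"], meta["grid_lon_max"], num_grid_lon, endpoint=True)
lat = np.linspace(meta["grid_lat_max"], meta["grid_lat_min"], num_grid_lat, endpoint=True)  # north to south
data = np.zeros(num_grid_lon * num_grid_lat).reshape(num_grid_lat, num_grid_lon)  # one row per latitude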
Example #5
def get_rinex_file_version(file_path: pathlib.PosixPath) -> str:
    """ Get RINEX file version for a given file path

    Args:
        file_path:  File path.
        
    Returns:
        RINEX file version
    """
    with files.open(file_path, mode="rt") as infile:
        try:
            version = infile.readline().split()[0]
        except IndexError:
            log.fatal(f"Could not find Rinex version in file {file_path}")

    return version
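
The version number is the first whitespace-separated token of the first header line. A standalone sketch with a hypothetical RINEX 3 header line:

header_line = "     3.03           OBSERVATION DATA    M                   RINEX VERSION / TYPE"
version = header_line.split()[0]  # "3.03"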
Example #6
def get_rinex2_or_rinex3(file_path: pathlib.PosixPath) -> "TODO":
    """Use either Rinex2NavParser or Rinex3NavParser for reading orbit files in format 2.11 or 3.03.

    First the RINEX file version is read. Based on that version number, the appropriate parser is chosen.

    Args:
        file_path (pathlib.PosixPath):  File path to broadcast orbit file.
    """
    version = gnss.get_rinex_file_version(file_path=file_path)
    if version.startswith("2"):
        parser_name = "rinex212_nav" if version == "2.12" else "rinex2_nav"
    elif version.startswith("3"):
        parser_name = "rinex3_nav"
    else:
        log.fatal(f"Unknown RINEX format {version} is used in file {file_path}")

    return parsers.parse_file(parser_name=parser_name, file_path=file_path, use_cache=True)
Example #7
def obstype_to_freq(sys: str, obstype: str) -> float:
    """Get GNSS frequency based on given GNSS observation type

    Args:
        sys:     GNSS identifier (e.g. 'E', 'G', ...)
        obstype: Observation type (e.g. 'L1', 'P1', 'C1X', ...)

    Returns:
        GNSS frequency in [Hz]
    """
    try:
        freq = getattr(enums, "gnss_freq_" + sys)[
            getattr(enums, "gnss_num2freq_" + sys)["f" + obstype[1]]
        ]
    except KeyError:
        log.fatal(
            f"Frequency for GNSS '{sys}' and observation type '{obstype}' is not defined."
        )

    return freq
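
A minimal sketch of the two-stage lookup, with plain dictionaries standing in for the midgard enums (an assumption; the values are the well-known GPS carrier frequencies):

gnss_num2freq_G = {"f1": "L1", "f2": "L2", "f5": "L5"}             # frequency number -> band
gnss_freq_G = {"L1": 1575.42e6, "L2": 1227.60e6, "L5": 1176.45e6}  # band -> frequency in Hz

obstype = "C1X"
freq = gnss_freq_G[gnss_num2freq_G["f" + obstype[1]]]  # 1575.42e6 Hz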
Example #8
    def parse_data_handling(self, data):
        for d in data:
            start_time = datetime.min if d["start_time"] is None else d["start_time"]
            end_time = datetime.max if d["end_time"] is None else d["end_time"]
            interval = (start_time, end_time)
            info = {"unit": d["unit"]}

            if d["e_value"]:
                try:
                    e_value = float(d["e_value"])
                except ValueError:
                    log.fatal(
                        f"ILRS Data handling: Not able to convert e_value {d['e_value']!r} to float"
                    )
                info.update({"e_value": e_value})
            if d["std_dev"]:
                try:
                    std_dev = float(d["std_dev"])
                except ValueError:
                    log.fatal(
                        f"ILRS Data handling: Not able to convert std_dev {d['std_dev']!r} to float"
                    )
                info.update({"std_dev": std_dev})

            # Unfortunately we have to deal with two different line formats.
            # Split the comments field in the second line format:
            # *CODE PT_ UNIT T _DATA_START_ __DATA_END__ M __E-VALUE___ STD_DEV ___COMMENTS______
            # *CODE PT_ UNIT T _DATA_START_ __DATA_END__ M __E-VALUE___ STD_DEV _E-RATE__ _CMNTS_
            try:
                info.update({
                    "comments": d["comments2"],
                    "e_rate": float(d["comments1"])
                })
            except ValueError:
                info.update({"comments": d["comments1"] + d["comments2"]})

            self.data.setdefault(d["site_code"],
                                 {}).setdefault(d["handling_code"], []).append(
                                     (interval, info))
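
For illustration, the nested structure built by the last statement looks like this (hypothetical values):

from datetime import datetime

data_handling = {
    "7090": {                                        # site_code
        "E": [                                       # handling_code
            ((datetime(2010, 1, 1), datetime.max),   # interval (start_time, end_time)
             {"unit": "mm", "e_value": 15.0, "comments": ""}),
        ],
    },
}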
Example #9
    def _time_system_correction(self) -> None:
        """Apply correction to given time system for getting GPS or UTC time scale

        The following relationship holds between a GNSS time scale (either BeiDou, Galileo, IRNSS or QZSS)
        :math:`t_{GNSS}` and the GPS time scale :math:`t_{GPS}` (see Section 2.1.4 in :cite:`teunissen2017`):

        .. math::
              t_{GPS}  = t_{GNSS} + \Delta t

        The time offset :math:`\Delta t` is 0 s for Galileo, IRNSS and QZSS, and 14 s for BeiDou. All these time
        scales are related to the International Atomic Time (TAI) by a certain time offset. An exception is the
        GLONASS time scale, which is related to UTC:

        .. math::
              t_{UTC}  = t_{GLONASS} - 3h

        Note that in the RINEX format (see Section 8.2 in :cite:`rinex2`) GLONASS time has the same hours as UTC and
        not UTC + 3h as the original GLONASS system time, which is given in the Moscow time zone instead of Greenwich.

        In this routine the given observation time (epoch) will be transformed to GPS time scale for BeiDou, Galileo,
        QZSS and IRNSS and to UTC time scale for GLONASS.
        """
        system = self.meta["time_sys"]
        valid_time_systems = ["BDT", "GAL", "GPS", "GLO", "IRN", "QZS"]

        if system not in valid_time_systems:
            log.fatal(
                f"Time system {system!r} in file {self.file_path} is not handled in Where. "
                f"The following time systems can be used: {', '.join(valid_time_systems)}"
            )

        # Convert observation time entries of BeiDou to GPS time scale by adding system time offset
        if system == "BDT":
            self.data["time"] = [
                dateutil.parser.parse(t) + timedelta(
                    seconds=SYSTEM_TIME_OFFSET_TO_GPS_TIME.get(system, 0))
                for t in self.data["time"]
            ]

        # Change time scale to UTC for GLONASS
        elif system == "GLO":
            self.time_scale = "utc"
Example #10
    def _determine_pseudorange(self) -> None:
        """Determine pseudorange based on ION 2016 tutorial "Raw GNSS Measurements from Android Phones".
        """

        # Determine GPS week
        week = np.floor(-np.array(self.data["FullBiasNanos"]) *
                        Unit.nanosecond2second / 604800)

        # GNSS signal arriving time at measurement time (GPS time) referenced to GPS week
        tRxNanos = ((np.array(self.data["TimeNanos"], dtype=float) +
                     np.array(self.data["TimeOffsetNanos"], dtype=float)) -
                    (np.array(self.data["FullBiasNanos"], dtype=float) +
                     np.array(self.data["BiasNanos"], dtype=float)) -
                    (week * 604800e9))

        if np.any(tRxNanos >= 604800e9):
            log.fatal("tRxNanos should be less than the number of nanoseconds in a GPS week (604800e9).")
        if np.any(tRxNanos <= 0.0):
            log.fatal("tRxNanos should be positive.")

        self.data["week"] = week
        self.data["tRxNanos"] = tRxNanos
        self.data["time"] = Time(val=week,
                                 val2=tRxNanos * Unit.nanosecond2second,
                                 fmt="gps_ws",
                                 scale="gps")

        # GNSS satellite transmission time at measurement time (GPS time) referenced to GPS week
        tTxNanos = np.array(self.data["ReceivedSvTimeNanos"], dtype=float)

        self.data["sat_time"] = Time(val=week,
                                     val2=tTxNanos * Unit.nanosecond2second,
                                     fmt="gps_ws",
                                     scale="gps")
        # TODO: Check GPS week rollover (see ProcessGnssMeas.m)

        self.data["pseudorange"] = (
            tRxNanos -
            tTxNanos) * Unit.nanosecond2second * constant.c  # in meters
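
A standalone numeric check of the pseudorange formula in the last statement (the constants are assumptions standing in for Unit.nanosecond2second and constant.c):

import numpy as np

c = 299792458.0              # speed of light in m/s
nanosecond2second = 1e-9

tRxNanos = np.array([70e6])  # hypothetical receive time within the GPS week
tTxNanos = np.array([0.0])   # hypothetical transmit time
pseudorange = (tRxNanos - tTxNanos) * nanosecond2second * c  # ~2.1e7 m, a plausible GNSS range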
Example #11
    def _parse_time_of_last_obs(self, line: Dict[str, str],
                                _: Dict[str, Any]) -> None:
        """Parse time of last observation given in RINEX header to instance variable `meta`.
        """
        if line["time_sys"] != "GPS":
            log.fatal(
                f"Time system {line['time_sys']} is not handled so far in Where"
            )

        if line["time_sys"]:
            self.meta["time_sys"] = line["time_sys"]

        if line["year"]:
            self.meta["time_last_obs"] = (
                "{year}-{month:02d}-{day:02d}T{hour:02d}:{minute:02d}:{second:010.7f}"
                "".format(
                    year=int(line["year"]),
                    month=int(line["month"]),
                    day=int(line["day"]),
                    hour=int(line["hour"]),
                    minute=int(line["minute"]),
                    second=float(line["second"]),
                ))
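
The format string above yields an ISO-like epoch with 7 fractional second digits, for example:

epoch = "{year}-{month:02d}-{day:02d}T{hour:02d}:{minute:02d}:{second:010.7f}".format(
    year=2021, month=1, day=1, hour=23, minute=59, second=59.9999999,
)
# epoch -> '2021-01-01T23:59:59.9999999'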
Example #12
    def as_dataset(self) -> "Dataset":
        """Return the parsed data as a Dataset

        Returns:
            Midgard Dataset where timeseries data are stored with following fields:

    
           | Field                 | Type              | Description                                                  |
           |-----------------------|-------------------|--------------------------------------------------------------|
           | amplitude             | numpy.array       | Amplitude                                                    |
           | azimuth               | numpy.array       | Azimuth in [rad]                                             |
           | frequency             | numpy.array       | GNSS frequency identifier                                    |
           | peak2noise            | numpy.array       | Peak to noise                                                |
           | satellite             | numpy.array       | Satellite number                                             |
           | reflection_height     | numpy.array       | Reflection height in [m]                                     |
           | time                  | Time              | Time                                                         |
               
        """

        freq_def = {
            1: "L1",  # G
            2: "L2",  # G
            5: "L5",  # G
            20: "L2C",  # G
            101: "L1",  # R
            102: "L2",  # R
            201: "E1",  # E 
            205: "E5a",  # E
            206: "E6",  # E
            207: "E5b",  # E
            208: "E5",  # E
            302: "B1_2",  # C
            306: "B3",  # C
            307: "B2b",  # C
        }

        float_fields = {
            "amplitude": None,
            "azimuth": "radian",
            "peak2noise": None,
            "reflection_height": "meter",
        }

        # Initialize dataset
        dset = dataset.Dataset()
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["time"])

        # Add text fields
        satellite = list()
        system = list()
        for sat in self.data["satellite"]:
            if 1 <= sat < 100:  # GPS satellites
                system.append("G")
                satellite.append("G" + str(int(sat)).zfill(2))
            elif 101 <= sat < 200:  # GLONASS satellites
                system.append("R")
                satellite.append("R" + str(int(sat))[1:3])
            elif 201 <= sat < 300:  # Galileo satellites
                system.append("E")
                satellite.append("E" + str(int(sat))[1:3])
            elif 301 <= sat < 400:  # BeiDou satellites
                system.append("C")
                satellite.append("C" + str(int(sat))[1:3])
            else:
                log.fatal(
                    f"GNSSREFL satellite number {sat} is not defined. Valid satellite numbers are between [1-399]."
                )

        dset.add_text(
            name="system",
            val=system,
            write_level="operational",
        )

        dset.add_text(
            name="satellite",
            val=satellite,
            write_level="operational",
        )

        dset.add_text(
            name="frequency",
            val=[freq_def[v] for v in self.data["frequency"]],
            write_level="operational",
        )

        # Add time field
        dset.add_time(
            name="time",
            val=self.data["time"],
            scale="utc",
            fmt="datetime",
            write_level="operational",
        )

        # Add float fields
        for field in float_fields.keys():
            if field not in self.data.keys():
                log.warn(
                    f"Field '{field}' does not exist in file {self.meta['__data_path__']}."
                )
                continue

            value = np.deg2rad(self.data[field]) if field == "azimuth" else self.data[field]
            unit = "" if float_fields[field] is None else float_fields[field]

            dset.add_float(name=field,
                           val=value,
                           unit=unit,
                           write_level="operational")

        return dset
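
The satellite-number convention in the loop above can be summarized as a small helper (a sketch; the ranges are taken directly from the branches):

def sat_to_id(sat: float) -> str:
    """Map a gnssrefl satellite number to a RINEX-style satellite identifier"""
    for offset, system in [(0, "G"), (100, "R"), (200, "E"), (300, "C")]:
        if offset + 1 <= sat < offset + 100:
            return f"{system}{int(sat) - offset:02d}"
    raise ValueError(f"Satellite number {sat} is not defined. Valid satellite numbers are between [1-399].")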
Example #13
    def as_dataset(self) -> "Dataset":
        """Return the parsed data as a Dataset

        Returns:
            A dataset containing the data.
        """
        # Spring constellation definition
        system_def = {
            "0": "",  # Unknown
            "1": "G",  # GPS
            "2": "R",  # GLONASS
            "3": "S",  # SBAS
            "4": "E",  # Galileo
            "5": "C",  # BeiDou
            "6": "J",  # QZSS
        }

        field_spring_to_where = {
            "3DSpeed": "site_vel_3d",
            "Clock": "delay.gnss_satellite_clock",
            "EastSpeed": "site_vel_east",
            "GroupDelay": "delay.gnss_total_group_delay",
            "HSpeed": "site_vel_h",
            "IODE": "used_iode",
            "NorthSpeed": "site_vel_north",
            "PseudoRange": "delay.gnss_range",
            "SatInView": "num_satellite_available",
            "TropoDelay": "troposphere_dT",
            "UISD": "delay.gnss_ionosphere",
            "UsedSat": "num_satellite_used",
            "EastvsRef": "site_pos_vs_ref_east",
            "NorthvsRef": "site_pos_vs_ref_north",
            "VerticalvsRef": "site_pos_vs_ref_up",
            "VerticalSpeed": "site_vel_up",
            "XSpeed": "site_vel_x",
            "YSpeed": "site_vel_y",
            "ZSpeed": "site_vel_z",
        }

        # Initialize dataset
        dset = dataset.Dataset()
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["GPSEpoch"])

        # Add time
        dset.add_time(
            "time",
            val=[
                dateutil.parser.parse(v.replace("UTC", ""))
                for v in self.data["UTCDateTime"]
            ],
            scale="utc",
            fmt="datetime",
            write_level="operational",
        )

        # Add system field based on Constellation column
        if "Constellation" in self.data.keys():
            dset.add_text("system",
                          val=[
                              system_def[str(value)]
                              for value in self.data["Constellation"]
                          ])

        # Add satellite field based on PRN column
        if "PRN" in self.data.keys():
            prn_data = []
            for prn in self.data["PRN"]:
                if 1 <= prn <= 32:  # Handling of GPS satellites
                    prn_data.append("G" + str(prn).zfill(2))
                elif 38 <= prn <= 70:  # Handling of GLONASS satellites
                    prn_data.append("R" + str(prn - 38).zfill(2))
                elif 71 <= prn <= 140:  # Handling of Galileo satellites
                    prn_data.append("E" + str(prn - 70).zfill(2))
                elif 191 <= prn <= 222:  # Handling of BeiDou satellites
                    prn_data.append("C" + str(prn - 191).zfill(2))
                else:
                    log.fatal(f"Spring PRN number '{prn}' is unknown.")

            dset.add_text("satellite", val=prn_data)
            dset.add_text("system", np.array(prn_data).astype("U1"))

        # Add position field based on Latitude, Longitude and Height column
        if "Latitude" in self.data.keys():
            pos = Position(
                val=np.vstack((self.data["Latitude"] * Unit.deg2rad,
                               self.data["Longitude"] * Unit.deg2rad,
                               self.data["Height"])).T,
                system="llh",
            )
            if "XPos" in self.data.keys():
                dset.add_position("sat_pos",
                                  val=pos.trs,
                                  system="trs",
                                  time=dset.time)
            else:
                dset.add_position("site_pos",
                                  val=pos.trs,
                                  system="trs",
                                  time=dset.time)

        # Define fields to save in dataset
        remove_time_fields = {
            "Constellation", "GPSEpoch", "GPSWeek", "GPSSecond", "PRN", "",
            "UTCDateTime"
        }
        fields = set(self.data.keys()) - remove_time_fields

        # Add text and float fields
        for field in fields:

            where_fieldname = field_spring_to_where.get(field, field.lower())

            if self.data[field].dtype.kind in {"U", "S"}:  # Check if numpy type is string
                dset.add_text(where_fieldname, val=self.data[field])
                continue

            dset.add_float(where_fieldname, val=self.data[field])

        return dset
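
The Spring PRN convention can likewise be expressed as a helper (a sketch; the ranges and offsets come straight from the branches above):

def spring_prn_to_id(prn: int) -> str:
    """Map a Spring PRN number to a RINEX-style satellite identifier"""
    if 1 <= prn <= 32:
        return f"G{prn:02d}"
    if 38 <= prn <= 70:
        return f"R{prn - 38:02d}"
    if 71 <= prn <= 140:
        return f"E{prn - 70:02d}"
    if 191 <= prn <= 222:
        return f"C{prn - 191:02d}"
    raise ValueError(f"Spring PRN number '{prn}' is unknown.")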
Example #14
    def plot_subplots(
        self,
        x_array: np.ndarray,
        y_arrays: List[np.ndarray],
        xlabel: str,
        ylabels: List[str],
        x_unit: str = "",
        y_units: Union[List[str], None] = None,
        colors: Union[List[str], None] = None,
        figure_path: str = "plot_subplot.png",
        subtitles: Union[List[List[str]], None] = None,
        options: Dict[str, Any] = None,
        events: Union[Dict[str, List[Any]], None] = None,
    ) -> None:
        """Generate subplots with one column
    
        The subplot has only one column. The number of rows is defined via the chosen number of y-axis arrays.
        Depending on the dimension of a y-axis array, several plots can be drawn in one subplot. For example:
            y_arrays = [np.array([1,2,3,4,5]), ...]      # ndim=1, one plot in the subplot
            y_arrays = [np.array([[1,2,3,4,5],           # ndim=2, two plots in one subplot
                                  [6,7,8,9,10]]), ...]
            y_arrays = [np.array([[1,2,3,4,5],           # ndim=2, three plots in one subplot
                                  [6,7,8,9,10],
                                  [11,12,13,14,15]]), ...]

        The following **options** can be overwritten:
    
        | Option             | Value            | Description                                                             |
        |--------------------|------------------|-------------------------------------------------------------------------|
        | colormap           | <type>           | Color map type for plotting events (e.g. viridis, jet, tab10, rainbow,  |
        |                    |                  | hsv, plasma)                                                            |
        | dpi                | <num>            | Resolution of file in dots per inch                                     |
        | figsize            | (num, num)       | Figure size given by (width, height) in inches                          |
        | fsize_subtitle     | <num>            | Fontsize of subplot title (statistical information)                     |
        | grid               | <True|False>     | Plot grid                                                               |
        | histogram          | <x, y>           | Plot x-axis histogram on top, y-axis histogram on right or for both     |
        |                    |                  | axis on scatter plot                                                    |
        | histogram_binwidth | <num>            | Histogram bin width                                                     |
        | histogram_size     | <num>            | Histogram y-axis size                                                   |
        | legend             | <True|False>     | Plot legend                                                             |
        | legend_location    | <right, bottom>  | Legend location                                                         |
        | legend_ncol        | <num>            | The number of legend columns                                            |
        | marker             | <'.'|'-'>        | Marker type                                                             |
        | plot_to            | <console|file>   | Plot figure on console or file                                          |
        | plot_type          | <scatter|plot>   | Choose either "scatter" or "plot" type                                  |
        | reg_line           | <True|False>     | Regression line flag                                                    |
        | sharex             | <True|False>     | Share x-axis                                                            |
        | sharey             | <True|False>     | Share y-axis                                                            |
        | statistic          | <rms, mean, ...> | Plot statistical information. Following function can be defined: 'max', |
        |                    |                  | 'mean', 'min', 'rms', 'std', 'percentile' (see function _get_statistic  |
        |                    |                  | for more information)                                                   |
        | tick_labelsize     | <(axis, size)>   | Change label size of x- and y-axis tick labels. This can be done either |
        |                    |                  | for x-axis, y-axis or both axis via specifying 'x', 'y' or both'.       |
        | title              | <text>           | Main title of subplots                                                  |
        | xlim               | <[num, num]|auto>| Define x-axis limit by defining a list with [left, right] range. If     |
        |                    |                  | xlim=auto, then x-axis limit is automatically chosen                    |
        | xticks             | <[num, ...]>     | Define x-axis ticks by defining a list with ticks                       |
        | xticklabels        | <[text, ...]>    | Define x-axis ticks labels by defining a list with labels               |
        | ylim               | <[num, num]>     | Define y-axis limit by defining a list with [bottom, top] range         |
        | yticks             | <[num, ...]>     | Define y-axis ticks by defining a list with ticks                       |
        | yticklabels        | <[text, ...]>    | Define y-axis ticks labels by defining a list with labels               |
    
        Args:
           x_array:        Array with x-axis data to plot.
           y_arrays:       List of arrays with y-axis data to plot.
           xlabel:         X-axis label.
           ylabels:        List with y-axis labels. It should correspond to the given number of y-axis arrays.
           x_unit:         X-axis unit.
           y_units:        List with y-axis units. It should correspond to the given number of y-axis arrays.
           colors:         List with colors for each plot. It should correspond to the given number of y-axis arrays.
           figure_path:    Figure path.
           options:        Dictionary with options, which overwrite default plot configuration.
           subtitles:      List with a title for each subplot. It should correspond to the given number of y-axis arrays.
           events:         Dictionary with event labels as keys and lists of events as values. The events have to be
                           related to x-axis data. Event colors are automatically chosen based on 'colormap'.
        """

        # Overwrite options with argument definition
        if options:
            self.set_options(options)  
    
        # Generate subplot
        fig, axes = plt.subplots(
            nrows=len(y_arrays), 
            ncols=1, 
            sharex=self.options["sharex"], 
            sharey=self.options["sharey"], 
            figsize=self.options["figsize"],
        )
        fig.suptitle(f"{self.options['title']}", y=1.0)
    
        # Get event and label colors
        if events:
            legend_labels, cmap = self._get_label_color(len(events), cmap=self.options["colormap"])
    
        if colors is None:
            if y_arrays[0].ndim == 2:
                colors = np.full((len(y_arrays), y_arrays[0].shape[0]), None)
            else:
                colors =  np.full((1, len(y_arrays)), None)
      
        if y_units is None:
            y_units = [None for ii in range(0, len(y_arrays))]

        if subtitles is None:
            subtitles = [list() for ii in range(0, len(y_arrays))]
    
        # Make 'axes' iterable (needed for 'zip')
        if not isinstance(axes, np.ndarray):
            axes = np.array([axes])
    
        # Plot each subplot row
        for ax, y_array, ylabel, color, y_unit, subtitle in zip(axes, y_arrays, ylabels, colors, y_units, subtitles):

            if y_array.ndim == 2:
                for row in range(0, y_array.shape[0]):
                    self.plot_subplot_row(
                            ax, 
                            x_array, 
                            y_array[row,:], 
                            xlabel, 
                            ylabel, 
                            x_unit=x_unit, 
                            y_unit=y_unit, 
                            color=color[row],
                            subtitle=subtitle,
                            options=self.options, 
                    )                                     

            else:
                self.plot_subplot_row(
                        ax, 
                        x_array, 
                        y_array, 
                        xlabel, 
                        ylabel, 
                        x_unit=x_unit, 
                        y_unit=y_unit, 
                        color=color,
                        subtitle=subtitle,
                        options=self.options, 
                ) 
    
            # Plot vertical line for events in each subplot
            if events:
                for idx, (label, entries) in enumerate(sorted(events.items())):
                    [ax.axvline(x=e, label=label, color=cmap(idx)) for e in entries]
    
        # Change tick labelsize
        if self.options["tick_labelsize"]:
            ax.tick_params(axis=self.options["tick_labelsize"][0], labelsize=self.options["tick_labelsize"][1])
    
        # Plot x-axis label only once below the last subplot row
        ax.set(xlabel=xlabel)
    
        # Plot event legend
        if events:
            self.options["legend_location"] = "bottom" if self.options["legend_location"] == None else self.options["legend_location"]
            self._plot_legend(legend_labels, labels=events.keys())
    
        # Rotates and right aligns the x labels, and moves the bottom of the axes up to make room for them
        if isinstance(x_array[0], datetime):
            fig.autofmt_xdate()
    
        # Automatically adjusts subplot params so that the subplot(s) fits in to the figure area
        fig.tight_layout()
    
        # Adjust plot axes (to place title correctly)
        fig.subplots_adjust(top=0.92)
    
        # Save plot as file or show it on console
        if self.options["plot_to"] == "console":
            plt.show()
        elif self.options["plot_to"] == "file":
            plt.savefig(figure_path, dpi=self.options["dpi"])
        else:
            log.fatal(f"Option <plot_to> is wrong with '{self.options['plot_to']}', expected 'console' or 'file'.")
    
        # Clear the current figure
        plt.clf()
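
A minimal standalone matplotlib sketch of the event handling above: each event becomes a labeled vertical line at an x-axis position (data and label are hypothetical):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3, 4], [0.10, 0.20, 0.15, 0.25])
for event in [1.5, 3.2]:  # hypothetical event positions on the x-axis
    ax.axvline(x=event, color="red", label="antenna change")
plt.show()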
Example #15
    def as_dataset(self) -> "Dataset":
        """Store Gipsy time dependent parameter data in a dataset

        Returns:
            Midgard Dataset where time dependent parameter data are stored with following fields:


       | Field               | Type              | Description                                                        |
       |---------------------|-------------------|--------------------------------------------------------------------|
       | receiver_clock      | numpy.ndarray     | Receiver clock parameter                                           |
       | satellite           | numpy.ndarray     | Satellite PRN number together with GNSS identifier (e.g. G07)      |
       | satellite_clock     | numpy.ndarray     | Satellite clock parameter                                          |
       | satellite_ant_pco   | PositionTable     | Satellite antenna phase center offset                              |
       | site_posvel         | PosVel            | Station coordinates and velocities                                 |
       | source_id           | numpy.ndarray     | Source ID                                                          |
       | station             | numpy.ndarray     | Station name list                                                  |
       | system              | numpy.ndarray     | GNSS identifier (e.g. G or E)                                      |
       | time                | Time              | Parameter time given as TimeTable object                           |
       | troposphere_zhd     | numpy.ndarray     | Zenith hydrostatic troposphere delay parameter                     |
       | troposphere_zwd     | numpy.ndarray     | Zenith wet troposphere delay parameter                             |
       | troposphere_ge      | numpy.ndarray     | Horizontal delay gradient in the East direction                    |
       | troposphere_gn      | numpy.ndarray     | Horizontal delay gradient in the North direction                   |
       
       The fields above are given for 'apriori', 'value' and 'sigma' Dataset collections.
        
        """
        # TODO: Handling of unit. Should be added to dataset fields.

        field = {
            # "Clk Bias" can be either a receiver or a satellite clock bias
            "Clk Bias": DatasetField(None, None, "float"),
            "Antennas Antenna1 MapCenterOffset All Z": DatasetField("satellite_ant_pco", "Satellite", "position"),
            "State Pos Z": DatasetField("site_posvel", "Station", "posvel"),
            "Source": DatasetField("source_id", "Source", "float"),
            "Trop GradEast": DatasetField("troposphere_ge", "Station", "float"),
            "Trop GradNorth": DatasetField("troposphere_gn", "Station", "float"),
            "Trop DryZ": DatasetField("troposphere_zhd", "Station", "float"),
            "Trop WetZ": DatasetField("troposphere_zwd", "Station", "float"),
        }

        not_used_parameter = [
            "Antennas Antenna1 MapCenterOffset All X",
            "Antennas Antenna1 MapCenterOffset All Y",
            "State Pos X",
            "State Pos Y",
            "State Vel X",
            "State Vel Y",
            "State Vel Z",
        ]

        dset = dataset.Dataset(num_obs=len(self.data["time_past_j2000"]))
        dset.meta.update(self.meta)

        # Note: GipsyX uses continuous seconds past Jan. 1, 2000 11:59:47 UTC time format in TDP files. That means,
        #       GipsyX does not follow convention of J2000:
        #           1.01.2000 12:00:00     TT  (TT = GipsyX(t) + 13s)
        #           1.01.2000 11:59:27.816 TAI (TAI = TT - 32.184s)
        #           1.01.2000 11:58:55.816 UTC (UTC = TAI + leap_seconds = TAI - 32s)
        #           1.01.2000 11:59:08.816 GPS (GPS = TAI - 19s)
        #
        #       Therefore Time object initialized with TT time scale has to be corrected about 13 seconds.
        #
        # TODO: Introduce j2000 = 2451545.0 as constant or unit?
        dset.add_time(
            "time",
            val=Time((self.data["time_past_j2000"] + 13.0) * Unit.second2day +
                     2451545.0,
                     scale="tt",
                     fmt="jd").gps,
        )
        keep_idx = np.ones(dset.num_obs, dtype=bool)
        collections = ["apriori", "value", "sigma"]

        # Loop over all existing parameter names
        for name in set(self.data["name"]):
            category, identifier, parameter = name.replace(".", " ").split(maxsplit=2)

            if parameter in not_used_parameter:
                continue

            # Add station and satellite field to Dataset by first occurrence
            if "Satellite" in category:
                if "satellite" not in dset.fields:
                    dset.add_text("satellite",
                                  val=np.repeat(None, dset.num_obs))
                    dset.add_text("system", val=np.repeat(None, dset.num_obs))

            if "Station" in category:
                if "station" not in dset.fields:
                    dset.add_text("station",
                                  val=np.repeat(identifier.lower(),
                                                dset.num_obs))

            if "Source" in category:
                idx = name == self.data["name"]

                for collection in collections:
                    field_name = f"{collection}.{field['Source'].name}"
                    dset.add_float(field_name,
                                   val=np.full(dset.num_obs, np.NaN))
                    dset[field_name][idx] = self.data["value"][idx]
                continue

            # Add parameter solution to Dataset
            if parameter in field.keys():

                idx = name == self.data["name"]

                if category == "Satellite":
                    sys = enums.get_value("gnss_3digit_id_to_id",
                                          identifier[0:3])
                    dset.system[idx] = sys
                    dset.satellite[idx] = sys + identifier[3:5]

                # Loop over 'apriori', 'value' and 'sigma' solutions, which are saved in separated Dataset collections
                for collection in collections:
                    field_name = f"{collection}.{field[parameter].name}"
                    log.debug(
                        f"Add dataset field '{field_name}' for parameter '{parameter}' and identifier '{identifier}'."
                    )

                    # Add float fields to Dataset
                    if field[parameter].dtype == "float":

                        # Note: "Clk Bias" parameter exists for receiver and satellite, therefore it has to be
                        #       distinguished based on the length of the 'identifier' (e.g. USNO or GPS64).
                        if parameter == "Clk Bias":
                            field_name = (f"{collection}.satellite_clock"
                                          if len(identifier) == 5 else
                                          f"{collection}.receiver_clock")

                        if field_name not in dset.fields:
                            dset.add_float(field_name,
                                           val=np.full(dset.num_obs, np.NaN))
                        dset[field_name][idx] = self.data[collection][idx]

                    # Add position fields to Dataset
                    elif field[parameter].dtype == "position":

                        if field_name not in dset.fields:
                            dset.add_position(field_name,
                                              time=dset.time,
                                              system="trs",
                                              val=np.full((dset.num_obs, 3),
                                                          np.NaN))

                        # Fill position field with data
                        tmp_sol = dict()

                        for item in [".X", ".Y", ".Z"]:
                            idx_item = name.replace(".Z",
                                                    item) == self.data["name"]
                            tmp_sol[item] = self.data["value"][idx_item]
                            # Note: Only .Z dataset indices are used for saving position field in Dataset. .X and .Y are
                            #       not necessary anymore and are removed from Dataset by using "keep_idx" variable.
                            if not item == ".Z":
                                keep_idx[idx_item] = False

                        dset[field_name][idx] = np.vstack(
                            (tmp_sol[".X"], tmp_sol[".Y"], tmp_sol[".Z"])).T

                    # Add posvel fields to Dataset
                    elif field[parameter].dtype == "posvel":

                        if field_name not in dset.fields:
                            dset.add_posvel(field_name,
                                            time=dset.time,
                                            system="trs",
                                            val=np.full((dset.num_obs, 6),
                                                        np.NaN))

                        # Fill position field with data
                        tmp_sol = dict()
                        for item in [
                                "State.Pos.X",
                                "State.Pos.Y",
                                "State.Pos.Z",
                                "State.Vel.X",
                                "State.Vel.Y",
                                "State.Vel.Z",
                        ]:
                            idx_item = name.replace("State.Pos.Z",
                                                    item) == self.data["name"]
                            tmp_sol[item] = self.data["value"][idx_item]
                            if not item == "State.Pos.Z":
                                keep_idx[idx_item] = False

                        dset[field_name][idx] = np.vstack((
                            tmp_sol["State.Pos.X"],
                            tmp_sol["State.Pos.Y"],
                            tmp_sol["State.Pos.Z"],
                            tmp_sol["State.Vel.X"],
                            tmp_sol["State.Vel.Y"],
                            tmp_sol["State.Vel.Z"],
                        )).T

            else:
                log.fatal(f"Parameter {parameter} is not defined.")

        dset.subset(keep_idx)  # Remove unnecessary entries (e.g. '.X' and '.Y')

        return dset
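
A standalone check of the time conversion in the comment above: GipsyX seconds are shifted by 13 s and anchored at JD 2451545.0 (assuming Unit.second2day = 1/86400):

second2day = 1.0 / 86400.0
time_past_j2000 = 0.0  # hypothetical TDP epoch value
jd_tt = (time_past_j2000 + 13.0) * second2day + 2451545.0  # 1 Jan 2000 12:00:13 in the TT scale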
Example #16
    def plot(
        self,
        x_arrays: List[np.ndarray],
        y_arrays: List[np.ndarray],
        xlabel: str = "",
        ylabel: str = "",
        x_unit: str = "",
        y_unit: str = "",
        colors: Union[List[str], None] = None,
        labels: Union[List[str], None] = None,
        figure_path: str = "plot.png",
        options: Dict[str, Any] = None,
        events: Union[Dict[str, List[Any]], None] = None,
    ) -> None:
        """Generate scatter/plot plot
    
        Several scatter or line plots can be drawn in one figure. This is defined via the number of given y_arrays.
        A histogram is only plotted for the last given y-array in "y_arrays".

        The following **options** can be overwritten:
    
        | Option             | Value            | Description                                                             |
        |--------------------|------------------|-------------------------------------------------------------------------|
        | colormap           | <type>           | Color map type for plotting either events or labels (e.g. viridis, jet, |
        |                    |                  | tab10, rainbow, hsv, plasma)                                            |
        | dpi                | <num>            | Resolution of file in dots per inch                                     |
        | figsize            | (num, num)       | Figure size given by (width, height) in inches                          |
        | fsize_subtitle     | <num>            | Fontsize of subplot title (statistical information)                     |
        | grid               | <True|False>     | Plot grid                                                               |
        | histogram          | <x, y>           | Plot x-axis histogram on top, y-axis histogram on right or for both     |
        |                    |                  | axis on scatter plot                                                    |
        | histogram_binwidth | <num>            | Histogram bin width                                                     |
        | histogram_size     | <num>            | Histogram y-axis size                                                   |
        | legend             | <True|False>     | Plot legend                                                             |
        | legend_location    | <right, bottom>  | Legend location                                                         |
        | legend_ncol        | <num>            | The number of legend columns                                            |
        | linestyle          | <style>          | Line style for plot type (e.g. 'solid', 'dashed')                       |
        | marker             | <'.'|'-'>        | Marker type                                                             |
        | plot_to            | <console|file>   | Plot figure on console or file                                          |
        | plot_type          | <scatter|plot>   | Choose either "scatter" or "plot" type                                  |
        | projection         | <type>           | Projection type of plot (e.g. 'polar')                                  |
        | reg_line           | <True|False>     | Regression line flag                                                    |
        | statistic          | <rms, mean, ...> | Plot statistical information. Following function can be defined: 'max', |
        |                    |                  | 'mean', 'min', 'rms', 'std', 'percentile' (see function _get_statistic  |
        |                    |                  | for more information)                                                   |
        | tick_labelsize     | <(axis, size)>   | Change label size of x- and y-axis tick labels. This can be done either |
        |                    |                  | for x-axis, y-axis or both axis via specifying 'x', 'y' or both'.       |
        | title              | <text>           | Main title of subplots                                                  |
        | xlim               | <[num, num]|auto>| Define x-axis limit by defining a list with [left, right] range. If     |
        |                    |                  | xlim=auto, then x-axis limit is automatically chosen                    |
        | xticks             | <[num, ...]>     | Define x-axis ticks by defining a list with ticks                       |
        | xticklabels        | <[text, ...]>    | Define x-axis ticks labels by defining a list with labels               |
        | ylim               | <[num, num]>     | Define y-axis limit by defining a list with [bottom, top] range         |
        | yticks             | <[num, ...]>     | Define y-axis ticks by defining a list with ticks                       |
        | yticklabels        | <[text, ...]>    | Define y-axis ticks labels by defining a list with labels               |
    
        Args:
           x_arrays:       List of arrays with x-axis data to plot.
           y_arrays:       List of arrays with y-axis data to plot.
           xlabel:         X-axis label.
           ylabel:         Y-axis label. 
           x_unit:         X-axis unit.
           y_unit:         Y-axis unit.
           colors:         List with colors for each plot. It should correspond to the given number of y-axis arrays.
                           Overwrites automatically chosen 'events'/'labels' colors.
           labels:         List with labels for each plot. It should correspond to the given number of y-axis arrays.
                           Label colors are automatically chosen based on 'colormap'. The 'colors' option overwrites
                           automatically chosen label colors. NOTE: 'labels' and 'events' can not be chosen together;
                           either 'labels' or 'events' should be defined.
           figure_path:    Figure path.
           options:        Dictionary with options, which overwrite default plot configuration.
           events:         Dictionary with event labels as keys and lists of events as values. The events have to be
                           related to x-axis data. Event colors are automatically chosen based on 'colormap'. NOTE:
                           'labels' and 'events' can not be chosen together; either 'labels' or 'events' should be
                           defined.
        """
    
        cmap = None

        # Overwrite options with argument definition
        if options:
            self.set_options(options)    
        original_histogram_option = self.options["histogram"]
    
        # Convert x_arrays, y_arrays to list
        x_arrays = [x_arrays] if not isinstance(x_arrays, list) else x_arrays
        y_arrays = [y_arrays] if not isinstance(y_arrays, list) else y_arrays
    
        # Generate scatter plot by using subplot function
        fig, ax = plt.subplots(
            nrows=1, ncols=1, figsize=self.options["figsize"], subplot_kw={"projection": self.options["projection"]}
        )
        fig.suptitle(f"{self.options['title']}", y=1.0)
    
        # Get event and label colors
        if events:
            legend_labels, cmap = self._get_label_color(len(events), colors, cmap=self.options["colormap"])
    
        if labels:
            legend_labels, cmap = self._get_label_color(len(labels), colors, cmap=self.options["colormap"])
    
        if colors is None:
            if cmap is None:
                colors = [None for ii in range(0, len(y_arrays))]
            else:
                colors = [cmap(ii) for ii in range(0, len(y_arrays))]
    
        # Plot several plots depending on number of y-arrays
        for idx, (x_array, y_array, color) in enumerate(zip(x_arrays, y_arrays, colors)):
    
            # Plot histogram only for the last scatter plot
            if self.options["histogram"]:
                if idx == (len(y_arrays) - 1):
                    self.options["histogram"] = original_histogram_option
                else:
                    self.options["histogram"] = ""
    
            # Plot figure
            self.subplot_row(
                ax, x_array, y_array, xlabel, ylabel, x_unit=x_unit, y_unit=y_unit, color=color
            )
    
            # Plot vertical line for events in plot
            if events:
                for idx, (label, entries) in enumerate(sorted(events.items())):
                    [ax.axvline(x=e, label=label, color=cmap(idx)) for e in entries]
    
        # Change tick labelsize
        if self.options["tick_labelsize"]:
            ax.tick_params(axis=self.options["tick_labelsize"][0], labelsize=self.options["tick_labelsize"][1])
    
        # Plot x-axis label
        ax.set(xlabel=xlabel)
    
        # Set polar plot self.options
        if self.options["projection"] == "polar":
            ax.set_theta_zero_location("N")  # sets 0(deg) to North
            ax.set_theta_direction(-1)  # sets plot clockwise
    
        # Plot legend
        if events:
            self.options["legend_location"] = "bottom" if self.options["legend_location"] is None else self.options["legend_location"]
            self._plot_legend(legend_labels, events.keys(), self.options)
    
        if labels:
            if self.options["projection"] == "polar":
                self.options["legend_location"] = "bottom" if self.options["legend_location"] == None else self.options["legend_location"]
            else:
                self.options["legend_location"] = "right" if self.options["legend_location"] == None else self.options["legend_location"]
    
            self._plot_legend(legend_labels, labels, self.options)
    
        # Rotates and right aligns the x labels, and moves the bottom of the axes up to make room for them
        if isinstance(x_arrays[0][0], datetime):
            fig.autofmt_xdate()
    
        # Automatically adjusts subplot params so that the subplot(s) fits in to the figure area
        fig.tight_layout()
    
        # Adjust plot axes (to place title correctly)
        if self.options["projection"] == "polar":
            fig.subplots_adjust(top=0.83)
        else:
            fig.subplots_adjust(top=0.92)
    
        # Save plot as file or show it on console
        if self.options["plot_to"] == "console":
            plt.show()
        elif self.options["plot_to"] == "file":
            plt.savefig(figure_path, dpi=self.options["dpi"])
        else:
            log.fatal(f"Option <plot_to> is wrong with '{self.options['plot_to']}', expected 'console' or 'file'.")
    
        # Clear the current figure
        plt.clf()
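
A minimal standalone sketch of the polar-projection settings applied above:

import matplotlib.pyplot as plt

fig, ax = plt.subplots(subplot_kw={"projection": "polar"})
ax.set_theta_zero_location("N")  # put 0 degrees at North
ax.set_theta_direction(-1)       # let angles increase clockwise
plt.show()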
Example #17
 def plot_bar_dataframe_columns(
     self,
     df: "Dataframe",
     column: str,
     path: PosixPath,
     xlabel: str = "",
     ylabel: str = "",
     label: str = "label",
     colors: Union[List[str], None] = None,
     options: Union[Dict[str, Any], None] = None,
 ) -> None:
     """Generate bar plot of given dataframe columns
 
     If a 'label' column is given in the DataFrame (as 'df.label'), then the bars are color coded based on the defined
     labels. In addition a legend is added with information about the labels.
 
     The following **options** can be overwritten:
 
     | Option         | Value            | Description                                                                |
     |----------------|------------------|----------------------------------------------------------------------------|
     | colormap       | <type>           | Color map type for plotting labels (e.g. viridis, jet, tab10, rainbow,     |
     |                |                  | hsv, plasma)                                                               |
     | dpi            | <num>            | Resolution of file in dots per inch                                        |
     | figsize        | (num, num)       | Figure size                                                                |
     | fontsize       | <num>            | Fontsize of x- and y-axis                                                  |
     | legend         | <True|False>     | Plot legend                                                                |
     | legend_location| <right, bottom>  | Legend location                                                            |
     | legend_ncol    | <num>            | The number of legend columns                                               |
     | plot_to        | <console|file>   | Plot figure on console or file                                             |
 
     Args:
        df:          DataFrame with data to plot.
        column:      DataFrame column to plot.
        path:        Figure path.
        xlabel:      x-axis label.
        ylabel:      y-axis label.
        label:       Dataframe column, which should be used as label.
        colors:      List with colors for defined label in "label" column. This option overwrites automatically chosen 
                     colors.
        options:     Dictionary with options, which overwrite default plot configuration.
     """
    
     # Overwrite options with argument definition
     if options:
         self.set_options(options)   
 
     # Assign to each label a color
     if label in df.columns:
         legend_labels, cmap = self._get_label_color(len(set(df[label])), colors=colors, cmap=self.options["colormap"])
 
         colors_dict = dict()
         for idx, label_ in enumerate(self._ordered_set(df[label])):
             colors_dict.update({label_: cmap(idx)})
         color = df[label].apply(lambda x: colors_dict[x])
     else:
         color = "steelblue"
 
     # Define figure size
     if "figsize" not in self.options.keys():
         fig_width = len(df.index) / 3 if len(df.index) > 30 else 6.4
         fig_height = fig_width / 1.33
         self.options["figsize"] = tuple((fig_width, fig_height))
 
     # Generate bar plot
     ax = df[column].plot(kind="bar", color=color, width=0.8, figsize=self.options["figsize"])
     # TODO
     # color=['green', 'red', 'yellow', 'blue', 'brown']
     # df_to_plot = df[column] if column else df
     # ax = df_to_plot.plot(kind="bar", color=color, width=0.8, figsize=self.options["figsize"])
     ax.set_xlabel(xlabel, fontsize=self.options["fontsize"])
     ax.set_ylabel(ylabel, fontsize=self.options["fontsize"])
 
     # Make legend
     if label in df.columns and self.options["legend"]:
         self.options["legend_location"] = "right" if self.options["legend_location"] == None else self.options["legend_location"]
         self._plot_legend(legend_labels, self._ordered_set(df[label]), self.options)
 
      # Automatically adjust subplot params so that the plot fits into the figure area
     plt.tight_layout()
 
     # Save plot as file or show it on console
     if self.options["plot_to"] == "console":
         plt.show()
     elif self.options["plot_to"] == "file":
         plt.savefig(path, dpi=self.options["dpi"])
     else:
         log.fatal(f"Option <plot_to> is wrong with '{self.options['plot_to']}', expected 'console' or 'file'.")
 
     # Clear the current figure
     plt.clf()
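A usage sketch (assuming the class above is instantiated as `plot` and pandas is installed); station names, values and labels are made up for illustration:

import pandas as pd
from pathlib import Path

# Hypothetical dataframe with a 'label' column used for color coding
df = pd.DataFrame(
    {"rms": [0.012, 0.015, 0.009], "label": ["EUR", "EUR", "SCA"]},
    index=["zimm", "wtzr", "onsa"],
)

plot.plot_bar_dataframe_columns(
    df,
    column="rms",
    path=Path("rms_per_station.png"),
    ylabel="RMS [m]",
    options={"plot_to": "file", "legend": True},
)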
Exemplo n.º 18
0
    def download_xml(
        self,
        latitude: float,
        longitude: float,
        from_date: datetime,
        to_date: datetime,
        url: Optional[str] = None,
        reference_level: Optional[str] = "chart_datum",
    ) -> None:
        """Download XML file from url

        Args:
            latitude:        Latitude of position in [deg]
            longitude:       Longitude of position in [deg]
            from_date:       Starting date of data period
            to_date:         Ending date of data period
            url:             URL to download from. If None, self.URL is used instead.
            reference_level: Reference level, which can be 'chart_datum', 'mean_sea_level' or 'nn1954'.
        """
        reference_level_def = {
            "chart_datum": "cd",
            "mean_sea_level": "msl",
            "nn1954": "nn1954",
        }

        # Get URL
        url = self.URL if url is None else url

        try:
            args = dict(
                lat=latitude,
                lon=longitude,
                fromtime=from_date.strftime("%Y-%m-%dT%H:%M"),
                totime=to_date.strftime("%Y-%m-%dT%H:%M"),
                datatype="all",
                refcode=reference_level_def[reference_level],
                place="",
                file="",
                lang="nn",
                interval=10,
                dst=0,  # summer time is not used
                tzone=0,  # UTC
                tide_request="locationdata",
            )
        except AttributeError:
            log.fatal(
                "Following arguments has to be set: latitude, longitude, from_date and to_date"
            )

        url = f"{url}?{'&'.join([f'{k}={v}' for k, v in args.items()])}"
        print(f"Downloading {url} to {self.file_path}")

        # Read data from API to file path
        with files.open(self.file_path, mode="wb") as fid:
            c = pycurl.Curl()
            c.setopt(c.URL, url)
            c.setopt(c.WRITEDATA, fid)
            try:
                c.perform()
            finally:
                c.close()

        self.meta["__url__"] = url
Exemplo n.º 19
0
    def _parse_observation(self, line: Dict[str, str],
                           cache: Dict[str, Any]) -> None:
        """Parse observation record of RINEX file
        """
        # Ignore epochs based on sampling rate
        sec = cache["obs_sec"]
        if sec is None:
            return

        if cache["num_sat"] != cache["len_sat_list"]:
            log.fatal(
                f"Number of satellites ({cache['num_sat']}) does not agree with number of satellites "
                f"in satellite PRN list ({cache['len_sat_list']}) in observation epoch {cache['obs_time']}"
            )

        # Read line with maximal 5 observations
        for field in sorted([f for f in line if f.startswith("obs_")]):

            # Fit length of observation (should always be 16 characters long)
            #
            # NOTE: This is necessary, because missing observations are written as 0.0 or BLANK in RINEX format and loss
            #       of lock indicator (LLI) and signal strength can be blank. In this case the length of observation
            #       field is fitted to 16 characters as defined in the RINEX 2.11 format description
            #
            #       Each observation type is saved in a Dataset field. The observation type fields have the same length
            #       to be consistent with the time, system or satellite Dataset field. The problem is that some
            #       observation types are not observed for a certain satellite system, but these observations are
            #       included with zero values in the observation type field.
            line[field] = line[field].ljust(16)

            cache.setdefault("obs_values",
                             list()).append(_float(line[field][0:14]))
            cache.setdefault("cycle_slip",
                             list()).append(_float(line[field][14:15]))
            cache.setdefault("signal_strength",
                             list()).append(_float(line[field][15:16]))

        # Save all observation type entries for given satellite (all observation for a given epoch and satellite are
        # read)
        if len(cache["obs_values"]) >= self.meta["num_obstypes"]:

            sat = cache["sat_list"].pop(0)
            sys = sat[0]
            sat_num = int(sat[1:])
            for obs_type, obs, cycle_slip, signal_strength in zip(
                    self.meta["obstypes"], cache["obs_values"],
                    cache["cycle_slip"], cache["signal_strength"]):
                self.data["obs"][obs_type].append(obs)
                self.data["cycle_slip"][obs_type].append(cycle_slip)
                self.data["signal_strength"][obs_type].append(signal_strength)
            del cache["obs_values"]
            del cache["cycle_slip"]
            del cache["signal_strength"]

            self.data.setdefault("time", list()).append(cache["obs_time"])
            self.data.setdefault("epoch_flag",
                                 list()).append(cache["epoch_flag"])
            self.data.setdefault("rcv_clk_offset",
                                 list()).append(cache["rcv_clk_offset"])

            obs = {
                "station": self.meta["marker_name"].lower(),
                "site_id": self.meta["marker_name"].upper(),
                "system": sys,
                "satellite": sat,
                "satnum": sat_num,
            }
            for field, value in obs.items():
                self.data.setdefault("text", dict()).setdefault(field, list()).append(value)
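The observation parsing above relies on a _float helper that tolerates the blank fields produced by the 16-character padding; a minimal sketch of such a helper (assumed behavior, the actual implementation may differ) is:

def _float(value: str) -> float:
    """Convert a fixed-width RINEX field to float, returning NaN for blank or non-numeric fields (assumption)"""
    try:
        return float(value)
    except ValueError:
        return float("nan")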
Exemplo n.º 20
0
    def _parse_observation_epoch(self, line: Dict[str, str],
                                 cache: Dict[str, Any]) -> None:
        """Parse observation epoch information of RINEX observation record

        Only the last two digits of the year are given in the observation epoch. Therefore, it is necessary to get
        the complete 4-digit year based on the `TIME OF FIRST OBS` and `TIME OF LAST OBS` RINEX header entries.

        In addition, the RINEX observations are decimated based on the given sampling rate.
        """
        # Reject empty lines
        line["year"] = line["year"].strip()
        if (not line["year"].isnumeric()) and (not line["sat_list"]):
            return

        # Reject comment lines
        if line["sat_list"][28:29].isalpha():
            return

        # Read observation epoch entry
        if line["year"]:

            # Get correct 4-digit year (in observation epoch only 2-digit year is given)
            first_obs_year = self.meta["time_first_obs"][0:4]
            year = int(first_obs_year[0:2] + line["year"].zfill(2))

            # Check if 'year' is unique in the complete RINEX file
            if "time_last_obs" in self.meta:
                last_obs_year = self.meta["time_last_obs"][0:4]

                if first_obs_year != last_obs_year:
                    log.fatal(
                        f"Different years are given for the first and last observation in the RINEX file "
                        f"({first_obs_year} and {last_obs_year}). The RINEX routine has to be improved."
                    )

            cache["sat_list"] = list()
            cache["obs_time"] = "{year}-{month:02d}-{day:02d}T{hour:02d}:{minute:02d}:{second:010.7f}".format(
                year=year,
                month=int(line["month"]),
                day=int(line["day"]),
                hour=int(line["hour"]),
                minute=int(line["minute"]),
                second=float(line["second"]),
            )
            cache["obs_sec"] = (int(line["hour"]) * Unit.hour2second +
                                int(line["minute"]) * Unit.minute2second +
                                float(line["second"]))
            cache["epoch_flag"] = int(line["epoch_flag"])
            cache["rcv_clk_offset"] = _float(line["rcv_clk_offset"])

            # Decimate RINEX observation defined by sampling rate [seconds]
            if self.sampling_rate:
                if cache["obs_sec"] % self.sampling_rate != 0:
                    cache["obs_sec"] = None  # Ignore epoch

            cache["num_sat"] = int(line["num_sat"])

        if (line["epoch_flag"].strip() != "0") and line["epoch_flag"].strip():
            log.fatal(
                f"Epoch {cache['obs_time']} is not ok, which is indicated by epoch flag {line['epoch_flag']}.\n"
                f"TODO: How should it be handled in Where?"
            )  # TODO: Handle flagged epochs

        # Generate satellite list for given epoch
        for i in range(0, len(line["sat_list"]), 3):
            sat = line["sat_list"][i:i + 3].rstrip()
            if sat:
                sat = sat[0].replace(" ", "G") + sat[1].replace(
                    " ", "0") + sat[2]  # Blank satellite system
                cache["sat_list"].append(sat)  # identifier indicates GPS ('G')

        cache["len_sat_list"] = len(cache["sat_list"])