Example #1
    def _parse_observation(self, line: Dict[str, str],
                           cache: Dict[str, Any]) -> None:
        """Parse observations of COST records
        """

        # Skip slant sample lines
        if not line["hour"]:
            if float(line["minute"]) > 0:
                log.debug("Parsing of slant sample data is not implemented.")
            return

        if line["hour"][0].isalpha():
            return

        # Save data in cache
        for key, value in line.items():
            value = float(value) if value.replace('.', '').replace(
                '-', '').isnumeric() else value
            if key in UNIT_DEF.keys():
                value = value * Unit(UNIT_DEF[key].from_, UNIT_DEF[key].to_)

            cache.setdefault(f"data_{key}", list()).append(value)

        cache.setdefault(f"data_time", list()).append(
            datetime(
                cache["date_data"].year,
                cache["date_data"].month,
                cache["date_data"].day,
                int(line["hour"]),
                int(line["minute"]),
                int(line["second"]),
            ))
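The unit handling in the loop above assumes a lookup table UNIT_DEF whose entries carry a from_ and a to_ unit, and that Unit(from_, to_) evaluates to a scalar conversion factor. A hypothetical sketch of such a table (field names and units are illustrative, not taken from the source):

from collections import namedtuple

# Hypothetical unit-definition table: maps a COST field name to the unit used
# in the file ("from_") and the unit wanted in the cache ("to_").
UnitDef = namedtuple("UnitDef", ["from_", "to_"])
UNIT_DEF = {
    "pressure": UnitDef(from_="hectopascal", to_="pascal"),
    "zenith_total_delay": UnitDef(from_="millimeter", to_="meter"),
}

# Each numeric value is then scaled by the factor Unit(from_, to_),
# e.g. Unit("millimeter", "meter") is assumed to give 0.001.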
Example #2
    def read(cls, file_path: Union[str, pathlib.Path]) -> "Dataset":
        """Read a dataset from file"""

        log.debug(f"Read dataset from {file_path}")

        # Dictionary to keep track of references in the data structure
        # key: field_name, value: object (TimeArray, PositionArray, etc)
        memo = {}

        # Read fields from file
        with h5py.File(file_path, mode="r") as h5_file:
            num_obs = h5_file.attrs["num_obs"]
            dset = cls(num_obs=num_obs)
            dset.vars.update(_h5utils.decode_h5attr(h5_file.attrs["vars"]))

            # Read fields
            for fieldname, fieldtype in _h5utils.decode_h5attr(
                    h5_file.attrs["fields"]).items():
                field = fieldtypes.function(fieldtype).read(
                    h5_file[fieldname], memo)
                dset._fields[fieldname] = field
                memo[fieldname] = field.data

            # Read meta
            dset.meta.read(h5_file["__meta__"])
        return dset
Example #3
def get_field(dset: "Dataset", field: str, attrs: Tuple[str], unit: str) -> np.ndarray:
    """Get field values of a Dataset specified by the field attributes

    If necessary, the unit of the data field is converted to the defined 'output' unit.

    Args:
        dset:     Dataset, a dataset containing the data.
        field:    Field name.
        attrs:    Field attributes (e.g. for Time object: (<scale>, <time format>)).
        unit:     Unit used for output.

    Returns:
        Array with Dataset field values
    """
    f = dset[field]
    for attr in attrs:
        f = getattr(f, attr)
        
    # Convert 'unit' if necessary
    if unit:
        field_attrs = field if len(attrs) == 0 else f"{field}.{'.'.join(attrs)}"
        
        try:
            field_unit = dset.unit(field_attrs)[0]
        except (exceptions.UnitError, TypeError):
            log.debug(f"Skip unit conversion for field '{field_attrs}'.")
            return f # Skip unit conversion for text fields, which do not have a unit.
        
        try:
            log.debug(f"Convert dataset field {field} from unit {field_unit} to {unit}.")
            f = f * Unit(field_unit).to(unit).m
        except exceptions.UnitError:
            log.warn(f"Cannot convert from '{field_unit}' to '{unit}' for field {field}.")

    return f
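A hedged usage sketch (dataset, field and attribute names are hypothetical): attribute chaining lets a nested value such as dset.site_pos.trs.x be extracted and rescaled in one call.

# Hypothetical call: 'dset' is assumed to be a Dataset with a position field
# 'site_pos' given in meters; the attribute chain picks dset["site_pos"].trs.x
# and converts the values to kilometers.
x_km = get_field(dset, field="site_pos", attrs=("trs", "x"), unit="kilometer")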
Example #4
    def write(self,
              file_path: Union[str, pathlib.Path],
              write_level: Optional[enums.WriteLevel] = None) -> None:
        """Write a dataset to file"""
        write_level = (min(enums.get_enum("write_level"))
                       if write_level is None else enums.get_value(
                           "write_level", write_level))
        log.debug(f"Write dataset to {file_path} with {write_level}")

        # Make sure directory exists
        file_path = pathlib.Path(file_path).resolve()
        file_path.parent.mkdir(parents=True, exist_ok=True)

        memo = self._construct_memo()
        with h5py.File(file_path, mode="w") as h5_file:

            # Write each field
            for field_name, field in self._fields.items():
                if field.write_level >= write_level:
                    h5_group = h5_file.create_group(field_name)
                    field.write(h5_group, memo, write_level=write_level)

            # Write meta-information
            self.meta.write(h5_file.create_group("__meta__"))

            # Write information about dataset
            fields = {
                fn: f.fieldtype
                for fn, f in self._fields.items()
                if f.write_level >= write_level
            }
            h5_file.attrs["fields"] = _h5utils.encode_h5attr(fields)
            h5_file.attrs["num_obs"] = self.num_obs
            h5_file.attrs["vars"] = _h5utils.encode_h5attr(self.vars)
            h5_file.attrs["version"] = self.version
Example #5
def get_paths_with_label(file_path: Union[str, pathlib.Path],
                         label_pattern: str) -> List[pathlib.Path]:
    """Find all paths with the given label

    Args:
        file_path:      Path to dependency file.
        label_pattern:  String with label or regular expression (e.g. 'gnss_rinex_nav_[MGE]' or 'gnss_rinex_nav_.').

    Returns:
        List:  List of file paths.
    """
    label_re = re.compile(
        f"^{label_pattern}$")  # ^ and $ are used to match the whole string

    # Make sure dependency file exists
    file_path = pathlib.Path(file_path)
    if not file_path.exists():
        log.debug(f"Dependency file {file_path} does not exist")
        return []

    # Find dependencies with the given label
    dependencies = Configuration.read_from_file("dependencies", file_path)
    paths = list()
    for file_path in dependencies.section_names:
        label = dependencies[file_path].label.str
        if label_re.match(label):
            paths.append(pathlib.Path(file_path))
    return paths
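The label matching assumes a dependency file readable by Configuration, with one section per tracked file and a label key in each section. A hypothetical layout and call (paths, labels and the file name are invented for illustration):

# Hypothetical dependency file (INI-style), one section per tracked file:
#
#   [/data/brdc0010.21n]
#   checksum = 2021-01-01 12:00:00
#   label    = gnss_rinex_nav_G
#
#   [/data/brdm0010.21p]
#   checksum = 2021-01-01 12:00:00
#   label    = gnss_rinex_nav_M
#
# Both entries match the regular expression over the label:
paths = get_paths_with_label("dependencies.txt", label_pattern="gnss_rinex_nav_[MGE]")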
Example #6
def changed(file_path: Union[str, pathlib.Path],
            fast_check: bool = True) -> bool:
    """Check if the dependencies have changed

    Returns True if any of the files listed in the dependency file have
    changed, or if the dependency file itself does not exist.

    Args:
        file_path:   Path to dependency file.
        fast_check:  Fast check uses timestamps, slow check uses md5 checksums.

    Returns:
        True if any file has changed or if the dependency file does not exist, False otherwise.
    """
    # Make sure dependency file exists
    file_path = pathlib.Path(file_path)
    if not file_path.exists():
        log.debug(f"Dependency file {file_path} does not exist")
        return True

    # Check if any dependencies have changed
    dependencies = Configuration.read_from_file("dependencies", file_path)
    for file_path in dependencies.section_names:
        previous_checksum = dependencies[file_path].checksum.str
        current_checksum = _file_info(file_path,
                                      fast_check=fast_check)["checksum"]
        if current_checksum != previous_checksum:
            log.debug(
                f"Dependency {file_path} changed from {previous_checksum} to {current_checksum}"
            )
            return True

    return False
Example #7
def compute_dops(az: np.ndarray, el: np.ndarray) -> Tuple[np.ndarray, ...]:
    """Compute dilution of precision (DOP) for an observation epoch

    Note that the weight of the observations is not considered: the observation weight matrix is assumed to be an
    identity matrix. The cofactor matrix Q is related to a topocentric coordinate system (north, east, up):

                | q_nn q_ne q_nu q_nt |
            Q = | q_ne q_ee q_eu q_et |
                | q_nu q_eu q_uu q_ut |
                | q_nt q_et q_ut q_tt |

    Reference: Banerjee, P. and Bose, A. (1996): "Evaluation of GPS PDOP from elevation and azimuth of satellites",
        Indian Journal of Radio & Space Physics, Vol. 25, April 1996, pp. 110-113

    Args:
        az:  Satellite azimuth angle (radians)
        el:  Satellite elevation angle (radians)

    Returns:
        Tuple with GDOP, PDOP, TDOP, HDOP and VDOP
    """

    hdop = np.array([0.0])
    vdop = np.array([0.0])

    # Construct the design matrix H based on observed & valid satellites
    #
    #       | -cos(e1) * cos(a1)   -cos(e1) * sin(a1)   -sin(e1)   1  |
    #       | -cos(e2) * cos(a2)   -cos(e2) * sin(a2)   -sin(e2)   1  |
    #       | -cos(e3) * cos(a3)   -cos(e3) * sin(a3)   -sin(e3)   1  |
    #  H =  | -cos(e4) * cos(a4)   -cos(e4) * sin(a4)   -sin(e4)   1  |
    #       |         ..                   ..              ..     ..  |
    #       | -cos(en) * cos(an)   -cos(en) * sin(an)   -sin(en)   1  |
    # H = np.stack((np.cos(el) * np.sin(az), np.cos(el) * np.cos(az), np.sin(el), np.ones(el.shape)), axis=1)
    H = np.stack(
        (-np.cos(el) * np.cos(az), -np.cos(el) * np.sin(az), -np.sin(el),
         np.ones(el.shape)),
        axis=1)
    Q = H.T @ H  # H^t*H

    # User info
    log.debug("Q=H^t*H::")
    log.debug(Q)

    # Check if the inverse of Q exists by computing the condition number (or by computing the determinant)
    if not np.isfinite(np.linalg.cond(Q)):
        log.warn(
            "Could not compute the inverse of the co-factor matrix Q (DOP determination)."
        )
        return None, None, None, None, None

    else:
        Q = np.linalg.inv(Q)  # (H^t*H)^{-1}
        gdop = np.sqrt(np.trace(Q))  # GDOP
        pdop = np.sqrt(np.trace(Q[0:3, 0:3]))  # PDOP
        hdop = np.sqrt(np.trace(Q[0:2, 0:2]))  # HDOP
        vdop = np.sqrt(Q[2, 2])  # VDOP
        tdop = np.sqrt(Q[3, 3])  # TDOP

    return gdop, pdop, tdop, hdop, vdop
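A minimal usage sketch with four hypothetical satellites (angles in radians), assuming compute_dops and its numpy/log dependencies are importable as above. By construction GDOP^2 = PDOP^2 + TDOP^2 and PDOP^2 = HDOP^2 + VDOP^2, which makes a handy sanity check:

import numpy as np

# Four hypothetical satellites: azimuth and elevation in radians
az = np.radians([30.0, 120.0, 210.0, 300.0])
el = np.radians([50.0, 30.0, 60.0, 20.0])

gdop, pdop, tdop, hdop, vdop = compute_dops(az, el)

# Sanity checks that follow directly from the trace decomposition of Q
assert np.isclose(gdop, np.sqrt(pdop**2 + tdop**2))
assert np.isclose(pdop, np.sqrt(hdop**2 + vdop**2))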
Example #8
def init(file_path: Union[str, pathlib.Path], fast_check: bool = True) -> None:
    """Start a clean list of dependencies

    The file_path is to the file in which dependencies are stored. This is
    cached, so after init() is run, the other functions do not need to specify
    the file_path.

    Args:
        file_path:   Path to dependency file.
        fast_check:  Fast check uses timestamps, slow check uses md5 checksums.
    """
    file_path = pathlib.Path(file_path)

    # Store current dependencies to disk
    write()

    # Update and cache variables
    _DEPENDENCY_CACHE.clear()
    _DEPENDENCY_CACHE["file_path"] = file_path
    _DEPENDENCY_CACHE["fast_check"] = fast_check

    # Delete any existing dependency file
    try:
        file_path.unlink()
        log.debug(f"Removing old dependency file {file_path}")
    except FileNotFoundError:
        pass  # If dependency file does not exist, we don't do anything

    # Register _write in case program exits without writing all dependencies to disk
    atexit.register(_write)
Example #9
    def _unit_conversion(self) -> None:
        """Carrier-phase and Doppler observations are converted to meter
        
        Carrier-phase observations are given in cycles and Doppler observation in Hertz in RINEX observation file. 
        Exception: unit conversion for GLONASS observations is not implemented.
        """
        if self.convert_unit:

            for sys in set(self.data["text"]["system"]):
                if sys == "R":  # Frequency handling for GLONASS satellites is not implemented.
                    continue

                idx = sys == np.array(self.data["text"]["system"])

                for obstype in self.meta["obstypes"][sys]:

                    if obstype[0] not in ["L", "D"]:  # Skip pseudorange and SNR observations
                        continue
                    log.debug(
                        f"Conversion from observation type {obstype} (for GNSS: '{sys}') to meter."
                    )
                    self.data["obs"][obstype] = np.array(
                        self.data["obs"][obstype])
                    self.data["obs"][obstype][idx] = (
                        constant.c / obstype_to_freq(sys, obstype) *
                        self.data["obs"][obstype][idx])
Example #10
def get_field_by_attrs(dset: "Dataset", attrs: Tuple[str],
                       unit: str) -> np.ndarray:
    """Get field values of a Dataset specified by the field attributes

    If necessary, the unit of the data field is converted to the defined 'output' unit.

    Args:
        dset:     Dataset, a dataset containing the data.
        attrs:    Field attributes (e.g. for Time object: (<scale>, <time format>)).
        unit:     Unit used for output.

    Returns:
        Array with Dataset field values
    """
    f = dset
    for attr in attrs:
        f = getattr(f, attr)

    # Convert 'unit' if necessary
    if unit:
        field = f"{'.'.join(attrs)}"
        if dset.unit(field):
            field_unit = dset.unit(field)[0]
            try:
                log.debug(
                    f"Convert dataset field {field} from unit {field_unit} to {unit}."
                )
                f = f * Unit(field_unit).to(unit).m
            except exceptions.UnitError:
                log.warn(f"Cannot convert from '{field_unit}' to '{unit}'.")

    return f
Example #11
def register(func: Callable,
             name: Optional[str] = None,
             sort_value: int = 0) -> Callable:
    """Decorator used to register a plug-in

    Plug-ins are registered based on the name of the module (file) they are
    defined in, as well as the package (directory) which contains
    them. Typically all plug-ins of a given type are collected in a package,
    e.g. models, techniques, parsers, etc. The path to the source code file is
    also stored. This is used to be able to add the source code as a dependency
    file when the plug-in is called.

    If `name` is given, the plug-in is registered based on this name instead of
    the name of the module. The name of the module is still registered as a
    part that can be used to distinguish between similar plug-ins in different
    files (see for instance how `session` is used in `midgard.pipelines`).

    Args:
        func:        The function that is being registered.
        name:        Alternative name of plug-in. Used by `register_named`.
        sort_value:  The value used when sorting plug-ins. Used by `register_ordered`.

    Returns:
        The function that is being registered.
    """
    # Get information from the function being registered
    package_name, _, plugin_name = func.__module__.rpartition(".")
    package_name = _PLUGINS["__aliases__"].get(package_name, package_name)
    file_path = pathlib.Path(sys.modules[func.__module__].__file__)

    # Store Plugin-object in _PLUGINS dictionary
    _PLUGINS["__packages__"].setdefault(package_name, [package_name])
    plugin_info = _PLUGINS.setdefault(package_name,
                                      dict()).setdefault(plugin_name, dict())
    if name is None:
        name = func.__name__  # Name of function is used as default name
        plugin_info.setdefault("__parts__", list()).append(
            name)  # Only unnamed parts are added to list

    plugin = Plugin(f"{plugin_name}.{name}", func, file_path, sort_value)
    plugin_info[name] = plugin
    log.debug(
        f"Registering {plugin.name} ({plugin.file_path}) as a {package_name}-plugin"
    )

    # Add first registered unnamed part as default
    if "__parts__" in plugin_info:
        plugin_info["__default__"] = plugin_info[plugin_info["__parts__"][0]]

    return func
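A minimal sketch of how a plug-in module might use this decorator (the package, module and function names are invented, and the import path is assumed): placing @register on a function in a module inside a plug-in package registers it under that package and module name.

# Hypothetical plug-in module, e.g. midgard/parsers/my_parser.py
from midgard.dev import plugins


@plugins.register
def my_parser(file_path):
    """Parse a made-up file format"""
    ...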
Example #12
def _log_file_open(file_path, description="", mode="r"):
    """Write a message to the log about a file being opened

    Args:
        file_path (Path/String):  The path to file being opened.
        description (String):     Description used for logging.
        mode (String):            Same as for the built-in open, usually 'r' or 'w'.
    """
    # Add space at end to handle empty descriptions
    description += " " if description else ""

    # Pick a log message based on the mode being used to open the file
    log_text = f"Read {description}from {file_path}"
    if "w" in mode:
        log_text = f"Write {description}to {file_path}"
        if file_path.is_file():
            log_text = f"Overwrite {description}on {file_path}"
    if "a" in mode:
        log_text = f"Append {description}to {file_path}"
    log.debug(log_text)
Example #13
    def download_html(self, url: Optional[str] = None) -> None:
        """Download html file from url

        TODO: Move this to files/url.py

        Args:
            url:  URL to download from, if None use self.URL instead.
        """
        url = self.URL if url is None else url
        log.debug(f"Downloading {url} to {self.file_path}")
        with open(self.file_path, mode="wb") as fid:
            c = pycurl.Curl()
            c.setopt(c.URL, url)
            c.setopt(c.WRITEDATA, fid)
            try:
                c.perform()
            finally:
                c.close()

        self.meta["__url__"] = url
Example #14
def sol_validation(residuals: np.ndarray,
                   alpha_siglev: float,
                   n_params: int = 4) -> bool:
    """Validating the GNSS solution is carried out using Chi-square test

    Use Chi-square test for outlier detection and rejection. 

    Args:
        residuals:      Postfit residuals for all satellites in each epoch 
        alpha_siglev:   Alpha significance level
        n_params:       Number of parameters (states), normally 4 parameters for station coordinates and receiver clock

    Returns:
        Array containing False for observations to throw away.
    """

    # Regular checks
    num_obs = len(residuals)
    df = num_obs - n_params - 1
    if df < 0:
        log.warn(
            f"sol_validattion():: degree of freedom < 0 (df = {df}) --> TEST NOT PASSED"
        )
        return False

    # Chi-square validation of residuals
    vv = np.dot(residuals, residuals)  # sum (v(i) * v(i))
    chi_sqr = stats.chi2.ppf(1 - alpha_siglev, df=df)

    if vv > chi_sqr:
        log.debug(
            f"sol_validattion():: number of valid obs={num_obs:03} vv={vv:.2f} chi-square value={chi_sqr:.2f}--> TEST NOT PASSED"
        )
        return False

    else:
        log.debug(
            f"sol_validation():: number of valid obs={num_obs:02} vv={vv:.2f} < chi-square value={chi_sqr:.2f} --> TEST PASSED for alpha significance level= {(1.0-alpha_siglev)*100:.2f} %"
        )
        return True
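A minimal sketch of the Chi-square check with made-up residuals, assuming sol_validation and its numpy/scipy/log dependencies are importable as above. The quadratic form v'v is compared against the chi2 percent-point value for num_obs - n_params - 1 degrees of freedom:

import numpy as np

# Hypothetical postfit residuals (meters) for 8 satellites in one epoch
residuals = np.array([0.8, -1.2, 0.5, -0.3, 1.1, -0.7, 0.2, -0.9])

# Validate at a 5 % significance level with the default 4 estimated parameters
passed = sol_validation(residuals, alpha_siglev=0.05, n_params=4)
print("Solution accepted" if passed else "Solution rejected")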
Example #15
    def get_pco_rcv(
            self, 
            system: str,
            frequency: str,
            antenna: str,
            radome: str = "NONE",
    ) -> Union[None, List[float]]:
        """Get antenna PCO of receiver in topocentric (local) reference system

        Args:
            system:     GNSS identifier (e.g. E=Galileo, G=GPS, ...)
            frequency:  GNSS frequency related to given 'system' argument (e.g. E1, E5a, L1)
            antenna:    Antenna type of receiver.
            radome:     4-character radome type name of the antenna

        Returns:
            Antenna PCO of receiver in topocentric (local) reference system or None if no entries could be found
        """
        antex_freq = self._gnss_to_antex_freq(system, frequency)
        if antex_freq is None:
            return None
        
        antenna_type = f"{antenna:15s} {radome}"
        if antenna_type not in self.data.keys():
            raise ValueError(f"Antenna type {antenna_type!r} is not available in ANTEX file {self.file_path}.")
        
        if antex_freq not in self.data[antenna_type].keys():
            frequencies = set(self.data[antenna_type].keys()) - {"azimuth", "elevation"}
            raise ValueError(f"Frequency {system}:{frequency} (ANTEX: {antex_freq}) is not available for antenna "
                      f"{antenna_type!r} in ANTEX file {self.file_path}. The following ANTEX frequencies are "
                      f"available: {', '.join(frequencies)}.")
    
        # Get antenna phase center offset (PCO) of receiver given in topocentric (local) reference system
        pco_rcv = self.data[antenna_type][antex_freq]["neu"]

        log.debug(f"PCO of receiver antenna {antenna_type!r} for frequency {system}:{frequency}: {pco_rcv}.")

        return pco_rcv
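A hedged usage sketch (the parser variable, antenna and radome names are hypothetical): the method returns the north/east/up phase center offset for the requested GNSS and frequency, or raises if the antenna type or frequency is missing from the ANTEX file.

# Hypothetical: 'antex' is an instance of the ANTEX parser class this method belongs to
pco_neu = antex.get_pco_rcv(system="G", frequency="L1", antenna="TRM59800.00", radome="SCIS")
print(pco_neu)  # north, east, up offset of the receiver antenna phase center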
Example #16
    def __init__(
        self,
        *args: Tuple[Any],
        sampling_rate: Union[None, float] = None,
        convert_unit: bool = False,
        **kwargs: Dict[Any, Any],
    ) -> None:
        """Initialize Rinex2-parser

        Args:
            args:           Parameters without keyword.
            sampling_rate:  Sampling rate in seconds.
            convert_unit:   Convert carrier-phase and Doppler observations to meter if True.
            kwargs:         Keyword arguments.
        """
        super().__init__(*args, **kwargs)
        self.obstypes_all = list()
        self.time_scale = "gps"
        self.sampling_rate = sampling_rate
        self.convert_unit = convert_unit
        log.debug(
            f"Sampling rate for RINEX observations is {self.sampling_rate} second(s)."
        )
Example #17
def add(*file_paths: Union[str, pathlib.Path], label: str = "") -> None:
    """Add a list of files to the list of dependencies

    Records the current time stamp or md5 hash of the files specified by file
    paths, and stores them as dependencies in the dependency file.

    Before adding dependencies, a call to `init()` has to be done, to set up
    where to store the dependencies.

    Args:
        file_paths:  List of file paths to add to the dependency file.
        label:       Optional label for dependencies.
    """
    # Ignore dependency if no dependency file has been set (init() has not been called)
    if not _DEPENDENCY_CACHE:
        return

    # Add or update dependency information
    fast_check = _DEPENDENCY_CACHE["fast_check"]
    for file_path in file_paths:
        file_info = _file_info(file_path, fast_check, label=label)
        _CURRENT_DEPENDENCIES[str(file_path)] = file_info
        log.debug(f"Adding dependency: {file_path} ({file_info['checksum']})")
Example #18
    def as_dataset(self) -> "Dataset":
        """Store Gipsy time dependent parameter data in a dataset

        Returns:
            Midgard Dataset where time dependent parameter data are stored with the following fields:


       | Field               | Type              | Description                                                        |
       |---------------------|-------------------|--------------------------------------------------------------------|
       | receiver_clock      | numpy.ndarray     | Receiver clock parameter                                           |
       | satellite           | numpy.ndarray     | Satellite PRN number together with GNSS identifier (e.g. G07)      |
       | satellite_clock     | numpy.ndarray     | Satellite clock parameter                                          |
       | satellite_ant_pco   | PositionTable     | Satellite antenna phase center offset                              |
       | site_posvel         | PosVel            | Station coordinates and velocities                                 |
       | source_id           | numpy.ndarray     | Source ID                                                          |
       | station             | numpy.ndarray     | Station name list                                                  |
       | system              | numpy.ndarray     | GNSS identifier (e.g. G or E)                                      |
       | time                | Time              | Parameter time given as TimeTable object                           |
       | troposphere_zhd     | numpy.ndarray     | Zenith hydrostatic troposphere delay parameter                     |
       | troposphere_zwd     | numpy.ndarray     | Zenith wet troposphere delay parameter                             |
       | troposphere_ge      | numpy.ndarray     | Horizontal delay gradient in the East direction                    |
       | troposphere_gn      | numpy.ndarray     | Horizontal delay gradient in the North direction                   |
       
       The fields above are given for 'apriori', 'value' and 'sigma' Dataset collections.
        
        """
        # TODO: Handling of unit. Should be added to dataset fields.

        field = {
            "Clk Bias":
            DatasetField(
                None, None,
                "float"),  # can be either receiver or satellite clock bias
            "Antennas Antenna1 MapCenterOffset All Z":
            DatasetField("satellite_ant_pco", "Satellite", "position"),
            "State Pos Z":
            DatasetField("site_posvel", "Station", "posvel"),
            "Source":
            DatasetField("source_id", "Source", "float"),
            "Trop GradEast":
            DatasetField("troposphere_ge", "Station", "float"),
            "Trop GradNorth":
            DatasetField("troposphere_gn", "Station", "float"),
            "Trop DryZ":
            DatasetField("troposphere_zhd", "Station", "float"),
            "Trop WetZ":
            DatasetField("troposphere_zwd", "Station", "float"),
        }

        not_used_parameter = [
            "Antennas Antenna1 MapCenterOffset All X",
            "Antennas Antenna1 MapCenterOffset All Y",
            "State Pos X",
            "State Pos Y",
            "State Vel X",
            "State Vel Y",
            "State Vel Z",
        ]

        dset = dataset.Dataset(num_obs=len(self.data["time_past_j2000"]))
        dset.meta.update(self.meta)

        # Note: GipsyX uses continuous seconds past Jan. 1, 2000 11:59:47 UTC time format in TDP files. That means,
        #       GipsyX does not follow convention of J2000:
        #           1.01.2000 12:00:00     TT  (TT = GipsyX(t) + 13s)
        #           1.01.2000 11:59:27.816 TAI (TAI = TT - 32.184s)
        #           1.01.2000 11:58:55.816 UTC (UTC = TAI + leap_seconds = TAI - 32s)
        #           1.01.2000 11:59:08.816 GPS (GPS = TAI - 19s)
        #
        #       Therefore Time object initialized with TT time scale has to be corrected about 13 seconds.
        #
        # TODO: Introduce j2000 = 2451545.0 as constant or unit?
        dset.add_time(
            "time",
            val=Time((self.data["time_past_j2000"] + 13.0) * Unit.second2day +
                     2451545.0,
                     scale="tt",
                     fmt="jd").gps,
        )
        keep_idx = np.ones(dset.num_obs, dtype=bool)
        collections = ["apriori", "value", "sigma"]

        # Loop over all existing parameter names
        for name in set(self.data["name"]):
            category, identifier, parameter = name.replace(
                ".", " ").split(maxsplit=2)

            if parameter in not_used_parameter:
                continue

            # Add station and satellite field to Dataset at first occurrence
            if "Satellite" in category:
                if "satellite" not in dset.fields:
                    dset.add_text("satellite",
                                  val=np.repeat(None, dset.num_obs))
                    dset.add_text("system", val=np.repeat(None, dset.num_obs))

            if "Station" in category:
                if "station" not in dset.fields:
                    dset.add_text("station",
                                  val=np.repeat(identifier.lower(),
                                                dset.num_obs))

            if "Source" in category:
                idx = name == self.data["name"]

                for collection in collections:
                    field_name = f"{collection}.{field['Source'].name}"
                    dset.add_float(field_name,
                                   val=np.full(dset.num_obs, np.NaN))
                    dset[field_name][idx] = self.data["value"][idx]
                continue

            # Add parameter solution to Dataset
            if parameter in field.keys():

                idx = name == self.data["name"]

                if category == "Satellite":
                    sys = enums.get_value("gnss_3digit_id_to_id",
                                          identifier[0:3])
                    dset.system[idx] = sys
                    dset.satellite[idx] = sys + identifier[3:5]

                # Loop over 'apriori', 'value' and 'sigma' solutions, which are saved in separated Dataset collections
                for collection in collections:
                    field_name = f"{collection}.{field[parameter].name}"
                    log.debug(
                        f"Add dataset field '{field_name}' for parameter '{parameter}' and identifier '{identifier}'."
                    )

                    # Add float fields to Dataset
                    if field[parameter].dtype == "float":

                        # Note: "Clk Bias" parameter exists for receiver and satellite, therefore it has to be
                        #       distinguished based on the length of the 'identifier' (e.g. USNO or GPS64).
                        if parameter == "Clk Bias":
                            field_name = (f"{collection}.satellite_clock"
                                          if len(identifier) == 5 else
                                          f"{collection}.receiver_clock")

                        if field_name not in dset.fields:
                            dset.add_float(field_name,
                                           val=np.full(dset.num_obs, np.NaN))
                            dset[field_name][idx] = self.data[collection][idx]

                    # Add position fields to Dataset
                    elif field[parameter].dtype == "position":

                        if field_name not in dset.fields:
                            dset.add_position(field_name,
                                              time=dset.time,
                                              system="trs",
                                              val=np.full((dset.num_obs, 3),
                                                          np.NaN))

                        # Fill position field with data
                        tmp_sol = dict()

                        for item in [".X", ".Y", ".Z"]:
                            idx_item = name.replace(".Z",
                                                    item) == self.data["name"]
                            tmp_sol[item] = self.data["value"][idx_item]
                            # Note: Only .Z dataset indices are used for saving position field in Dataset. .X and .Y are
                            #       not necessary anymore and are removed from Dataset by using "keep_idx" variable.
                            if not item == ".Z":
                                keep_idx[idx_item] = False

                        dset[field_name][idx] = np.vstack(
                            (tmp_sol[".X"], tmp_sol[".Y"], tmp_sol[".Z"])).T

                    # Add posvel fields to Dataset
                    elif field[parameter].dtype == "posvel":

                        if field_name not in dset.fields:
                            dset.add_posvel(field_name,
                                            time=dset.time,
                                            system="trs",
                                            val=np.full((dset.num_obs, 6),
                                                        np.NaN))

                        # Fill posvel field with data
                        tmp_sol = dict()
                        for item in [
                                "State.Pos.X",
                                "State.Pos.Y",
                                "State.Pos.Z",
                                "State.Vel.X",
                                "State.Vel.Y",
                                "State.Vel.Z",
                        ]:
                            idx_item = name.replace("State.Pos.Z",
                                                    item) == self.data["name"]
                            tmp_sol[item] = self.data["value"][idx_item]
                            if not item == "State.Pos.Z":
                                keep_idx[idx_item] = False

                        dset[field_name][idx] = np.vstack((
                            tmp_sol["State.Pos.X"],
                            tmp_sol["State.Pos.Y"],
                            tmp_sol["State.Pos.Z"],
                            tmp_sol["State.Vel.X"],
                            tmp_sol["State.Vel.Y"],
                            tmp_sol["State.Vel.Z"],
                        )).T

            else:
                log.fatal(f"Parameter {parameter} is not defined.")

        dset.subset(
            keep_idx)  # Remove unnecessary entries (e.g. '.X' and '.Y' )

        return dset
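A hedged sketch of how the resulting dataset might be used (the parser construction is omitted and field availability depends on the parsed TDP file): the parameter fields listed in the table above are grouped into the 'apriori', 'value' and 'sigma' collections.

# Hypothetical: 'parser' is an instance of this Gipsy TDP parser after parsing a file
dset = parser.as_dataset()

# Estimated zenith wet delay and its formal error, per epoch
zwd = dset.value.troposphere_zwd
zwd_sigma = dset.sigma.troposphere_zwd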
Example #19
    def get_pco_sat(
            self, 
            date: Union[datetime.datetime, datetime.date],
            system: str,
            frequency: Union[str, List[str]],
            satellite: str, 
    ) -> Union[None, List[float]]:
        """Get satellite PCO in satellite reference system

        If two frequencies are given via the 'frequency' argument, then the PCOs are determined as an ionospheric-free
        linear combination.

        Args:
            date:       Given date used for finding corresponding satellite PCOs in ANTEX file
            system:     GNSS identifier (e.g. E=Galileo, G=GPS, ...)
            frequency:  GNSS frequency related to given 'system' argument, which can be a single frequency (e.g. E1) or a frequency combination (e.g. E1, E5a)
            satellite:  Satellite identifier.

        Returns:
            Satellite PCO in satellite reference system or None if no entries could be found
        """
        frequency = [frequency] if isinstance(frequency, str) else frequency  # Convert str to list type
 
        # Get used date
        used_date = self._used_date(date, satellite)
        if used_date is None:
            return None
        
        antex_freq_1 = self._gnss_to_antex_freq(system, frequency[0])
        if antex_freq_1 is None:
            return None

        # Get satellite PCO for one frequency
        if len(frequency) == 1:

            # Get satellite phase center offset (PCO) given in satellite reference system
            pco_sat = np.array(self.data[satellite][used_date][antex_freq_1]["neu"])

            log.debug(f"PCO of satellite {satellite} for frequency {system}:{frequency[0]}: {pco_sat}.")

        # Get satellite PCO for ionospheric-free linear combination based on two-frequencies
        elif len(frequency) == 2:
            
            antex_freq_2 = self._gnss_to_antex_freq(system, frequency[1])
            if antex_freq_2 is None:
                return None

            # Coefficient of ionospheric-free linear combination
            f1 = getattr(enums, "gnss_freq_" + system)[frequency[0]]  # Frequency of 1st band
            f2 = getattr(enums, "gnss_freq_" + system)[frequency[1]]  # Frequency of 2nd band
            n = f1 ** 2 / (f1 ** 2 - f2 ** 2)
            m = -f2 ** 2 / (f1 ** 2 - f2 ** 2)

            # Get satellite phase center offset (PCO) given in satellite reference system
            pco_sat_f1 = np.array(self.data[satellite][used_date][antex_freq_1]["neu"])
            pco_sat_f2 = np.array(self.data[satellite][used_date][antex_freq_2]["neu"])

            # Generate ionospheric-free linear combination
            pco_sat = n * pco_sat_f1 + m * pco_sat_f2

            log.debug(
                f"Ionospheric-free linear combination PCOs of satellite {satellite} for frequency combination "
                f"{system}:{frequency[0]}_{frequency[1]}:  {pco_sat}."
            )

        else:
            raise ValueError(
                f"Wrong frequency type '{system}:{'_'.join(frequency)}'. Only single or dual frequencies can be handled."
            )

        return list(pco_sat)
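The coefficients n and m above sum to one, and for the classic GPS L1/L2 pair they take the familiar values of about 2.546 and -1.546. A quick check (frequencies hard-coded here rather than taken from the enums module):

# GPS L1 and L2 carrier frequencies in Hz, hard-coded for illustration
f1 = 1575.42e6
f2 = 1227.60e6

n = f1**2 / (f1**2 - f2**2)   # ~ 2.546
m = -f2**2 / (f1**2 - f2**2)  # ~ -1.546

print(f"n = {n:.3f}, m = {m:.3f}, n + m = {n + m:.1f}")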