Example 1
def compute_dops(az: np.ndarray, el: np.ndarray) -> Tuple[np.ndarray, ...]:
    """Compute dilution of precision (DOP) for an observation epoch

    Note that the weight of observations is not considered; the observation weight matrix is assumed to be an
    identity matrix. The cofactor matrix Q is related to a topocentric coordinate system (north, east, up):

                | q_nn q_ne q_nu q_nt |
            Q = | q_ne q_ee q_eu q_et |
                | q_nu q_eu q_uu q_ut |
                | q_nt q_et q_ut q_tt |

    Reference: Banerjee, P. and Bose, A. (1996): "Evaluation of GPS PDOP from elevation and azimuth of satellites",
        Indian Journal of Radio & Space Physics, Vol. 25, April 1996, pp. 110-113

    Args:
        az:  Satellite azimuth angle (radians)
        el:  Satellite elevation angle (radians)

    Returns:
        Tuple with GDOP, PDOP, TDOP, HDOP and VDOP
    """

    hdop = np.array([0.0])
    vdop = np.array([0.0])

    # Construct the design matrix H based on observed & valid satellites
    #
    #       | -cos(e1) * cos(a1)   -cos(e1) * sin(a1)   -sin(e1)   1  |
    #       | -cos(e2) * cos(a2)   -cos(e2) * sin(a2)   -sin(e2)   1  |
    #       | -cos(e3) * cos(a3)   -cos(e3) * sin(a3)   -sin(e3)   1  |
    #  H =  | -cos(e4) * cos(a4)   -cos(e4) * sin(a4)   -sin(e4)   1  |
    #       |         ..                   ..              ..     ..  |
    #       | -cos(en) * cos(an)   -cos(en) * sin(an)   -sin(en)   1  |
    # H = np.stack((np.cos(el) * np.sin(az), np.cos(el) * np.cos(az), np.sin(el), np.ones(el.shape)), axis=1)
    H = np.stack(
        (-np.cos(el) * np.cos(az), -np.cos(el) * np.sin(az), -np.sin(el),
         np.ones(el.shape)),
        axis=1)
    Q = H.T @ H  # H^t*H

    # User info
    log.debug("Q=H^t*H::")
    log.debug(Q)

    # Check if the inverse of Q exists by computing the condition number (or, alternatively, the determinant)
    if not np.isfinite(np.linalg.cond(Q)):
        log.warn(
            "Error by computing the inverse of the co-factor matrix Q (DOP determination)."
        )
        return None, None, None, None, None

    else:
        Q = np.linalg.inv(Q)  # (H^t*H)^{-1}
        gdop = np.sqrt(np.trace(Q))  # GDOP
        pdop = np.sqrt(np.trace(Q[0:3, 0:3]))  # PDOP
        hdop = np.sqrt(np.trace(Q[0:2, 0:2]))  # HDOP
        vdop = np.sqrt(Q[2, 2])  # VDOP
        tdop = np.sqrt(Q[3, 3])  # TDOP

    return gdop, pdop, tdop, hdop, vdop
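
A minimal usage sketch for compute_dops(), assuming NumPy and the module context above; the five satellite azimuth/elevation values are made up for illustration and must be given in radians.

import numpy as np

az = np.deg2rad([30.0, 120.0, 210.0, 300.0, 45.0])  # illustrative azimuths
el = np.deg2rad([15.0, 40.0, 70.0, 25.0, 55.0])     # illustrative elevations

gdop, pdop, tdop, hdop, vdop = compute_dops(az, el)
print(f"GDOP={gdop:.2f} PDOP={pdop:.2f} TDOP={tdop:.2f} HDOP={hdop:.2f} VDOP={vdop:.2f}")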
Example 2
    def parse_blocks(self, fid: Iterable[bytes]) -> None:
        """Parse contents of Sinex blocks

        Contents of Sinex blocks are stored as separate numpy-arrays in
        self._sinex

        Args:
            fid:  Pointer to file being read.
        """
        # Get set of interesting Sinex blocks, index them by marker
        sinex_blocks = {b.marker: b for b in self.sinex_blocks}

        # Iterate until all interesting Sinex blocks have been found or whole file is read
        try:
            while sinex_blocks:
                # Find next block (line that starts with +)
                fid = itertools.dropwhile(lambda ln: not ln.startswith(b"+"), fid)
                block_header = next(fid).decode(self.file_encoding or "utf-8")
                marker, *params = block_header[1:].strip().split()
                if marker not in sinex_blocks:
                    continue

                # Find lines in block, remove comments and parse lines, store parameters for later
                lines = [
                    ln for ln in itertools.takewhile(lambda ln: not ln.startswith(b"-"), fid) if ln.startswith(b" ")
                ]
                self._sinex[marker] = self.parse_lines(lines, sinex_blocks[marker].fields)
                if params:
                    self._sinex.setdefault("__params__", dict())[marker] = params
                del sinex_blocks[marker]

        except StopIteration:  # File ended without reading all sinex_blocks
            missing = ", ".join(sinex_blocks)
            log.warn(f"SinexParser {self.parser_name!r} did not find Sinex blocks {missing} in file {self.file_path}")
Example 3
def get_field_by_attrs(dset: "Dataset", attrs: Tuple[str],
                       unit: str) -> np.ndarray:
    """Get field values of a Dataset specified by the field attributes

    If necessary, the unit of the data fields is converted to the defined 'output' unit.

    Args:
        dset:     Dataset, a dataset containing the data.
        attrs:    Field attributes (e.g. for Time object: (<scale>, <time format>)).
        unit:     Unit used for output.

    Returns:
        Array with Dataset field values
    """
    f = dset
    for attr in attrs:
        f = getattr(f, attr)

    # Convert 'unit' if necessary
    if unit:
        field = f"{'.'.join(attrs)}"
        if dset.unit(field):
            field_unit = dset.unit(field)[0]
            try:
                log.debug(
                    f"Convert dataset field {field} from unit {field_unit} to {unit}."
                )
                f = f * Unit(field_unit).to(unit).m
            except exceptions.UnitError:
                log.warn(f"Cannot convert from '{field_unit}' to '{unit}'.")

    return f
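
A small sketch of the unit-conversion step above. The Unit(<from_unit>).to(<to_unit>).m pattern follows the code; the import path is an assumption about the midgard library.

import numpy as np
from midgard.math.unit import Unit  # assumed import path for midgard's Unit class

scale = Unit("millimeter").to("meter").m   # conversion factor from millimeter to meter (0.001)
print(np.array([1500.0, 2500.0]) * scale)  # [1.5 2.5]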
Example 4
    def glob_variable(self, file_key, variable, pattern, file_vars=None):
        """Find all possible values of variable
        """
        # Find available paths
        file_vars = dict() if file_vars is None else dict(file_vars)
        file_vars[variable] = "*"
        search_paths = self.glob_paths(file_key, file_vars)

        # Set up the regular expression
        re_vars = {**file_vars, variable: f"(?P<{variable}>__pattern__)"}
        path_pattern = str(self.path(file_key, file_vars=re_vars, default=".*")).replace("\\", "\\\\")
        for i in itertools.count():
            # Give unique names to each occurrence of variable
            path_pattern = path_pattern.replace(f"<{variable}>", f"<{variable}__{i}>", 1)
            if f"<{variable}>" not in path_pattern:
                break
        re_pattern = re.compile(path_pattern.replace("__pattern__", pattern))

        # Find each match
        values = set()
        for search_path in search_paths:
            match = re_pattern.search(str(search_path))
            if match:
                matches = set(match.groupdict().values())
                if len(matches) > 1:
                    log.warn(f"Found multiple values for {variable!r} in {search_path}: {', '.join(matches)}")
                values |= matches
        return values
Example 5
    def _parse_raw(self, line: Dict[str, str], _: Dict[str, Any]) -> None:
        """Parse 'Raw' entries of GNSS raw data file to instance variable 'data'.
        """
        # TODO: Make parsing depend on the 'State' value. What do the 'State' numbers mean?
        constellationType = {
            "0": None,
            "1": "G",
            "2": "S",
            "3": "R",
            "4": "J",
            "5": "C",
            "6": "E"
        }

        for k, v in line.items():
            if k == "dummy":
                continue
            if k == "Svid":
                system = constellationType[line["ConstellationType"]]
                if system is None:
                    log.warn("GNSS is unknown.")
                    continue
                self.data.setdefault("system", list()).append(system)
                self.data.setdefault("satellite",
                                     list()).append(system + str(v).zfill(2))
            if v == "":
                v = float("nan")

            self.data.setdefault(k, list()).append(float(v))
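
A standalone sketch of the ConstellationType/Svid mapping used above; the raw-data line is made up for illustration.

constellation_type = {"1": "G", "2": "S", "3": "R", "4": "J", "5": "C", "6": "E"}

line = {"ConstellationType": "6", "Svid": "11"}
system = constellation_type[line["ConstellationType"]]  # "E" (Galileo)
satellite = system + str(line["Svid"]).zfill(2)         # "E11"
print(system, satellite)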
Example 6
def get_field(dset: "Dataset", field: str, attrs: Tuple[str], unit: str) -> np.ndarray:
    """Get field values of a Dataset specified by the field attributes

    If necessary, the unit of the data fields is converted to the defined 'output' unit.

    Args:
        dset:     Dataset, a dataset containing the data.
        field:    Field name.
        attrs:    Field attributes (e.g. for Time object: (<scale>, <time format>)).
        unit:     Unit used for output.

    Returns:
        Array with Dataset field values
    """
    f = dset[field]
    for attr in attrs:
        f = getattr(f, attr)
        
    # Convert 'unit' if necessary
    if unit:
        field_attrs = field if len(attrs) == 0 else f"{field}.{'.'.join(attrs)}"
        
        try:
            field_unit = dset.unit(field_attrs)[0]
        except (exceptions.UnitError, TypeError):
            log.debug(f"Skip unit conversion for field '{field_attrs}'.")
            return f # Skip unit conversion for text fields, which do not have a unit.
        
        try:
            log.debug(f"Convert dataset field {field} from unit {field_unit} to {unit}.")
            f = f * Unit(field_unit).to(unit).m
        except exceptions.UnitError:
            log.warn(f"Cannot convert from '{field_unit}' to '{unit}' for field {field}.")

    return f
Example 7
    def as_dataset(self) -> "Dataset":
        """Return the parsed data as a Dataset

        Returns:
            A dataset containing the data.
        """

        # Initialize dataset
        dset = dataset.Dataset()
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["year"])

        # Add time
        epochs = list()
        for year, doy, seconds in zip(self.data["year"], self.data["doy"],
                                      self.data["seconds"]):
            epochs.append(
                datetime.strptime("{:.0f} {:.0f}".format(year, doy), "%Y %j") +
                timedelta(seconds=seconds))

        dset.add_time(name="time",
                      val=epochs,
                      scale="gps",
                      fmt="datetime",
                      write_level="operational")

        # Add system field
        if "system" in self.data.keys():
            systems = []
            for system in self.data["system"]:
                systems.append(enums.gnss_name_to_id[system.lower()].value)

            dset.add_text("system", val=systems)

        # Add satellite field
        if "satellite" in self.data.keys():
            satellites = []
            for system, satellite in zip(dset.system, self.data["satellite"]):
                satellites.append(system + str(satellite).zfill(2))

            dset.add_text("satellite", val=satellites)

        # Add text and float fields
        fields = set(self.data.keys()) - {
            "year", "doy", "seconds", "system", "satellite"
        }
        for field in fields:
            if self.data[field].dtype.kind in {"U", "S"}:  # Check if numpy type is string
                dset.add_text(field, val=self.data[field])
                continue

            dset.add_float(field, val=self.data[field])

        return dset
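
A small illustration of the year/day-of-year/seconds epoch construction used in as_dataset() above; the values are made up (day 45 of 2023 is 14 February).

from datetime import datetime, timedelta

year, doy, seconds = 2023.0, 45.0, 30.5
epoch = datetime.strptime("{:.0f} {:.0f}".format(year, doy), "%Y %j") + timedelta(seconds=seconds)
print(epoch)  # 2023-02-14 00:00:30.500000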
Example 8
    def parse_matrix_func(self: "SinexParser",
                          data: np.ndarray,
                          lower_upper: str,
                          type: str = "") -> Dict[str, Any]:
        """Parser for {marker} data

        Converts the input data to a symmetric matrix and adds it to
        self.data['{marker}'].

        The NEQ-Matrix Row/Column Number correspond to the Estimated Parameters
        Index in the {size_marker} block.  Missing elements in the matrix are
        assumed to be zero (0); consequently, zero elements may be omitted to
        reduce the size of this block.

        Args:
            data:         Input data, raw data for {marker} block.
            lower_upper:  Either 'L' or 'U', indicating whether the matrix is given in lower or upper form.
            type:         Information about the type of matrix, optional.

        Returns:
            Dictionary with symmetric matrix as a numpy array.
        """
        # Size of matrix is given by {size_marker}-block, initialize to all zeros
        try:
            n = len(self._sinex[size_marker])
        except KeyError:
            n = max(data["row_idx"])
            log.warn(
                f"{size_marker!r}-block was not parsed. Guessing at size of normal equation matrix (n={n})."
            )
        matrix = np.zeros((n, n))

        # Loop through each line of values and put it in the correct place in the matrix (cannot simply reshape as
        # elements may have been omitted)
        values = np.stack((data["value_0"], data["value_1"], data["value_2"]),
                          axis=1)
        for row, col, vals in zip(data["row_idx"], data["column_idx"], values):
            vals = vals[~np.isnan(vals)]
            idx = slice(row - 1, row), slice(col - 1, col - 1 + len(vals))
            matrix[idx] = vals

        # Add symmetrical elements, depending on whether the matrix being represented in lower or upper form
        if lower_upper.upper() == "L":
            matrix = np.tril(matrix) + np.tril(matrix, k=-1).T
        elif lower_upper.upper() == "U":
            matrix = np.triu(matrix) + np.triu(matrix, k=1).T
        else:
            log.warn(
                f"'L' or 'U' not specified for {marker}. Trying to create a symmetric matrix anyway."
            )
            matrix = matrix + matrix.T - np.diag(np.diag(matrix))

        return {"matrix": matrix, "type": type}
Example 9
    def _parse_coord_comparison(self, line: Dict[str, str],
                                cache: Dict[str, Any]) -> None:
        """Parse station coordinate comparison table
        """

        if line["station"].strip().lower():
            cache["station"] = line["station"].strip().lower()

        station = cache["station"]
        self.data.setdefault(station, dict())

        coord_def = {
            "N": "north",
            "E": "east",
            "U": "up",
        }
        coord_key = coord_def[line['flag_coord'].strip()]

        self.data[station][f"coord_comp_rms_{coord_key}"] = float(
            line["rms"]) * Unit.millimeter2meter

        if not f"coord_comp_rms_{coord_key}" in self.fields:
            self.fields.append(f"coord_comp_rms_{coord_key}")

        # Parse values line
        #----+----1----+----2----+----3----+----4----+-----
        #   1.21  -1.85 -0.41  0.90 -1.27  0.67  1.39  0.56
        #   5.03        -7.11 -1.71 -0.84        5.30  4.37
        #   0.00                           0.00
        if not "num_coord_files" in self.meta:
            log.warn(
                "Number of coordinate files are unknown. Daily comparison values can not be read."
            )
            return

        len_values = self.meta["num_coord_files"] * 6  # length of line depends on number of files
        line_values = line["values"].ljust(len_values)
        values = [line_values[i:i + 6] for i in range(0, len_values, 6)]

        for idx, value in enumerate(values):
            value = float("inf") if value.strip() == "******" else value.strip()
            if value:
                values[idx] = float(value) * Unit.millimeter2meter
            else:
                values[idx] = float('nan')

        self.data[station][f"coord_comp_{coord_key}"] = values

        if not f"coord_comp_{coord_key}" in self.fields:
            self.fields.append(f"coord_comp_{coord_key}")
Example 10
    def parse(self) -> "SinexParser":
        """Parse data

        Override default parse() due to special handling of setup_parser for Sinex files
        """
        if self.data_available:
            self.read_data()

        if not self.data_available:  # May have been set to False by self.read_data()
            log.warn(f"No data found by {self.__class__.__name__} in {self.file_path}")
            return self

        self.postprocess_data()

        return self
Example 11
    def as_dataset(self, ref_pos: Union[np.ndarray, List[float]]) -> "Dataset":
        """Return the parsed data as a Dataset

        Args:
            ref_pos: Reference position given in terrestrial reference system and meters

        Returns:
            A dataset containing the data.
        """

        # Initialize dataset
        dset = dataset.Dataset()
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["date"])

        # Add position
        ref_pos = position.Position(np.repeat(np.array([ref_pos]),
                                              dset.num_obs,
                                              axis=0),
                                    system="trs")
        dset.add_position_delta(
            name="pos",
            val=np.stack(
                (self.data["east"], self.data["north"], self.data["vertical"]),
                axis=1) * Unit.millimeter2meter,
            system="enu",
            ref_pos=ref_pos,
        )

        # Add position sigma
        sigma = np.stack((self.data["east_sigma"], self.data["north_sigma"],
                          self.data["vertical_sigma"]),
                         axis=1)
        dset.add_sigma(name="pos_sigma",
                       val=dset.pos.val,
                       sigma=sigma * Unit.millimeter2meter,
                       unit="meter")

        # Add time
        dset.add_time(name="time",
                      val=self.data["year"],
                      scale="utc",
                      fmt="decimalyear",
                      write_level="operational")

        return dset
Example 12
    def parse_header_line(self, header_line: bytes) -> None:
        """Parse header of Sinex file

        Header information is stored in `self.meta`.

        Args:
            header_line:  First line of Sinex file.
        """
        if not header_line.startswith(b"%=SNX"):
            log.warn(
                f"The file '{self.file_path}' does not contain a valid SINEX header: {header_line.decode().strip()!r}"
            )
            return

        # Add header information to self.meta
        header_data = self.parse_lines([header_line], self.header_fields)
        self.meta.update({n: header_data[n][()] for n in header_data.dtype.names})
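
An illustration of the [()] scalar extraction used above: the dtype.names access suggests parse_lines returns a NumPy structured array, and indexing a 0-dimensional entry with [()] unwraps it to a plain scalar. The header fields below are made up.

import numpy as np

header_data = np.array(("2.02", 2023), dtype=[("version", "U4"), ("year", int)])
meta = {n: header_data[n][()] for n in header_data.dtype.names}
print(meta["version"], meta["year"])  # 2.02 2023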
Example 13
def _get_doc(obj_name, obj, module):
    """Get and format the documentation for an object"""
    list_headers = ["Args", "Attributes", "Returns", "Examples"]
    member_docs = list()

    if not callable(obj):
        return f"\n### {obj_name} ({obj.__class__.__name__})\n`{obj_name} = {obj!r}`\n"

    if not obj.__doc__:
        log.warn(f"No docstring found for {obj.__qualname__}")

    doc = textwrap.dedent((" " * 100) + (obj.__doc__ or "")).lstrip()

    paragraphs = doc.split("\n\n")
    doc_w_lists = list()
    for paragraph in paragraphs:
        if paragraph.split(":")[0] in list_headers:
            paragraph = re.sub(r"^(\w+:)", r"**\1**\n",
                               paragraph)  # Bold header
            paragraph = re.sub(r"\n    (\w+):", r"\n- `\1`:", paragraph)
            paragraph = re.sub(r"\n    (\w)", r"\n\1", paragraph)
        doc_w_lists.append(paragraph)
    doc = "\n\n".join(doc_w_lists)

    if inspect.isclass(obj) and not isinstance(obj, type):
        for member_name in dir(obj):
            member = getattr(obj, member_name)
            if not _do_doc(member_name, member, module):
                continue
            member_doc = _get_doc(member_name, member, module)
            member_doc = re.sub(r"\n### ", rf"\n#### {obj_name}.", member_doc)
            member_docs.append(member_doc)
        doc += "\n".join(member_docs)

    try:
        signature = inspect.signature(obj)
    except (ValueError, TypeError):
        signature = "()"

    headline = f"### **{obj_name}**{'' if inspect.isclass(obj) else '()'}"
    qualname = f"Full name: `{module.__name__}.{obj_name}`"
    signature_str = f"Signature: `{signature}`"

    return f"\n{headline}\n\n{qualname}\n\n{signature_str}\n\n{doc}"
Example 14
    def as_dataset(self) -> "Dataset":
        """Return the parsed data as a Dataset

        GipsyX summary results are added to the Dataset 'meta' variable.

        Returns:
            A dataset containing the data.
        """
        dset = dataset.Dataset(num_obs=0)

        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset

        dset.meta["summary"] = self.data
        return dset
Example 15
    def _match_pattern(self, search_paths, path_pattern, variable, pattern):
        """Look for variable matching pattern in the given search paths."""
        for i in itertools.count():
            # Give unique names to each occurrence of variable
            path_pattern = path_pattern.replace(f"<{variable}>", f"<{variable}__{i}>", 1)
            if f"<{variable}>" not in path_pattern:
                break
        re_pattern = re.compile(path_pattern.replace("__pattern__", pattern))

        # Find each match
        values = set()
        for search_path in search_paths:
            match = re_pattern.search(str(search_path))
            if match:
                matches = set(match.groupdict().values())
                if len(matches) > 1:
                    log.warn(f"Found multiple values for {variable!r} in {search_path}: {', '.join(matches)}")
                values |= matches
        return values
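
A standalone sketch of the unique-group-naming trick in _match_pattern() (also used in glob_variable() above): re requires distinct group names, so each occurrence of the variable gets its own suffix before the pattern is compiled. The path pattern and values are made up.

import itertools
import re

variable = "date"
pattern = r"\d{8}"
path_pattern = r"/data/(?P<date>__pattern__)/gnss_(?P<date>__pattern__)\.txt"

for i in itertools.count():
    # Give unique names to each occurrence of variable
    path_pattern = path_pattern.replace(f"<{variable}>", f"<{variable}__{i}>", 1)
    if f"<{variable}>" not in path_pattern:
        break
re_pattern = re.compile(path_pattern.replace("__pattern__", pattern))

match = re_pattern.search("/data/20240101/gnss_20240101.txt")
print(set(match.groupdict().values()))  # {'20240101'}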
Example 16
def main():
    """Main program for testing solution validation implementation

    #TODO: This should be done via midgard/tests/gnss !!!
    """
    log.init(log_level="info")

    # Upper bound for GDOP
    max_gdops = 30.0

    # Read command line arguments
    parser = get_my_parser()
    results = parser.parse_args()
    no_print = lambda _: None

    # User info/test
    log.info(f" number of arguments ={len(sys.argv):d}")
    log.info(f" program name        ={sys.argv}")

    # Testing the implemented function
    if len(sys.argv) == 1:
        i_res_cnt = 9
        n_val_sats = i_res_cnt
        alpha_siglev = 0.01
        n_params = 5
        my_residuals = np.random.normal(0.0, 1, size=i_res_cnt)
        az, el = np.deg2rad(
            np.random.normal(60, 30, size=(2 * n_val_sats)).reshape(-1, 2).T
        )  # compute_dops() expects radians
        my_result = sol_validation(my_residuals, alpha_siglev, n_params)

        dops_vals = compute_dops(az, el)

        if dops_vals[0] <= 0.0 or dops_vals[0] > max_gdops:
            log.warn(
                f"sol_validation():: compute_dops(): not valid solution, number of valid sats={n_val_sats:02d} and  GDOP={dops_vals[0]:.2f}"
            )

        log.info(f" DOPS results::")
        log.info(f" compute_dops(): GDOP={dops_vals[0]:.2f}")
        log.info(f" compute_dops(): PDOP={dops_vals[1]:.2f}")
        log.info(f" compute_dops(): HDOP={dops_vals[2]:.2f}")
        log.info(f" compute_dops(): PDOP={dops_vals[3]:.2f}")
Example 17
    def parse(self) -> "Parser":
        """Parse data

        This is a basic implementation that carries out the whole pipeline of
        reading and parsing datafiles including calculating secondary data.

        Subclasses should typically implement (at least) the `read_data`-method.
        """
        self.setup_parser()
        if self.data_available:
            self.read_data()

        if not self.data_available:  # May have been set to False by self.read_data()
            log.warn(
                f"No data found by {self.__class__.__name__} in {self.file_path}"
            )
            return self

        self.postprocess_data()

        return self
Example 18
def sol_validation(residuals: np.ndarray,
                   alpha_siglev: float,
                   n_params: int = 4) -> bool:
    """Validating the GNSS solution is carried out using Chi-square test

    Use Chi-square test for outlier detection and rejection. 

    Args:
        residuals:      Postfit residuals for all satellites in each epoch 
        alpha_siglev:   Alpha significance level
        n_params:       Number of parameters (states), normally 4 parameters for station coordinates and receiver clock

    Returns:
        True if the Chi-square test is passed, otherwise False.
    """

    # Regular checks
    num_obs = len(residuals)
    df = num_obs - n_params - 1
    if df < 0:
        log.warn(
            f"sol_validattion():: degree of freedom < 0 (df = {df}) --> TEST NOT PASSED"
        )
        return False

    # Chi-square validation of residuals
    vv = np.dot(residuals, residuals)  # sum (v(i) * v(i))
    chi_sqr = stats.chi2.ppf(1 - alpha_siglev, df=df)

    if vv > chi_sqr:
        log.debug(
            f"sol_validattion():: number of valid obs={num_obs:03} vv={vv:.2f} chi-square value={chi_sqr:.2f}--> TEST NOT PASSED"
        )
        return False

    else:
        log.debug(
            f"sol_validation():: number of valid obs={num_obs:02} vv={vv:.2f} < chi-square value={chi_sqr:.2f} --> TEST PASSED for alpha significance level= {(1.0-alpha_siglev)*100:.2f} %"
        )
        return True
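
A minimal usage sketch for sol_validation(), assuming NumPy, SciPy and the module context above; the postfit residuals are synthetic.

import numpy as np

residuals = np.random.normal(0.0, 1.0, size=9)  # postfit residuals of one epoch (illustrative)
passed = sol_validation(residuals, alpha_siglev=0.01, n_params=5)
print("Chi-square test passed:", passed)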
Example 19
    def as_dataset(
        self,
        ref_pos: Union[np.ndarray, List[float]] = [0.0, 0.0,
                                                   0.0]) -> "Dataset":
        """Return the parsed data as a Dataset

        Args:
            ref_pos: Reference position given in terrestrial reference system and meters

        Returns:
            Midgard Dataset where timeseries data are stored with following fields:

    
           | Field               | Type              | Description                                                    |
           |---------------------|-------------------|----------------------------------------------------------------|
           | obs.dpos            | PositionDelta     | Position delta object referred to a reference position         |
           | obs.dpos_sigma_east | numpy.array       | Standard deviation of east position                            |
           | obs.dpos_sigma_north| numpy.array       | Standard deviation of north position                           |
           | obs.dpos_sigma_up   | numpy.array       | Standard deviation of up position                              |
           | time                | Time              | Parameter time given as TimeTable object                       |
        """

        # Initialize dataset
        dset = dataset.Dataset()
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["decimalyear"])
        dset.meta.update(self.meta)

        # Add position
        ref_pos = position.Position(np.repeat(np.array([ref_pos]),
                                              dset.num_obs,
                                              axis=0),
                                    system="trs")
        dset.add_position_delta(
            name="obs.dpos",
            val=np.stack(
                (self.data["east"], self.data["north"], self.data["vertical"]),
                axis=1),
            system="enu",
            ref_pos=ref_pos,
        )

        # TODO: sigma functionality has to be improved: dpos_sigma.enu.east, dpos_sigma.trs.x
        ## Add position sigma
        # sigma = np.stack((self.data["east_sigma"], self.data["north_sigma"], self.data["vertical_sigma"]), axis=1)
        # dset.add_sigma(name="dpos_sigma", val=dset.dpos.val, sigma=sigma, unit="meter")
        dset.add_float(name="obs.dpos_sigma_east",
                       val=self.data["east_sigma"],
                       unit="meter")
        dset.add_float(name="obs.dpos_sigma_north",
                       val=self.data["north_sigma"],
                       unit="meter")
        dset.add_float(name="obs.dpos_sigma_up",
                       val=self.data["vertical_sigma"],
                       unit="meter")

        # Add time
        dset.add_time(name="time",
                      val=self.data["decimalyear"],
                      scale="utc",
                      fmt="decimalyear",
                      write_level="operational")

        return dset
Example 20
    def write_to_dataset(self, dset) -> "Dataset":
        """Return the parsed data as a Dataset

        Args:
            dset (Dataset): The Dataset. Depending on the Spring CSV following dataset fields can be available:

        | Field                 | Former name | Description                                                            |
        |-----------------------|-------------|------------------------------------------------------------------------|
        | adjbgd-dcb_mean       |             |                                                                        |
        | adjbgd-dcb_med        |             |                                                                        |
        | clk_diff_dt_mean      | dB_mean     | MEAN clock offset determined in each epoch                             |
        | clk_diff_with_dt_mean | dH_mean     | Satellite clock correction difference corrected for satellite bias and |
        |                       |             | the MEAN constellation clock offset in each epoch                      |    
        | dr                    |             | Satellite coordinate difference between broadcast and precise ephemeris|
        |                       |             | in radial direction in [m]                                             |
        | dx                    |             | Satellite coordinate difference between broadcast and precise ephemeris|
        |                       |             | for x-coordinate                                                       |
        | dy                    |             | Satellite coordinate difference between broadcast and precise ephemeris|
        |                       |             | for y-coordinate                                                       |
        | dz                    |             | Satellite coordinate difference between broadcast and precise ephemeris|
        |                       |             | for z-coordinate                                                       |
        | dh_med                |             | Satellite clock correction difference corrected for satellite bias and |
        |                       |             | the MEDIAN clock offset in each epoch                                  |
        | db_med                |             | MEDIAN constellation clock offset determined in each epoch             |
        | dbgd_mean             |             |                                                                        |
        | dbgd_med              |             |                                                                        |
        | orb_diff_3d           | d3D         | 3D orbit difference                                                    |
        | satellite             | SVID        | Satellite number                                                       |
        | sqrt_a2_c2            | dAC         | sqrt(a^2 + c^2)                                                        |
        | system                |             | System identifier                                                      |
        | sisre                 | URE_Av_mean | Global average user range error (signal-in-space range error) with use |
        |                       |             | of MEAN constellation clock offset                                     |
        | ure_av_med            |             | Global average user range error (signal-in-space range error) with use |
        |                       |             | of MEDIAN constellation clock offset                                   |
        | ure_wul_mean          |             | Global average user range error for worst user location with use of    |
        |                       |             | MEAN constellation clock offset                                        |
        | ure_wul_med           |             | Global average user range error for worst user location with use of    |
        |                       |             | MEDIAN constellation clock offset                                      |
        """

        field_ure_control_tool_to_where = {
            "dAC(m)": "sqrt_a2_c2",
            "dB_mean(m)": "clk_diff_dt_mean",
            "dH_mean(m)": "clk_diff_with_dt_mean",
            "dR(m)": "dradial",
            "d3D(m)": "orb_diff_3d",
            "SVID": "satellite",
            "URE_Av_mean(m)": "sisre",
        }

        # Initialize dataset
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["dX(m)"])

        # Add time
        dset.add_time(
            "time",
            val=[
                dateutil.parser.parse(self.data["YYYY/MM/DD"][i] + " " +
                                      self.data["HH:MM:SS"][i])
                for i in range(0, dset.num_obs)
            ],
            scale="gps",
            fmt="datetime",
            write_level="operational",
        )

        # Add system field
        dset.add_text("system", val=[s[0:1] for s in self.data["SVID"]])

        # Add position field
        dset.add_position("orb_diff",
                          itrs=np.vstack(
                              (self.data["dX(m)"], self.data["dY(m)"],
                               self.data["dZ(m)"])).T,
                          time="time")

        # Define fields to save in dataset
        remove_fields = {"YYYY/MM/DD", "HH:MM:SS", "dX(m)", "dY(m)", "dZ(m)"}
        fields = set(self.data.keys()) - remove_fields

        # Add text and float fields
        for field in fields:

            where_fieldname = field_ure_control_tool_to_where.get(field, field.lower())
            where_fieldname = where_fieldname.replace("(m)", "")  # Remove unit (m) from field name

            if self.data[field].dtype.kind in {"U", "S"}:  # Check if numpy type is string
                dset.add_text(where_fieldname, val=self.data[field])
                continue

            dset.add_float(where_fieldname, val=self.data[field], unit="meter")
Example 21
    def as_dataset(
            self,
            ref_pos: Union[np.ndarray, List[float], None] = None) -> "Dataset":
        """Return the parsed data as a Dataset

        Args:
            ref_pos: Reference position given in terrestrial reference system and meters

        Returns:
            Midgard Dataset where GALAT result data are stored with following fields:

    
           | Field                    | Type              | Description                                               |
           |--------------------------|-------------------|-----------------------------------------------------------|
           | hpe                      | np.ndarray        | Horizontal Position Error of site position vs. reference  |
           |                          |                   | position                                                  |
           | num_satellite_available  | np.ndarray        | Number of available satellites                            |
           | num_satellite_used       | np.ndarray        | Number of used satellites                                 |
           | pdop                     | np.ndarray        | Position dilution of precision                            |
           | site_pos                 | Position          | Site position                                             |
           | site_pos_vs_ref          | PositionDelta     | Site position versus reference coordinate                 |
           | site_vel_3d              | np.ndarray        | 3D site velocity                                          |
           | time                     | Time              | Parameter time given as TimeTable object                  |
           | vpe                      | np.ndarray        | Vertical Position Error of site position vs. reference    |
           |                          |                   | position                                                  |
        """
        fields = {
            #"hpe": "meter", # Recalculated based on site position and given reference coordinate
            #"vpe": "meter", # Recalculated based on site position and given reference coordinate
            "site_vel_3d": "meter/second",
            "pdop": "",
            "num_satellite_available": "",
            "num_satellite_used": "",
        }

        # Initialize dataset
        dset = dataset.Dataset()
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["time"])

        # Add time field
        dset.add_time(
            "time",
            val=self.data["time"],
            scale="gps",
            fmt="datetime",
        )

        # Add float fields
        for field in fields.keys():
            dset.add_float(name=field,
                           val=self.data[field],
                           unit=fields[field])

        # Add site position field
        dset.add_position(
            "site_pos",
            val=np.stack((
                self.data["latitude"] * Unit.deg2rad,
                self.data["longitude"] * Unit.deg2rad,
                self.data["height"],
            ),
                         axis=1),
            system="llh",
        )

        # Use either reference position from RINEX header or given argument as reference position
        if ref_pos is None:
            ref_pos = position.Position(
                np.repeat(
                    np.array([[
                        self.meta["pos_x"], self.meta["pos_y"],
                        self.meta["pos_z"]
                    ]]),
                    dset.num_obs,
                    axis=0,
                ),
                system="trs",
            )
        else:
            ref_pos = position.Position(np.repeat(np.array([ref_pos]),
                                                  dset.num_obs,
                                                  axis=0),
                                        system="trs")

        # Add relative position
        dset.add_position_delta(
            name="site_pos_vs_ref",
            val=(dset.site_pos.trs - ref_pos.trs).val,
            system="trs",
            ref_pos=ref_pos,
        )

        # Add HPE and VPE to dataset
        dset.add_float(
            "hpe",
            val=np.sqrt(dset.site_pos_vs_ref.enu.east**2 +
                        dset.site_pos_vs_ref.enu.north**2),
            unit="meter",
        )
        dset.add_float("vpe",
                       val=np.absolute(dset.site_pos_vs_ref.enu.up),
                       unit="meter")

        return dset
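
A small NumPy sketch of the HPE/VPE computation above: the horizontal position error is the norm of the east/north components of the position difference from the reference, and the vertical position error is the absolute up component. The ENU differences below are made up.

import numpy as np

east, north, up = np.array([0.01, -0.02]), np.array([0.03, 0.01]), np.array([-0.05, 0.02])
hpe = np.sqrt(east**2 + north**2)  # horizontal position error in meters
vpe = np.absolute(up)              # vertical position error in meters
print(hpe, vpe)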
Example 22
    def as_dataset(self) -> "Dataset":
        """Return the parsed data as a Dataset

        Returns:
            A dataset containing the data.
        """
        # Spring constellation definition
        system_def = {
            "0": "",  # Unknown
            "1": "G",  # GPS
            "2": "R",  # GLONASS
            "3": "S",  # SBAS
            "4": "E",  # Galileo
            "5": "C",  # BeiDou
            "6": "J",  # QZSS
        }

        field_spring_to_where = {
            "3DSpeed": "site_vel_3d",
            "Clock": "delay.gnss_satellite_clock",
            "EastSpeed": "site_vel_east",
            "GroupDelay": "delay.gnss_total_group_delay",
            "HSpeed": "site_vel_h",
            "IODE": "used_iode",
            "NorthSpeed": "site_vel_north",
            "PseudoRange": "delay.gnss_range",
            "SatInView": "num_satellite_available",
            "TropoDelay": "troposphere_dT",
            "UISD": "delay.gnss_ionosphere",
            "UsedSat": "num_satellite_used",
            "EastvsRef": "site_pos_vs_ref_east",
            "NorthvsRef": "site_pos_vs_ref_north",
            "VerticalvsRef": "site_pos_vs_ref_up",
            "VerticalSpeed": "site_vel_up",
            "XSpeed": "site_vel_x",
            "YSpeed": "site_vel_y",
            "ZSpeed": "site_vel_z",
        }

        # Initialize dataset
        dset = dataset.Dataset()
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["GPSEpoch"])

        # Add time
        dset.add_time(
            "time",
            val=[
                dateutil.parser.parse(v.replace("UTC", ""))
                for v in self.data["UTCDateTime"]
            ],
            scale="utc",
            fmt="datetime",
            write_level="operational",
        )

        # Add system field based on Constellation column
        if "Constellation" in self.data.keys():
            dset.add_text("system",
                          val=[
                              system_def[str(value)]
                              for value in self.data["Constellation"]
                          ])

        # Add satellite field based on PRN column
        if "PRN" in self.data.keys():
            prn_data = []
            for prn in self.data["PRN"]:
                if prn >= 1 and prn <= 32:  # Handling of GPS satellites
                    prn_data.append("G" + str(prn).zfill(2))
                elif prn >= 38 and prn <= 70:  # Handling of GLONASS satellites
                    prn_data.append("R" + str(prn - 38).zfill(2))
                elif prn >= 71 and prn <= 140:  # Handling of Galileo satellites
                    prn_data.append("E" + str(prn - 70).zfill(2))
                elif prn >= 191 and prn <= 222:  # Handling of BeiDou satellites
                    prn_data.append("C" + str(prn - 191).zfill(2))
                else:
                    log.fatal(f"Spring PRN number '{prn}' is unknown.")

            dset.add_text("satellite", val=prn_data)
            dset.add_text("system", np.array(prn_data).astype("U1"))

        # Add position field based on Latitude, Longitude and Height column
        if "Latitude" in self.data.keys():
            pos = Position(
                val=np.vstack((self.data["Latitude"] * Unit.deg2rad,
                               self.data["Longitude"] * Unit.deg2rad,
                               self.data["Height"])).T,
                system="llh",
            )
            if "XPos" in self.data.keys():
                dset.add_position("sat_pos",
                                  val=pos.trs,
                                  system="trs",
                                  time=dset.time)
            else:
                dset.add_position("site_pos",
                                  val=pos.trs,
                                  system="trs",
                                  time=dset.time)

        # Define fields to save in dataset
        remove_time_fields = {
            "Constellation", "GPSEpoch", "GPSWeek", "GPSSecond", "PRN", "",
            "UTCDateTime"
        }
        fields = set(self.data.keys()) - remove_time_fields

        # Add text and float fields
        for field in fields:

            where_fieldname = field_spring_to_where.get(field, field.lower())

            if self.data[field].dtype.kind in {"U", "S"}:  # Check if numpy type is string
                dset.add_text(where_fieldname, val=self.data[field])
                continue

            dset.add_float(where_fieldname, val=self.data[field])

        return dset
Example 23
    def as_dataset(self) -> "Dataset":
        """Return the parsed data as a Dataset

        Returns:
            Midgard Dataset where timeseries data are stored with following fields:

    
           | Field                 | Type              | Description                                                  |
           |-----------------------|-------------------|--------------------------------------------------------------|
           | amplitude             | numpy.array       | Amplitude                                                    |
           | azimuth               | numpy.array       | Azimuth in [rad]                                             |
           | frequency             | numpy.array       | GNSS frequency identifier                                    |
           | peak2noise            | numpy.array       | Peak to noise                                                |
           | satellite             | numpy.array       | Satellite number                                             |
           | reflection_height     | numpy.array       | Reflection height in [m]                                     |
           | time                  | Time              | Time                                                         |
               
        """

        freq_def = {
            1: "L1",  # G
            2: "L2",  # G
            5: "L5",  # G
            20: "L2C",  # G
            101: "L1",  # R
            102: "L2",  # R
            201: "E1",  # E 
            205: "E5a",  # E
            206: "E6",  # E
            207: "E5b",  # E
            208: "E5",  # E
            302: "B1_2",  # C
            306: "B3",  # C
            307: "B2b",  # C
        }

        float_fields = {
            "amplitude": None,
            "azimuth": "radian",
            "peak2noise": None,
            "reflection_height": "meter",
        }

        # Initialize dataset
        dset = dataset.Dataset()
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["time"])

        # Add text fields
        satellite = list()
        system = list()
        for sat in self.data["satellite"]:
            if sat >= 1 and sat < 100:  # GPS satellites
                system.append("G")
                satellite.append("G" + str(int(sat)).zfill(2))
            elif sat >= 101 and sat < 200:  # GLONASS satellites
                system.append("R")
                satellite.append("R" + str(int(sat))[1:3])
            elif sat >= 201 and sat < 300:  # Galileo satellites
                system.append("E")
                satellite.append("E" + str(int(sat))[1:3])
            elif sat >= 301 and sat < 400:  # BeiDou satellites
                system.append("C")
                satellite.append("C" + str(int(sat))[1:3])
            else:
                log.fatal(
                    f"GNSSREFL satellite number {sat} is not defined. Valid satellite numbers are between [1-399]."
                )

        dset.add_text(
            name="system",
            val=system,
            write_level="operational",
        )

        dset.add_text(
            name="satellite",
            val=satellite,
            write_level="operational",
        )

        dset.add_text(
            name="frequency",
            val=[freq_def[v] for v in self.data["frequency"]],
            write_level="operational",
        )

        # Add time field
        dset.add_time(
            name="time",
            val=self.data["time"],
            scale="utc",
            fmt="datetime",
            write_level="operational",
        )

        # Add float fields
        for field in float_fields.keys():
            if field not in self.data.keys():
                log.warn(
                    f"Field '{field}' does not exist in file {self.meta['__data_path__']}."
                )
                continue

            value = np.deg2rad(
                self.data[field]) if field == "azimuth" else self.data[field]
            unit = "" if float_fields[field] is None else float_fields[field]

            dset.add_float(name=field,
                           val=value,
                           unit=unit,
                           write_level="operational")

        return dset
Example 24
    def download_file(
        self,
        file_key: str,
        file_vars: Optional[Dict[str, str]] = None,
        file_path: Optional[pathlib.Path] = None,
        create_dirs: bool = True,
        **path_args: Any,
    ) -> Optional[pathlib.Path]:
        """Download a file from the web and save it to disk

        Use pycurl (libcurl) to do the actual downloading. Requests might be
        nicer for this, but turned out to be much slower (and in practice
        unusable for bigger files) and also does not really support FTP
        downloads.

        Args:
            file_key:     File key that should be downloaded.
            file_vars:    File variables used to find path from file_key.
            file_path:    Path where file will be saved, default is to read from configuration.
            create_dirs:  Create directories as necessary before downloading file.
            path_args:    Arguments passed on to .path() to find file_path.

        Returns:
            Path to downloaded file, None if no file was downloaded.
        """
        # Do not download anything if download_missing class variable is False
        if not self.download_missing:
            return None

        # Do not download anything if url is not given in configuration
        if "url" not in self[file_key] or not self[file_key].url.str:
            return None

        # Get file_path from configuration if it's not given explicitly
        file_url = self.url(file_key, file_vars=file_vars, **path_args)
        is_zipped = self.is_path_zipped(file_url)
        path_args.update(is_zipped=is_zipped)

        if file_path is None:
            file_path = self.path(file_key, file_vars=file_vars, download_missing=False, **path_args)
        file_path = file_path.with_name(file_url.name)

        if create_dirs:
            file_path.parent.mkdir(parents=True, exist_ok=True)

        log.info(f"Download {file_key} from '{file_url}' to '{file_path}'")
        with builtins.open(file_path, mode="wb") as fid:
            c = pycurl.Curl()
            c.setopt(c.URL, file_url)
            c.setopt(c.WRITEDATA, fid)
            try:
                c.perform()
                if not (200 <= c.getinfo(c.HTTP_CODE) <= 299):
                    raise pycurl.error()
            except pycurl.error:
                log.error(f"Problem downloading file: {c.getinfo(c.EFFECTIVE_URL)} ({c.getinfo(c.HTTP_CODE)})")
                if file_path.exists():  # Print first 10 lines to console
                    head_of_file = f"Contents of '{file_path}':\n" + "\n".join(file_path.read_text().split("\n")[:10])
                    log.info(console.indent(head_of_file, num_spaces=8))
                    file_path.unlink()
                log.warn(f"Try to download '{file_url}' manually and save it at '{file_path}'")
            else:
                log.info(f"Done downloading {file_key}")
            finally:
                c.close()
        return file_path