Example #1
    def _read_data_snx(self):
        """Read data needed by ITRF Sinex for calculating positions of sites

        Returns:
            Dict:  Dictionary containing data about each site defined in this reference frame.
        """
        return parsers.parse_file("trf_snx", file_path=self.file_paths["snx"]).as_dict()
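A hypothetical usage sketch for this method; the `itrf` instance and the shape of the returned dictionary are assumptions inferred from Example #3, not confirmed by this snippet:

# Hypothetical usage: `itrf` and the dictionary layout are assumptions
data = itrf._read_data_snx()
for site_id, site_info in data.items():
    print(site_id, sorted(site_info))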
Example #2
    def _read_data_ssc(self):
        """Read data needed by ITRF SSC for calculating positions of sites

        Returns:
            Dict:  Dictionary containing data about each site defined in this reference frame.
        """
        data_trf = parsers.parse_file(
            "trf_ssc", file_path=self.file_paths["ssc"]).as_dict()
        data_ssc = {k.lower(): v for k, v in data_trf.items()}
        return data_ssc
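The second statement is a plain dict comprehension that normalizes the site keys to lower case. A self-contained toy illustration of that step (the site names are invented):

# Toy illustration of the key normalization above; site names are invented
data_trf = {"GRAS": {"pos": (1.0, 2.0, 3.0)}, "ONSA": {"pos": (4.0, 5.0, 6.0)}}
data_ssc = {k.lower(): v for k, v in data_trf.items()}
assert set(data_ssc) == {"gras", "onsa"}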
Example #3
    def _read_data_snx(self):
        """Read data needed by ITRF Sinex for calculating positions of sites

        Returns:
            Dict:  Dictionary containing data about each site defined in this reference frame.
        """
        paths = self.file_paths
        data_trf = parsers.parse_file("trf_snx", file_path=paths["snx"]).as_dict()
        data_snx = {k.lower(): v for k, v in data_trf.items()}

        # Time epoch intervals are in a separate file
        data_soln = parsers.parse_file("trf_snx_soln", file_path=paths["soln"]).as_dict()
        for site_key, site_dict in data_soln.items():
            pos_vel = data_snx[site_key.lower()]["pos_vel"]
            for soln, interval in site_dict.items():
                if soln in pos_vel:
                    pos_vel[soln]["start"] = interval["start"]
                    pos_vel[soln]["end"] = interval["end"]
                elif soln - 1 in pos_vel:
                    # Copy the previous solution for extrapolation
                    pos_vel[soln] = pos_vel[soln - 1].copy()
                    pos_vel[soln]["start"] = interval["start"]
                    pos_vel[soln]["end"] = interval["end"]

        # Post-seismic deformations (for 2014 and later)
        if self.solution >= "2014":
            data_psd = parsers.parse_file("trf_snx_psd", file_path=paths["psd"]).as_dict()
            for site_key, site_dict in data_psd.items():
                site_id = site_key.lower()
                if site_id in data_snx:
                    data_snx[site_id]["psd"] = site_dict["psd"]

        return data_snx
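The interval-merging loop both attaches start/end epochs to existing solutions and extrapolates a missing solution from its predecessor. A self-contained toy run of the same logic (all values are invented; relies on dicts preserving insertion order, Python 3.7+):

# Toy data mimicking the structures above; every value is invented
data_snx = {"gras": {"pos_vel": {1: {"x": 0.0}}}}
data_soln = {"GRAS": {1: {"start": "2000", "end": "2005"},
                      2: {"start": "2005", "end": "2010"}}}

for site_key, site_dict in data_soln.items():
    pos_vel = data_snx[site_key.lower()]["pos_vel"]
    for soln, interval in site_dict.items():
        if soln in pos_vel:
            pos_vel[soln].update(start=interval["start"], end=interval["end"])
        elif soln - 1 in pos_vel:
            # Solution 2 is missing, so it is copied from solution 1
            pos_vel[soln] = dict(pos_vel[soln - 1], start=interval["start"], end=interval["end"])

assert data_snx["gras"]["pos_vel"][2] == {"x": 0.0, "start": "2005", "end": "2010"}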
Example #4
    def _parse_block(self, fid, block, name="", directory=""):
        """Parse one block of the file, recursing into nested blocks

        Lines ending in ``.nc`` refer to NetCDF files that are parsed
        separately; other lines are stored as key/value pairs in ``self.raw``.
        """
        for line in fid:
            if not line or line.startswith("!"):
                continue
            words = line.split()
            if not words:  # guard against whitespace-only lines
                continue
            if words[0].lower().startswith("end") and words[1] == block:
                return
            elif words[0].lower().startswith("begin"):
                # Nested block: the recursive call shares the file iterator
                self._parse_block(fid, words[1], name=" ".join(words[2:]), directory=directory)
            elif words[0].lower().startswith("default_dir"):
                directory = words[1]
            elif words[0].endswith(".nc"):
                file_path = self.file_path.parents[0] / directory / words[0]
                data = self.raw.setdefault(directory, {}) if directory else self.raw
                # Nest the data by the parts of the NetCDF file name
                nc_name = file_path.stem.split("_")
                nc_stub = nc_name.pop(0)
                data = data.setdefault(nc_stub, {})
                for part in nc_name:
                    if part.startswith("b"):
                        data = data.setdefault(part[1:], {})

                netcdf_data = parsers.parse_file("vlbi_netcdf", file_path=file_path).as_dict()
                if "TimeUTC" in file_path.stem:
                    self._parse_time(netcdf_data)
                data.update(netcdf_data)
            else:
                data = self.raw.setdefault(block, {})
                if name:
                    data = data.setdefault(name, {})
                data[words[0]] = " ".join(words[1:])
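The recursion works because the recursive call shares the file iterator `fid`: the inner call consumes lines up to its END marker, and the outer loop resumes right after it. A stripped-down sketch of the same pattern on an invented BEGIN/END format (no NetCDF handling):

import io

def parse_blocks(fid, block="", raw=None):
    # Minimal recursive BEGIN/END parser; the input format below is invented
    raw = {} if raw is None else raw
    for line in fid:
        words = line.split()
        if not words or words[0].startswith("!"):
            continue
        if words[0].lower() == "end" and words[1] == block:
            return raw
        if words[0].lower() == "begin":
            raw[words[1]] = parse_blocks(fid, block=words[1])  # shared iterator
        else:
            raw[words[0]] = " ".join(words[1:])
    return raw

text = "begin session\nname R1234\nbegin station\ncount 4\nend station\nend session\n"
print(parse_blocks(io.StringIO(text)))
# {'session': {'name': 'R1234', 'station': {'count': '4'}}}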
Example #5
def read(stage, dset):
    """Read the GNSS RINEX data.

    Args:
        stage (str):          Name of current stage.
        dset (Dataset):       A dataset containing the data.
    """
    dset.vars.update(file_vars())
    station = dset.vars["station"]
    sampling_rate = config.tech.sampling_rate.float

    # Read GNSS observation data either from an Android raw file or a RINEX file
    # TODO: Maybe a gnss.py 'obs' module should be added to ./where/obs?
    if config.tech.format.str == "android":
        parser = parsers.parse_key("gnss_android_raw_data",
                                   rundate=dset.analysis["rundate"],
                                   station=station)
    else:
        version, file_path = gnss.get_rinex_file_version("gnss_rinex_obs")
        log.info(f"Read RINEX file {file_path} with format version {version}.")
        if version.startswith("2"):
            parser = parsers.parse_key("rinex2_obs",
                                       file_path=file_path,
                                       sampling_rate=sampling_rate)
        elif version.startswith("3"):
            parser = parsers.parse_file("rinex3_obs",
                                        file_path=file_path,
                                        sampling_rate=sampling_rate)
        else:
            log.fatal(
                f"Unknown RINEX format {version} is used in file {file_path}")

    dset.update_from(parser.as_dataset())

    # Select GNSS observation to process
    cleaners.apply_remover("gnss_select_obs", dset)

    # Overwrite station coordinates given in RINEX header
    # TODO: Should be an apriori function, where a station coordinate can be selected for a given station.
    #       "check_coordinate"/"limit" -> station coordinate given in RINEX header and "database" could be checked
    #                                  -> warning could be given

    p = parsers.parse_key(parser_name="gnss_bernese_crd",
                          file_key="gnss_station_crd")
    sta_crd = p.as_dict()

    if station in sta_crd:
        pos = np.array([
            sta_crd[station]["pos_x"], sta_crd[station]["pos_y"],
            sta_crd[station]["pos_z"]
        ])

        # Check station coordinates against RINEX header station coordinates
        # (absolute difference, so offsets in either direction trigger the warning)
        limit = 10  # meters
        diff = pos - dset.site_pos.trs[0].val
        if not (np.abs(diff) < limit).all():
            log.warn(
                f"Difference between station database (xyz: {pos[0]:.3f} m, {pos[1]:.3f} m, {pos[2]:.3f} m) "
                f"and RINEX header (xyz: {dset.site_pos.trs.x[0]:.3f} m, {dset.site_pos.trs.y[0]:.3f} m, "
                f"{dset.site_pos.trs.z[0]:.3f} m) station coordinates exceeds the limit of {limit} m "
                f"(xyz: {diff[0]:.3f} m, {diff[1]:.3f} m, {diff[2]:.3f} m).")

        # pos = apriori.get("gnss_station_coord", rundate=dset.analysis["rundate"], station=station)
        dset.site_pos[:] = np.repeat(pos[None, :], dset.num_obs, axis=0)
        dset.meta["pos_x"] = sta_crd[station]["pos_x"]
        dset.meta["pos_y"] = sta_crd[station]["pos_y"]
        dset.meta["pos_z"] = sta_crd[station]["pos_z"]

    # Write dataset to file
    dset.write_as(stage=stage)
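The coordinate check reduces to an element-wise NumPy comparison. A self-contained sketch with made-up coordinates, using the same absolute-value test as above:

import numpy as np

# Made-up coordinates: station database position vs. RINEX header position (meters)
pos = np.array([4331297.348, 567555.640, 4633133.728])
rinex_pos = np.array([4331309.100, 567550.000, 4633120.000])

limit = 10  # meters
diff = pos - rinex_pos
if not (np.abs(diff) < limit).all():
    print(f"Station coordinates differ by more than {limit} m: {diff}")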