Example 1
    def __init__(
        self,
        rundate: datetime.date,
        **kwargs: Dict[str, Any],
    ) -> None:
        """Set up a new AprioriOrbit object, does not parse any data

        TODO: Remove dependency on rundate, use time to read correct files. (What to do with dataset?)

        Args:
            rundate:    Date of model run.
        """
        # MURKS: Should it be done like that. The technique is normally not given for unittest routines (like
        #       test_broadcast.py).
        try:
            pipeline = config.analysis.pipeline.str
        except (AttributeError, exceptions.MissingSectionError):
            pipeline = None

        self.rundate = rundate
        self.pipeline = pipeline

        self._dset_raw = dataset.Dataset(rundate=rundate,
                                         pipeline=pipeline,
                                         stage=self.name,
                                         label="raw")
        self._dset_edit = dataset.Dataset(rundate=rundate,
                                          pipeline=pipeline,
                                          stage=self.name,
                                          label="edit")
        self._dset = None
Example 2
def _get_outliers_dataset(dset: "Dataset") -> "Dataset":
    """Get dataset with outliers

    Args:
       dset:        A dataset containing the data.

    Returns:
       Dataset with outliers or status 1 if no data for "calculate" stage are available
    """

    # Get Dataset where no outliers are rejected
    dset_vars = {**dset.vars, **dset.analysis}
    dset_vars["stage"] = "calculate"
    dset_complete = dataset.Dataset.read(**dset_vars)

    if dset_complete.num_obs == 0:
        # NOTE: This is the case for concatenated Datasets, where "calculate" stage data are not available.
        return 1

    # Get relative complement, which corresponds to "outlier" dataset
    #dset_outliers = dset_complete.complement_with(dset, complement_by=["time", "satellite"])
    dset_outliers = dataset.Dataset(
        num_obs=0
    )  # MURKS: complement_with does not exist yet in Dataset v3.

    return dset_outliers
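
The MURKS workaround above can be replaced by computing the relative complement by hand. A minimal sketch, assuming the Dataset time field exposes a datetime view and a satellite text field is present; the helper name is hypothetical and not part of the Dataset v3 API:

import numpy as np


def _complement_by_epoch_and_satellite(dset_complete: "Dataset", dset: "Dataset") -> "Dataset":
    """Keep the observations of dset_complete that are not present in dset (the outliers)"""
    kept = set(zip(dset.time.datetime, dset.satellite))
    keep_idx = np.array(
        [(epoch, sat) not in kept for epoch, sat in zip(dset_complete.time.datetime, dset_complete.satellite)]
    )
    dset_complete.subset(keep_idx)  # subset() removes observations in place (see Example 17)
    return dset_complete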
Example 3
def _get_outliers_dataset(dset: "Dataset") -> "Dataset":
    """Get dataset with outliers

    Args:
       dset:        A dataset containing the data.

    Returns:
       Dataset with outliers or status 1 if no data for "calculate" stage are available
    """

    # Get Dataset where no outliers are rejected
    dset_complete = dataset.Dataset(
        rundate=dset.analysis["rundate"],
        pipeline=dset.vars["pipeline"],
        stage="calculate",
        station=dset.vars["station"],
    )

    if dset_complete.num_obs == 0:
        # NOTE: This is the case for concatenated Datasets, where "calculate" stage data are not available.
        return 1

    # Get relative complement, which corresponds to "outlier" dataset
    dset_outliers = dset_complete.complement_with(dset, complement_by=["time", "satellite"])

    return dset_outliers
Example 4
    def calculate_orbit(self, dset: "Dataset", time: str = "time") -> None:
        """Set Dataset representing calculated apriori orbit

        Args:
            dset:   A dataset containing the data.
            time:   Define time fields to be used. It can be for example 'time' or 'sat_time'. 'time' is related to 
                    observation time and 'sat_time' to satellite transmission time.
        """
        if not dset.num_obs:
            log.fatal(
                f"Dataset is empty. No observation epochs given for calculating orbits."
            )

        # TODO: Getting of 'id' and 'profile' -> Should it be done like that?
        try:
            profile = config.analysis.profile.str
        except exceptions.MissingEntryError:
            profile = None

        try:
            id_ = config.analysis.id.str
        except exceptions.MissingEntryError:
            id_ = None

        self._dset = dataset.Dataset(
            rundate=self.rundate,
            pipeline=self.pipeline,
            stage=self.name,
            label="orbit",
            profile=profile,
            id=id_,
        )
        self._calculate(self._dset, dset, time=time)
Example 5
def _get_outliers_dataset(dset: "Dataset") -> Union["Dataset", Enum]:
    """Get dataset with outliers

    Args:
       dset:        A dataset containing the data.

    Returns:
       Dataset with outliers or error exit status if no data for "calculate" stage are available
    """

    # Get Dataset where no outliers are rejected
    file_vars = {**dset.vars, **dset.analysis}
    file_vars["stage"] = "calculate"

    try:
        dset_complete = dataset.Dataset.read(**file_vars)
    except OSError:
        log.warn(f"Could not read dataset {config.files.path('dataset', file_vars=file_vars)}.")
        return enums.ExitStatus.error

    # Get relative complement, which corresponds to "outlier" dataset
    # dset_outliers = dset_complete.complement_with(dset, complement_by=["time", "satellite"])
    dset_outliers = dataset.Dataset(num_obs=0)  # MURKS: complement_with does not exist yet in Dataset v3.

    return dset_outliers
Example 6
    def __init__(self, rundate: datetime.date, **kwargs: Dict[str,
                                                              Any]) -> None:
        """Set up a new AprioriOrbit object, does not parse any data

        TODO: Remove dependency on rundate, use time to read correct files. (What to do with dataset?)

        Args:
            rundate:    Date of model run.
        """
        # MURKS: Should it be done like that. The technique is normally not given for unittest routines (like
        #       test_broadcast.py).
        try:
            pipeline = config.analysis.pipeline.str
        except exceptions.MissingEntryError:
            pipeline = None

        # TODO: Getting of 'id' and 'profile' -> Should it be done like that?
        try:
            profile = config.analysis.profile.str
        except exceptions.MissingEntryError:
            profile = None

        try:
            id_ = config.analysis.id.str
        except exceptions.MissingEntryError:
            id_ = None

        self.rundate = rundate
        self.pipeline = pipeline

        self._dset_raw = dataset.Dataset(
            rundate=rundate,
            pipeline=pipeline,
            stage=self.name,
            label="raw",
            profile=profile,
            id=id_,
        )
        self._dset_edit = dataset.Dataset(
            rundate=rundate,
            pipeline=pipeline,
            stage=self.name,
            label="edit",
            profile=profile,
            id=id_,
        )
        self._dset = None
Example 7
 def read_dset(rundate):
     with Timer(f"Finish read of day {rundate} in", logger=log.time):
         try:
             log.info(f"Reading data for {rundate}")
             return dataset.Dataset.read(**dict(dset_vars, rundate=rundate))
         except OSError as err:
             log.warn(f"Unable to read data for {rundate}: {err}")
             return dataset.Dataset()
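
A minimal sketch of how read_dset could be combined with the update_from/extend pattern used in Examples 9 and 14 to collect several consecutive days into one dataset; the start date and number of days are hypothetical:

from datetime import date, timedelta

dset_merged = dataset.Dataset()
rundate = date(2021, 1, 1)  # hypothetical start date
for offset in range(3):  # read three consecutive days
    dset_day = read_dset(rundate + timedelta(days=offset))
    if dset_day.num_obs == 0:
        continue
    if dset_merged.num_obs == 0:
        dset_merged.update_from(dset_day)
    else:
        dset_merged.extend(dset_day)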
Example 8
    def as_dataset(self) -> "Dataset":
        """Return the parsed data as a Dataset

        Returns:
            A dataset containing the data.
        """

        # Initialize dataset
        dset = dataset.Dataset()
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["year"])

        # Add time
        epochs = list()
        for year, doy, seconds in zip(self.data["year"], self.data["doy"],
                                      self.data["seconds"]):
            epochs.append(
                datetime.strptime("{:.0f} {:.0f}".format(year, doy), "%Y %j") +
                timedelta(seconds=seconds))

        dset.add_time("time",
                      val=epochs,
                      scale="gps",
                      fmt="datetime",
                      write_level="operational")

        # Add system field
        if "system" in self.data.keys():
            systems = []
            for system in self.data["system"]:
                systems.append(enums.gnss_name_to_id[system.lower()].value)

            dset.add_text("system", val=systems)

        # Add satellite field
        if "satellite" in self.data.keys():
            satellites = []
            for system, satellite in zip(dset.system, self.data["satellite"]):
                satellites.append(system + str(satellite).zfill(2))

            dset.add_text("satellite", val=satellites)

        # Add text and float fields
        fields = set(self.data.keys()) - {
            "year", "doy", "seconds", "system", "satellite"
        }
        for field in fields:
            if self.data[field].dtype.kind in {
                    "U", "S"
            }:  # Check if numpy type is string
                dset.add_text(field, val=self.data[field])
                continue

            dset.add_float(field, val=self.data[field])

        return dset
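
A minimal usage sketch for a parser that provides as_dataset, assuming the Midgard-style parse_file entry point; the parser name and file path are hypothetical:

from midgard import parsers  # assumed import path

parser = parsers.parse_file(parser_name="gnss_position", file_path="position.txt")  # hypothetical names
dset = parser.as_dataset()
if dset.num_obs:
    print(dset.fields)  # e.g. "time", "system", "satellite" plus the added text/float fields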
Example 9
def _concatenate_datasets(from_date: date, to_date: date, dset_vars: Dict[str,
                                                                          str],
                          only_for_rundate: bool) -> np.ndarray:
    """Concatenate datasets

    Args:
        from_date:         Start date for reading Dataset.
        to_date:           End date for reading Dataset.
        dset_vars:         Common Dataset variables.
        only_for_rundate:  Concatenate only data for given rundate.
    """
    merged_vars = dset_vars.copy()
    merged_vars["id"] += "_concatenated"
    dset_merged = dataset.Dataset(
        **dict(merged_vars, rundate=from_date, empty=True))

    date_to_read = from_date
    while date_to_read <= to_date:
        dset = dataset.Dataset().read(**dict(dset_vars, rundate=date_to_read))

        current_date = date_to_read
        date_to_read += timedelta(days=1)

        if dset.num_obs == 0:
            log.info(f"No data to read for {current_date}")
            continue

        if only_for_rundate:
            _keep_data_only_for_rundate(dset)

            if dset.num_obs == 0:
                log.info(f"No data to read for {current_date}")
                continue

        log.info(f"Reading data for {current_date}")
        if not dset_merged:
            dset_merged.update_from(dset)
        else:
            dset_merged.extend(dset)

    return dset_merged
Example 10
    def __init__(
        self,
        rundate: datetime,
        sampling_rate: Union[float, None] = None,
        systems: Union[List[str], None] = None,
        satellites: Union[List[str], None] = None,
        site_pos: Union[List[float], None] = None,
        station: Union[str, None] = None,
    ):
        """Set up a new GnssSimulate object

        TODO: Remove dependency on rundate, use time to read correct files. (What to do with dataset?)

        Args:
            rundate (datetime.date):  Date of model run.
            sampling_rate (float):    Sampling rate of the simulated observations.
            systems (list):           GNSS system identifiers (e.g. 'G').
            satellites (list):        Strings with names of satellites.
            site_pos (list):          Station position given as [X, Y, Z] coordinates.
            station (str):            Station name.
        """
        # MURKS: Should it be done like that. The technique is normally not given for unittest routines (like
        #       test_broadcast.py).
        try:
            pipeline = config.analysis.pipeline.str
        except Exception:
            pipeline = "gnss"

        # +MURKS
        systems = ["G"]
        satellites = ["G01", "G02"]
        site_pos = [3348186.1150, 465040.8615, 5390738.0919]
        station = "krss"
        # -MURKS
        self.sampling_rate = sampling_rate if sampling_rate else config.tech.simulate.sampling_rate.float
        self.systems = systems if systems else config.tech.simulate.systems.list
        self.satellites = satellites if satellites else config.tech.simulate.satellites.list
        self.site_pos = np.array(site_pos) if site_pos else np.array(config.tech.simulate.site_pos.list)
        self.station = station if station else config.tech.simulate.station.str
        # TODO: Generate station position

        self._dset_raw = dataset.Dataset(rundate=rundate, pipeline=pipeline, stage="simulate", label="raw")
        self._dset_orbit = dataset.Dataset(rundate=rundate, pipeline=pipeline, stage="simulate", label="orbit")
        self._dset = dataset.Dataset(rundate=rundate, pipeline=pipeline, stage="simulate", label="simulate")
Example 11
    def dset(self) -> "Dataset":
        """Dataset representing calculated apriori orbit

        Calculates data from `dset_edit` if the data are not already present.
        """
        if self._dset is None:
            self._dset = dataset.Dataset(rundate=self.rundate,
                                         pipeline=self.pipeline,
                                         stage=self.name,
                                         label="orbit")
            self._calculate(self._dset, self._dset_edit)

        return self._dset
Example 12
def run_stage(rundate, pipeline, dset, stage, prev_stage, **kwargs):
    # Skip stages where no dependencies have changed
    dep_path = config.files.path("depends",
                                 file_vars={
                                     **kwargs, "stage": stage
                                 })
    if not (dependencies.changed(dep_path)
            or util.check_options("-F", "--force")):
        log.info(
            f"Not necessary to run {stage} for {pipeline.upper()} {rundate.strftime(config.FMT_date)}"
        )
        return

    if dset is None:
        try:
            # Read dataset from disk if it exists
            dset = dataset.Dataset.read(rundate=rundate,
                                        pipeline=pipeline,
                                        stage=prev_stage,
                                        label="last",
                                        **kwargs)
        except (OSError, ValueError):
            # Create empty dataset
            dset = dataset.Dataset(rundate=rundate,
                                   pipeline=pipeline,
                                   **kwargs)

    # Set up dependencies. Add dependencies to previous stage and config file
    dependencies.init(dep_path)
    if prev_stage is not None:
        dependencies.add(config.files.path("depends",
                                           file_vars={
                                               **kwargs, "stage": prev_stage
                                           }),
                         label="depends")
    dependencies.add(*config.tech.sources, label="config")
    # Delete old datasets for this stage
    dset.delete_stage(stage, **kwargs)

    # Call the current stage. Skip rest of stages if current stage returns False (compare with is since by
    # default stages return None)
    plugins.call(package_name=__name__,
                 plugin_name=pipeline,
                 part=stage,
                 stage=stage,
                 dset=dset,
                 plugin_logger=log.info)
    dependencies.write()

    return dset
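
A minimal sketch of how run_stage might be chained over an ordered list of stages (the stage names are hypothetical); a skipped stage returns None, so the following call falls back to reading the previous stage's dataset from disk:

dset = None
prev_stage = None
for stage in ("read", "edit", "calculate", "estimate"):  # hypothetical stage order
    dset = run_stage(rundate, pipeline, dset, stage, prev_stage)
    prev_stage = stage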
Example 13
    def __init__(
        self,
        rundate: datetime.date,
        time: Union[None, "TimeTable"] = None,
        satellite: Union[None, List[str]] = None,
        **kwargs: Dict[str, Any],
    ) -> None:
        """Set up a new AprioriOrbit object, does not parse any data

        TODO: Remove dependency on rundate, use time to read correct files. (What to do with dataset?)

        Args:
            rundate:    Date of model run.
            time:       Time epochs at the satellite for which to calculate the apriori orbit.
            satellite:  Strings with names of satellites.
        """
        # MURKS: Should it be done like that. The technique is normally not given for unittest routines (like
        #       test_broadcast.py).
        try:
            pipeline = config.analysis.pipeline.str
        except AttributeError:
            pipeline = None

        self.rundate = rundate
        self.pipeline = pipeline
        self.time = time
        self.satellite = satellite

        self._dset_raw = dataset.Dataset(rundate=rundate,
                                         pipeline=pipeline,
                                         stage=self.name,
                                         label="raw")
        self._dset_edit = dataset.Dataset(rundate=rundate,
                                          pipeline=pipeline,
                                          stage=self.name,
                                          label="edit")
        self._dset = None
Example 14
    def _read(self, dset_raw):
        """Read SP3 orbit file data and save it in a Dataset

        In addition to the given date, we read data for the day before and after. This is needed to carry out correct
        orbit interpolation at the start and end of a day.

        TODO:
        How well do the orbits fit from day to day? Is it necessary to align the orbits?

        Args:
            dset_raw (Dataset):   Dataset representing raw data from apriori orbit files
        """
        date_to_read = dset_raw.analysis["rundate"] - timedelta(
            days=self.day_offset)
        file_paths = list()

        # Loop over days to read
        while date_to_read <= dset_raw.analysis["rundate"] + timedelta(
                days=self.day_offset):
            if self.file_path is None:
                file_path = config.files.path(
                    self.file_key, file_vars=config.date_vars(date_to_read))
            else:
                file_path = self.file_path

            log.debug(f"Parse precise orbit file {file_path}")

            # Generate temporary Dataset with orbit file data
            dset_temp = dataset.Dataset(rundate=date_to_read,
                                        pipeline=dset_raw.vars["pipeline"],
                                        stage="temporary")
            parser = parsers.parse(parser_name="orbit_sp3",
                                   file_path=file_path,
                                   rundate=date_to_read)
            parser.write_to_dataset(dset_temp)
            file_paths.append(str(parser.file_path))
            dependencies.add(str(parser.file_path),
                             label=self.file_key)  # Used for output writing

            # Extend Dataset dset_raw with temporary Dataset
            date = date_to_read.strftime("%Y-%m-%d")
            if dset_raw.num_obs == 0:
                dset_raw.update_from(dset_temp)
            else:
                dset_raw.extend(dset_temp, meta_key=date)
            dset_raw.meta.add("file_path", file_paths, section="parser")

            date_to_read += timedelta(days=1)

        return dset_raw
Example 15
    def calculate_orbit(self, dset: "Dataset", time: str = "time") -> None:
        """Set Dataset representing calculated apriori orbit

        Args:
            dset:   A dataset containing the data.
            time:   Define time fields to be used. It can be for example 'time' or 'sat_time'. 'time' is related to 
                    observation time and 'sat_time' to satellite transmission time.
        """
        if not dset.num_obs:
            log.fatal(
                f"Dataset is empty. No observation epochs given for calculating orbits."
            )

        self._dset = dataset.Dataset(rundate=self.rundate,
                                     pipeline=self.pipeline,
                                     stage=self.name,
                                     label="orbit")
        self._calculate(self._dset, dset, time=time)
Example 16
    def _read(self, dset_raw, rundate, provider, version):
        """Read SP3 orbit file data and save it in a Dataset

        The naming convention corresponds to the end of the arc, at midnight, hence we add day_offset,
        which is usually arc_length - 1.

        Args:
            dset_raw (Dataset):   Dataset representing raw data from apriori orbit files
            rundate:              Date of run, datetime object
            provider:             Str: Orbit provider
            version:              Str: Orbit version
        """
        date_to_read = rundate + timedelta(days=self.day_offset)
        file_vars = config.date_vars(date_to_read)
        file_vars["provider"] = provider
        file_vars["version"] = version
        file_vars["sat_name"] = self.sat_name

        if self.file_path is None:
            file_path = config.files.path(self.file_key, file_vars)
        else:
            file_path = self.file_path

        log.debug(f"Parse precise orbit file {file_path}")

        # Generate dataset with external orbit file data
        dset_orbit = dataset.Dataset(tech=dset_raw.meta["tech"],
                                     stage="orbit",
                                     dataset_name="",
                                     dataset_id=0,
                                     empty=True)
        parser = parsers.parse(parser_name="orbit_sp3",
                               file_path=file_path,
                               rundate=date_to_read)
        parser.write_to_dataset(dset_orbit)

        return dset_orbit
Example 17
def add_to_full_timeseries(dset):
    """Write some key variables to the full timeseries

    Args:
        dset:  Dataset, data for a model run.
    """
    dset_id = config.tech.timeseries.dataset_id.str.format(**dset.vars)
    try:
        # Read existing dataset
        dset_ts = dataset.Dataset.read(
            rundate=date(1970, 1, 1),
            pipeline=dset.vars["pipeline"],
            stage="timeseries",
            label=dset_id,
            session="",
            use_options=False,
            id=dset.analysis["id"],
        )
    except OSError:
        # Start new timeseries dataset
        dset_ts = dataset.Dataset(
            rundate=date(1970, 1, 1),
            pipeline=dset.vars["pipeline"],
            stage="timeseries",
            label=dset_id,
            session="",
            use_options=False,
            id=dset.analysis["id"],
        )
    dset_session = dataset.Dataset()

    # Add data to dset_session
    idx_fields = config.tech[WRITER].index.list
    field_values = [["all"] + list(dset.unique(f)) for f in idx_fields]
    idx_values = dict(zip(idx_fields, zip(*itertools.product(*field_values))))
    # TODO: Remove combinations where filter leaves 0 observations

    num_obs = len(idx_values[idx_fields[0]])  # Length of any (in this case the first) field
    mean_epoch = dset.time.mean.utc
    rundate_str = dset.analysis["rundate"].strftime(config.FMT_date)
    session = dset.vars.get("session", "")
    status = dset.meta.get("analysis_status", "unchecked")
    session_type = dset.meta.get("input", dict()).get("session_type", "")

    dset_session.num_obs = num_obs
    dset_session.add_time("time", val=[mean_epoch] * num_obs, scale=mean_epoch.scale, fmt=mean_epoch.fmt)
    dset_session.add_text("rundate", val=[rundate_str] * num_obs)
    dset_session.add_text("session", val=[session] * num_obs)
    dset_session.add_text("status", val=[status] * num_obs)
    dset_session.add_text("session_type", val=[session_type] * num_obs)

    for field, value in idx_values.items():
        dset_session.add_text(field, val=value)

    default_dset_str = f"{dset.vars['stage']}/{dset.vars['label']}"
    dsets = {default_dset_str: dset}
    for method, cfg_entry in config.tech[WRITER].items():
        try:
            method_func = getattr(sys.modules[__name__], f"method_{method}")
        except AttributeError:
            log.warn(f"Method {method!r} is unknown")
            continue

        for field_cfg in cfg_entry.as_list(split_re=", *"):
            field_out = re.sub("[ -/:]", "_", field_cfg)
            func, _, field_dset = field_cfg.rpartition(":")
            field_in, _, dset_str = field_dset.partition("-")
            func = func if func else field_in
            dset_str = dset_str if dset_str else default_dset_str
            if dset_str not in dsets:
                stage, _, dset_id = dset_str.partition("/")
                dset_id = int(dset_id) if dset_id else "last"
                dsets[dset_str] = dataset.Dataset.read(
                    rundate=dset.analysis["rundate"],
                    pipeline=dset.vars["pipeline"],
                    stage=stage,
                    session=dset.vars["session"],
                    label=dset_id,
                    id=dset.analysis["id"],
                )

            val, adder, unit = method_func(dsets[dset_str], field_in, idx_values, func)
            if adder:
                add_func = getattr(dset_session, adder)
                add_func(field_out, val=val, unit=unit)

    # hack to get solved neq data into the time series:
    # TODO: unhack this :P Add as a method_neq instead?
    if "normal equation" in dset.meta:
        _add_solved_neq_fields(dset, dset_session, idx_values)

    # Filter timeseries dataset to remove any previous data for this rundate and session

    if dset_ts.num_obs > 0:
        keep_idx = np.logical_not(dset_ts.filter(rundate=rundate_str, session=session))
        dset_ts.subset(keep_idx)

    # Extend timeseries dataset with dset_session and write to disk
    dset_ts.extend(dset_session)
    dset_ts.write()
    log.info(f"Updating timeseries dataset")
Example 18
    def as_dataset(self) -> "Dataset":
        """Return the parsed data as a Dataset

        Returns:
            A dataset containing the data.
        """
        # Spring constellation definition
        system_def = {
            "0": "",  # Unknown
            "1": "G",  # GPS
            "2": "R",  # GLONASS
            "3": "S",  # SBAS
            "4": "E",  # Galileo
            "5": "C",  # BeiDou
            "6": "J",  # QZSS
        }

        field_spring_to_where = {
            "3DSpeed": "site_vel_3d",
            "Clock": "delay.gnss_satellite_clock",
            "EastSpeed": "site_vel_east",
            "GroupDelay": "delay.gnss_total_group_delay",
            "HSpeed": "site_vel_h",
            "IODE": "used_iode",
            "NorthSpeed": "site_vel_north",
            "PseudoRange": "delay.gnss_range",
            "SatInView": "num_satellite_available",
            "TropoDelay": "troposphere_dT",
            "UISD": "delay.gnss_ionosphere",
            "UsedSat": "num_satellite_used",
            "EastvsRef": "site_pos_vs_ref_east",
            "NorthvsRef": "site_pos_vs_ref_north",
            "VerticalvsRef": "site_pos_vs_ref_up",
            "VerticalSpeed": "site_vel_up",
            "XSpeed": "site_vel_x",
            "YSpeed": "site_vel_y",
            "ZSpeed": "site_vel_z",
        }

        # Initialize dataset
        dset = dataset.Dataset()
        if not self.data:
            log.warn("No data in {self.file_path}.")
            return dset
        dset.num_obs = len(self.data["GPSEpoch"])

        # Add time
        dset.add_time(
            "time",
            val=[
                dateutil.parser.parse(v.replace("UTC", ""))
                for v in self.data["UTCDateTime"]
            ],
            scale="utc",
            fmt="datetime",
            write_level="operational",
        )

        # Add system field based on Constellation column
        if "Constellation" in self.data.keys():
            dset.add_text("system",
                          val=[
                              system_def[str(value)]
                              for value in self.data["Constellation"]
                          ])

        # Add satellite field based on PRN column
        if "PRN" in self.data.keys():
            prn_data = []
            for prn in self.data["PRN"]:
                if prn >= 1 and prn <= 32:  # Handling of GPS satellites
                    prn_data.append("G" + str(prn).zfill(2))
                elif prn >= 38 and prn <= 70:  # Handling of GLONASS satellites
                    prn_data.append("R" + str(prn - 38).zfill(2))
                elif prn >= 71 and prn <= 140:  # Handling of Galileo satellites
                    prn_data.append("E" + str(prn - 70).zfill(2))
                elif prn >= 191 and prn <= 222:  # Handling of BeiDou satellites
                    prn_data.append("C" + str(prn - 191).zfill(2))
                else:
                    log.fatal(f"Spring PRN number '{prn}' is unknown.")

            dset.add_text("satellite", val=prn_data)
            dset.add_text("system", np.array(prn_data).astype("U1"))

        # Add position field based on Latitude, Longitude and Height column
        if "Latitude" in self.data.keys():
            pos = Position(
                val=np.vstack((self.data["Latitude"] * Unit.deg2rad,
                               self.data["Longitude"] * Unit.deg2rad,
                               self.data["Height"])).T,
                system="llh",
            )
            if "XPos" in self.data.keys():
                dset.add_position("sat_pos",
                                  val=pos.trs,
                                  system="trs",
                                  time=dset.time)
            else:
                dset.add_position("site_pos",
                                  val=pos.trs,
                                  system="trs",
                                  time=dset.time)

        # Define fields to save in dataset
        remove_time_fields = {
            "Constellation", "GPSEpoch", "GPSWeek", "GPSSecond", "PRN", "",
            "UTCDateTime"
        }
        fields = set(self.data.keys()) - remove_time_fields

        # Add text and float fields
        for field in fields:

            where_fieldname = field_spring_to_where.get(field, field.lower())

            if self.data[field].dtype.kind in {
                    "U", "S"
            }:  # Check if numpy type is string
                dset.add_text(where_fieldname, val=self.data[field])
                continue

            dset.add_float(where_fieldname, val=self.data[field])

        return dset
Example 19
def compare(date: "datedoy", pipeline: "pipeline", items: "option",
            specifier: "option"):
    log.init(log_level="info")
    dsets = dict()

    # Additional options
    stage = util.read_option_value("--stage")
    writer_names = util.read_option_value("--writers").replace(",",
                                                               " ").split()
    items_ = [s.strip() for s in items.split(",")]

    # Get optional options
    label = util.read_option_value("--label", default="None")
    # TODO label = "last" if label == "last" else label
    station = util.read_option_value("--station", default="")
    id_ = util.read_option_value("--id", default="")

    # Update configuration of Where analysis
    config.where.update_from_options(_clean_sys_argv(pipeline))

    # Get dataset variables
    dset_vars = config.create_file_vars(rundate=date, pipeline=pipeline)

    # Read datasets for given specifier
    if specifier == "id":
        for id_ in items_:
            try:
                dset = dataset.Dataset().read(rundate=date,
                                              pipeline=pipeline,
                                              stage=stage,
                                              label=label,
                                              id=id_,
                                              station=station)
            except OSError:
                log.warn(f"No data to read for Dataset id '{id_}'.")
                continue

            dset.vars.update(dset_vars)
            dset.vars["id"] = id_
            dsets.update({id_: dset})

    elif specifier == "station":
        for station in items_:

            try:
                dset = dataset.Dataset().read(rundate=date,
                                              pipeline=pipeline,
                                              stage=stage,
                                              label=label,
                                              id=id_,
                                              station=station)
            except OSError:
                log.warn(f"No data to read for Dataset station '{station}'.")
                continue

            dset.vars.update(dset_vars)
            dset.vars["station"] = station
            dsets.update({station: dset})

    elif specifier == "stage":
        for stage in items_:

            try:
                dset = dataset.Dataset().read(rundate=date,
                                              pipeline=pipeline,
                                              stage=stage,
                                              label=label,
                                              id=id_,
                                              station=station)
            except OSError:
                log.warn(f"No data to read for Dataset stage '{stage}'.")
                continue
            dset.vars.update(dset_vars)
            dset.vars["stage"] = stage
            dsets.update({stage: dset})
    else:
        log.fatal(
            f"Specifier {specifier} is not defined. It should be either 'id', 'station' or 'stage'."
        )

    if len(dsets) == 0:
        log.fatal(f"All given datasets are empty [{', '.join(dsets.keys())}].")
    elif len(dsets) == 1:
        log.warn(
            f"Nothing to compare. Only dataset '{list(dsets.keys())[0]}' is available."
        )

    # Loop over writers
    for writer in writer_names:
        write(writer, dset=dsets)
Example 20
    def satellite_clock_correction(self,
                                   dset: "Dataset",
                                   time: str = "time") -> np.ndarray:
        """Determine satellite clock correction based on precise satellite clock product

        The GNSS satellite clock bias is read from RINEX clock files. Afterwards the satellite clock bias is determined
        via a cubic interpolation for the observation time.

        TODO:
            * Beware of the extrapolation (bounds_error=False in interpolate).
            * Check for satellite clock interpolation in:
              "Hesselbarth, A.: Statische und kinematische GNSS-Auswertung mittels PPP, 2011"

        Args:
            dset: A Dataset containing model data.
            time: Define time fields to be used. It can be for example 'time' or 'sat_time'. 'time' is related to 
                  observation time and 'sat_time' to satellite transmission time.

        Returns:
            GNSS satellite clock corrections for each observation
        """
        correction = np.zeros(dset.num_obs)
        sat_transmission_time = dset[time].gps.gps_ws.seconds

        # Get precise GNSS satellite clock values
        clock_product = config.tech.get("clock_product", default="clk").str
        if clock_product == "sp3":
            all_sat_clk = self.dset_edit
        elif clock_product == "clk":

            # TODO: File path information has to be improved, because 3 consecutive days are read.
            log.info(
                f"Calculating satellite clock correction (precise) based on RINEX clock file "
                f"{config.files.path(file_key='gnss_rinex_clk')}")

            all_sat_clk = dataset.Dataset(rundate=dset.analysis["rundate"])
            parser = parsers.parse("rinex_clk",
                                   rundate=dset.analysis["rundate"])
            parser.write_to_dataset(
                all_sat_clk
            )  # TODO Read RINEX clock file, from day before and day after.
            #     Needed for interpolation. Add handling if these clk-files
            #     are not available. Remove first and last observations?
            #     If precise clock products are not available broadcast
            #     ephemeris should be used.
        else:
            log.fatal(
                f"Unknown clock product {clock_product!r}. "
                "Configuration option 'clock_product' can only be 'sp3' or 'clk'"
            )

        # Loop over all satellites given in configuration file
        for sat in dset.unique("satellite"):

            # Skip satellites, which are not given in RINEX clock file
            if sat not in all_sat_clk.unique("satellite"):
                # TODO: Maybe satellite is available in SP3 file, which includes also
                #      satellite clock bias, but only for every 15 min instead of
                #      every 5 min (or 30 s by use of igs<wwwwd>.clk_30s).
                continue

            idx = dset.filter(satellite=sat)
            clk_idx = all_sat_clk.filter(satellite=sat)

            # Interpolation of GNSS precise satellite clock values
            # TODO: Check if interpolation method is ok.
            sat_clock_bias_ip = interpolate.interp1d(
                all_sat_clk.time.gps.gps_ws.seconds[clk_idx],
                all_sat_clk.sat_clock_bias[clk_idx],
                axis=0,
                kind="cubic",
                bounds_error=False,
                fill_value=all_sat_clk.sat_clock_bias[clk_idx][-1],
            )
            correction[idx] = sat_clock_bias_ip(sat_transmission_time[idx])

        return correction
Example 21
def main(date: "datedoy", pipeline: "pipeline", items: "option",
         specifier: "option"):
    log.init(log_level="info")
    dsets = dict()

    # Additional options
    stage = util.read_option_value("--stage")
    writer_names = util.read_option_value("--writers").replace(",",
                                                               " ").split()
    items_ = [s.strip() for s in items.split(",")]

    # Get optional options
    label = util.read_option_value("--label", default="None")
    # TODO label = "last" if label == "last" else label
    station = util.read_option_value("--station", default="")
    id_ = util.read_option_value("--id", default="")

    # Get dataset variables
    dset_vars = dict(pipeline=pipeline,
                     stage=stage,
                     station=station,
                     label=label,
                     id=id_)
    dset_vars = config.create_file_vars(rundate=date, **dset_vars)

    # Read datasets for given specifier
    if specifier == "id":
        for id_ in items_:
            dset = dataset.Dataset().read(rundate=date,
                                          pipeline=pipeline,
                                          stage=stage,
                                          label=label,
                                          id=id_,
                                          station=station)
            if dset.num_obs == 0:
                log.warn(f"Dataset '{id_}' is empty.")
                continue
            dset_vars["id"] = id_  # TODO: Better solution for handling of dataset variables?
            dset.vars.update(dset_vars)  # Necessary for example for getting correct file path in used writers.
            dsets.update({id_: dset})

    elif specifier == "station":
        for station in items_:
            dset = dataset.Dataset().read(rundate=date,
                                          pipeline=pipeline,
                                          stage=stage,
                                          label=label,
                                          id=id_,
                                          station=station)
            if dset.num_obs == 0:
                log.warn(f"Dataset '{station}' is empty.")
                continue
            dset_vars["station"] = station  # TODO: Better solution for handling of dataset variables?
            dset.vars.update(dset_vars)  # Necessary for example for getting correct file path in used writers.
            dsets.update({station: dset})

    elif specifier == "stage":
        for stage in items_:
            dset = dataset.Dataset().read(rundate=date,
                                          pipeline=pipeline,
                                          stage=stage,
                                          label=label,
                                          id=id_,
                                          station=station)
            if dset.num_obs == 0:
                log.warn(f"Dataset '{stage}' is empty.")
                continue
            dset_vars["stage"] = stage  # TODO: Better solution for handling of dataset variables?
            dset.vars.update(dset_vars)  # Necessary for example for getting correct file path in used writers.
            dsets.update({stage: dset})
    else:
        log.fatal(
            f"Specifier {specifier} is not defined. It should be either 'id', 'station' or 'stage'."
        )

    if len(dsets) == 0:
        log.fatal(f"All given datasets are empty [{', '.join(dsets.keys())}].")
    elif len(dsets) == 1:
        log.warn(
            f"Nothing to compare. Only dataset '{list(dsets.keys())[0]}' is available."
        )

    # Loop over writers
    for writer in writer_names:
        write(writer, dset=dsets)
Example 22
    def setUp(self):
        """

        The first test setup is based on the bc_velo.c program, which is published in :cite:`remondi2004`, and the
        following RINEX navigation file sample:

        /* Sample Broadcast Message in unit of radians, seconds, meters.
        20 01  7 23  2  0  0.0 -.857324339449D-04 -.272848410532D-11  .000000000000D+00
             .200000000000D+02  .886875000000D+02  .465376527657D-08  .105827953357D+01
             .457651913166D-05  .223578442819D-02  .177137553692D-05  .515379589081D+04
             .936000000000D+05  .651925802231D-07  .164046615454D+01 -.856816768646D-07
             .961685061380D+00  .344968750000D+03  .206374037770D+01 -.856928551657D-08
             .342514267094D-09  .000000000000D+00  .112400000000D+04  .000000000000D+00
             .200000000000D+01  .000000000000D+00 -.651925802231D-08  .276000000000D+03
             .865800000000D+05  .000000000000D+00  .000000000000D+00  .000000000000D+00
        */


        The second test setup compares results from Where against the gLAB solution for satellite G20 and epoch
        2016-03-01 00:00:00.0.

        /* Sample Broadcast Message in unit of radians, seconds, meters for satellite G20 and
        /  epoch 2016-03-01 00:00:00.0
        20 16  3  1  0  0  0.0 0.396233052015D-03 0.261479726760D-11 0.000000000000D+00
            0.100000000000D+02-0.231562500000D+02 0.530236372187D-08 0.253477496869D+00
           -0.111199915409D-05 0.483385741245D-02 0.810064375401D-05 0.515369705963D+04
            0.172800000000D+06-0.141561031342D-06 0.304306271006D+00 0.372529029846D-08
            0.926615731710D+00 0.207250000000D+03 0.133849764271D+01-0.843427989304D-08
           -0.164292557730D-09 0.100000000000D+01 0.188600000000D+04 0.000000000000D+00
            0.200000000000D+01 0.000000000000D+00-0.838190317154D-08 0.100000000000D+02
            0.172770000000D+06 0.400000000000D+01 0.000000000000D+00 0.000000000000D+00

        The third test setup compares results from Where against the CNES solution for satellite E01 and epoch
        2019-07-01 01:30:00.0.

        /* Sample Broadcast Message in unit of radians, seconds, meters for satellite E01 and
        /  epoch 2019-07-01 00:00:00.0
        E01 2019 07 01 00 00 00-6.374700460583D-04-8.085976332950D-12 0.000000000000D+00
             1.600000000000D+01 2.106250000000D+02 2.413671967697D-09 5.641607228729D-01
             9.929761290550D-06 1.870252890512D-04 7.383525371552D-06 5.440612319946D+03
             8.640000000000D+04-3.725290298462D-09 2.424721177655D-01 1.657754182816D-07
             9.878562635157D-01 1.958125000000D+02 3.073143357419D+00-5.291648989849D-09
             2.003654888840D-10 2.580000000000D+02 2.060000000000D+03 0.000000000000D+00
             3.120000000000D+00 0.000000000000D+00-1.862645149231D-09 0.000000000000D+00
             8.714000000000D+04 0.000000000000D+00 0.000000000000D+00 0.000000000000D+00 

        """
        # Initialize logging
        log.init(log_level="debug")

        # Get GNSS ephemeris data for testing
        if TEST == "test_1":
            file_name = "test2040.01n"
            year = 2001
            month = 7
            day = 23
            hour = 2
            minute = 0
            second = 0
            satellite = "G20"
            self.system = "G"  # GNSS identifier

            # Satellite transmission time
            self.t_sat_gpsweek = 1124.0
            self.t_sat_gpssec = 86400.00

        elif TEST == "test_2":
            file_name = "test0610.16n"
            year = 2016
            month = 3
            day = 1
            hour = 0
            minute = 0
            second = 0
            satellite = "G20"
            self.system = "G"  # GNSS identifier

            # Satellite transmission time
            self.t_sat_gpsweek = 1886.0
            self.t_sat_gpssec = 172799.92312317

        elif TEST == "test_3":
            file_name = "TEST00CNS_R_20191820000_01D_EN.rnx"
            year = 2019
            month = 7
            day = 1
            hour = 1
            minute = 30
            second = 0
            satellite = "E01"
            self.system = "E"  # GNSS identifier

            # Satellite transmission time
            self.t_sat_gpsweek = 2060.0
            self.t_sat_gpssec = 91800.0

        rundate = datetime(year, month, day, hour, minute, second)

        # Initialize configuration
        config.init(rundate=rundate, pipeline="gnss")

        # Generate observation dataset
        self.dset = dataset.Dataset(num_obs=1, rundate=rundate)
        self.dset.add_time(name="time",
                           val=rundate,
                           scale="gps",
                           fmt="datetime")
        #self.dset.add_time(
        #    name="time", val=Time(val=[self.t_sat_gpsweek], val2=[self.t_sat_gpssec], fmt="gps_ws", scale="gps")
        #)
        self.dset.add_text(name="satellite", val=[satellite])

        # Get broadcast ephemeris
        self.brdc = apriori.get(
            "orbit",
            rundate=rundate,
            system=tuple({self.system}),
            station="test",
            apriori_orbit="broadcast",
            file_path=pathlib.Path(__file__).parent / "files" / file_name,
        )

        self.idx = 0  # Broadcast ephemeris index