Example #1
def parse_clock_breaks(dset):
    """Parses the clock breaks string from the edit file

    Args:
        dset:                A Dataset containing model data. Clock break information is read from the configuration.

    Returns:
        Tuple of lists:      Stations and corresponding (time_start, time_end) intervals for clock estimation.
    """
    station_breaks = {
        s: [min(dset.time.utc), max(dset.time.utc) + TimeDelta(1, fmt="seconds", scale="utc")]
        for s in dset.unique("station")
    }
    
    clock_breaks = config.tech.get("clock_breaks", section=MODEL).as_list(split_re=", *")
    
    if clock_breaks:
        log.info(f"Applying clock breaks: {', '.join(clock_breaks)}")

    for cb in clock_breaks:
        # Station names may contain spaces
        cb = cb.split()
        cb_date = cb[-2:]
        cb_station = " ".join(cb[:-2])
        cb_time = Time(" ".join(cb_date), scale="utc", fmt="iso")
        if cb_station not in station_breaks:
            log.warn(
                f"Station {cb_station} with clock break unknown. Available options are {', '.join(station_breaks)}"
            )
            continue
        station_breaks[cb_station].append(cb_time)
        dset.meta.add_event(cb_time, "clock_break", cb_station)

    # Convert the station_breaks dict to lists of (station, (time_start, time_end))-tuples
    stations = list()
    time_intervals = list()
    for station in sorted(station_breaks.keys(), key=lambda s: (len(station_breaks[s]), s), reverse=True):
        station_times = sorted(station_breaks[station])
        for t_start, t_end in zip(station_times[:-1], station_times[1:]):
            stations.append(station)
            time_intervals.append((t_start, t_end))

    return stations, time_intervals
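The interval construction at the end pairs adjacent sorted break times, so a station with one clock break ends up with two estimation intervals. A minimal standalone sketch of that step, using hypothetical datetime values:

from datetime import datetime, timedelta

# Hypothetical break times for one station: session start, one clock break, and
# session end (padded by one second, as in station_breaks above)
times = sorted([
    datetime(2020, 1, 1, 0, 0),
    datetime(2020, 1, 1, 12, 0),                        # clock break
    datetime(2020, 1, 2, 0, 0) + timedelta(seconds=1),
])

# Adjacent pairs become (time_start, time_end) estimation intervals
intervals = list(zip(times[:-1], times[1:]))
# -> [(2020-01-01 00:00, 2020-01-01 12:00), (2020-01-01 12:00, 2020-01-02 00:00:01)]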
Example #2
    def site(self, key):
        """Positions and information about one site in the reference frame

        Args:
            key (String):  Key specifying which site to calculate position for.

        Returns:
            TrfSite:  Object with positions and information about site.
        """
        if key not in self.data:
            url = self.url["site"].format(site=key)
            log.info("Reading information about {} from {}", key, url)
            db_data = json.loads(requests.get(url).text)
            if not db_data:
                log.warn("No information returned for {}", key)
                self._data[key] = dict(id=None,
                                       provider=None,
                                       siteConfig_id=None)
                return super().site(key)

            site_fields = ("id", "provider", "siteConfig_id")
            site_data = {f: db_data[-1][f] for f in site_fields}

            epoch_data = list()
            epoch_fields = ("year", "doyStart", "doyEnd", "x", "y", "z")
            for epoch in db_data:
                if self.version is not None and epoch["geodeticDatum"][
                        "geodeticDatumName"] != self.version:
                    continue
                epoch_data.append([epoch[f] for f in epoch_fields])
            epoch_data = np.array(epoch_data)

            site_data["time_start"] = np.array([
                datetime.strptime("{:02.0f} {:.0f}".format(y, d), "%y %j")
                for y, d in epoch_data[:, [0, 1]]
            ])
            site_data["time_end"] = np.array([
                datetime.strptime("{:02.0f} {:.0f}".format(y, d), "%y %j")
                for y, d in epoch_data[:, [0, 2]]
            ]) + timedelta(days=1)
            site_data["pos"] = epoch_data[:, [3, 4, 5]]
            self._data[key] = site_data

        return super().site(key)
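The epoch limits in the example above are two-digit years combined with day-of-year values; a small sketch of that parsing with hypothetical numbers:

from datetime import datetime, timedelta

# Hypothetical epoch row: two-digit year, start and end day-of-year
year, doy_start, doy_end = 20.0, 32.0, 45.0

time_start = datetime.strptime("{:02.0f} {:.0f}".format(year, doy_start), "%y %j")  # 2020-02-01
time_end = datetime.strptime("{:02.0f} {:.0f}".format(year, doy_end), "%y %j") + timedelta(days=1)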
Example #3
def calculate_initial_values(eph):
    """Computing initial values for position and velocity in GCRS system

    This is for later use in orbit integration, from tables in the prediction files.  Use a lagrange polynomial in
    order to interpolate in the tables.

    Args:
        eph:  Dict containing ephemeris information

    Returns:
        eph:  Dict where the initial position and velocity is added
    """
    pos_gcrs = np.empty((3, 0))
    times = np.empty((0))
    table_of_positions = sorted(eph.data["positions"].items())
    mjd1, mjd2 = zip(*[t for t, d in table_of_positions])

    for pos_time, (_, data) in zip(
            time.Time(val=mjd1, val2=mjd2, format="mjd", scale="utc"),
            table_of_positions):
        diffsec = (pos_time.utc.datetime - eph.rundate).total_seconds()
        # Only look at points close to rundate (start of integration)
        # if abs(diffsec) > 4000:
        #    continue
        # Table given in ITRF coordinate system. Convert to GCRS, where the integration of the satellite orbit will
        # be done
        pos_gcrs = np.hstack(
            (pos_gcrs, np.transpose([pos_time.itrs2gcrs @ data["pos"]])))
        times = np.hstack((times, diffsec))

    log.info(
        "Interpolating data from prediction file in order to get initial pos/vel"
    )
    pos_gcrs_ip, vel_gcrs_ip = interpolation.interpolate_with_derivative(
        times,
        np.transpose(pos_gcrs),
        np.array([0.0]),
        kind="lagrange",
        window=10,
        bounds_error=False)
    eph["initial_pos"] = pos_gcrs_ip[0]
    eph["initial_vel"] = vel_gcrs_ip[0]
    return eph
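interpolation.interpolate_with_derivative is Where's own routine; a rough standalone equivalent for a single coordinate, assuming SciPy and hypothetical table values (the real routine additionally works on a sliding window of table points), might look like:

import numpy as np
from scipy.interpolate import lagrange

# Hypothetical time offsets (seconds from rundate) and one position coordinate (meters)
t = np.array([-300.0, -180.0, -60.0, 60.0, 180.0, 300.0])
x = np.array([7000.1e3, 7000.4e3, 7000.6e3, 7000.7e3, 7000.6e3, 7000.4e3])

poly = lagrange(t, x)                    # Lagrange polynomial through the table points
pos_at_rundate = poly(0.0)               # interpolated position at the integration start
vel_at_rundate = np.polyder(poly)(0.0)   # derivative of the polynomial gives the velocity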
Example #4
def _markdown_to_pdf(dset):
    """Convert markdown SISRE report file to pdf format

    Args:
       dset (Dataset):           A dataset containing the data.
    """

    if config.where.sisre_report.get("markdown_to_pdf", default=False).bool:
        md_path = str(files.path("output_sisre_comparison_report", file_vars=dset.vars))
        pdf_path = md_path.replace(".md", ".pdf")
        program = "pandoc"

        # Convert markdown to pdf with pandoc
        pandoc_args = ["-f markdown", "-V classoption:twoside", "-N", "-o " + pdf_path, md_path]

        log.info("Start: {} {}".format(program, " ".join(pandoc_args)))
        status = os.system(f"{program} {' '.join(pandoc_args)}")
        if status != 0:
            log.error("{} failed with error code {} ({})", program, status, " ".join([program] + pandoc_args))
Example #5
def parse_reference_clock(stations, ref_clock_str):
    """Parses the reference clock string from the edit file

    Args:
        stations:          List of station names in the Dataset
        ref_clock_str:     IVS name of reference clock station

    Returns:
        String: IVS name of reference clock station in Dataset
    """
    if ref_clock_str not in stations:
        if ref_clock_str:
            log.warn("Reference clock '{}' unknown. Available options are {}",
                     ref_clock_str, ", ".join(stations))

        # Pick last station as default
        ref_clock_str = stations[-1]
    log.info("Reference clock is '{}'", ref_clock_str)
    return ref_clock_str
Example #6
def write_sinex(dset):
    """Write normal equations of session solution in SINEX format.

    Args:
        dset:  Dataset, data for a model run.
    """
    # Add dependency to sinex_blocks-module
    dependencies.add(sinex_blocks.__file__)

    if config.tech.analysis_status.status.str == "bad":
        log.info("Bad session. Not producing SINEX.")
        return
    with files.open("output_sinex", file_vars=dset.vars, mode="wt") as fid:
        sinex = sinex_blocks.SinexBlocks(dset, fid)
        sinex.header_line()
        for block in config.tech[WRITER].blocks.list:
            block_name, *args = block.split(":")
            sinex.write_block(block_name, *args)
        sinex.end_line()
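Each configured block may carry extra arguments after a colon; the star-unpacking on block.split separates the block name from those arguments. A tiny illustration with hypothetical block specifications:

block = "solution_estimate:apriori"          # hypothetical entry from the blocks list
block_name, *args = block.split(":")
# block_name -> 'solution_estimate', args -> ['apriori']

block_name, *args = "file_comment".split(":")
# block_name -> 'file_comment', args -> []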
Example #7
def ignore_station(dset):
    """Edits data based on observing station

    Args:
        dset:     A Dataset containing model data.

    Returns:
        Array containing False for observations to throw away
    """
    stations = config.tech[_SECTION].stations.list
    remove_idx = np.zeros(dset.num_obs, dtype=bool)

    if stations:
        log.info(
            f"Discarding observations from stations: {', '.join(stations)}")
        for station in stations:
            remove_idx |= dset.filter(station=station)

    return ~remove_idx
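The removers all follow the same keep-mask convention: a boolean remove mask is built up with |= and the negated mask is returned, so True means keep. A standalone illustration with hypothetical station names:

import numpy as np

station = np.array(["NYALES20", "WETTZELL", "NYALES20", "KOKEE"])   # hypothetical observations
remove_idx = np.zeros(station.size, dtype=bool)
for s in ["NYALES20"]:                       # stations configured to be ignored
    remove_idx |= station == s

keep_idx = ~remove_idx                       # -> array([False,  True, False,  True])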
Example #8
def calculate_initial_values(eph, rundate):
    """Computing initial values for position and velocity in GCRS system

    This is for later use in orbit integration, from tables in the prediction files.  Use a lagrange polynomial in
    order to interpolate in the tables.

    Args:
        eph:      Dict containing ephemeris information
        rundate:  Model run date, used as the origin of the time axis

    Returns:
        eph:  Dict where the initial position and velocity is added
    """
    data = sorted(eph["positions"].items())
    pos_itrs = np.zeros((len(data), 3))
    mjd1, mjd2 = zip(*[t for t, d in data])
    rotation_mat = rotation.trs2gcrs(
        time.Time(val=mjd1, val2=mjd2, fmt="mjd", scale="utc"))
    tbl = time.Time(val=mjd1, val2=mjd2, fmt="mjd", scale="utc")

    for i in range(0, len(data)):
        pos_itrs[i] = data[i][1]["pos"]

    diffsec = np.array([(t - rundate).total_seconds()
                        for t in tbl.utc.datetime])

    # Table given in ITRF coordinate system. Convert to GCRS, where the integration of the satellite orbit will
    # be done

    pos_gcrs = np.sum(rotation_mat @ pos_itrs[:, :, None], axis=2)
    log.info(
        "Interpolating data from prediction file in order to get initial pos/vel"
    )
    pos_gcrs_ip, vel_gcrs_ip = interpolation.interpolate_with_derivative(
        diffsec,
        pos_gcrs,
        np.array([0.0]),
        kind="lagrange",
        window=10,
        bounds_error=False)
    eph["initial_pos"] = pos_gcrs_ip[0]
    eph["initial_vel"] = vel_gcrs_ip[0]

    return eph
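The batched rotation above relies on broadcasting matrix multiplication over the epoch axis; summing over the trailing axis of length one simply drops it. A shape-only sketch with hypothetical values:

import numpy as np

n = 4                                             # hypothetical number of table epochs
rotation_mat = np.tile(np.eye(3), (n, 1, 1))      # (n, 3, 3): one rotation matrix per epoch
pos_itrs = np.arange(3.0 * n).reshape(n, 3)       # (n, 3): one ITRF position per epoch

# (n, 3, 3) @ (n, 3, 1) -> (n, 3, 1); the sum over axis 2 removes the singleton axis
pos_gcrs = np.sum(rotation_mat @ pos_itrs[:, :, None], axis=2)
assert pos_gcrs.shape == (n, 3)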
Example #9
def parse_baseline_clock_offsets(dset, baseline_clock_offsets, ref_clock):
    """Parsers and validate the baseline clock offsets from the configuration file and add them to list
    
    Args:
        dset:    Dataset
        bco:     Baseline clock offsets detected automatically
    
    Returns:
        list: baselines to estimate baseline clock offsets for
    """
    baselines = dset.unique("baseline")
    man_bco = config.tech.get("baseline_clock_offsets", section=MODEL).list
    
    for bl in man_bco:
        if bl not in baselines:
            log.warn(f"Baseline {bl} in baseline_clock_offsets is unknown. Available options are {', '.join(baselines)}")
        else:
            baseline_clock_offsets.add(bl)
    
    for bl in list(baseline_clock_offsets):
        if ref_clock in bl:
            sta_1, _, sta_2 = bl.partition("/")
            other_sta = sta_1 if sta_2 == ref_clock else sta_2
            other_baselines = dset.unique("baseline", idx=dset.filter(station=other_sta))
            if all([other_bl in baseline_clock_offsets for other_bl in other_baselines]):
                # Remove the bco for the baseline to the reference clock if the bco for all other
                # baselines of the same station is also estimated
                baseline_clock_offsets.remove(bl)

    store_bco = config.tech.get("store_bco", section=MODEL).bool

    if store_bco:
        rundate = dset.analysis["rundate"]
        pipeline = dset.vars["pipeline"]
        session = dset.vars["session"]
        with config.update_tech_config(rundate, pipeline, session=session) as cfg:
            cfg.update(MODEL, "baseline_clock_offsets",
                       ", ".join(baseline_clock_offsets), 
                       source=MODEL)

    log.info(f"Estimating baseline clock offsets for:  {', '.join(baseline_clock_offsets)}")
    return baseline_clock_offsets
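The redundancy check above hinges on splitting a baseline name into its two stations; a short sketch of that step with hypothetical names:

ref_clock = "WETTZELL"                      # hypothetical reference clock station
bl = "KOKEE/WETTZELL"                       # hypothetical baseline containing the reference clock

sta_1, _, sta_2 = bl.partition("/")
other_sta = sta_1 if sta_2 == ref_clock else sta_2   # -> 'KOKEE'
# If clock offsets are already estimated for every other baseline involving KOKEE,
# the offset for KOKEE/WETTZELL is redundant and is removed from the set.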
Example #10
def nnt_nnr_trf(dset, param_names):
    """Construct no-net-translation and no-net-rotation (NNT/NNR) constraints relative to the a priori reference frame

    Args:
        dset:           A Dataset containing model data.
        param_names:    Names of the parameters in the normal equation system.

    Returns:
        Tuple:  Constraint matrix h and corresponding standard deviations sigma.
    """
    n = len(param_names)
    d = np.zeros((n, 6))
    stations = set()
    # todo: config
    reference_frame = config.tech.reference_frames.list[0]
    s1 = 1.5e-11
    s2 = 0.0001

    trf = apriori.get("trf",
                      time=dset.time.utc.mean,
                      reference_frames=reference_frame)

    # thaller2008: eq 2.51 (skipping scale factor)
    for idx, column in enumerate(param_names):
        if "_site_pos-" not in column:
            continue
        station = column.split("-", maxsplit=1)[-1].split("_")[0]
        site_id = dset.meta[station]["site_id"]
        if site_id in trf:
            x0, y0, z0 = trf[site_id].pos.trs
            if column.endswith("_x"):
                d[idx, :] = np.array([1, 0, 0, 0, z0, -y0])
            if column.endswith("_y"):
                d[idx, :] = np.array([0, 1, 0, -z0, 0, x0])
            if column.endswith("_z"):
                d[idx, :] = np.array([0, 0, 1, y0, -x0, 0])
            stations.add(station)

    constraint = __name__
    log.info(
        f"Applying {constraint} with {', '.join(stations)} from {reference_frame.upper()}"
    )

    # thaller2008: eq 2.57
    try:
        h = np.linalg.inv(d.T @ d) @ d.T
    except np.linalg.LinAlgError:
        h = np.zeros((6, n))
        log.warn(f"Applying {constraint} failed")
    sigma = np.array([s2] * 3 + [s1] * 3)
    return h, sigma
Example #11
def ignore_satellite(dset):
    """Edits data based on observing station

    Args:
        dset (Dataset):   A Dataset containing model data.

    Returns:
        numpy.ndarray:    Array containing False for observations to throw away
    """
    satellites = config.tech[_SECTION].satellites.list
    remove_idx = np.zeros(dset.num_obs, dtype=bool)

    if satellites:
        log.info("Discarding observations from satellites: {}",
                 ", ".join(satellites))
        for satellite in satellites:
            remove_idx = np.logical_or(remove_idx,
                                       dset.filter(satellite=satellite))

    return np.logical_not(remove_idx)
Example #12
def concatenate_datasets(from_date, to_date, dset_vars):
    """Read datasets day by day from from_date to to_date and merge them into one dataset"""
    merged_vars = config.program_vars(rundate=from_date, tech_name=dset_vars["tech"], **dset_vars)
    merged_vars["id"] += "_concatenated"
    dset_merged = data.Dataset(**dict(merged_vars, rundate=from_date, empty=True))

    date_to_read = from_date
    while date_to_read <= to_date:
        dset = data.Dataset(rundate=date_to_read, **dset_vars)
        current_date = date_to_read
        date_to_read += timedelta(days=1)
        if dset.num_obs == 0:
            log.info(f"No data to read for {current_date}")
            continue
        log.info(f"Reading data for {current_date}")
        if not dset_merged:
            dset_merged.copy_from(dset)
        else:
            dset_merged.extend(dset)

    return dset_merged
Example #13
def gnss_ignore_system(dset: "Dataset",
                       systems: Union[List[str], None] = None) -> np.ndarray:
    """Edits data based on observing station

    Args:
        dset:       A Dataset containing model data.
        systems:    List with GNSS identifier (e.g. [G, E])

    Returns:
        Array containing False for observations to throw away
    """
    systems = config.tech[_SECTION].systems.list if systems is None else systems
    remove_idx = np.zeros(dset.num_obs, dtype=bool)

    if systems:
        log.info(f"Discarding observations from GNSS: {', '.join(systems)}")
        for system in systems:
            remove_idx |= dset.filter(system=system)

    return ~remove_idx
Example #14
def write_to_dataset(dset,
                     rundate=None,
                     session=None,
                     obs_format=None,
                     **obs_args):
    obs_format = config.tech.get("obs_format", section=TECH,
                                 value=obs_format).str
    log.info(f"Reading observation file in {obs_format} format")

    file_vars = config.create_file_vars(rundate,
                                        TECH,
                                        session=session,
                                        **obs_args)
    parser = parsers.parse_key(f"vlbi_obs_{obs_format}", file_vars)

    if parser.data_available:
        _write_to_dataset(parser, dset, rundate, session)
    else:
        raise exceptions.MissingDataError(
            f"No observation file in {obs_format} format found for {rundate}")
Example #15
File: slr.py Project: uasau/where
def write_to_dataset(dset, rundate=None, obs_format=None, **obs_args):

    obs_format = config.tech.get("obs_format", section=TECH, value=obs_format).str
    log.info(f"Reading observation file in {obs_format} format")

    file_vars1 = config.create_file_vars(rundate, TECH, **obs_args)
    last_date_to_read = rundate + timedelta(days=config.tech.arc_length.float + 1)
    parser1 = parsers.parse_key(f"slr_obs_{obs_format}", file_vars1)
    file_vars2 = config.create_file_vars(last_date_to_read, TECH, **obs_args)
    parser2 = parsers.parse_key(f"slr_obs_{obs_format}", file_vars2)

    if parser1.data_available and parser2.data_available:
        data = _write_to_dataset(parser1, parser2, dset, rundate)
        _write_met_to_dataset(dset, data, rundate)
    elif parser1.data_available and not parser2.data_available:
        raise exceptions.MissingDataError(
            f"No observation file in {obs_format} format found for {last_date_to_read.month}"
        )
    else:
        raise exceptions.MissingDataError(f"No observation file in {obs_format} format found for {rundate}")
Example #16
def apply_removers(config_key: str, dset: "Dataset") -> None:
    """Apply all removers for a given session

    Args:
        config_key:  The configuration key listing which removers to apply.
        dset:        Dataset containing analysis data.
    """
    prefix = config.analysis.get("analysis", default="").str
    log.info(f"Applying removers")
    keep_idxs = plugins.call_all(package_name=__name__,
                                 config_key=config_key,
                                 prefix=prefix,
                                 dset=dset)

    all_keep_idx = np.ones(dset.num_obs, dtype=bool)
    for remover, remover_keep_idx in keep_idxs.items():
        log.info(
            f"Removing {sum(np.logical_not(remover_keep_idx)):5d} observations based on {remover}"
        )
        report.data("remover_data",
                    dset,
                    remover_name=remover,
                    keep_idx=remover_keep_idx)
        all_keep_idx = np.logical_and(all_keep_idx, remover_keep_idx)

    log.info(f"Keeping {sum(all_keep_idx)} of {dset.num_obs} observations")
    dset.subset(all_keep_idx)
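The keep-masks returned by the individual removers are combined with a logical AND, so an observation survives only if every remover keeps it. A standalone sketch with hypothetical masks:

import numpy as np

keep_idxs = {                                     # hypothetical remover results for 5 observations
    "elevation":      np.array([True, True, False, True, True]),
    "ignore_station": np.array([True, False, True, True, True]),
}

all_keep_idx = np.ones(5, dtype=bool)
for remover, remover_keep_idx in keep_idxs.items():
    print(f"Removing {np.sum(~remover_keep_idx)} observations based on {remover}")
    all_keep_idx &= remover_keep_idx

# all_keep_idx -> [True, False, False, True, True]: 3 of 5 observations are kept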
Example #17
def detect_outliers(config_key, dset):
    """Detect all outliers for a given session

    Args:
        config_key (String):  The configuration key listing which detectors to apply.
        dset (Dataset):       Dataset containing analysis data.
    """
    prefix = config.analysis.get("analysis", default="").str
    log.info(f"Detecting outliers")
    keep_idxs = plugins.call_all(package_name=__name__,
                                 config_key=config_key,
                                 prefix=prefix,
                                 dset=dset)

    all_keep_idx = np.ones(dset.num_obs, dtype=bool)
    for detector, detector_keep_idx in keep_idxs.items():
        log.info(
            f"Detecting {sum(~detector_keep_idx):5d} outliers based on {detector}"
        )
        report.data("detector_data",
                    dset,
                    detector_name=detector,
                    keep_idx=detector_keep_idx)
        all_keep_idx = np.logical_and(all_keep_idx, detector_keep_idx)

    log.info(f"Removing {sum(~all_keep_idx)} of {dset.num_obs} observations")
    return all_keep_idx
Example #18
def apply_observation_rejectors(config_key: str, dset: "Dataset", independent: bool) -> "Dataset":
    """Apply all configured observation rejectors

    Args:
        config_key:     The configuration key listing which rejectors to apply.
        dset:           Dataset containing analysis data.
        independent:    Flag to indicate whether the rejectors are applied independently or sequentially

    Returns:
        Dataset with the rejected observations removed
    """
    prefix = dset.vars["pipeline"]
    rejectors = config.tech[config_key].list
    word = "independently" if independent else "sequentially"
    num_obs_before = dset.num_obs

    log.info(f"Applying observation rejectors {word}")
    all_keep_idx = np.ones(num_obs_before, dtype=bool)
    for rejector in rejectors:
        rejector_keep_idx = plugins.call(package_name=__name__, plugin_name=rejector, prefix=prefix, dset=dset)
        if independent:
            all_keep_idx = np.logical_and(all_keep_idx, rejector_keep_idx)
        else:
            dset.subset(rejector_keep_idx)
        log.info(f"Found {sum(~rejector_keep_idx):5d} observations based on {rejector}")

    if independent:
        dset.subset(all_keep_idx)
    log.info(f"Removing {num_obs_before - dset.num_obs} of {num_obs_before} observations")
    return dset
Example #19
def estimate(stage, dset):
    """Filter residuals

    Args:
        rundate (Datetime):  The model run date.
        session (String):    Name of session.
        prev_stage (String): Name of previous stage.
        stage (String):      Name of current stage.
    """
    max_iterations = config.tech.estimate_max_iterations.int

    for iter_num in itertools.count(start=1):
        partial_vectors = estimation.partial_vectors(dset, "estimate_method")
        obs_noise = dset.observed_delay_ferr**2 + np.nan_to_num(
            dset.iono_delay_ferr)**2 + 0.01**2
        log.info(
            f"Estimating parameters for iteration {iter_num} using Kalman Filter and continuous piecewise linear functions"
        )
        estimation.call("estimate_method",
                        dset=dset,
                        partial_vectors=partial_vectors,
                        obs_noise=obs_noise)
        rms = dset.rms("residual")
        log.info(f"{dset.num_obs} observations, postfit residual = {rms:.4f}")
        dset.write_as(stage=stage, label=iter_num - 1)
        if iter_num >= max_iterations:
            break

        # Detect and remove outliers
        num_obs_before = dset.num_obs
        independent = config.tech.estimate_obs_rejectors_independent.bool
        dset = estimation.apply_observation_rejectors("estimate_obs_rejectors",
                                                      dset, independent)
        log.blank()
        if dset.num_obs == num_obs_before or dset.num_obs == 0:
            break

    log.blank()
    estimation.solve_neq(dset)
    dset.write()
Example #20
def cable_calibration(dset):
    """Edit cable calibration data

    Args:
        dset:     A Dataset containing model data.

    """
    stations = config.tech[_SECTION].ignore_cable.list

    if stations:
        log.info(
            f"{_SECTION}: Discarding cable calibration data from {', '.join(stations)}"
        )
        idx_1 = np.zeros(dset.num_obs, dtype=bool)
        idx_2 = np.zeros(dset.num_obs, dtype=bool)

        for station in stations:
            idx_1 = np.logical_or(idx_1, dset.filter(station_1=station))
            idx_2 = np.logical_or(idx_2, dset.filter(station_2=station))

        dset.cable_delay_1[idx_1] = 0.0
        dset.cable_delay_2[idx_2] = 0.0
Example #21
def calculate(stage, dset):
    """Calculate model corrections and residuals

    Args:
        stage (String):   Name of current stage.
        dset (Dataset):   A Dataset containing model data.
    """

    # CALCULATE
    # -----------
    # Correction of station position in GCRS due to loading and tide effects
    site.calculate_site("site", dset)
    delta_pos = site.add("site", dset)
    dset.site_pos[:] = (dset.site_pos.gcrs + delta_pos[0].gcrs).trs

    # Initialize models given in configuration file by adding model fields to Dataset
    delay.calculate_delay("delay", dset)
    delta_delay = delay.add("delay", dset)

    if "observed" in dset.fields:
        dset.observed[:] = gnss.get_code_observation(dset)
    else:
        dset.add_float("observed",
                       val=gnss.get_code_observation(dset),
                       unit="meter")

    # Get model corrections
    if "calc" in dset.fields:
        dset.calc[:] = delta_delay
    else:
        dset.add_float("calc",
                       val=delta_delay,
                       unit="meter",
                       write_level="operational")

    if "residual" in dset.fields:
        dset.residual[:] = dset.observed - dset.calc
    else:
        dset.add_float("residual", val=dset.observed - dset.calc, unit="meter")

    # Store calculate results
    log.info(
        f"{dset.num_obs} observations, residual = {dset.rms('residual'):.4f}")
    dset.write_as(stage="calculate", dataset_id=0)
Example #22
File: slr.py Project: mfkiwl/where
def edit(stage, dset):
    """Edit the data by applying editor

    Args:
        stage:  Name of current stage
        dset:   Dataset containing the data
    """
    # Clean up dataset
    cleaners.apply_removers("removers", dset)

    # Indicate if range and time bias are estimated or not
    # and apply biases
    dset.add_float("range_bias", np.zeros(dset.num_obs), unit="meter")
    dset.add_float("time_bias", np.zeros(dset.num_obs), unit="meter")
    dset.add_bool("estimate_range", np.zeros(dset.num_obs))
    dset.add_bool("estimate_time", np.zeros(dset.num_obs))

    for station in config.tech.slr_range_bias.estimate_stations.list:
        int_idx = dset.filter(station=station)
        if np.any(int_idx):
            log.info(
                f"Config file: Will estimate range bias for station {station} in estimation stage"
            )
            dset.estimate_range[:] = np.logical_or(int_idx,
                                                   dset.estimate_range[:])
    for station in config.tech.slr_time_bias.estimate_stations.list:
        int_idx = dset.filter(station=station)
        if np.any(int_idx):
            log.info(
                f"Config file: Will estimate time bias for station {station} in estimation stage"
            )
            dset.estimate_time[:] = np.logical_or(int_idx,
                                                  dset.estimate_time[:])

    cleaners.apply_editors("editors", dset)

    # Write dataset
    dset.write_as(stage=stage, label=0)
Example #23
def gnss_dops(dset: "Dataset") -> None:
    """Adds dilution of precision (DOP) to dataset

    Args:
        dset:     A Dataset containing model data.
    """
    dops = {
        "gdop": np.zeros((dset.num_obs)),
        "pdop": np.zeros((dset.num_obs)),
        "hdop": np.zeros((dset.num_obs)),
        "vdop": np.zeros((dset.num_obs)),
    }

    for time in dset.unique("time"):
        idx = dset.filter(time=time)
        dops["gdop"][idx], dops["pdop"][idx], dops["hdop"][idx], dops["vdop"][idx] = compute_dops(
            dset.site_pos.azimuth[idx], dset.site_pos.elevation[idx]
        )

    for dop, val in dops.items():
        dset.add_float(dop, val=val)

    log.info(f"{_SECTION}: Add gdop, pdop, hdop and vdop fields to Dataset.")
Example #24
def ignore_baseline(dset):
    """Edits data based on baselines

    Args:
        dset (Dataset):   A Dataset containing model data.

    Returns:
        Array containing False for observations to throw away
    """
    baselines = config.tech[_SECTION].baselines.as_list(split_re=", *")
    remove_idx = np.zeros(dset.num_obs, dtype=bool)

    if baselines:
        log.info(
            f"Discarding observations with baselines: {', '.join(baselines)}")

        # Add baselines with stations in reverse order
        baselines.extend(["/".join(reversed(b.split("/"))) for b in baselines])
        for baseline in baselines:
            remove_idx = np.logical_or(remove_idx,
                                       dset.filter(baseline=baseline))

    return np.logical_not(remove_idx)
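Baselines are stored as "STATION_1/STATION_2", so each configured baseline is also added with the stations in reverse order to match both directions; a one-line sketch with a hypothetical name:

b = "KOKEE/WETTZELL"                              # hypothetical configured baseline
reversed_b = "/".join(reversed(b.split("/")))     # -> 'WETTZELL/KOKEE'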
Example #25
def meteorological_data(dset):
    """Edit cable calibration data

    Args:
        dset:     A Dataset containing model data.

    """
    pressure_stations = config.tech[_SECTION].ignore_pressure.list
    temperature_stations = config.tech[_SECTION].ignore_temperature.list

    if pressure_stations:
        log.info(
            f"{_SECTION}: Discarding pressure data from {', '.join(pressure_stations)}"
        )
        idx_1 = np.zeros(dset.num_obs, dtype=bool)
        idx_2 = np.zeros(dset.num_obs, dtype=bool)

        for station in pressure_stations:
            idx_1 = np.logical_or(idx_1, dset.filter(station_1=station))
            idx_2 = np.logical_or(idx_2, dset.filter(station_2=station))

        dset.pressure_1[idx_1] = np.nan
        dset.pressure_2[idx_2] = np.nan

    if temperature_stations:
        log.info(
            f"{_SECTION}: Discarding temperature data from {', '.join(temperature_stations)}"
        )
        idx_1 = np.zeros(dset.num_obs, dtype=bool)
        idx_2 = np.zeros(dset.num_obs, dtype=bool)

        for station in temperature_stations:
            idx_1 = np.logical_or(idx_1, dset.filter(station_1=station))
            idx_2 = np.logical_or(idx_2, dset.filter(station_2=station))

        dset.temperature_1[idx_1] = np.nan
        dset.temperature_2[idx_2] = np.nan
Example #26
def read(rundate, session, prev_stage, stage):
    """Read VLBI data

    Args:
        rundate (Datetime):  The model run date.
        session (String):    Name of session.
        prev_stage (String): Name of previous stage.
        stage (String):      Name of current stage.

    Returns:
        Bool: True if data are available for the session, False otherwise
    """
    try:
        dset = obs.get(rundate, TECH, session)
    except exceptions.MissingDataError:
        return False

    dset.write_as(rundate=rundate,
                  tech=TECH,
                  stage=stage,
                  dataset_name=session,
                  dataset_id=0)
    log.info(f"Parsed {dset.num_obs} observations")
    return True
Example #27
def parse_args(*param_types, doc_module=None):
    """Parse command line arguments and general options

    Log versions of python, the script and the configuration.
    Finally parse arguments from the given parameter types.

    Args:
        param_types: Strings describing the expected parameter types.
                     Each string must be one of the keys in #_PARSERS.
        doc_module:  Module used for logging the program version and printing help.

    Returns:
        List of command line arguments parsed according to param_types. A single value is
        returned when only one parameter type is given, and None when none are given.
    """
    # Log version of python and the program, and the configuration file used
    if doc_module:
        log.info(
            f"Start {_get_program_version(doc_module)} at {datetime.now().strftime(config.FMT_datetime)}"
        )
        log.debug(
            f"Receive command line arguments [{', '.join(sys.argv[1:])}]")
        title, sources = get_configuration(cfg=get_program_name())
        # TODO log something meaningful when session config already exists
        log.info(f"Use {title} configuration from {', '.join(sources)}")

    # Parse arguments
    try:
        arguments = [_PARSERS[type]() for type in param_types]
    except Exception:
        _print_help_from_doc(doc_module)
        raise

    # Return arguments (scalar if only one element, None if list is empty)
    if len(arguments) > 1:
        return arguments
    elif arguments:
        return arguments[0]
Example #28
    def markdown_to_pdf(self) -> None:
        """Convert markdown file to pdf format
        """
        # Close file object
        self.fid.close()

        if self.path.stat().st_size == 0:
            log.warn(f"Markdown file {self.path} is empty.")
            return

        pdf_path = str(self.path).replace(".md", ".pdf")
        program = "pandoc"

        # Convert markdown to pdf with pandoc
        pandoc_args = [
            "-f markdown", "-V classoption:twoside", "-N", "-o " + pdf_path,
            str(self.path)
        ]
        log.info(f"Start: {program} {' '.join(pandoc_args)}")
        status = os.system(f"{program} {' '.join(pandoc_args)}")
        if status != 0:
            log.error(
                f"{program} failed with error code {status} ({' '.join([program] + pandoc_args)})"
            )
Example #29
def _calculate_model(calculate_func, config_key, dset_in, dset_out, write_levels=None):
    """Call models and store output in dataset

    If the model output is empty, we still create a dummy field in the table only containing zeros. This is done to
    assert that the table will always exist after doing a `models.calculate...`-call.

    Args:
        calculate_func (Function):  The function that calls models.
        config_key (String):        Key in config with list of models, also table the model output is stored in.
        dset_in (Dataset):          Dataset to read data from.
        dset_out (Dataset):         Dataset to store data to.
        write_levels (Dict):        Optional mapping from model name to write level (defaults to "analysis").
    """
    dset_out = dset_in if dset_out is None else dset_out
    write_levels = dict() if write_levels is None else write_levels

    model_output = calculate_func(config_key, dset_in)

    for model_name, values in sorted(model_output.items()):
        field_name = f"{config_key}.{model_name}"
        if field_name in dset_out.fields:
            dset_out[field_name][:] = values
        else:
            dset_out.add_float(field_name, values, write_level=write_levels.get(model_name, "analysis"), unit="meter")
        log.info(f"Average correction = {dset_out.rms(f'{config_key}.{model_name}'):14.5f} in {model_name} model")
Example #30
def _concatenate_datasets(from_date: date, to_date: date, dset_vars: Dict[str, str],
                          only_for_rundate: bool) -> "Dataset":
    """Concatenate datasets

    Args:
        from_date:         Start date for reading Dataset.
        to_date:           End date for reading Dataset.
        dset_vars:         Common Dataset variables.
        only_for_rundate:  Concatenate only data for given rundate.

    Returns:
        Dataset: The concatenated dataset.
    """
    merged_vars = config.program_vars(rundate=from_date,
                                      tech_name=dset_vars["tech"],
                                      **dset_vars)
    merged_vars["id"] += "_concatenated"
    dset_merged = data.Dataset(
        **dict(merged_vars, rundate=from_date, empty=True))

    date_to_read = from_date
    while date_to_read <= to_date:
        dset = data.Dataset(rundate=date_to_read, **dset_vars)

        current_date = date_to_read
        date_to_read += timedelta(days=1)

        if dset.num_obs == 0:
            log.info(f"No data to read for {current_date}")
            continue

        if only_for_rundate:
            _keep_data_only_for_rundate(dset)

            if dset.num_obs == 0:
                log.info(f"No data to read for {current_date}")
                continue

        log.info(f"Reading data for {current_date}")
        if not dset_merged:
            dset_merged.copy_from(dset)
        else:
            dset_merged.extend(dset)

    return dset_merged