Example #1
def parse_baseline_clock_offsets(dset, baseline_clock_offsets, ref_clock):
    """Parsers and validate the baseline clock offsets from the configuration file and add them to list
    
    Args:
        dset:                      Dataset
        baseline_clock_offsets:    Set of baseline clock offsets detected automatically
        ref_clock:                 Name of the station whose clock is used as reference
    
    Returns:
        set: baselines to estimate baseline clock offsets for
    """
    baselines = dset.unique("baseline")
    man_bco = config.tech.get("baseline_clock_offsets", section=MODEL).list
    
    for bl in man_bco:
        if bl not in baselines:
            log.warn(f"Baseline {bl} in baseline_clock_offsets is unknown. Available options are {', '.join(baselines)}")
        else:
            baseline_clock_offsets.add(bl)
    
    for bl in list(baseline_clock_offsets):
        if ref_clock in bl:
            sta_1, _, sta_2 = bl.partition("/")
            other_sta = sta_1 if sta_2 == ref_clock else sta_2
            other_baselines = dset.unique("baseline", idx=dset.filter(station=other_sta))
            if all([other_bl in baseline_clock_offsets for other_bl in other_baselines]):
                # Remove the bco for the baseline to the reference clock if the bco
                # for all other baselines of the same station is also estimated
                baseline_clock_offsets.remove(bl)

    store_bco = config.tech.get("store_bco", section=MODEL).bool

    if store_bco:
        rundate = dset.analysis["rundate"]
        pipeline = dset.vars["pipeline"]
        session = dset.vars["session"]
        with config.update_tech_config(rundate, pipeline, session=session) as cfg:
            cfg.update(MODEL, "baseline_clock_offsets",
                       ", ".join(baseline_clock_offsets), 
                       source=MODEL)

    log.info(f"Estimating baseline clock offsets for:  {', '.join(baseline_clock_offsets)}")
    return baseline_clock_offsets
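
The pruning rule in the second loop above is easiest to see on plain strings: a baseline to the reference clock is redundant when the other station already gets a clock offset on all of its baselines. A minimal, self-contained sketch of just that rule (the station names and the all-baselines setup are invented; the real function works on a Dataset):

import itertools

ref_clock = "WETTZELL"
stations = ["KOKEE", "NYALES20", "WETTZELL"]
baselines = {f"{s1}/{s2}" for s1, s2 in itertools.combinations(stations, 2)}
bco = set(baselines)  # pretend every baseline was flagged for a clock offset

for bl in list(bco):
    if ref_clock in bl:
        sta_1, _, sta_2 = bl.partition("/")
        other_sta = sta_1 if sta_2 == ref_clock else sta_2
        other_baselines = {b for b in baselines if other_sta in b}
        if all(b in bco for b in other_baselines):
            bco.remove(bl)  # covered by the other station's baselines

print(sorted(bco))  # only KOKEE/NYALES20 remains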
Example #2
def calculate(rundate, session, prev_stage, stage):
    """Estimate model parameters

    Args:
        rundate (Datetime):  The model run date.
        session (String):    Name of session.
        prev_stage (String): Name of previous stage.
        stage (String):      Name of current stage.
    """
    dset = data.Dataset(rundate,
                        tech=TECH,
                        stage=prev_stage,
                        dataset_name=session,
                        dataset_id="last")
    dset.delete_from_file(stage=stage, dataset_id="all")

    # Run models adjusting station positions
    log.info("Calculating station displacements for {}", session)
    models.calculate_site("pos_models", dset, shape=(6, ))
    delta_pos = np.sum(dset.get_table("pos_models").reshape(dset.num_obs, -1, 6), axis=1)
    gcrs_dpos_1 = delta_pos[:, :3]
    gcrs_dvel_1 = (dset.time.itrs2gcrs_dot
                   @ dset.site_pos_1.convert_gcrs_to_itrs(gcrs_dpos_1)[:, :, None])[:, :, 0]
    dset.site_pos_1.add_to_gcrs(np.concatenate((gcrs_dpos_1, gcrs_dvel_1), axis=1))
    gcrs_dpos_2 = delta_pos[:, 3:]
    gcrs_dvel_2 = (dset.time.itrs2gcrs_dot
                   @ dset.site_pos_2.convert_gcrs_to_itrs(gcrs_dpos_2)[:, :, None])[:, :, 0]
    dset.site_pos_2.add_to_gcrs(np.concatenate((gcrs_dpos_2, gcrs_dvel_2), axis=1))
    log.blank()

    # Run models for each term of the observation equation
    log.info("Calculating theoretical delays for {}", session)
    models.calculate_delay("calc_models", dset)
    dset.add_float("obs",
                   val=dset.observed_delay,
                   unit="meter",
                   write_level="operational")
    dset.add_float("calc",
                   val=np.sum(dset.get_table("calc_models"), axis=1),
                   unit="meter",
                   write_level="operational")
    dset.add_float("residual",
                   val=dset.obs - dset.calc,
                   unit="meter",
                   write_level="operational")
    log.blank()

    # Estimate clock polynomial
    log.info("Calculating clock polynomials for {}", session)
    max_iterations = config.tech.calculate_max_iterations.int
    outlier_limit = config.tech.calculate_outlier_limit.float
    store_outliers = config.tech.store_outliers.bool

    for iter_num in itertools.count(start=1):
        models.calculate_delay("correction_models", dset, dset)
        dset.calc[:] = np.sum(np.hstack((dset.get_table("calc_models"),
                                         dset.get_table("correction_models"))),
                              axis=1)
        dset.residual[:] = dset.obs - dset.calc
        rms = dset.rms("residual")
        log.info("{}: {} observations, residual = {:.4f}", session,
                 dset.num_obs, rms)

        # Store results
        dset.write_as(stage=stage, dataset_id=iter_num - 1)

        # Detect and remove extreme outliers
        idx = np.abs(dset.residual) < outlier_limit * rms
        if iter_num > max_iterations or idx.all():
            break

        if store_outliers:
            bad_idx = np.logical_not(idx)
            log.info(
                f"Adding {np.sum(bad_idx)} observations to ignore_observation")
            bad_obs = np.char.add(np.char.add(dset.time.utc.iso[bad_idx], " "),
                                  dset.baseline[bad_idx]).tolist()
            with config.update_tech_config(rundate, TECH, session) as cfg:
                current = cfg.ignore_observation.observations.as_list(", *")
                updated = ", ".join(sorted(current + bad_obs))
                cfg.update("ignore_observation",
                           "observations",
                           updated,
                           source=util.get_program_name())

        dset.subset(idx)
        log.info("Removing {} observations with residuals bigger than {:.4f}",
                 sum(np.logical_not(idx)), outlier_limit * rms)
        log.blank()

    # Try to detect clock breaks
    if config.tech.detect_clockbreaks.bool:
        writers.write_one("vlbi_detect_clockbreaks", dset)
        dset.write()
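
The clock-polynomial loop above is a plain iterative outlier rejection: recompute residuals, drop observations with residuals beyond outlier_limit times the rms, and repeat until nothing is removed or max_iterations is exceeded. A stripped-down, self-contained sketch of that pattern on synthetic residuals (no Dataset, no models):

import itertools

import numpy as np

rng = np.random.default_rng(0)
residual = rng.normal(scale=0.01, size=1000)
residual[:5] += 1.0  # plant a few extreme outliers

max_iterations = 10
outlier_limit = 3.0

for iter_num in itertools.count(start=1):
    rms = np.sqrt(np.mean(residual ** 2))
    idx = np.abs(residual) < outlier_limit * rms
    if iter_num > max_iterations or idx.all():
        break
    print(f"iteration {iter_num}: removing {np.sum(~idx)} of {residual.size} observations")
    residual = residual[idx]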
Example #3
    def update_dataset(self, dset, param_names, normal_idx, num_unknowns):
        """Update the given dataset with results from the filtering

        Args:
            dset (Dataset):       The dataset.
            param_names (List):   Strings with names of parameters. Used to form field names.
            normal_idx (Slice):   Slice denoting which parameters should be used for the normal equations.
            num_unknowns (Int):   Number of unknowns.
        """
        # Update dataset with state and estimation fields and calculate new residuals
        self._add_fields(dset, param_names)
        dset.residual[:] = dset.estimate - (dset.obs - dset.calc)
        num_unknowns += dset.meta.get("num_clock_coeff", 0)

        # Calculate normal equations, and add statistics about estimation to dataset
        N, b = self._normal_equations(normal_idx, dset.num_obs - 1)
        g = self.x_hat[dset.num_obs - 1, normal_idx, :]
        deg_freedom = dset.num_obs - num_unknowns
        v = dset.residual[:, None]
        P = np.diag(1 / self.r[:dset.num_obs])
        sq_sum_residuals = (v.T @ P @ v).item()
        sq_sum_omc_terms = (2 * b.T @ g - g.T @ N @ g).item()
        variance_factor = sq_sum_residuals / deg_freedom if deg_freedom != 0 else np.inf
        log.info("Variance factor = {:.4f}, degrees of freedom = {:d}",
                 variance_factor, deg_freedom)

        # Report and set analysis status if there are too few degrees of freedom
        if deg_freedom < 1:
            log.error(
                f"Degrees of freedom is {deg_freedom} < 1. Estimate fewer parameters"
            )
            if dset.meta.get("analysis_status") == "unchecked":
                dset.meta["analysis_status"] = "too few degrees of freedom"

                # Update config
                # with config.update_tech_config(dset.rundate, dset.vars["tech"], dset.vars["session"]) as cfg:
                # cfg.update("analysis_status", "status", dset.meta["analysis_status"], source=__file__)
        else:
            if dset.meta.get(
                    "analysis_status") == "too few degrees of freedom":
                dset.meta["analysis_status"] = "unchecked"

                # Update config
                # with config.update_tech_config(dset.rundate, dset.vars["tech"], dset.vars["session"]) as cfg:
                # cfg.update("analysis_status", "status", dset.meta["analysis_status"], source=__file__)

        # Report and set analysis status if there are too few stations
        # TODO: if vlbi_site_pos in state_vector and num_stations < 3
        estimate_site_pos = np.char.startswith(np.array(param_names),
                                               "vlbi_site_pos").any()
        if len(dset.unique("station")) < 3 and estimate_site_pos:
            log.error(
                f"Too few stations {len(dset.unique('station'))} < 3. Do not estimate station positions."
            )
            if dset.meta.get("analysis_status") == "unchecked":
                dset.meta["analysis_status"] = "needs custom state vector"
        elif len(dset.unique("station")) < 3 and estimate_site_pos:
            if dset.meta.get("analysis_status") == "needs custom state vector":
                dset.meta["analysis_status"] = "unchecked"
        # Update config
        with config.update_tech_config(dset.rundate, dset.vars["tech"],
                                       dset.vars["session"]) as cfg:
            cfg.update("analysis_status",
                       "status",
                       dset.meta.get("analysis_status", ""),
                       source=__file__)

        # Add information to dset.meta
        dset.add_to_meta("statistics", "number of observations", dset.num_obs)
        dset.add_to_meta("statistics", "number of unknowns", num_unknowns)
        dset.add_to_meta("statistics", "square sum of residuals",
                         sq_sum_residuals)
        dset.add_to_meta("statistics", "degrees of freedom", deg_freedom)
        dset.add_to_meta("statistics", "variance factor", variance_factor)
        dset.add_to_meta("statistics", "weighted square sum of o-c",
                         sq_sum_residuals + sq_sum_omc_terms)
        dset.add_to_meta("normal equation", "matrix", N.tolist())
        dset.add_to_meta("normal equation", "vector", b[:, 0].tolist())
        dset.add_to_meta("normal equation", "names", param_names[normal_idx])
        dset.add_to_meta("normal equation", "unit", [
            config.tech[f.split("-")[0]].unit.str
            for f in param_names[normal_idx]
        ])

        # TODO should this be here?
        log.info("Solving normal equations")
        names = dset.meta["normal equation"]["names"]
        n = len(names)
        d = np.zeros((n, 6))
        stations = set()
        reference_frame = config.tech.reference_frames.list[0]

        from where import apriori

        trf = apriori.get("trf",
                          time=dset.time.utc.mean,
                          reference_frames=reference_frame)

        # thaller2008: eq 2.51 (skipping scale factor)
        for idx, column in enumerate(names):
            if "_site_pos-" not in column:
                continue
            station = column.split("-", maxsplit=1)[-1].rsplit("_",
                                                               maxsplit=1)[0]
            site_id = dset.meta[station]["site_id"]
            if site_id in trf:
                x0, y0, z0 = trf[site_id].pos.itrs  # TODO: Take units into account
                if column.endswith("_x"):
                    d[idx, :] = np.array([1, 0, 0, 0, z0, -y0])
                if column.endswith("_y"):
                    d[idx, :] = np.array([0, 1, 0, -z0, 0, x0])
                if column.endswith("_z"):
                    d[idx, :] = np.array([0, 0, 1, y0, -x0, 0])
                stations.add(station)

        log.info("Applying NNT/NNR with {} from {}", ", ".join(stations),
                 reference_frame.upper())
        # thaller2008: eq 2.57
        try:
            H = np.linalg.inv(d.T @ d) @ d.T
        except np.linalg.LinAlgError:
            H = np.zeros((6, n))

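        # Constraint sigmas: presumably 1e-4 m for the three translations (NNT)
        # and 1.5e-11 rad for the three rotations (NNR); 1.5e-11 rad is roughly
        # 0.1 mm at the Earth's surface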
        sigmas = [0.0001] * 3 + [1.5e-11] * 3

        # NNR to CRF
        if "celestial_reference_frames" in config.tech.master_section:
            celestial_reference_frame = config.tech.celestial_reference_frames.list[0]
            crf = apriori.get(
                "crf",
                celestial_reference_frames=celestial_reference_frame,
                session=dset.dataset_name)
            H2 = np.zeros((3, n))
            for idx, column in enumerate(names):
                if "_src_dir-" not in column:
                    continue
                source = column.split("-", maxsplit=1)[-1].split("_")[0]
                if source in crf:
                    ra = crf[source].pos.crs[0]
                    dec = crf[source].pos.crs[1]
                    if column.endswith("_ra"):
                        H2[0, idx] = -np.cos(ra) * np.sin(dec) * np.cos(dec)
                        H2[1, idx] = -np.sin(ra) * np.sin(dec) * np.cos(dec)
                        H2[2, idx] = np.cos(dec)**2
                    if column.endswith("_dec"):
                        H2[0, idx] = np.sin(ra)
                        H2[1, idx] = -np.cos(ra)

            if H2.any():
                log.info("Applying NNR constraint to {}",
                         celestial_reference_frame.upper())
                # add NNR to CRF constraints
                H = np.concatenate((H, H2))
                sigmas = sigmas + [1e-6] * 3

        # thaller2008: eq 2.45
        P_h = np.diag(1 / np.array(sigmas)**2)

        # thaller2008: eq 2.58
        N_h = N + H.T @ P_h @ H

        # solve neq
        N_h_inv = np.linalg.inv(N_h)
        x = N_h_inv @ b

        # Covariance: thaller2008: eq 2.16
        Q_xx = variance_factor**2 * N_h_inv

        dset.add_to_meta("normal equation", "solution", x[:, 0].tolist())
        dset.add_to_meta("normal equation", "covariance", Q_xx.tolist())
Example #4
File: vlbi.py  Project: uasau/where
def calculate(stage, dset):
    """Estimate model parameters

    Args:
        stage (String):  Name of current stage.
        dset (Dataset):  A dataset containing the data.
    """
    # Run models adjusting station positions
    log.info(f"Calculating station displacements")
    site.calculate_site("site", dset)
    delta_pos = site.add("site", dset)

    dset.site_pos_1[:] = (dset.site_pos_1.gcrs + delta_pos[0].gcrs).trs
    dset.site_pos_2[:] = (dset.site_pos_2.gcrs + delta_pos[1].gcrs).trs
    log.blank()

    # Run models for each term of the observation equation
    log.info(f"Calculating theoretical delays")
    delay.calculate_delay("delay", dset)

    delta_delay = delay.add("delay", dset)
    dset.add_float("obs",
                   val=dset.observed_delay,
                   unit="meter",
                   write_level="operational")
    dset.add_float("calc",
                   val=delta_delay,
                   unit="meter",
                   write_level="operational")
    dset.add_float("residual",
                   val=dset.obs - dset.calc,
                   unit="meter",
                   write_level="operational")
    log.blank()

    # Estimate clock polynomial
    log.info(f"Calculating clock polynomials")
    max_iterations = config.tech.calculate_max_iterations.int
    outlier_limit = config.tech.calculate_outlier_limit.float
    store_outliers = config.tech.store_outliers.bool

    for iter_num in itertools.count(start=1):
        delay.calculate_delay("delay_corr", dset, dset)
        delta_correction = delay.add("delay_corr", dset)

        dset.calc[:] = dset.calc + delta_correction
        dset.residual[:] = dset.obs - dset.calc
        rms = dset.rms("residual")
        log.info(f"{dset.num_obs} observations, residual = {rms:.4f}")

        # Store results
        dset.write_as(stage=stage, label=iter_num - 1)

        # Detect and remove extreme outliers
        idx = np.abs(dset.residual) < outlier_limit * rms
        if iter_num > max_iterations or idx.all():
            break

        if store_outliers:
            bad_idx = np.logical_not(idx)
            log.info(
                f"Adding {np.sum(bad_idx)} observations to ignore_observation")
            bad_obs = np.char.add(np.char.add(dset.time.utc.iso[bad_idx], " "),
                                  dset.baseline[bad_idx]).tolist()
            with config.update_tech_config(
                    dset.analysis["rundate"],
                    dset.vars["pipeline"],
                    session=dset.vars["session"]) as cfg:
                current = cfg.ignore_observation.observations.as_list(", *")
                updated = ", ".join(sorted(current + bad_obs))
                cfg.update("ignore_observation",
                           "observations",
                           updated,
                           source=util.get_program_name())

        dset.subset(idx)
        log.info(
            f"Removing {sum(~idx)} observations with residuals bigger than {outlier_limit * rms}"
        )
        log.blank()

    # Try to detect clock breaks
    if config.tech.detect_clockbreaks.bool:
        writers.write_one("vlbi_detect_clockbreaks", dset=dset)
        dset.write()
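
The ignore_observation entries written by the loops above are strings of the form "<UTC ISO time> <baseline>", built element-wise with np.char.add. A small, self-contained illustration (the times and baselines are invented):

import numpy as np

times = np.array(["2015-08-04 17:30:00", "2015-08-04 17:31:30"])
baselines = np.array(["KOKEE/WETTZELL", "KOKEE/NYALES20"])

bad_obs = np.char.add(np.char.add(times, " "), baselines).tolist()
print(bad_obs)
# ['2015-08-04 17:30:00 KOKEE/WETTZELL', '2015-08-04 17:31:30 KOKEE/NYALES20']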
Example #5
    def update_dataset(self, dset, param_names, normal_idx, num_unknowns):
        """Update the given dataset with results from the filtering

        Args:
            dset (Dataset):       The dataset.
            param_names (List):   Strings with names of parameters. Used to form field names.
            normal_idx (Slice):   Slice denoting which parameters should be used for the normal equations.
            num_unknowns (Int):   Number of unknowns.
        """
        # Update dataset with state and estimation fields and calculate new residuals
        self._add_fields(dset, param_names)
        dset.residual[:] = dset.est - (dset.obs - dset.calc)
        num_unknowns += dset.meta.get("num_clock_coeff", 0)

        # Calculate normal equations, and add statistics about estimation to dataset
        N, b = self._normal_equations(normal_idx, dset.num_obs - 1)
        g = self.x_hat[dset.num_obs - 1, normal_idx, :]
        deg_freedom = dset.num_obs - num_unknowns
        v = dset.residual[:, None]
        P = np.diag(1 / self.r[:dset.num_obs])
        sq_sum_residuals = (v.T @ P @ v).item()
        sq_sum_omc_terms = (2 * b.T @ g - g.T @ N @ g).item()
        variance_factor = sq_sum_residuals / deg_freedom if deg_freedom != 0 else np.inf
        log.info(
            f"Variance factor = {variance_factor:.4f}, degrees of freedom = {deg_freedom:d}"
        )

        # Report and set analysis status if there are too few degrees of freedom
        if deg_freedom < 1:
            log.error(
                f"Degrees of freedom is {deg_freedom} < 1. Estimate fewer parameters"
            )
            if dset.meta.get("analysis_status") == "unchecked":
                dset.meta["analysis_status"] = "too few degrees of freedom"

        else:
            if dset.meta.get(
                    "analysis_status") == "too few degrees of freedom":
                dset.meta["analysis_status"] = "unchecked"

        # Report and set analysis status if there are too few stations
        # TODO: if vlbi_site_pos in state_vector and num_stations < 3
        estimate_site_pos = np.char.startswith(
            np.array(param_names, dtype=str), "vlbi_site_pos").any()
        if len(dset.unique("station")) < 3 and estimate_site_pos:
            log.warn(
                f"Too few stations {len(dset.unique('station'))} < 3. Do not estimate station positions."
            )
            # if dset.meta.get("analysis_status") == "unchecked":
            # dset.meta["analysis_status"] = "needs custom state vector"
        elif len(dset.unique("station")) < 3 and estimate_site_pos:
            if dset.meta.get("analysis_status") == "needs custom state vector":
                dset.meta["analysis_status"] = "unchecked"
        # Update config
        cfg_vars = dset.vars.copy()
        cfg_vars.pop("rundate")
        with config.update_tech_config(dset.analysis["rundate"],
                                       cfg_vars.pop("pipeline"),
                                       **cfg_vars) as cfg:
            cfg.update("analysis_status",
                       "status",
                       dset.meta.get("analysis_status", ""),
                       source=__file__)

        # Add information to dset.meta
        dset.meta.add("number of observations",
                      dset.num_obs,
                      section="statistics")
        dset.meta.add("number of unknowns", num_unknowns, section="statistics")
        dset.meta.add("square sum of residuals",
                      sq_sum_residuals,
                      section="statistics")
        dset.meta.add("degrees of freedom", deg_freedom, section="statistics")
        dset.meta.add("variance factor", variance_factor, section="statistics")
        dset.meta.add("weighted square sum of o-c",
                      sq_sum_residuals + sq_sum_omc_terms,
                      section="statistics")
        dset.meta.add("matrix", N.tolist(), section="normal equation")
        dset.meta.add("vector", b[:, 0].tolist(), section="normal equation")
        dset.meta.add("names",
                      param_names[normal_idx],
                      section="normal equation")
        dset.meta.add("unit", [
            config.tech[f.split("-")[0]].unit.str
            for f in param_names[normal_idx]
        ],
                      section="normal equation")

        # TODO should this be here?
        log.info("Solving normal equations")
        names = dset.meta["normal equation"]["names"]
        n = len(names)
        d = np.zeros((n, 6))
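        # fix_param_weight collects weights for absolute (a priori) constraints
        # on parameters the free-network constraints cannot handle; it is added
        # to the normal matrix below (thaller2008: eq 2.49)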
        fix_param_weight = np.zeros(n)
        H = np.zeros((6, n))
        stations = set()
        from where import apriori

        reference_frame = config.tech.reference_frames.list[0]
        trf = apriori.get("trf",
                          time=dset.time.utc.mean,
                          reference_frames=reference_frame)
        # thaller2008: eq 2.51 (skipping scale factor)
        for idx, column in enumerate(names):
            if "_site_pos-" not in column:
                continue
            station = column.split("-", maxsplit=1)[-1].rsplit("_",
                                                               maxsplit=1)[0]
            site_id = dset.meta[station]["site_id"]
            if site_id in trf:
                x0, y0, z0 = trf[site_id].pos.trs
                if column.endswith("_x"):
                    d[idx, :] = np.array([1, 0, 0, 0, z0, -y0])
                if column.endswith("_y"):
                    d[idx, :] = np.array([0, 1, 0, -z0, 0, x0])
                if column.endswith("_z"):
                    d[idx, :] = np.array([0, 0, 1, y0, -x0, 0])
                stations.add(station)

        if len(stations) >= 3:
            try:
                # thaller2008: eq 2.57
                H = np.linalg.inv(d.T @ d) @ d.T
                log.info(
                    f"Applying NNT/NNR with {', '.join(stations)} from {reference_frame.upper()}"
                )
            except np.linalg.LinAlgError:
                log.warn(f"Unable to invert matrix for NNR/NNT constraints")
        else:
            log.info(
                f"Too few stations to use NNR/NNT contraints from {reference_frame.upper()}. Using absolute constraints for station positions."
            )
            # Too few stations to use NNT/NNR?
            for idx, column in enumerate(names):
                if "_site_pos-" not in column:
                    continue
                station = column.split("-",
                                       maxsplit=1)[-1].rsplit("_",
                                                              maxsplit=1)[0]
                fix_param_weight[idx] = 1 / (1e-6)**2  # 1/meters**2

        sigmas = [0.0001] * 3 + [1.5e-11] * 3

        # NNR to CRF
        if "celestial_reference_frames" in config.tech.master_section:
            celestial_reference_frame = config.tech.celestial_reference_frames.list[0]
            crf = apriori.get(
                "crf",
                time=dset.time,
                celestial_reference_frames=celestial_reference_frame)
            H2 = np.zeros((3, n))
            for idx, column in enumerate(names):
                if "_src_dir-" not in column:
                    continue
                source = column.split("-", maxsplit=1)[-1].split("_")[0]
                if source in crf:
                    ra = crf[source].pos.right_ascension
                    dec = crf[source].pos.declination
                    if dset.num(source=source) < 5:
                        fix_param_weight[idx] = 1 / (1e-12)**2  # 1/radians**2
                        if column.endswith("_ra"):
                            log.info(
                                f"Too few observations for source {source}. Using absolute constraints for source positions."
                            )
                        continue

                    if column.endswith("_ra"):
                        H2[0, idx] = -np.cos(ra) * np.sin(dec) * np.cos(dec)
                        H2[1, idx] = -np.sin(ra) * np.sin(dec) * np.cos(dec)
                        H2[2, idx] = np.cos(dec)**2
                    if column.endswith("_dec"):
                        H2[0, idx] = np.sin(ra)
                        H2[1, idx] = -np.cos(ra)

            if H2.any():
                log.info(
                    f"Applying NNR constraint to {celestial_reference_frame.upper()}"
                )
                # add NNR to CRF constraints
                H = np.concatenate((H, H2))
                sigmas = sigmas + [1e-6] * 3

        # thaller2008: eq 2.45
        P_h = np.diag(1 / np.array(sigmas)**2)

        # Free network constraints: thaller2008: eq 2.58
        N_h = N + H.T @ P_h @ H

        # Baselines with too few obs?
        for idx, column in enumerate(names):
            if "_baseline-" not in column:
                continue
            baseline = column.split("-", maxsplit=1)[-1].rsplit("_",
                                                                maxsplit=1)[0]
            if dset.num(baseline=baseline) < 5:
                fix_param_weight[idx] = 1 / (1e-6)**2  # 1/meters**2
                log.info(
                    f"Too few observations for baseline {baseline}. Constrained to a priori value"
                )
                continue

        # Absolute constraints (on sources with too few observations): thaller2008: eq.2.49
        N_h += np.diag(fix_param_weight)

        # solve neq
        N_h_inv = np.linalg.inv(N_h)
        x = N_h_inv @ b

        # Covariance: thaller2008: eq 2.16
        Q_xx = variance_factor**2 * N_h_inv

        dset.meta.add("solution", x[:, 0].tolist(), section="normal equation")
        dset.meta.add("covariance", Q_xx.tolist(), section="normal equation")