Example #1
def vectorized_llh(pos, ref_ellipsoid=2):
    """Vectorized version of SOFA gc2gd-function. 

    Converts xyz coordinates to latitude, longitude and height

    @todo ref_ellipsoid
    Args:
        pos:        xyz coordinates
    Returns:
        np.array:   llh coordinates 
    """
    if np.array(pos).ndim == 1:
        lon, lat, h, _ = sofa.iau_gc2gd(ref_ellipsoid, pos)
        return np.array([lat, lon, h])
    else:
        llh = np.empty(pos.shape)
        for i, xyz in enumerate(pos):
            lon, lat, h, _ = sofa.iau_gc2gd(ref_ellipsoid, xyz)
            llh[i, :] = (lat, lon, h)
        return llh
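
A minimal usage sketch of the function above. It assumes numpy and the project's sofa wrapper (the module providing sofa.iau_gc2gd) are importable; the coordinates are made up for illustration.

import numpy as np

# A point on the GRS80 equator at the prime meridian, in meters
xyz = np.array([6378137.0, 0.0, 0.0])
print(vectorized_llh(xyz))                      # -> approximately [0.0, 0.0, 0.0]

# Several positions at once, one row per observation
positions = np.array([xyz, xyz + [100.0, 0.0, 0.0]])
print(vectorized_llh(positions))                # second row: same lat/lon, height roughly 100 m
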
Example #2
    def llh(self):
        """Transform geocentric coordinates to geodetic using the specified reference ellipsoid

        Returns:
            numpy.ndarray: Geodetic coordinates (latitude in radians, longitude in radians, height in meters)
        """
        llh = np.empty((self.num_obs, 3))
        for obs, itrs in enumerate(self.itrs):
            longitude, latitude, height, _ = sofa.iau_gc2gd(self._ref_ellipsoid, itrs[:3])
            llh[obs, :] = (latitude, longitude, height)

        return llh
Example #3
def _write_to_dataset(parser, dset, rundate, session):

    data = parser.as_dict()
    units = data.get("meta", {}).get("units", {})

    # Session meta
    dset.meta.add("tech", "vlbi")
    dset.meta.add("file", parser.file_path.stem, section="input")
    dset.meta.add("type", config.tech.obs_format.str.upper(), section="input")

    if "meta" not in data:
        # Only read master file if session_code is not available in data["meta"]
        # This is to avoid a dependency to the master file which changes frequently
        master = apriori.get("vlbi_master_schedule", rundate=rundate)
        master_data = master.get((rundate.timetuple().tm_yday, session), {})
        session_code = master_data.get("session_code", "")
    else:
        master = apriori.get("vlbi_master_schedule")
        session_code = data["meta"].get("session_code", "")

    dset.meta.add("session_code", session_code, section="input")
    dset.meta.add("session_type",
                  master.session_type(session_code),
                  section="input")

    log.info(f"Session code: {session_code}")

    # Convert source names to official IERS names
    source_names = apriori.get("vlbi_source_names")
    iers_source_names = [
        source_names[src]["iers_name"] if src in source_names else src
        for src in data["source"]
    ]
    # Note: "." in source names is replaced with the letters "dot" after the CRF lookup below
    data["source"] = iers_source_names

    # Replace spaces in station names with underscores to match official IVS name
    data["station_1"] = np.char.replace(data["station_1"], " ", "_")
    data["station_2"] = np.char.replace(data["station_2"], " ", "_")

    dset.num_obs = len(data["time"])
    dset.add_time("time",
                  val=data.pop("time"),
                  scale="utc",
                  fmt="isot",
                  write_level="operational")

    # Source directions
    crf = apriori.get("crf", time=dset.time)
    ra = np.array([
        crf[s].pos.right_ascension if s in crf else 0 for s in data["source"]
    ])
    dec = np.array(
        [crf[s].pos.declination if s in crf else 0 for s in data["source"]])
    dset.add_direction("src_dir",
                       ra=ra,
                       dec=dec,
                       time=dset.time,
                       write_level="operational")
    # Replace the character "." with the letters "dot" in source names because "." has a special meaning in where
    data["source"] = [s.replace(".", "dot") for s in iers_source_names]

    for field, values in data.items():
        values = np.array(values)
        if values.dtype.kind in {"U", "S"}:
            multiplier = -1 if field.endswith("_1") else 1
            dset.add_text(field,
                          val=values,
                          multiplier=multiplier,
                          write_level="operational")
        elif values.dtype.kind in {"f", "i"}:
            multiplier = -1 if field.endswith("_1") else 1
            unit = units.get(field, None)
            dset.add_float(field,
                           val=values,
                           multiplier=multiplier,
                           write_level="operational",
                           unit=unit)
        elif values.dtype.kind in {"O"}:
            continue
        else:
            log.warn(f"Unknown datatype {values.dtype} for field {field}")

    # Station information
    log.info(f"Found stations: {', '.join(dset.unique('station'))}")
    trf = apriori.get("trf", time=dset.time)
    station_codes = apriori.get("vlbi_station_codes")
    dset.add_text(
        "baseline",
        val=np.array([
            f"{s1}/{s2}"
            for s1, s2 in zip(data["station_1"], data["station_2"])
        ]),
        write_level="operational",
    )
    for site in dset.unique("station"):
        if site in station_codes:
            cdp = station_codes[site]["cdp"]
            trf_site = trf[cdp]
        else:
            named_site = trf.named_site(site)
            trf_site = trf.closest(named_site.pos)
            cdp = trf_site.key
            ignore_stations = config.tech.ignore_station.stations.list
            logger = log.info if site in ignore_stations else log.warn
            logger(
                f"Undefined station name {site}. Assuming station is {trf_site.name} to get a cdp number."
            )

        data["pos_" + site] = trf_site.pos.trs.val
        _site_pos = np.mean(data[f"pos_{site}"], axis=0)
        log.debug(
            f"Using position {_site_pos} for {site} from {trf_site.source}")

        ivsname = station_codes[cdp]["name"]
        data["sta_" + site] = dict(site_id=cdp, cdp=cdp, ivsname=ivsname)

    # Positions
    itrs_pos_1 = np.array(
        [data["pos_" + s][i, :] for i, s in enumerate(data["station_1"])])
    itrs_vel_1 = np.zeros((dset.num_obs, 3))
    dset.add_posvel(
        "site_pos_1",
        val=np.concatenate((itrs_pos_1, itrs_vel_1), axis=1),
        ellipsoid=ellipsoid.get(config.tech.reference_ellipsoid.str.upper()),
        system="trs",
        time=dset.time,
        # other=dset.src_dir,
        write_level="operational",
    )

    itrs_pos_2 = np.array(
        [data["pos_" + s][i, :] for i, s in enumerate(data["station_2"])])
    itrs_vel_2 = np.zeros((dset.num_obs, 3))
    dset.add_posvel(
        "site_pos_2",
        val=np.concatenate((itrs_pos_2, itrs_vel_2), axis=1),
        ellipsoid=ellipsoid.get(config.tech.reference_ellipsoid.str.upper()),
        system="trs",
        time=dset.time,
        # other=dset.src_dir,
        write_level="operational",
    )

    # Compute aberrated source directions
    def aberrated_src_dir(site_pos):
        """See IERS2010 Conventions, equation 11.15"""
        site_vel_gcrs = site_pos.gcrs.vel.val
        eph = apriori.get("ephemerides", time=dset.time)
        vel = eph.vel_bcrs("earth") + site_vel_gcrs
        return (
            dset.src_dir.unit_vector + vel / constant.c -
            dset.src_dir.unit_vector *
            (dset.src_dir.unit_vector[:, None, :] @ vel[:, :, None])[:, :, 0] /
            constant.c)

    k_1 = aberrated_src_dir(dset.site_pos_1)
    dset.add_direction("abr_src_dir_1", val=k_1, system="gcrs", time=dset.time)
    dset.site_pos_1.other = dset.abr_src_dir_1

    k_2 = aberrated_src_dir(dset.site_pos_2)
    dset.add_direction("abr_src_dir_2", val=k_2, system="gcrs", time=dset.time)
    dset.site_pos_2.other = dset.abr_src_dir_2

    # Station data
    sta_fields = set().union(
        *[v.keys() for k, v in data.items() if k.startswith("sta_")])
    for field in sta_fields:
        dset.add_text(field + "_1",
                      val=[data["sta_" + s][field] for s in data["station_1"]],
                      multiplier=-1)  # write_level='analysis')
        dset.add_text(field + "_2",
                      val=[data["sta_" + s][field] for s in data["station_2"]],
                      multiplier=1)  # write_level='analysis')

    # Station meta
    station_keys = sorted([k for k, v in data.items() if k.startswith("sta_")])
    pos_keys = sorted([k for k, v in data.items() if k.startswith("pos_")])

    for sta_key, pos_key in zip(station_keys, pos_keys):
        sta_name = sta_key.replace("sta_", "")
        cdp = data[sta_key]["cdp"]
        ivsname = station_codes[cdp]["name"]
        longitude, latitude, height, _ = sofa.iau_gc2gd(
            2, data[pos_key][0, :])  # TODO: Reference ellipsoid

        dset.meta.add("cdp", cdp, section=ivsname)
        dset.meta.add("site_id", cdp, section=ivsname)
        dset.meta.add("domes", station_codes[cdp]["domes"], section=ivsname)
        dset.meta.add("marker", station_codes[cdp]["marker"], section=ivsname)
        dset.meta.add("description",
                      station_codes[cdp]["description"],
                      section=ivsname)
        dset.meta.add("longitude", longitude, section=ivsname)
        dset.meta.add("latitude", latitude, section=ivsname)
        dset.meta.add("height", height, section=ivsname)
        if sta_name != ivsname:
            dset.meta.add("cdp", cdp, section=sta_name)
            dset.meta.add("site_id", cdp, section=sta_name)
            dset.meta.add("domes",
                          station_codes[cdp]["domes"],
                          section=sta_name)
            dset.meta.add("marker",
                          station_codes[cdp]["marker"],
                          section=sta_name)
            dset.meta.add("description",
                          station_codes[cdp]["description"],
                          section=sta_name)
            dset.meta.add("longitude", longitude, section=sta_name)
            dset.meta.add("latitude", latitude, section=sta_name)
            dset.meta.add("height", height, section=sta_name)

    # Final cleanup
    # If there are more than 300 sources in an NGS file, the source names are gibberish
    bad_source_idx = ra == 0
    bad_sources = np.array(dset.source)[bad_source_idx]
    for s in np.unique(bad_sources):
        log.warn(
            f"Unknown source {s}. Observations with this source are discarded")
    dset.subset(np.logical_not(bad_source_idx))
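
The batched matrix product inside aberrated_src_dir above is just a row-wise dot product written so that it broadcasts. A small self-contained sketch with made-up unit vectors and velocities, using only numpy:

import numpy as np

c = 299_792_458.0                                          # speed of light in m/s
k = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])           # source unit vectors, shape (n, 3)
vel = np.array([[0.0, 30_000.0, 0.0], [0.0, 0.0, 400.0]])  # velocities in m/s, shape (n, 3)

# (n, 1, 3) @ (n, 3, 1) -> (n, 1, 1); dropping the last axis gives a column of dot products
k_dot_v = (k[:, None, :] @ vel[:, :, None])[:, :, 0]       # shape (n, 1)
assert np.allclose(k_dot_v, np.sum(k * vel, axis=1, keepdims=True))

# IERS 2010 Conventions, equation 11.15: k' = k + V/c - k (k . V) / c
k_aberrated = k + vel / c - k * k_dot_v / c
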
Example #4
def _write_to_dataset(parser1, parser2, dset, rundate):
    """Store SLR data in a dataset"""

    data_all1 = parser1.as_dict()
    data_all2 = parser2.as_dict()
    if parser1.file_path == parser2.file_path:
        collection = [data_all1]
    else:
        collection = [data_all1, data_all2]

    # Meta information
    dset.meta["tech"] = "slr"
    dset.meta.add("file", parser1.file_path.stem, section="input")
    dset.meta.add("file", parser2.file_path.stem, section="input")
    dset.meta.add("type", config.tech.obs_format.str.upper(), section="input")

    # Make new dict "obs_data" containing only data in relevant time interval:
    arc_length = config.tech.arc_length.float
    rundate_datetime = datetime(rundate.year, rundate.month, rundate.day)
    obs_data = dict()
    for data_all in collection:
        for i, x in enumerate(data_all["meta"]["obs_time"]):
            if rundate_datetime <= x < rundate_datetime + timedelta(
                    days=arc_length):
                for key in ("meta", "obs", "obs_str"):
                    for field, val in data_all[key].items():
                        obs_data.setdefault(key, dict()).setdefault(
                            field, list()).append(val[i])

        data_all.pop("meta")
        data_all.pop("obs")
        data_all.pop("obs_str")

        for key in data_all.keys():
            if key.startswith("met_"):
                for key2, val in data_all[key].items():
                    obs_data.setdefault(key,
                                        dict()).setdefault(key2,
                                                           list()).append(val)
            elif key.startswith("satellite_"):
                # TODO: Use this information in the future?
                continue
            elif key.startswith("station_"):
                # TODO: Use this information in the future?
                continue
            else:
                log.fatal(f"Unknown data type{key}")

    obs_date = obs_data["meta"]["obs_date"]
    time = [
        obs_date[i] + timedelta(seconds=obs_data["meta"]["obs_sec"][i])
        for i in range(0, len(obs_date))
    ]
    dset.num_obs = len(obs_data["meta"]["obs_time"])
    dset.add_time("time", val=time, scale="utc", fmt="datetime")
    dset.add_text(val=obs_data["meta"]["station"], name="station")
    dset.add_text(val=obs_data["meta"]["satellite"], name="satellite")
    dset.add_float(val=obs_data["meta"]["bin_rms"],
                   unit="picoseconds",
                   name="bin_rms")
    # Positions
    trf = apriori.get("trf", time=dset.time)
    for station in dset.unique("station"):
        trf_site = trf[station]
        station_pos = trf_site.pos.trs.val
        log.debug(
            f"Station position for {station} ({trf_site.name}) is (x,y,z) = {station_pos.mean(axis=0)}"
        )
        domes = trf_site.meta["domes"]
        obs_data["pos_" + station] = station_pos
        obs_data["station-other_" + station] = dict(domes=domes,
                                                    cdp=station,
                                                    site_id=station)
    dset.add_position(
        "site_pos",
        time=dset.time,
        system="trs",
        val=np.array(
            [obs_data["pos_" + s][idx] for idx, s in enumerate(dset.station)]),
    )
    # Station data
    sta_fields = set().union(
        *[v.keys() for k, v in obs_data.items() if k.startswith("station_")])
    for field in sta_fields:
        dset.add_float(field,
                       val=np.array([
                           float(obs_data["station_" + s][field])
                           for s in dset.station
                       ]))
    sta_fields = set().union(*[
        v.keys() for k, v in obs_data.items() if k.startswith("station-other_")
    ])
    for field in sta_fields:
        dset.add_text(
            field,
            val=[obs_data["station-other_" + s][field] for s in dset.station])

    # Station meta
    station_keys = sorted(
        [k for k, v in obs_data.items() if k.startswith("station-other_")])
    pos_keys = sorted([k for k, v in obs_data.items() if k.startswith("pos_")])

    for sta_key, pos_key in zip(station_keys, pos_keys):
        sta_name = sta_key.replace("station-other_", "")
        cdp = obs_data[sta_key]["cdp"]
        dset.meta.add(sta_name, "site_id", cdp)
        longitude, latitude, height, _ = sofa.iau_gc2gd(
            2, obs_data[pos_key][0, :])  # TODO: Reference ellipsoid
        dset.meta.add("cdp", cdp, section=sta_name)
        dset.meta.add("site_id", cdp, section=sta_name)
        dset.meta.add("domes", obs_data[sta_key]["domes"], section=sta_name)
        dset.meta.add("marker", " ", section=sta_name)
        dset.meta.add("description", " ", section=sta_name)
        dset.meta.add("longitude", longitude, section=sta_name)
        dset.meta.add("latitude", latitude, section=sta_name)
        dset.meta.add("height", height, section=sta_name)

    # Satellite data
    sat_fields = set().union(
        *[v.keys() for k, v in obs_data.items() if k.startswith("satellite_")])
    for field in sat_fields:
        dset.add_float(field,
                       val=np.array([
                           float(obs_data["satellite_" + s][field])
                           for s in dset.satellite
                       ]))

    # Observations
    # In the dataset, obs_time is seconds since rundate:
    v = [(obs_data["meta"]["obs_date"][i] - rundate_datetime).total_seconds() +
         obs_data["meta"]["obs_sec"][i] for i in range(0, dset.num_obs)]

    obs_data["obs"].pop("obs_time")
    dset.add_float("obs_time", val=v)
    for field, values in obs_data["obs"].items():
        dset.add_float(field, val=np.array(values))

    for field, values in obs_data["obs_str"].items():
        dset.add_text(field, val=values)

    return obs_data
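
The arc-length window test and the epoch assembly in the function above follow the pattern sketched below; the dates, seconds and arc length are made up for illustration, while in the real function they come from the parser's meta fields and the configuration.

from datetime import datetime, timedelta

rundate = datetime(2018, 2, 1)
arc_length = 1.0                                  # days, read from config.tech.arc_length above

obs_date = [datetime(2018, 2, 1), datetime(2018, 2, 1), datetime(2018, 2, 2)]
obs_sec = [60.0, 7200.5, 30.0]

# Each epoch is the observation date plus the seconds of day, as passed to dset.add_time
epochs = [d + timedelta(seconds=s) for d, s in zip(obs_date, obs_sec)]

# Keep only epochs inside the interval [rundate, rundate + arc_length)
keep = [t for t in epochs if rundate <= t < rundate + timedelta(days=arc_length)]
print(keep)       # 2018-02-01 00:01:00 and 2018-02-01 02:00:00.500000
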
Example #5
def _write_to_dataset(parser, dset, rundate, session):

    data = parser.as_dict()
    # TODO: units on fields

    # Session meta
    dset.meta["tech"] = "vlbi"
    dset.add_to_meta("input", "file", parser.file_path.stem)
    dset.add_to_meta("input", "type", config.tech.obs_format.str.upper())

    if "meta" not in data:
        # Only read master file if session_code is not available in data["meta"]
        # This is to avoid a dependency to the master file which changes frequently
        master = apriori.get("vlbi_master_schedule", rundate=rundate)
        master_data = master.get((rundate.timetuple().tm_yday, session), {})
        session_code = master_data.get("session_code", "")
        dset.add_to_meta("input", "session_code", session_code)
    else:
        master = apriori.get("vlbi_master_schedule")
        session_code = data["meta"].get("session_code", "")
        dset.add_to_meta("input", "session_code", session_code)

    dset.add_to_meta("input", "session_type",
                     master.session_type(session_code))

    log.info(f"Session code: {session_code}")

    # Convert source names to official IERS names
    source_names = apriori.get("vlbi_source_names")
    iers_source_names = [
        source_names[src]["iers_name"] if src in source_names else src
        for src in data["source"]
    ]
    data["source"] = iers_source_names

    # Replace spaces in station names with underscores to match official IVS name
    data["station_1"] = np.char.replace(data["station_1"], " ", "_")
    data["station_2"] = np.char.replace(data["station_2"], " ", "_")

    dset.num_obs = len(data["time"])
    dset.add_time("time",
                  val=data.pop("time"),
                  scale="utc",
                  format="isot",
                  write_level="operational")
    for field, values in data.items():
        values = np.array(values)
        if values.dtype.kind in {"U", "S"}:
            dset.add_text(field, val=values, write_level="operational")
        elif values.dtype.kind in {"f", "i"}:
            dset.add_float(field, val=values, write_level="operational")
        elif values.dtype.kind in {"O"}:
            continue
        else:
            log.warn(f"Unknown datatype {values.dtype} for field {field}")

    # Source directions
    crf = apriori.get("crf", time=dset.time.mean.utc)
    ra = np.array(
        [crf[s].pos.crs[0] if s in crf else 0 for s in data["source"]])
    dec = np.array(
        [crf[s].pos.crs[1] if s in crf else 0 for s in data["source"]])

    dset.add_direction("src_dir", ra=ra, dec=dec, write_level="operational")

    # Station information
    log.info(f"Found stations: {', '.join(dset.unique('station'))}")
    trf = apriori.get("trf", time=dset.time)
    station_codes = apriori.get("vlbi_station_codes")
    dset.add_text(
        "baseline",
        val=np.array([
            f"{s1}/{s2}"
            for s1, s2 in zip(data["station_1"], data["station_2"])
        ]),
        write_level="operational",
    )
    for site in dset.unique("station"):
        if site in station_codes:
            cdp = station_codes[site]["cdp"]
            trf_site = trf[cdp]
        else:
            named_site = trf.named_site(site)
            trf_site = trf.closest(named_site.pos)
            cdp = trf_site.key
            ignore_stations = config.tech.ignore_station.stations.list
            logger = log.info if site in ignore_stations else log.warn
            logger(
                f"Undefined station name {site}. Assuming station is {trf_site.name}."
            )

        data["pos_" + site] = trf_site.pos.itrs
        _site_pos = np.mean(data[f"pos_{site}"], axis=0)
        log.debug(
            f"Using position {_site_pos} for {site} from {trf_site.source}")

        ivsname = station_codes[cdp]["name"]
        data["sta_" + site] = dict(site_id=cdp, cdp=cdp, ivsname=ivsname)

    # Positions
    itrs_pos_1 = np.array(
        [data["pos_" + s][i, :] for i, s in enumerate(data["station_1"])])
    itrs_vel_1 = np.zeros((dset.num_obs, 3))
    dset.add_posvel(
        "site_pos_1",
        time="time",
        other="src_dir",
        itrs=np.concatenate((itrs_pos_1, itrs_vel_1), axis=1),
        write_level="operational",
    )
    itrs_pos_2 = np.array(
        [data["pos_" + s][i, :] for i, s in enumerate(data["station_2"])])
    itrs_vel_2 = np.zeros((dset.num_obs, 3))
    dset.add_posvel(
        "site_pos_2",
        time="time",
        other="src_dir",
        itrs=np.concatenate((itrs_pos_2, itrs_vel_2), axis=1),
        write_level="operational",
    )

    # Station data
    sta_fields = set().union(
        *[v.keys() for k, v in data.items() if k.startswith("sta_")])
    for field in sta_fields:
        dset.add_text(field + "_1",
                      val=[data["sta_" + s][field] for s in data["station_1"]
                           ])  # write_level='analysis')
        dset.add_text(field + "_2",
                      val=[data["sta_" + s][field] for s in data["station_2"]
                           ])  # write_level='analysis')

    # Station meta
    station_keys = sorted([k for k, v in data.items() if k.startswith("sta_")])
    pos_keys = sorted([k for k, v in data.items() if k.startswith("pos_")])

    for sta_key, pos_key in zip(station_keys, pos_keys):
        sta_name = sta_key.replace("sta_", "")
        cdp = data[sta_key]["cdp"]
        ivsname = station_codes[cdp]["name"]
        longitude, latitude, height, _ = sofa.iau_gc2gd(
            2, data[pos_key][0, :])  # TODO: Reference ellipsoid
        dset.add_to_meta(ivsname, "cdp", cdp)
        dset.add_to_meta(ivsname, "site_id", cdp)
        dset.add_to_meta(ivsname, "domes", station_codes[cdp]["domes"])
        dset.add_to_meta(ivsname, "marker", station_codes[cdp]["marker"])
        dset.add_to_meta(ivsname, "description",
                         station_codes[cdp]["description"])
        dset.add_to_meta(ivsname, "longitude", longitude)
        dset.add_to_meta(ivsname, "latitude", latitude)
        dset.add_to_meta(ivsname, "height", height)
        if sta_name != ivsname:
            dset.add_to_meta(sta_name, "cdp", cdp)
            dset.add_to_meta(sta_name, "site_id", cdp)
            dset.add_to_meta(sta_name, "domes", station_codes[cdp]["domes"])
            dset.add_to_meta(sta_name, "marker", station_codes[cdp]["marker"])
            dset.add_to_meta(sta_name, "description",
                             station_codes[cdp]["description"])
            dset.add_to_meta(sta_name, "longitude", longitude)
            dset.add_to_meta(sta_name, "latitude", latitude)
            dset.add_to_meta(sta_name, "height", height)

    # Final cleanup
    # If there are more than 300 sources in an NGS file, the source names are gibberish
    bad_source_idx = ra == 0
    bad_sources = np.array(dset.source)[bad_source_idx]
    for s in np.unique(bad_sources):
        log.warn(
            f"Unknown source {s}. Observations with this source are discarded")
    dset.subset(np.logical_not(bad_source_idx))
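
Both example #3 and example #5 route each parsed field to dset.add_text or dset.add_float based on the numpy dtype kind of its values. A quick stand-alone illustration of the kind codes involved (values made up):

import numpy as np

print(np.array(["NYALES20", "WETTZELL"]).dtype.kind)   # "U": unicode strings -> add_text
print(np.array([1.2, 3.4]).dtype.kind)                 # "f": floats          -> add_float
print(np.array([1, 2, 3]).dtype.kind)                  # "i": integers        -> add_float
print(np.array([{"cdp": "7331"}]).dtype.kind)          # "O": objects         -> skipped
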