Example #1
    def _calculate_pos_crs(self, source):
        """Calculate position for a source

        Args:
            source (String):    Key saying which source to calculate position for.

        Returns:
            Direction:  Position of the source in the celestial reference system
        """

        ga = 0.0058 * Unit.mas2rad  # Aberration constant (mas/yr)
        mjd_2015 = 57023.0  # Reference epoch of aberration model

        # Galactic center
        gc = Direction(ra=Unit.hms_to_rad(17, 45, 40.04),
                       dec=Unit.dms_to_rad(-29, 0, 28.1))

        # Radio source
        src = Direction(ra=self.data[source]["ra"],
                        dec=self.data[source]["dec"])

        # Compute correction
        dra = ga * gc.unit_vector @ src.dsrc_dra
        ddec = ga * gc.unit_vector @ src.dsrc_ddec
        dt = (self.time.mean.mjd - mjd_2015) * Unit.day2julian_year

        ra = src.right_ascension + dra * dt
        dec = src.declination + ddec * dt

        return np.squeeze(Direction(ra=ra, dec=dec, time=self.time))
Example #2
def _plot(
    dset1: "Dataset", dset2: "Dataset", ddiff: "Dataset", common_fields: Set[str], figure_dir: "pathlib.PosixPath"
) -> None:
    """Generate plots

    Args:
        dset1:          First dataset containing the data.
        dset2:          Second dataset containing the data.
        ddiff:          Dataset containing the differences for each field between Dataset 'dset1' and Dataset 'dset2'.
        common_fields:  Set with fields common to both datasets.
        figure_dir:     Figure directory.
    """
    dset1_name = config.where.gnss_compare_datasets.get("dset1_name", default="dset1").str + ":"
    dset2_name = config.where.gnss_compare_datasets.get("dset2_name", default="dset2").str + ":"

    for field in common_fields:
        ylabel = FIELDS[field].label if field in FIELDS else field.lower()
        title = FIELDS[field].title if field in FIELDS else ""
        unit = Unit(FIELDS[field].unit).units if field in FIELDS else Unit(dset1.unit(field)).units
        options = _set_plot_config(title=title)

        plot_scatter_subplots(
            x_array=dset1.time.gps.datetime,
            y_arrays=[dset1[field], dset2[field], ddiff[field]],
            xlabel="Time [GPS]",
            ylabels=[f"{dset1_name} {ylabel}", f"{dset2_name} {ylabel}", f"Difference: {ylabel}"],
            colors=["steelblue", "darkorange", "limegreen"],
            y_units=[f"{unit:~P}", f"{unit:~P}", f"{unit:~P}"],
            figure_path=figure_dir / f"plot_{field}.{FIGURE_FORMAT}",
            opt_args=options,
        )
Example #3
    def _calculate_pos_crs(self, source):
        """Calculate position for a source

        Args:
            source (String):    Key saying which source to calculate position for.

        Returns:
            Array:  Positions, one 2-vector
        """

        ga = 0.0058 * Unit.mas2rad  # Aberration constant (mas/yr)
        mjd_2015 = 57023.0  # Reference epoch of aberration model

        # Galactic center
        gc_ra = Unit.hms_to_rad(17, 45, 40.04)
        gc_dec = Unit.dms_to_rad(-29, 0, 28.1)
        gc = radec2unitvector(gc_ra, gc_dec)

        source_info = self.data[source]

        # Compute correction
        dra = ga * gc @ dsrc_dra(source_info["ra"], source_info["dec"])
        ddec = ga * gc @ dsrc_ddec(source_info["ra"], source_info["dec"])
        dt = (self.time.mean.mjd - mjd_2015) * Unit.day2julian_year

        return np.array(
            [source_info["ra"] + dra * dt, source_info["dec"] + ddec * dt])
Example #4
def partial_vectors(dset, estimator_config_key):
    """Call all partials specified in the configuration and set up the corresponding state vector

    The list of partials to calculate is taken from the config file of the given technique. Each partial calculator is
    passed a :class:`~where.data.dataset.Dataset` with data for the model run and should return a tuple with the
    partial vectors and their names.

    Args:
        dset (Dataset):                 A Dataset containing model run data.
        estimator_config_key (String):  Key in config file with the name of the estimator.

    Returns:
        Dict: List of names of the partial derivatives for each partial config key.
    """
    partial_vectors = dict()
    prefix = dset.vars["pipeline"]

    # Delete values from previous iterations
    if "partial" in dset.fields:
        del dset.partial

    for config_key in estimators.partial_config_keys(estimator_config_key):
        partial_vectors[config_key] = list()
        partials = config.tech[config_key].list
        partial_data = plugins.call_all(package_name=__name__,
                                        plugins=partials,
                                        prefix=prefix,
                                        dset=dset)

        for param, (data, names, data_unit) in partial_data.items():
            param_unit_cfg = config.tech[param].unit
            if not param_unit_cfg.str:
                log.fatal(
                    f"No unit given for parameter {param!r} in {param_unit_cfg.source}"
                )

            display_unit = config.tech[param].display_unit.str
            display_unit = param_unit_cfg.str if not display_unit else display_unit
            partial_unit_str = f"{dset.unit('calc')[0]} / ({param_unit_cfg.str})"
            partial_unit = str(Unit(partial_unit_str).u)
            factor = Unit(data_unit, partial_unit)
            for values, name in zip(data.T, names):
                partial_name = f"{param}-{name}" if name else f"{param}"
                partial_vectors[config_key].append(partial_name)

                field_name = f"partial.{partial_name}"
                dset.add_float(field_name,
                               val=values * factor,
                               unit=partial_unit,
                               write_level="operational")
                dset.meta.add(partial_name,
                              display_unit,
                              section="display_units")

    return partial_vectors
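Throughout these examples, Unit(from_unit, to_unit) is used as a multiplicative conversion factor, as in factor = Unit(data_unit, partial_unit) above. A rough stand-alone equivalent of that pattern with plain pint (illustrative only; the actual Unit class wraps more functionality):

from pint import UnitRegistry

ureg = UnitRegistry()

# Conversion factor between two units, comparable to Unit(from_unit, to_unit)
factor = ureg("meter").to("millimeter").magnitude  # -> 1000.0

# A compound partial-derivative unit like partial_unit_str above
partial_unit = ureg("meter / (second)").units
print(factor, partial_unit)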
Example #5
def saastamoinen_zenith_wet_delay(latitude, height, temperature, e):
    r"""Calculates zenith wet delay based Saastamoinen model

    The total tropospheric delay for a given zenith distance :math:`z` is determined after Equation (19a) in
    Saastamoinen :cite:`saastamoinen1972`:

    .. math::
       \Delta T = 0.002277 \cdot \sec z \cdot (p + (1255/T + 0.05) \cdot e) - 1.16 \cdot \tan^2 z

    The zenith tropospheric delay is determined with :math:`z = 0`:

    .. math::
       \Delta T^z = \Delta T_h^z + \Delta T_w^z = zhd + zwd

    with the zenith hydrostatic delay :math:`zhd = 0.002277 \cdot p` and zenith wet delay
    :math:`zwd = 0.002277 \cdot (1255/T + 0.05) \cdot e`.

    Args:
        latitude (numpy.ndarray):     Geodetic latitude for each observation in [rad]
        height (numpy.ndarray):       Orthometric height for each observation in [m]
        temperature (numpy.ndarray):  Temperature for each observation in [Celsius]
        e (numpy.ndarray):            Water vapor pressure for each observation in [hPa]

    Returns:
        numpy.ndarray:     Zenith wet delay for each observation in [m]
    """
    # Zenith wet delay based on Eq. (19a) in Saastamoinen :cite:`saastamoinen1972`
    return 0.002_276_8 * (1255 / Unit.celsius_to_kelvin(temperature) +
                          0.05) * e
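A worked numeric check of the formula above (note that latitude and height do not enter this variant). Unit.celsius_to_kelvin is replaced by an explicit offset so the snippet is self-contained:

import numpy as np

temperature = np.array([15.0])   # [Celsius]
e = np.array([10.0])             # water vapor pressure [hPa]

T_kelvin = temperature + 273.15  # stand-in for Unit.celsius_to_kelvin
zwd = 0.002_276_8 * (1255 / T_kelvin + 0.05) * e
print(zwd)                       # -> [0.1003...], i.e. roughly 0.1 m of zenith wet delay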
Example #6
def get_field_by_attrs(dset: "Dataset", attrs: Tuple[str],
                       unit: str) -> np.ndarray:
    """Get field values of a Dataset specified by the field attributes

    If necessary, the unit of the data field is converted to the defined 'output' unit.

    Args:
        dset:     Dataset, a dataset containing the data.
        attrs:    Field attributes (e.g. for Time object: (<scale>, <time format>)).
        unit:     Unit used for output.

    Returns:
        Array with Dataset field values
    """
    f = dset
    for attr in attrs:
        f = getattr(f, attr)

    # Convert 'unit' if necessary
    if unit:
        field = f"{'.'.join(attrs)}"
        if dset.unit(field):
            field_unit = dset.unit(field)[0]
            try:
                log.debug(
                    f"Convert dataset field {field} from unit {field_unit} to {unit}."
                )
                f = f * Unit(field_unit).to(unit).m
            except exceptions.UnitError:
                log.warn(f"Cannot convert from '{field_unit}' to '{unit}'.")

    return f
Example #7
    def _extend(self, other_field, memo) -> None:
        """Add observations from another field"""
        if other_field.data.ndim != self.data.ndim:
            raise ValueError(
                f"Field '{self.name}' cannot be extended. Dimensions must be equal. ({other_field.data.ndim} != {self.data.ndim})"
            )

        try:
            factors = [
                Unit(from_unit, to_unit)
                for from_unit, to_unit in zip(other_field._unit, self._unit)
            ]
        except exceptions.UnitError:
            raise exceptions.UnitError(
                f"Cannot extend field '{self.name}'. {other_field._unit} cannot be converted to {self._unit}"
            )
        except TypeError:
            if self._unit is None and other_field._unit is None:
                factors = 1
            else:
                raise exceptions.UnitError(
                    f"Cannot extend field '{self.name}'. {other_field._unit} cannot be converted to {self._unit}"
                )
        old_id = id(self.data)
        self.data = np.insert(self.data,
                              self.num_obs,
                              other_field.data * factors,
                              axis=0)
        memo[old_id] = self.data
Example #8
    def structure_data(self):
        ra = Unit.hms_to_rad(self._array["ra_h"], self._array["ra_m"],
                             self._array["ra_s"])
        dec = Unit.dms_to_rad(self._array["dec_deg"], self._array["dec_m"],
                              self._array["dec_s"])

        src_type = dict(vcs=False, non_vcs=True, undefined=False)
        self.data = {
            src["iers_name"]: dict(icrf_name=src["icrf_name"],
                                   defining=src["defining"],
                                   special=src["special"],
                                   ra=ra[i],
                                   dec=dec[i],
                                   **src_type)
            for i, src in enumerate(self._array)
        }
Example #9
    def parse_radio_source(self, line, _):
        """Read station position

        Reads the station position from the NGS file.

        Args:
            line:  Input data from NGS file
        """
        src_name = line["name"]
        self.data[src_name] = dict()
        self.data[src_name]["ra"] = Unit.hms_to_rad(float(line["ra_hrs"]),
                                                    int(line["ra_mins"]),
                                                    float(line["ra_secs"]))
        self.data[src_name]["dec"] = Unit.dms_to_rad(
            float(line["dec_degs"].replace(" ", "")), int(line["dec_mins"]),
            float(line["dec_secs"]))
Example #10
def get_field(dset: "Dataset", field: str, attrs: Tuple[str], unit: str) -> np.ndarray:
    """Get field values of a Dataset specified by the field attributes

    If necessary, the unit of the data field is converted to the defined 'output' unit.

    Args:
        dset:     Dataset, a dataset containing the data.
        field:    Field name.
        attrs:    Field attributes (e.g. for Time object: (<scale>, <time format>)).
        unit:     Unit used for output.

    Returns:
        Array with Dataset field values
    """
    f = dset[field]
    for attr in attrs:
        f = getattr(f, attr)
        
    # Convert 'unit' if necessary
    if unit:
        field_attrs = field if len(attrs) == 0 else f"{field}.{'.'.join(attrs)}"
        
        try:
            field_unit = dset.unit(field_attrs)[0]
        except (exceptions.UnitError, TypeError):
            log.debug(f"Skip unit conversion for field '{field_attrs}'.")
            return f # Skip unit conversion for text fields, which do not have a unit.
        
        try:
            log.debug(f"Convert dataset field {field} from unit {field_unit} to {unit}.")
            f = f * Unit(field_unit).to(unit).m
        except exceptions.UnitError:
            log.warn(f"Cannot convert from '{field_unit}' to '{unit}' for field {field}.")

    return f
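The loop f = getattr(f, attr) walks a chain of nested attributes, e.g. dset.time.gps.datetime for field='time' and attrs=('gps', 'datetime'). A minimal illustration of the pattern with a dummy object:

from types import SimpleNamespace

# Stand-in for a Time field with nested attributes time.gps.datetime
f = SimpleNamespace(gps=SimpleNamespace(datetime="2015-01-01T00:00:00"))

for attr in ("gps", "datetime"):
    f = getattr(f, attr)

print(f)  # -> 2015-01-01T00:00:00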
Example #11
    def _parse_observation(self, line: Dict[str, str],
                           cache: Dict[str, Any]) -> None:
        """Parse observations of COST records
        """

        # Skip slant sample lines
        if not line["hour"]:
            if float(line["minute"]) > 0:
                log.debug("Parsing of slant sample data is not implemented.")
            return

        if line["hour"][0].isalpha():
            return

        # Save data in cache
        for key, value in line.items():
            value = float(value) if value.replace('.', '').replace(
                '-', '').isnumeric() else value
            if key in UNIT_DEF:
                value = value * Unit(UNIT_DEF[key].from_, UNIT_DEF[key].to_)

            cache.setdefault(f"data_{key}", list()).append(value)

        cache.setdefault(f"data_time", list()).append(
            datetime(
                cache["date_data"].year,
                cache["date_data"].month,
                cache["date_data"].day,
                int(line["hour"]),
                int(line["minute"]),
                int(line["second"]),
            ))
Example #12
    def as_cartesian(self):
        """Get Euler pole of tectonic plate in cartesian coordinates (X, Y, Z)

        Returns:
            Euler pole in cartesian coordinates (X, Y, Z) in [milliarcsecond/yr]
        """
        return np.array([self.pole.wx, self.pole.wy, self.pole.wz]) * Unit(
            self.pole.unit).to("milliarcsecond per year").m
Example #13
    def parse_obs(self, unit_in="meter", except_fields=()):
        """Read information about an observation

        Stores the information in the temporary cache-dict, which will be transferred to self.data when all information
        about this observation is parsed. If `unit_in` is given, all values will be converted to meter unless the field
        is listed in the `except_fields`-list.

        Args:
            unit_in (String):      Name of unit of values to be parsed.
            except_fields (Tuple): Names of fields where values should not be converted to meters.
        """
        # Find scale factor for converting to meter
        if unit_in == "meter":
            scale_factor = 1
        else:
            quantity = Unit(unit_in)
            try:
                scale_factor = quantity.to("meter").magnitude
            except Unit.DimensionalityError:
                # Try to convert between time and length by multiplying by the speed of light
                scale_factor = (
                    quantity * constant.c *
                    Unit("meters per second")).to("meter").magnitude
        obs = {}

        # Define the function doing the actual parsing
        def parse_func(line, cache):
            for field in line:
                if field.startswith("flag_"):  # Flags are currently ignored
                    continue
                try:
                    obs[field] = float(line[field])
                except ValueError:
                    obs[field] = 0
                    log.debug(
                        f"Could not convert {line[field]} to a number for {field}. Value set to 0.0"
                    )
                if field not in except_fields:
                    obs[field] *= scale_factor
            for field, value in obs.items():
                self.data.setdefault(field, list()).append(value)

        return parse_func
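The fallback branch converts a delay given in time units to meters by multiplying with the speed of light. The same arithmetic with the constants written out (no Unit class needed):

c = 299_792_458.0               # speed of light [m/s]
scale_factor = 1e-9 * c         # meters per nanosecond, ~0.2998

delay_ns = 10.0                 # a 10 ns delay
print(delay_ns * scale_factor)  # -> ~2.998 m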
Example #14
    def _convert_dms2rad(self, field: bytes) -> float:
        """Convert DMS (degrees, minutes, seconds) to radians

        Args:
            field:  Original field with degrees, minutes, seconds separated by whitespace.

        Returns:
            Field converted to radians.
        """
        degrees, minutes, seconds = [float(f) for f in field.split()]
        return Unit.dms_to_rad(degrees, minutes, seconds)
Example #15
    def to_unit(self, unit: str) -> None:
        """Change rotation pole unit for all tectonic plates

        Args:
            unit:  Define unit of rotation pole (e.g. 'radian per year', 'milliarcsecond per year')
        """
        for plate, pole in self.poles.items():
            for entry in ["wx", "wy", "wz", "dwx", "dwy", "dwz"]:
                setattr(pole, entry, getattr(pole, entry) * Unit(pole.unit).to(unit).m)
            pole.unit = unit
            self.poles[plate] = replace(pole)  # Make a copy of RotationPole object
Example #16
    def as_spherical(self):
        """Get Euler pole of tectonic plate in spherical coordinates, i.e. location in latitude and longitude and magnitude of rotation

        https://geo.libretexts.org/Courses/University_of_California_Davis/GEL_056%3A_Introduction_to_Geophysics/Geophysics_is_everywhere_in_geology.../04%3A_Plate_Tectonics/4.07%3A_Plate_Motions_on_a_Sphere

        Returns:
            Euler pole in spherical coordinates (latitude [deg], longitude [deg], magnitude of rotation [degree per million years])
        """
        return self.to_spherical(
            np.array([self.pole.wx, self.pole.wy, self.pole.wz]) *
            Unit(self.pole.unit).to("milliarcsecond per year").m)
def test_as_cartesian():
    """Test as_spherical() and to_cartesian() function
    """
    pm = PlateMotion(plate="eura", model="itrf2014")
    crt = pm.as_cartesian()
    expected_crt = np.array([pm.pole.wx, pm.pole.wy, pm.pole.wz]) * Unit(
        pm.pole.unit).to("milliarcsecond per year").m

    print(
        f"DEBUG test_as_cartesian: crt({crt[0]:.3f}, {crt[1]:.3f}, {crt[2]:.3f}); "
        f"expected_crt({expected_crt[0]:.3f}, {expected_crt[1]:.3f}, {expected_crt[2]:.3f}))"
    )
    np.testing.assert_allclose(crt, expected_crt, rtol=0, atol=1e-3)
Example #18
    def site_id(self):
        """Mandatory block.

        Content:
        *Code PT Domes____ T Station description___ Approx_lon_ Approx_lat_ App_h__
        """
        self.fid.write("+SITE/ID\n")
        self.fid.write(
            "*Code PT Domes____ T Station description___ Approx_lon_ Approx_lat_ App_h__\n"
        )
        for sta in self.dset.unique("station"):
            site_id = self.dset.meta[sta]["site_id"]
            domes = self.dset.meta[sta]["domes"]
            marker = self.dset.meta[sta]["marker"]
            height = self.dset.meta[sta]["height"]
            description = self.dset.meta[sta]["description"][0:22]
            long_deg, long_min, long_sec = Unit.rad_to_dms(
                self.dset.meta[sta]["longitude"])
            lat_deg, lat_min, lat_sec = Unit.rad_to_dms(
                self.dset.meta[sta]["latitude"])

            self.fid.write(
                " {} {:>2} {:5}{:4} {:1} {:<22} {:>3.0f} {:>2.0f} {:4.1f} {:>3.0f} {:>2.0f} {:4.1f} {:7.1f}"
                "\n".format(
                    site_id,
                    "A",
                    domes,
                    marker,
                    _TECH[self.dset.meta["tech"]],
                    description,
                    (long_deg + 360) % 360,
                    long_min,
                    long_sec,
                    lat_deg,
                    lat_min,
                    lat_sec,
                    height,
                ))
        self.fid.write("-SITE/ID\n")
Example #19
def davis_zenith_wet_delay(latitude, height, temperature, e):
    r"""Calculates zenith wet delay based on Saastamoinen/Davis model

    The total tropospheric delay for a given zenith distance :math:`z` is determined after Equation (19a) in
    Saastamoinen :cite:`saastamoinen1972`:

    .. math::
       \Delta T = 0.002277 \cdot \sec z \cdot (p + (1255/T + 0.05) \cdot e) - 1.16 \cdot \tan^2 z

    The zenith tropospheric delay is determined with :math:`z = 0`:

    .. math::
       \Delta T^z = \Delta T_h^z + \Delta T_w^z = zhd + zwd

    with the zenith hydrostatic delay :math:`zhd = 0.002277 \cdot p` and zenith wet delay :math:`zwd = 0.002277 \cdot
    (1255/T + 0.05) \cdot e`.

    A Fortran routine written by Davis also corrects for the gravity effect due to height :math:`H` and latitude
    :math:`\phi` (see http://acc.igs.org/tropo/wetsaas.f) and uses the constant :math:`0.0022768` instead of
    :math:`0.002277`, which leads to

    .. math::
       zwd = 0.0022768 \cdot (1255/T + 0.05) \cdot e / (1 - 0.00266 \cos 2 \phi - 0.00000028 H) .

    The difference between the orthometric height and the geodetic height is called geoid undulation and can reach up
    to 100 m according to Boehm et al. :cite:`boehm2007`. The influence of this height difference is not significant
    in this equation. The geodetic (ellipsoidal) height is therefore often used instead of the orthometric height.

    Args:
        latitude (numpy.ndarray):     Geodetic latitude for each observation in [rad]
        height (numpy.ndarray):       Orthometric height for each observation in [m]
        temperature (numpy.ndarray):  Temperature for each observation in [Celsius]
        e (numpy.ndarray):            Water vapor pressure for each observation in [hPa]

    Returns:
        numpy.ndarray:    Zenith wet delay for each observation in [m]
    """
    gravity_corr = saastamoinen_gravity_correction(latitude, height)

    # Zenith wet delay based on Eq. (19a) in Saastamoinen :cite:`saastamoinen1972` with additional gravity
    # correction
    zwd = 0.002_276_8 * (1255 / Unit.celsius_to_kelvin(temperature) +
                         0.05) * e / gravity_corr

    return zwd
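saastamoinen_gravity_correction is not shown in this example. A sketch consistent with the gravity term quoted in the docstring above, 1 - 0.00266 cos(2 phi) - 0.00000028 H (the actual Where implementation may differ in structure):

import numpy as np

def saastamoinen_gravity_correction(latitude, height):
    """Gravity correction factor for latitude [rad] and height [m]"""
    return 1 - 0.00266 * np.cos(2 * latitude) - 0.000_000_28 * height

# At 45 degrees latitude and sea level the correction is exactly 1 (cos 90 deg = 0)
print(saastamoinen_gravity_correction(np.radians(45.0), 0.0))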
Example #20
    def to_spherical(self, pole: np.ndarray) -> np.ndarray:
        """Convert Euler pole of tectonic plate from cartesian to spherical coordinates, i.e. location in latitude and longitude and magnitude of rotation

        https://geo.libretexts.org/Courses/University_of_California_Davis/GEL_056%3A_Introduction_to_Geophysics/Geophysics_is_everywhere_in_geology.../04%3A_Plate_Tectonics/4.07%3A_Plate_Motions_on_a_Sphere

        Args:
            pole: Euler pole array in cartesian coordinates (X, Y, Z) in [milliarcsecond per year]

        Returns:
            Euler pole in spherical coordinates (latitude [deg], longitude [deg], magnitude of rotation [degree per million years])
        """
        wx = pole[0] * Unit.milliarcsec2radian
        wy = pole[1] * Unit.milliarcsec2radian
        wz = pole[2] * Unit.milliarcsec2radian

        latitude = np.arctan2(wz, np.sqrt(wx**2 + wy**2)) * Unit.radian2degree
        longitude = np.arctan2(wy, wx) * Unit.radian2degree
        omega = np.sqrt(wx**2 + wy**2 + wz**2) * Unit("radian per year").to(
            "degree per year").m * 1000000

        return np.array([latitude, longitude, omega])
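The same conversion re-derived as a stand-alone function, with the Unit conversion factors written out as explicit constants; the pole values in the usage line are the approximate ITRF2014 Eurasia rotation pole in [mas/yr]:

import numpy as np

MAS2RAD = np.radians(1 / 3600 / 1000)  # milliarcseconds to radians

def to_spherical(pole):
    wx, wy, wz = np.asarray(pole) * MAS2RAD                         # [rad/yr]
    latitude = np.degrees(np.arctan2(wz, np.hypot(wx, wy)))
    longitude = np.degrees(np.arctan2(wy, wx))
    omega = np.degrees(np.sqrt(wx**2 + wy**2 + wz**2)) * 1_000_000  # [deg/Myr]
    return np.array([latitude, longitude, omega])

print(to_spherical([-0.085, -0.531, 0.770]))  # -> roughly (55, -99, 0.26)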
Example #21
    def to_cartesian(self, pole: np.ndarray) -> np.ndarray:
        """Convert Euler pole of tectonic plate from spherical to cartesian coordinates

        That means from location in latitude and longitude and magnitude of rotation to X, Y and Z coordinates.

        https://geo.libretexts.org/Courses/University_of_California_Davis/GEL_056%3A_Introduction_to_Geophysics/Geophysics_is_everywhere_in_geology.../04%3A_Plate_Tectonics/4.07%3A_Plate_Motions_on_a_Sphere

        Args:
            pole: Euler pole array in spherical coordinates (latitude [deg], longitude [deg], magnitude of rotation [degree per million years])

        Returns:
            Euler pole in cartesian coordinates (X, Y, Z) in [milliarcsecond/yr]
        """
        lat = pole[0] * Unit.degree2radian
        lon = pole[1] * Unit.degree2radian
        w = pole[2] * Unit("degree per year").to("radian per year").m / 1000000

        wx = w * np.cos(lat) * np.cos(lon) * Unit.radian2milliarcsecond
        wy = w * np.cos(lat) * np.sin(lon) * Unit.radian2milliarcsecond
        wz = w * np.sin(lat) * Unit.radian2milliarcsecond

        return np.array([wx, wy, wz])
Example #22
    def get_pole(self, plate: str, unit: Union[None, str] = None ) -> "RotationPole":
        """Get rotation pole object for given tectonic plate
        
        Args:
            plate: Name of tectonic plate (e.g. eura)
            model: Plate motion model name
            unit:  Define unit of rotation pole (e.g. 'radian per year', 'milliarcsecond per year')
            
        Returns:
            Instance of RotationPole dataclass for chosen tectonic plate
        """
        try:
            pole = replace(self.poles[plate])  # Make a copy of RotationPole object
        except KeyError:
            plates = ", ".join(sorted(self.poles.keys()))
            raise exceptions.UnknownSystemError(f"Tectonic plate {plate!r} unknown in plate motion model {self.name}. Use one of {plates}")

        if unit:
            for entry in ["wx", "wy", "wz", "dwx", "dwy", "dwz"]:
                setattr(pole, entry, getattr(pole, entry) * Unit(pole.unit).to(unit).m)
            pole.unit = unit
            
        return pole
Example #23
class TimeArray(TimeBase):
    """Base class for time objects. Is immutable to allow the data to be hashable"""

    cls_name = "TimeArray"
    _SCALES.setdefault(cls_name, dict())
    _unit = staticmethod(Unit.unit_factory(__name__))

    @classmethod
    def now(cls, scale="utc", fmt="datetime") -> "TimeArray":
        """Create a new time representing now"""
        jd1, jd2 = cls._formats()["datetime"].to_jds(datetime.now(),
                                                     scale=scale)
        return cls._cls_scale("utc").from_jds(jd1, jd2,
                                              fmt=fmt).to_scale(scale)

    @classmethod
    def empty_from(cls, other: "TimeArray") -> "TimeArray":
        """Create a new time of the same type as other but with empty(datetime.min) values
        """
        return _SCALES[other.scale](np.full(other.shape,
                                            fill_value=datetime.min),
                                    fmt="datetime")

    @classmethod
    def _scales(cls):
        return _SCALES.setdefault(cls.cls_name, dict())

    @classmethod
    def _formats(cls):
        return _FORMATS["TimeFormat"]

    @lru_cache()
    def to_scale(self, scale: str) -> "TimeArray":
        """Convert time to a different scale

        Returns a new TimeArray with the same time in the new scale.

        Args:
            scale:  Name of new scale.

        Returns:
            TimeArray representing the same times in the new scale.
        """
        # Don't convert if not necessary
        if scale == self.scale:
            return self

        # Raise error for unknown scales
        if scale not in self._scales():
            scales = ", ".join(self._scales())
            raise exceptions.UnknownSystemError(
                f"Scale {scale!r} unknown. Use one of {scales}")

        # Convert to new scale
        hop = (self.scale, scale)
        if hop in _CONVERSIONS:
            jd1, jd2 = _CONVERSIONS[hop](self)
            return self._scales()[scale].from_jds(jd1, jd2, self.fmt)

        if hop not in _CONVERSION_HOPS:
            _CONVERSION_HOPS[hop] = _find_conversion_hops(hop)

        converted_time = self
        for one_hop in _CONVERSION_HOPS[hop]:
            jd1, jd2 = _CONVERSIONS[one_hop](converted_time)
            converted_time = self._scales()[one_hop[-1]].from_jds(
                jd1, jd2, self.fmt)
        return converted_time

    @property
    @Unit.register(("year", ))
    @lru_cache()
    def year(self):
        if isinstance(self.datetime, datetime):
            return self.datetime.year
        return np.array([d.year for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("month", ))
    def month(self):
        if isinstance(self.datetime, datetime):
            return self.datetime.month
        return np.array([d.month for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("day", ))
    def day(self):
        if isinstance(self.datetime, datetime):
            return self.datetime.day
        return np.array([d.day for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("hour", ))
    def hour(self):
        if isinstance(self.datetime, datetime):
            return self.datetime.hour
        return np.array([d.hour for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("minute", ))
    def minute(self):
        if isinstance(self.datetime, datetime):
            return self.datetime.minute
        return np.array([d.minute for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("second", ))
    def second(self):
        if isinstance(self.datetime, datetime):
            return self.datetime.second
        return np.array([d.second for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("day", ))
    def doy(self):
        if isinstance(self.datetime, datetime):
            return self.datetime.timetuple().tm_yday
        return np.array([d.timetuple().tm_yday for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("second", ))
    def sec_of_day(self):
        """Seconds since midnight

        Note   -  Does not support leap seconds

        Returns:
            Seconds since midnight
        """
        if isinstance(self.datetime, datetime):
            return self.datetime.hour * 60 * 60 + self.datetime.minute * 60 + self.datetime.second
        return np.array([
            d.hour * 60 * 60 + d.minute * 60 + d.second for d in self.datetime
        ])

    @property
    @lru_cache()
    def mean(self):
        """Mean time

        Returns:
            Time:    Time object containing the mean time
        """
        if self.size == 1:
            return self

        return self._cls_scale(self.scale)(np.mean(self.utc.jd), fmt="jd")

    @property
    @lru_cache()
    def min(self):
        return self[np.argmin(self.jd)]

    @property
    @lru_cache()
    def max(self):
        return self[np.argmax(self.jd)]

    @property
    @lru_cache()
    def jd_int(self):
        """Integer part of Julian Day

        The split of a Julian Day into `jd1` and `jd2` is not unique. To ensure consistency, we add two properties
        `jd_int` and `jd_frac` where the integer part is guaranteed to be a "half-integer" (e.g. 2457617.5) and the
        fractional part is guaranteed to be a float in the range [0., 1.). The parts are calculated from `jd1` and
        `jd2` to preserve precision.

        Returns:
            Numpy-float scalar or array with (half-)integer part of Julian Day.
        """
        return self.jd1 - self._jd_delta

    @property
    @lru_cache()
    def jd_frac(self):
        """Fractional part of Julian Day

        See the docstring of `jd_int` for more information.

        Returns:
            Numpy-float scalar or array with fractional part of Julian Day, in the range [0., 1.).
        """
        return self.jd2 + self._jd_delta

    @property
    @lru_cache()
    def _jd_delta(self):
        """Delta between jd1 and jd_int

        This is a helper function used by `jd_int` and `jd_frac` to find the difference to `jd1` and `jd2`
        respectively. See the docstring of `jd_int` for more information.

        Returns:
            Numpy-float scalar or array with difference between `jd1` and the integer part of Julian Day.
        """
        return self.jd1 - (np.floor(self.jd - 0.5) + 0.5)

    @property
    @lru_cache()
    def mjd_int(self):
        """Integer part of Modified Julian Day

        In general, we have that MJD = JD - 2400000.5. See the docstring of `jd_int` for more information.

        Returns:
            Numpy-float scalar or array with the integer part of Modified Julian Day.
        """
        return self.jd_int - 2_400_000.5

    @property
    @lru_cache()
    def mjd_frac(self):
        """Fractional part of Modified Julian Day

        See the docstring of `jd_int` for more information. The way we have defined `jd_int` and `jd_frac` means that
        `mjd_frac` will be equal to `jd_frac`.

        Returns:
            Numpy-float scalar or array with the fractional part of Modified Julian Day, in the range [0., 1.).
        """
        return self.jd_frac

    def __add__(self, other):
        """self + other"""
        if self.scale != other.scale:
            return NotImplemented

        if isinstance(other, TimeDeltaArray):
            # time + timedelta
            jd2 = self.jd2 + other.days
            return self.from_jds(self.jd1, jd2, self.fmt)

        elif isinstance(other, TimeArray):
            # time1 + time2 does not make sense
            return NotImplemented

        return NotImplemented

    def __sub__(self, other):
        """self - other"""
        if self.scale != other.scale:
            return NotImplemented

        if isinstance(other, TimeDeltaArray):
            # time - timedelta -> time
            jd1 = self.jd1 - other.jd1
            jd2 = self.jd2 - other.jd2
            return self.from_jds(jd1, jd2, self.fmt)

        elif isinstance(other, TimeArray):
            # time - time -> timedelta
            jd1 = self.jd1 - other.jd1
            jd2 = self.jd2 - other.jd2
            fmt = "timedelta" if self.fmt == other.fmt == "datetime" else "jd"
            return _SCALES["TimeDeltaArray"][self.scale].from_jds(
                jd1, jd2, fmt)

        return NotImplemented

    # Turn off remaining arithmetic operations
    def __rsub__(self, _):
        """ other - self"""
        return NotImplemented

    def __radd__(self, _):
        """other + self"""
        return NotImplemented

    def __iadd__(self, _):
        """Immutable object does not support this operation"""
        return NotImplemented

    def __isub__(self, _):
        """Immutable object does not support this operation"""
        return NotImplemented
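The normalization done by jd_int, jd_frac and _jd_delta in a few lines of plain numpy, assuming one possible (jd1, jd2) split of an epoch:

import numpy as np

jd1, jd2 = 2457617.0, 0.75                   # one valid two-part Julian Day split
jd = jd1 + jd2

jd_delta = jd1 - (np.floor(jd - 0.5) + 0.5)  # same expression as _jd_delta
jd_int = jd1 - jd_delta                      # -> 2457617.5, a "half-integer"
jd_frac = jd2 + jd_delta                     # -> 0.25, in the range [0., 1.)
print(jd_int, jd_frac, jd_int + jd_frac == jd)  # -> 2457617.5 0.25 True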
Example #24
class Eop:
    """A class that can calculate EOP corrections.

    One instance of the `Eop`-class calculates corrections for a given set of time epochs (specified when the instance
    is created). However, all instances share a cache of results from the various functions calculating corrections.

    The class properties are also cached and since the data content of each Eop instance is static there is no need to
    reset the cache at any point.
    """

    _correction_cache = dict()

    def __init__(self,
                 eop_data,
                 time,
                 models=None,
                 pole_model=None,
                 window=4,
                 sources=None):
        """Create an Eop-instance that calculates EOP corrections for the given time epochs

        The interpolation window is based on https://hpiers.obspm.fr/iers/models/interp.f which uses 4 days.

        Args:
            eop_data (Dict):     Dictionary of tabular EOP data, typically read from file.
            time (Time):         Time epochs for which to calculate EOPs.
            models (Tuple):      Optional tuple of EOP correction models. If not given, the config setting is used.
            pole_model (String): Optional name of pole model. If not given, the config setting is used.
            window (Int):        Number of days to use as interpolation window.
            sources (List):      Optional list of EOP sources. If not given, the config setting is used.
        """
        if time.scale == "ut1":
            raise ValueError(f"Time scale of 'time' cannot be 'ut1'")
        self.window = window
        self.time = time
        self.sources = sources
        self.data = self.pick_data(eop_data, self.time, self.window, sources)
        self.calculate_leap_second_offset()

        # Figure out which correction models to use
        self.models = config.tech.eop_models.tuple if models is None else models

        if "rg_zont2" in self.models:
            self.remove_low_frequency_tides()

        # Figure out which pole model to use:
        self.pole_model = config.tech.get("eop_pole_model",
                                          value=pole_model,
                                          default=None).str
        if self.pole_model == "mean_2015":
            # Read the tabulated data needed for the model
            data = parsers.parse_key("eop_mean_pole_2015").as_dict()
            self.mean_pole_last_idx = len(data["year"]) - 1
            self.mean_pole_years = interpolate.interp1d(
                data["year"],
                data["year"],
                kind="previous",
                fill_value="extrapolate")
            self.mean_pole_idx = interpolate.interp1d(data["year"],
                                                      range(len(data["year"])),
                                                      kind="previous",
                                                      fill_value=np.nan,
                                                      bounds_error=False)
            self.mean_pole_x = interpolate.interp1d(range(len(data["x"])),
                                                    data["x"],
                                                    kind="previous",
                                                    fill_value="extrapolate")
            self.mean_pole_y = interpolate.interp1d(range(len(data["y"])),
                                                    data["y"],
                                                    kind="previous",
                                                    fill_value="extrapolate")

    @staticmethod
    def pick_data(eop_data, time, window, sources):
        """Pick out subset of eop_data relevant for the given time epochs and interpolation window

        Args:
            eop_data (Dict):   Dictionary of EOP data indexed by MJD dates.
            time (Time):       Time epochs for which to calculate EOPs.
            window (Int):      Interpolation window [days].
            sources (List):    Prioritized list of EOP sources. If not given, the config setting is used.

        Returns:
            Dict: EOP data subset to the time period needed.
        """
        if time.size == 1:
            start_time = np.floor(time.utc.mjd) - window // 2
            end_time = np.ceil(time.utc.mjd) + window // 2
        else:
            start_time = np.floor(time.utc.mjd.min()) - window // 2
            end_time = np.ceil(time.utc.mjd.max()) + window // 2

        sources = config.tech.get("eop_sources", value=sources).list
        for source in sources:
            try:
                picked_data = {
                    d: eop_data[source][d].copy()
                    for d in np.arange(start_time, end_time + 1)
                }
                eop_path = config.files.path(f"eop_{source}")
                log.debug(f"Using a priori EOP values from {eop_path} ")
                return picked_data
            except KeyError:
                pass

        # No data found if we reached this point
        paths = [str(config.files.path(f"eop_{k}")) for k in sources]
        raise exceptions.MissingDataError(
            "Not all days in the time period {:.0f} - {:.0f} MJD were found in EOP-files {}"
            "".format(start_time, end_time, ", ".join(paths)))

    def calculate_leap_second_offset(self):
        """Calculate leap second offsets for each day

        Use the difference between UTC and TAI as a proxy for the leap second offset. The leap second offset is
        calculated and stored to the EOP data-dictionary. This is used to correct for the leap second jumps when
        interpolating the UT1 - UTC values.
        """
        days = Time(np.array(list(self.data.keys())), fmt="mjd", scale="utc")
        leap_offset = np.round(
            (days.utc.mjd - days.tai.mjd) * Unit.day2seconds)
        daily_offset = {int(d): lo for d, lo in zip(days.mjd, leap_offset)}

        for d, lo in daily_offset.items():
            self.data[d]["leap_offset"] = lo

    def remove_low_frequency_tides(self):
        """Remove the effect of low frequency tides.

        Tidal variations in the Earth's rotation with periods from 5 days to 18.6 years are present in the UT1-UTC
        time series, as described in the IERS Conventions 2010 chapter 8.1. To improve the interpolation of the
        UT1-UTC time series this effect can be removed. In that case the effect needs to be added again to the final
        interpolated values.
        """
        for mjd in self.data.keys():
            t = Time(mjd, fmt="mjd", scale="utc")
            self.data[mjd]["ut1_utc"] -= iers.rg_zont2(t)[0]

    @property
    @lru_cache()
    @Unit.register("arcseconds")
    def x(self):
        """X-motion of the Celestial Intermediate Pole

        See section 5.5.1 in IERS Conventions, :cite:`iers2010`.

        Returns:
            Array: X-motion of the CIP, one value for each time epoch [arcseconds].
        """
        values = self._interpolate_table("x")
        values += self._corrections(
            ("ortho_eop", iers.ortho_eop, 0, 1e-6),
            ("pmsdnut2", iers.pmsdnut2, 0, 1e-6),
            ("hf_eop_xyu", hf_eop.hf_eop_xyu, 0, 1e-6),
        )
        return values

    @property
    @lru_cache()
    @Unit.register("arcseconds per day")
    def x_rate(self):
        """X-motion of the Celestial Intermediate Pole

        See section 5.5.1 in IERS Conventions, :cite:`iers2010`.

        Returns:
            Array: X-motion of the CIP, one value for each time epoch [arcseconds].
        """
        values = self._interpolate_table("x", derivative_order=1)
        # values += self._corrections(('ortho_eop', iers.ortho_eop, 0, 1e-6),
        #                            ('pmsdnut2', iers.pmsdnut2, 0, 1e-6))
        return values

    @property
    @lru_cache()
    @Unit.register("arcseconds")
    def x_pole(self):
        return getattr(self, f"x_{self.pole_model}")()

    #
    # x-pole models
    #
    @Unit.register("arcseconds")
    def x_secular(self):
        """Returns the x-coordinate of the secular pole

        See chapter 7 in IERS Conventions, :cite:`iers2010`.

        Returns:
            Array: Secular X-motion of the CIP, one value for each time epoch [arcseconds].
        """
        # IERS conventions 2010 v.1.2.0 (chapter 7, equation 21)
        return (55.0 + 1.677 *
                (self.time.jyear - 2000)) * Unit.milliarcsec2arcsec

    @Unit.register("arcseconds")
    def x_mean_2015(self):
        """x-coordindate of Conventional mean pole model version 2015

        Reimplementation of IERS Conventions 2010 Software function IERS_CMP_2015.F
        (ftp://maia.usno.navy.mil/conventions/2010/2010_update/chapter7/software/IERS_CMP_2015.F)

        Units: Arcseconds
        """
        epochs = self.time.jyear
        mean_pole_idx = self.mean_pole_idx(epochs)
        # Inside of tabulated data range
        in_range_idx = ~np.isnan(mean_pole_idx)
        dt = epochs[in_range_idx] - self.mean_pole_years(epochs[in_range_idx])
        idx = mean_pole_idx[in_range_idx]
        x = np.full(len(epochs), fill_value=np.nan)
        x[in_range_idx] = self.mean_pole_x(
            idx) + dt * (self.mean_pole_x(idx + 1) - self.mean_pole_x(idx))

        # Extrapolate outside of tabulated data range
        dt = epochs[~in_range_idx] - self.mean_pole_years(
            epochs[~in_range_idx])
        x[~in_range_idx] = self.mean_pole_x(self.mean_pole_last_idx) + dt * (
            self.mean_pole_x(self.mean_pole_last_idx) -
            self.mean_pole_x(self.mean_pole_last_idx - 1))
        return x

    @Unit.register("arcseconds")
    def x_mean_2010(self):
        """x-coordindate of Conventional mean pole model version 2010

        Reimplementation of IERS Conventions 2010 Software function IERS_CMP_2015.F
        (ftp://maia.usno.navy.mil/conventions/2010/2010_update/chapter7/software/IERS_CMP_2015.F)

        Units: Arcseconds
        """
        epochs = self.time.jyear
        dt = epochs - 2000.0
        idx = dt < 10
        x = np.zeros(len(epochs))
        x[idx] = 0.055_974 + 0.001_824_3 * dt[idx] + 0.000_184_13 * dt[
            idx]**2 + 0.000_007_024 * dt[idx]**3
        x[~idx] = 0.23513 + 0.007_614_1 * dt[~idx]
        return x

    @Unit.register("arcseconds")
    def x_mean_2003(self):
        """x-coordindate of Conventional mean pole model version 2003

        Reimplementation of IERS Conventions 2010 Software function IERS_CMP_2015.F
        (ftp://maia.usno.navy.mil/conventions/2010/2010_update/chapter7/software/IERS_CMP_2015.F)

        Units: Arcseconds
        """
        return 0.054 + 0.00083 * (self.time.jyear - 2000.0)

    @property
    @lru_cache()
    @Unit.register("arcseconds")
    def y_pole(self):
        return getattr(self, f"y_{self.pole_model}")()

    #
    # y-pole models
    #
    @Unit.register("arcseconds")
    def y_secular(self):
        """Returns the x-coordinate of the secular pole

        See chapter 7 in IERS Conventions, ::cite:`iers2010`:

        Returns:
            Array: Mean X-motion of the CIP, one value for each time epoch [arcseconds].
        """
        # IERS conventions 2010 v.1.2.0 (chapter 7, equation 21)
        return (320.5 + 3.460 *
                (self.time.jyear - 2000)) * Unit.milliarcsec2arcsec

    @Unit.register("arcseconds")
    def y_mean_2015(self):
        """y-coordindate of Conventional mean pole model version 2015

        Reimplementation of IERS Conventions 2010 Software function IERS_CMP_2015.F
        (ftp://maia.usno.navy.mil/conventions/2010/2010_update/chapter7/software/IERS_CMP_2015.F)

        Units: Arcseconds
        """
        epochs = self.time.jyear
        mean_pole_idx = self.mean_pole_idx(epochs)
        # Inside of tabulated data range
        in_range_idx = ~np.isnan(mean_pole_idx)
        dt = epochs[in_range_idx] - self.mean_pole_years(epochs[in_range_idx])
        idx = mean_pole_idx[in_range_idx]
        y = np.full(len(epochs), fill_value=np.nan)
        y[in_range_idx] = self.mean_pole_y(
            idx) + dt * (self.mean_pole_y(idx + 1) - self.mean_pole_y(idx))

        # Extrapolate outside of tabulated data range
        dt = epochs[~in_range_idx] - self.mean_pole_years(
            epochs[~in_range_idx])
        y[~in_range_idx] = self.mean_pole_y(self.mean_pole_last_idx) + dt * (
            self.mean_pole_y(self.mean_pole_last_idx) -
            self.mean_pole_y(self.mean_pole_last_idx - 1))
        return y

    @Unit.register("arcseconds")
    def y_mean_2010(self):
        """y-coordindate of Conventional mean pole model version 2010

        Reimplementation of IERS Conventions 2010 Software function IERS_CMP_2015.F
        (ftp://maia.usno.navy.mil/conventions/2010/2010_update/chapter7/software/IERS_CMP_2015.F)

        Units: Arcseconds
        """
        epochs = self.time.jyear
        dt = epochs - 2000.0
        idx = dt < 10
        y = np.zeros(len(epochs))
        y[idx] = 0.346_346 + 0.001_789_6 * dt[idx] - 0.000_107_29 * dt[
            idx]**2 - 0.000_000_908 * dt[idx]**3
        y[~idx] = 0.358_891 - 0.000_628_7 * dt[~idx]
        return y

    @Unit.register("arcseconds")
    def y_mean_2003(self):
        """y-coordindate of Conventional mean pole model version 2003

        Reimplementation of IERS Conventions 2010 Software function IERS_CMP_2015.F
        (ftp://maia.usno.navy.mil/conventions/2010/2010_update/chapter7/software/IERS_CMP_2015.F)

        Units: Arcseconds
        """
        return 0.357 + 0.00395 * (self.time.jyear - 2000.0)

    @property
    @lru_cache()
    @Unit.register("arcseconds")
    def y(self):
        """Y-motion of the Celestial Intermediate Pole

        See section 5.5.1 in IERS Conventions, :cite:`iers2010`.

        Returns:
            Array: Y-motion of the CIP, one value for each time epoch [arcseconds].
        """
        values = self._interpolate_table("y")
        values += self._corrections(
            ("ortho_eop", iers.ortho_eop, 1, 1e-6),
            ("pmsdnut2", iers.pmsdnut2, 1, 1e-6),
            ("hf_eop_xyu", hf_eop.hf_eop_xyu, 1, 1e-6),
        )
        return values

    @property
    @lru_cache()
    @Unit.register("arcseconds per day")
    def y_rate(self):
        """X-motion of the Celestial Intermediate Pole

        See section 5.5.1 in IERS Conventions, :cite:`iers2010`.

        Returns:
            Array: X-motion of the CIP, one value for each time epoch [arcseconds].
        """
        values = self._interpolate_table("y", derivative_order=1)
        # values += self._corrections(('ortho_eop', iers.ortho_eop, 0, 1e-6),
        #                            ('pmsdnut2', iers.pmsdnut2, 0, 1e-6))
        return values

    @property
    @lru_cache()
    @Unit.register("seconds")
    def ut1_utc(self):
        """Delta between UT1 and UTC

        See section 5.5.3 in IERS Conventions, :cite:`iers2010`. Does correction for leap second jumps before
        interpolation.

        Reapplies low frequency tides if these were removed before interpolation.

        Returns:
            Array: UT1 - UTC, one value for each time epoch [seconds].
        """
        values = self._interpolate_table("ut1_utc",
                                         leap_second_correction=True)
        values += self._corrections(
            ("ortho_eop", iers.ortho_eop, 2, 1e-6),
            ("utlibr", iers.utlibr, 0, 1e-6),
            ("hf_eop_xyu", hf_eop.hf_eop_xyu, 2, 1e-6),
            ("rg_zont2", iers.rg_zont2, 0, 1),
        )

        # low frequency tides
        # if "rg_zont2" in self.models:
        #    values += nputil.take(iers.rg_zont2(self.time), 0) # Column 0 contains ut1_utc corrections
        return values

    @property
    @lru_cache()
    @Unit.register("seconds per day")
    def ut1_utc_rate(self):
        """Delta between UT1 and UTC

        See section 5.5.3 in IERS Conventions, :cite:`iers2010`. Does correction for leap second jumps before
        interpolation.

        Reapplies low frequency tides if these were removed before interpolation.

        TODO: apply models based on eop.models
        Only works if eop.models = ()

        Returns:
            Array: Rate of UT1 - UTC, one value for each time epoch [seconds per day].
        """
        values = self._interpolate_table("ut1_utc",
                                         leap_second_correction=True,
                                         derivative_order=1)
        # values += self._corrections(("ortho_eop", iers.ortho_eop, 2, 1e-6), ("utlibr", iers.utlibr, 0, 1e-6))

        return values

    @property
    @lru_cache()
    @Unit.register("seconds")
    def lod(self):
        """Length of day

        See section 5.5.3 in IERS Conventions, :cite:`iers2010`.

        TODO: How should this be implemented? Previous implementation simply used nearest value. Is this in the
        conventions?

        Returns:
            Array: Length of day, one value for each time epoch [seconds].
        """
        values = self._interpolate_table("lod")
        return values

    @property
    @lru_cache()
    @Unit.register("arcseconds")
    def dx(self):
        """X-offset of the Celestial Intermediate Pole

        See section 5.5.? in IERS Conventions, :cite:`iers2010`.

        Returns:
            Array: X-offset of the CIP, one value for each time epoch [arcseconds].
        """
        values = self._interpolate_table("dx")
        return values

    @property
    @lru_cache()
    @Unit.register("arcseconds")
    def dy(self):
        """Y-offset of the Celestial Intermediate Pole

        See section 5.5.? in IERS Conventions, :cite:`iers2010`.

        Returns:
            Array: Y-offset of the CIP, one value for each time epoch [arcseconds].
        """
        values = self._interpolate_table("dy")

        return values

    def _interpolate_table(self,
                           key,
                           leap_second_correction=False,
                           derivative_order=0):
        """Interpolate daily values to the given time epochs

        Uses Lagrange interpolation with the given interpolation window.

        We have observed that the Lagrange interpolation introduces instabilities when the EOP data are constant (as
        for instance in the VASCC-data). In this case, we force the Lagrange polynomial to be constant.

        Args:
            key (String):                   Name of data to be interpolated, key in `self.data`.
            leap_second_correction (Bool):  Whether data should be corrected for leap seconds before interpolation.
            derivative_order (Int):         Derivative order: 0 interpolates values, 1 the first derivative.

        Returns:
            Array: Interpolated values, one value for each time epoch.
        """
        days = np.unique(self.time.utc.mjd_int)
        offsets = range(int(-np.ceil(self.window / 2) + 1),
                        int(np.floor(self.window / 2) + 1))

        if leap_second_correction:
            leap = {
                d: np.array([
                    self.data[d + o].get("leap_offset", np.nan) -
                    self.data[d]["leap_offset"] for o in offsets
                ])
                for d in days
            }
            for lo in leap.values():
                lo[np.isnan(lo)] = 0
        else:
            leap = {d: 0 for d in days}

        table_values = {
            d: np.array([self.data[d + o][key] for o in offsets]) + leap[d]
            for d in days
        }
        interpolators = {
            d: interpolate.lagrange(offsets, v)
            for d, v in table_values.items()
        }
        for poly in interpolators.values():
            # Avoid numerical instabilities for constant values
            poly.c[np.abs(poly.c) < 1e-15] = 0

        if derivative_order:
            interp_values = {
                d: np.polyder(ip, derivative_order)(self.time.utc.mjd_frac)
                for d, ip in interpolators.items()
            }
        else:
            interp_values = {
                d: ip(self.time.utc.mjd_frac)
                for d, ip in interpolators.items()
            }

        if self.time.size == 1:
            return interp_values[self.time.utc.mjd_int]

        values = np.empty(self.time.size)
        for day in days:
            idx = self.time.utc.mjd_int == day
            values[idx] = interp_values[day][idx]

        return values

    def _corrections(self, *correction_models):
        """Calculate corrections to tabular values

        The correction models are specified as tuples with name, function, output column and scale factor. Calls to the
        correction functions are cached since some correction functions are used by several EOP-values.

        Args:
            correction_models (Tuple): Specification of correction models (see above)

        Returns:
            Array: Corrections to tabular values, one value for each time epoch.
        """
        corrections = 0 if self.time.size == 1 else np.zeros(self.time.size)
        for name, correction_func, out_idx, factor in correction_models:
            if name not in self.models:
                continue

            corrections += factor * nputil.take(correction_func(self.time),
                                                out_idx)
        return corrections

    # Add methods to deal with units for Eop-properties (set by @Unit.register)
    convert_to = Unit.convert_factory(__name__)
    unit_factor = staticmethod(Unit.factor_factory(__name__))
    unit = staticmethod(Unit.unit_factory(__name__))
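The core of _interpolate_table in isolation: Lagrange interpolation over a 4-day window with the same stabilization of near-zero polynomial coefficients (the daily values here are made up for illustration):

import numpy as np
from scipy import interpolate

window = 4
offsets = range(int(-np.ceil(window / 2) + 1), int(np.floor(window / 2) + 1))  # -1, 0, 1, 2
table_values = np.array([0.1101, 0.1095, 0.1089, 0.1082])  # fictitious daily x-pole values

poly = interpolate.lagrange(list(offsets), table_values)
poly.c[np.abs(poly.c) < 1e-15] = 0  # avoid numerical instabilities for constant values

print(poly(0.5))  # interpolated value half a day past the reference day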
Example #25
    def _add_fields(self, dset, param_names):
        """Add fields to the given dataset

        Adds fields for state vectors and estimate vectors for each parameter. Parameters with names ending with an
        underscore, `_`, are not added to the dataset.

        Args:
            dset (Dataset):       The dataset.
            param_names (List):   Strings with names of parameters. Used to form field names.

        """
        # Delete values from previous iterations
        if "state" in dset.fields:
            del dset.state

        if "estimate" in dset.fields:
            del dset.estimate

        for idx, param_name in enumerate(param_names):
            if param_name.endswith("_"):
                continue

            # State vectors
            fieldname = f"state.{param_name}"
            fieldname_sigma = fieldname + "_sigma"
            value = self.x_smooth[:dset.num_obs, idx, 0]
            value_sigma = np.sqrt(self.x_hat_ferr[:dset.num_obs, idx])

            # Convert values to the display unit. It corresponds to "meter per <unit of partial>"
            partial_unit = dset.unit("partial.{}".format(param_name))
            to_unit = dset.meta["display_units"][param_name]
            from_unit = f"meter/({partial_unit[0]})"
            factor = Unit(from_unit, to_unit)
            dset.meta.add(param_name, factor, section="display_factors")
            dset.add_float(fieldname,
                           val=value * factor,
                           unit=to_unit,
                           write_level="operational")

            # The sigma field uses the same display factor as the state field
            dset.add_float(fieldname_sigma,
                           val=value_sigma * factor,
                           unit=to_unit,
                           write_level="operational")

            # Estimate vectors
            fieldname = f"estimate.{param_name}"
            value = self.h[:dset.num_obs, idx, 0] * self.x_smooth[:dset.num_obs, idx, 0]
            dset.add_float(fieldname,
                           val=value,
                           unit="meter",
                           write_level="analysis")

        value = (self.x_smooth.transpose(0, 2, 1) @ self.h)[:dset.num_obs, 0, 0]
        fieldname = "est"
        if fieldname in dset.fields:
            dset[fieldname][:] = value
        else:
            dset.add_float(fieldname,
                           val=value,
                           unit="meter",
                           write_level="operational")
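The display-unit conversion in _add_fields hinges on Unit(from_unit, to_unit) returning a plain multiplication factor. A rough equivalent using pint directly, which the Unit class here appears to wrap, looks like the sketch below; the units and state values are invented for illustration.

# Sketch of the display-factor idea, assuming Unit wraps a pint registry.
# Units and values are invented; Unit(from_unit, to_unit) is approximated
# here by a direct pint conversion.
import numpy as np
import pint

ureg = pint.UnitRegistry()

partial_unit = "second"                 # Hypothetical unit of the partial derivative
to_unit = "millimeter/second"           # Hypothetical display unit
from_unit = f"meter/({partial_unit})"   # State is estimated in meter per <partial unit>

factor = ureg(from_unit).to(to_unit).magnitude   # -> 1000.0
state = np.array([1.0e-3, 2.5e-3])               # Values in meter/second
print(state * factor)                            # -> [1.0, 2.5] in millimeter/second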
Example #26
    def unit_short(self, field):
        """Return the abbreviated unit symbols for the given field"""
        units = self.unit(field)
        if units is None:
            return tuple()
        return tuple(Unit.symbol(u) for u in units)
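unit_short simply maps full unit names to their abbreviated symbols. Assuming a pint-backed Unit.symbol, the same abbreviation can be reproduced with pint's short pretty format; the unit names below are arbitrary.

# Sketch of abbreviating unit names, assuming a pint-backed Unit.symbol.
import pint

ureg = pint.UnitRegistry()
units = ("meter", "second")
print(tuple(f"{ureg(u).units:~P}" for u in units))  # -> ('m', 's')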
Example #27
class TimeDeltaArray(TimeBase):
    """Base class for time delta objects. Is immutable to allow the data to be hashable"""

    cls_name = "TimeDeltaArray"
    _SCALES.setdefault(cls_name, dict())
    _unit = staticmethod(Unit.unit_factory(__name__))

    @classmethod
    def empty_from(cls, other: "TimeDeltaArray") -> "TimeDeltaArray":
        """Create a new time of the same type as other but with empty(datetime.min) values
        """
        return _SCALES[other.scale](np.full(other.shape,
                                            fill_value=timedelta(seconds=0)),
                                    fmt="timedelta")

    @classmethod
    def _scales(cls):
        return _SCALES.setdefault(cls.cls_name, dict())

    @classmethod
    def _formats(cls):
        return _FORMATS["TimeDeltaFormat"]

    def __add__(self, other):
        """self + other """
        if self.scale != other.scale:
            return NotImplemented

        if isinstance(other, TimeDeltaArray):
            # timedelta + timedelta -> timedelta
            jd1 = self.jd1 + other.jd1
            jd2 = self.jd2 + other.jd2
            return self.from_jds(jd1, jd2, fmt=self.fmt)

        elif isinstance(other, TimeArray):
            # timedelta + time -> time
            jd1 = self.jd1 + other.jd1
            jd2 = self.jd2 + other.jd2
            return other.from_jds(jd1, jd2, fmt=other.fmt)

        return NotImplemented

    def __sub__(self, other):
        """self - other"""
        if self.scale != other.scale:
            return NotImplemented

        if isinstance(other, TimeArray):
            # timedelta - time -> time
            jd1 = self.jd1 - other.jd1
            jd2 = self.jd2 - other.jd2
            return other.from_jds(jd1, jd2, fmt=other.fmt)

        elif isinstance(other, TimeDeltaArray):
            # timedelta - timedelta -> timedelta
            jd1 = self.jd1 - other.jd1
            jd2 = self.jd2 - other.jd2
            return self.from_jds(jd1, jd2, fmt=self.fmt)

        return NotImplemented

    # Turn off remaining arithmetic operations
    def __radd__(self, _):
        """other - self"""
        return NotImplemented

    def __rsub__(self, _):
        """other - self"""
        return NotImplemented

    def __iadd__(self, _):
        """Immutable object does not support this operation"""
        return NotImplemented

    def __isub__(self, _):
        """Immutable object does not support this operation"""
        return NotImplemented
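The jd1/jd2 pairs that all the arithmetic above operates on are a precision trick: keeping the large (whole-day) and small (fraction-of-day) parts in separate floats retains resolution that a single float64 sum would round away. A quick standalone demonstration:

# Why two-part Julian dates: a single float64 near 2.4e6 days has an ulp of
# roughly 5e-10 days (~40 microseconds), so tiny increments vanish.
jd1 = 2457023.0           # Whole days
jd2 = 0.123456789012345   # Fraction of day
delta = 1e-12             # Sub-nanosecond increment, in days

naive = jd1 + jd2
print((jd2 + delta) - jd2)       # ~1e-12: preserved in the small part
print((naive + delta) - naive)   # 0.0: lost when folded into one float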
Example #28
    def _difference(self, other, num_obs, self_idx, other_idx, copy_self_on_error=False, copy_other_on_error=False):
        """Perform the - operation for each field in self and other"""
        result = self.__class__()
        for fieldname, field in self._fields.items():
            if fieldname in other._fields:
                try:
                    # Conversion factors that bring each column of other's field into self's unit
                    factors = [Unit(_from, _to) for _to, _from in zip(field._unit, other._fields[fieldname]._unit)]
                except TypeError:
                    factors = None
                except exceptions.UnitError as err:
                    raise ValueError(f"Cannot compute difference for field `{fieldname}`: {err}")
                try:
                    if factors:
                        difference = self[fieldname][self_idx] - other[fieldname][other_idx] * np.array(factors)
                    else:
                        difference = self[fieldname][self_idx] - other[fieldname][other_idx]
                    fieldtype = fieldtypes.fieldtype(difference)
                    func = fieldtypes.function(fieldtype)
                    field = func(
                        num_obs=num_obs,
                        name=fieldname,
                        val=difference,
                        unit=field._unit,
                        write_level=field._write_level.name,
                    )
                    result.add_field(fieldname, field)
                except IndexError:
                    # fieldname is a collection
                    collection = self[fieldname]._difference(
                        other[fieldname],
                        num_obs,
                        self_idx,
                        other_idx,
                        copy_self_on_error=copy_self_on_error,
                        copy_other_on_error=copy_other_on_error,
                    )
                    fieldtype = fieldtypes.fieldtype(collection)
                    func = fieldtypes.function(fieldtype)
                    field = func(
                        num_obs=num_obs,
                        name=fieldname,
                        val=collection,
                        unit=field._unit,
                        write_level=field._write_level.name,
                    )
                    result.add_field(fieldname, field)
                except TypeError:
                    # Fields that do not support the - operator
                    if copy_self_on_error:
                        index_data = self[fieldname][self_idx]
                        fieldtype = fieldtypes.fieldtype(index_data)
                        func = fieldtypes.function(fieldtype)
                        self_fieldname = f"{fieldname}_self"
                        field = func(
                            num_obs=num_obs,
                            name=self_fieldname,
                            val=index_data,
                            unit=field._unit,
                            write_level=field._write_level.name,
                        )
                        result.add_field(self_fieldname, field)
                    if copy_other_on_error:
                        index_data = other[fieldname][other_idx]
                        fieldtype = fieldtypes.fieldtype(index_data)
                        func = fieldtypes.function(fieldtype)
                        other_fieldname = f"{fieldname}_other"
                        field = func(
                            num_obs=num_obs,
                            name=other_fieldname,
                            val=index_data,
                            unit=other._fields[fieldname]._unit,
                            write_level=other._fields[fieldname]._write_level.name,
                        )
                        result.add_field(other_fieldname, field)

        return result
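_difference only subtracts a field directly when the units can be reconciled; the factors list converts each column of other into self's units before subtracting. A condensed sketch of that conversion step, with invented field values and units, and a pint conversion standing in for Unit(_from, _to):

# Condensed sketch of unit-aware differencing: convert `other` into `self`'s
# unit before subtracting. Values and units are invented for illustration.
import numpy as np
import pint

ureg = pint.UnitRegistry()

self_vals, self_unit = np.array([1.0, 2.0, 3.0]), "meter"
other_vals, other_unit = np.array([100.0, 150.0, 200.0]), "centimeter"

factor = ureg(other_unit).to(self_unit).magnitude  # -> 0.01
difference = self_vals - other_vals * factor
print(difference)  # -> [0.  0.5 1. ] in meter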