def test_read_write_3():
    """Test references after write and then read when attributes are not dataset fields.

    Builds a dataset whose position/posvel fields carry non-field attributes
    (``other``, ``ref_pos``), round-trips it through an HDF5 file and checks
    every field (recursively for collections) for equality.
    """
    _dset = dataset.Dataset(2)
    ref_pos = position.Position([[1, 2, 3], [1, 2, 3]], system="trs")
    ref_pos2 = position.PosVel([[1, 2, 3, 1, 1, 1], [1, 2, 3, 2, 2, 2]], system="trs")
    other = position.Position([[7, 8, 9], [7, 8, 9]], system="trs")
    other2 = position.PosVel([[1, 2, 3, 1, 2, 3], [1, 2, 3, 4, 5, 6]], system="trs")

    _dset.add_position("testpos", [[4, 5, 6], [4, 5, 6]], system="trs", other=other)
    _dset.add_position_delta(
        "testposdelta", [[0.1, 0.1, 0.1], [0.2, 0.2, 0.2]], system="trs", ref_pos=ref_pos
    )
    _dset.add_posvel(
        "testposvel", [[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]], system="trs", other=other2
    )
    _dset.add_posvel_delta(
        "testposveldelta", [[4, 4, 4, 1, 1, 1], [5, 5, 5, 2, 2, 2]], system="trs", ref_pos=ref_pos2
    )

    file_name = "test.hdf5"
    _dset.write(file_name)
    try:
        _dset_new = dataset.Dataset.read(file_name)

        def test_field(field, new_field):
            try:
                if field.data.dtype.type is np.str_:
                    assert np.char.equal(field.data, new_field.data).all()
                else:
                    assert np.equal(np.asarray(field.data), np.asarray(new_field.data)).all()
            except AttributeError:
                # field is a collection: compare each contained field recursively
                for collection_field_name, collection_field in field.data._fields.items():
                    new_collection_field = new_field.data._fields[collection_field_name]
                    test_field(collection_field, new_collection_field)

        for field_name, field in _dset._fields.items():
            print(f"Testing {field_name}")
            new_field = _dset_new._fields[field_name]
            test_field(field, new_field)
    finally:
        # Remove the temporary file even when an assertion above fails, so a
        # failing run does not leave test.hdf5 behind for the next run.
        os.remove(file_name)
def test_cache():
    """Verify that the elevation cache is reset when the other position changes."""
    main_pos = position.Position([1, 2, 3], system="trs")
    target_a = position.Position([-4, 5, 2], system="trs")
    target_b = position.Position([7, -8, 5], system="trs")

    main_pos.other = target_a
    elevation_a = main_pos.elevation

    # Other position is changed and elevation cache should have been reset
    main_pos.other = target_b
    elevation_b = main_pos.elevation
    assert not np.isclose(elevation_a, elevation_b)

    # Value of other position is changed and elevation cache should have been reset
    target_b[0] = 0
    elevation_c = main_pos.elevation
    assert not np.isclose(elevation_b, elevation_c)
def as_dataset(self, ref_pos: Union[np.ndarray, List[float]]) -> "Dataset":
    """Return the parsed data as a Dataset

    Args:
        ref_pos: Reference position given in terrestrial reference system and meters

    Returns:
        A dataset containing the data.
    """
    # Initialize dataset
    dset = dataset.Dataset()
    if not self.data:
        # BUGFIX: the original message was missing the f-prefix, so the
        # "{self.file_path}" placeholder was written out literally.
        log.warn(f"No data in {self.file_path}.")
        return dset
    dset.num_obs = len(self.data["date"])

    # Add position delta (millimeter -> meter) relative to the given reference position
    ref_pos = position.Position(np.repeat(np.array([ref_pos]), dset.num_obs, axis=0), system="trs")
    dset.add_position_delta(
        name="pos",
        val=np.stack((self.data["east"], self.data["north"], self.data["vertical"]), axis=1)
        * Unit.millimeter2meter,
        system="enu",
        ref_pos=ref_pos,
    )

    # Add position sigma (millimeter -> meter)
    sigma = np.stack(
        (self.data["east_sigma"], self.data["north_sigma"], self.data["vertical_sigma"]), axis=1
    )
    dset.add_sigma(name="pos_sigma", val=dset.pos.val, sigma=sigma * Unit.millimeter2meter, unit="meter")

    # Add time
    dset.add_time(
        name="time", val=self.data["year"], scale="utc", fmt="decimalyear", write_level="operational"
    )

    return dset
def test_slice_and_columns():
    """Check column access, indexing and slicing of position (delta) arrays."""
    # Station coordinates (trs, meters):
    #   Ny-Ålesund 1202462.5677   252734.4956 6237766.1746
    #   Wettzell   4075539.6734   931735.4828 4801629.4955
    #   Westford   1492404.5274 -4457266.5326 4296881.8189
    _other = position.Position([[1, 2, 3], [4, 5, 6], [7, 8, 9]], system="trs")
    _pos = position.Position(
        [
            [1_202_462.5677, 252_734.4956, 6_237_766.1746],
            [4_075_539.6734, 931_735.4828, 4_801_629.4955],
            [1_492_404.5274, -4_457_266.5326, 4_296_881.8189],
        ],
        system="trs",
        other=_other,
    )
    _posdelta = position.PositionDelta(
        [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]], system="enu", ref_pos=_pos
    )

    # Column access
    assert np.equal(_pos.x, np.array([1_202_462.5677, 4_075_539.6734, 1_492_404.5274])).all()
    assert np.equal(_posdelta.east, np.array([0.1, 0.4, 0.7])).all()

    # Integer indexing (including negative indices)
    assert np.equal(_pos[0].val, np.array([1_202_462.5677, 252_734.4956, 6_237_766.1746])).all()
    assert np.equal(_pos[-1].val, np.array([1_492_404.5274, -4_457_266.5326, 4_296_881.8189])).all()
    assert np.equal(_posdelta[1].val, np.array([0.4, 0.5, 0.6])).all()

    # Slicing
    assert np.equal(
        _pos[1:].val,
        np.array(
            [
                [4_075_539.6734, 931_735.4828, 4_801_629.4955],
                [1_492_404.5274, -4_457_266.5326, 4_296_881.8189],
            ]
        ),
    ).all()

    # Attached attributes follow indexing and slicing
    assert np.equal(_pos[0].other.val, np.array([1, 2, 3])).all()
    assert np.equal(_pos[-1].other.val, np.array([7, 8, 9])).all()
    assert np.equal(_pos[1:].other.val, np.array([[4, 5, 6], [7, 8, 9]])).all()
    assert np.equal(
        _posdelta[1].ref_pos.val, np.array([4_075_539.6734, 931_735.4828, 4_801_629.4955])
    ).all()
    assert np.equal(_posdelta[1].ref_pos.other.val, np.array([4, 5, 6])).all()
def posdelta_trs_s():
    """A random single position delta in the terrestrial reference system."""
    origin = position.Position(np.random.random((3,)) * 6.3e6, system="trs")
    offset = np.random.random((3,))
    return position.PositionDelta(offset, system="trs", ref_pos=origin)
def pos_trs_s():
    """A random single position in the terrestrial reference system."""
    coords = np.random.random((3,)) * 6.3e6
    return position.Position(coords, system="trs")
def test_math():
    """Test addition and subtraction of position, posvel and delta arrays.

    Checks both operand orders and that each operation produces the expected
    result class (e.g. Position - Position -> PositionDelta).
    """
    _pos = position.Position([[1, 2, 3], [4, 5, 6], [7, 8, 9]], system="trs")
    _pos2 = position.Position([[1, 1, 1], [2, 2, 2], [3, 3, 3]], system="trs")
    _posdelta = position.PositionDelta(
        [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]], system="trs", ref_pos=_pos
    )
    _posdelta2 = position.PositionDelta(
        [[0.1, 0.1, 0.1], [0.4, 0.4, 0.4], [0.7, 0.7, 0.7]], system="trs", ref_pos=_pos
    )
    _posvel = position.PosVel([1, 2, 3, 0.1, 0.2, 0.3], system="trs")
    _posvel2 = position.PosVel([1, 1, 1, 0.1, 0.1, 0.1], system="trs")
    _posveldelta = position.PosVelDelta(
        [0.1, 0.2, 0.3, 0.01, 0.02, 0.03], system="trs", ref_pos=_posvel
    )
    _posveldelta2 = position.PosVelDelta(
        [0.1, 0.1, 0.1, 0.01, 0.01, 0.01], system="trs", ref_pos=_posvel
    )

    # Positions
    # Position + PositionDelta -> Position (both operand orders)
    new_pos = _pos + _posdelta
    np.testing.assert_almost_equal(new_pos[0].val, [1.1, 2.2, 3.3])
    assert new_pos.cls_name == "PositionArray"

    new_pos = _posdelta + _pos
    np.testing.assert_almost_equal(new_pos[0].val, [1.1, 2.2, 3.3])
    assert new_pos.cls_name == "PositionArray"

    # Position - Position -> PositionDelta
    new_pos2 = _pos - _pos2
    np.testing.assert_almost_equal(new_pos2.val, [[0, 1, 2], [2, 3, 4], [4, 5, 6]])
    assert new_pos2.cls_name == "PositionDeltaArray"

    # Position - PositionDelta -> Position (sign flips with operand order)
    new_pos3 = _pos - _posdelta
    np.testing.assert_almost_equal(new_pos3[0].val, [0.9, 1.8, 2.7])
    assert new_pos3.cls_name == "PositionArray"

    new_pos3 = _posdelta - _pos
    np.testing.assert_almost_equal(new_pos3[0].val, [-0.9, -1.8, -2.7])
    assert new_pos3.cls_name == "PositionArray"

    # PositionDelta - PositionDelta -> PositionDelta
    new_posdelta = _posdelta - _posdelta2
    np.testing.assert_almost_equal(new_posdelta.val, [[0, 0.1, 0.2], [0, 0.1, 0.2], [0, 0.1, 0.2]])
    assert new_posdelta.cls_name == "PositionDeltaArray"

    # PosVels
    # PosVel + PosVelDelta -> PosVel (both operand orders)
    new_posvel = _posvel + _posveldelta
    np.testing.assert_almost_equal(new_posvel.val, [1.1, 2.2, 3.3, 0.11, 0.22, 0.33])
    assert new_posvel.cls_name == "PosVelArray"

    new_posvel = _posveldelta + _posvel
    np.testing.assert_almost_equal(new_posvel.val, [1.1, 2.2, 3.3, 0.11, 0.22, 0.33])
    assert new_posvel.cls_name == "PosVelArray"

    # PosVel - PosVel -> PosVelDelta
    new_posvel2 = _posvel - _posvel2
    np.testing.assert_almost_equal(new_posvel2.val, [0, 1, 2, 0, 0.1, 0.2])
    assert new_posvel2.cls_name == "PosVelDeltaArray"

    # PosVel - PosVelDelta -> PosVel (sign flips with operand order)
    new_posvel3 = _posvel - _posveldelta
    np.testing.assert_almost_equal(new_posvel3.val, [0.9, 1.8, 2.7, 0.09, 0.18, 0.27])
    assert new_posvel3.cls_name == "PosVelArray"

    new_posvel3 = _posveldelta - _posvel
    np.testing.assert_almost_equal(new_posvel3.val, [-0.9, -1.8, -2.7, -0.09, -0.18, -0.27])
    assert new_posvel3.cls_name == "PosVelArray"

    # PosVelDelta - PosVelDelta -> PosVelDelta
    new_posveldelta = _posveldelta - _posveldelta2
    np.testing.assert_almost_equal(new_posveldelta.val, [0, 0.1, 0.2, 0, 0.01, 0.02])
    assert new_posveldelta.cls_name == "PosVelDeltaArray"
def as_dataset(self, ref_pos: Union[np.ndarray, List[float]] = (0.0, 0.0, 0.0)) -> "Dataset":
    """Return the parsed data as a Dataset

    Args:
        ref_pos: Reference position given in terrestrial reference system and meters

    Returns:
        Midgard Dataset where timeseries data are stored with following fields:

       | Field               | Type          | Description                                            |
       |---------------------|---------------|--------------------------------------------------------|
       | obs.dpos            | PositionDelta | Position delta object referred to a reference position |
       | obs.dpos_sigma_east | numpy.array   | Standard deviation of east position                    |
       | obs.dpos_sigma_north| numpy.array   | Standard deviation of north position                   |
       | obs.dpos_sigma_up   | numpy.array   | Standard deviation of up position                      |
       | time                | Time          | Parameter time given as TimeTable object               |
    """
    # Initialize dataset
    dset = dataset.Dataset()
    if not self.data:
        # BUGFIX: the original message was missing the f-prefix, so the
        # "{self.file_path}" placeholder was written out literally.
        log.warn(f"No data in {self.file_path}.")
        return dset
    dset.num_obs = len(self.data["decimalyear"])
    dset.meta.update(self.meta)

    # Add position delta relative to the reference position.
    # NOTE: default was a mutable list; a tuple is equivalent here since it is
    # only ever fed to np.array().
    ref_pos = position.Position(np.repeat(np.array([ref_pos]), dset.num_obs, axis=0), system="trs")
    dset.add_position_delta(
        name="obs.dpos",
        val=np.stack((self.data["east"], self.data["north"], self.data["vertical"]), axis=1),
        system="enu",
        ref_pos=ref_pos,
    )

    # TODO: sigma functionality has to be improved: dpos_sigma.enu.east, dpos_sigma.trs.x
    ## Add position sigma
    # sigma = np.stack((self.data["east_sigma"], self.data["north_sigma"], self.data["vertical_sigma"]), axis=1)
    # dset.add_sigma(name="dpos_sigma", val=dset.dpos.val, sigma=sigma, unit="meter")
    dset.add_float(name="obs.dpos_sigma_east", val=self.data["east_sigma"], unit="meter")
    dset.add_float(name="obs.dpos_sigma_north", val=self.data["north_sigma"], unit="meter")
    dset.add_float(name="obs.dpos_sigma_up", val=self.data["vertical_sigma"], unit="meter")

    # Add time
    dset.add_time(
        name="time",
        val=self.data["decimalyear"],
        scale="utc",
        fmt="decimalyear",
        write_level="operational",
    )

    return dset
def as_dataset(self, ref_pos: Union[np.ndarray, List[float], None] = None) -> "Dataset":
    """Return the parsed data as a Dataset

    Args:
        ref_pos: Reference position given in terrestrial reference system and meters

    Returns:
        Midgard Dataset where GALAT result data are stored with following fields:

       | Field                   | Type          | Description                                              |
       |-------------------------|---------------|----------------------------------------------------------|
       | hpe                     | np.ndarray    | Horizontal Position Error of site position vs. reference |
       |                         |               | position                                                 |
       | num_satellite_available | np.ndarray    | Number of available satellites                           |
       | num_satellite_used      | np.ndarray    | Number of used satellites                                |
       | pdop                    | np.ndarray    | Position dilution of precision                           |
       | site_pos                | Position      | Site position                                            |
       | site_pos_vs_ref         | PositionDelta | Site position versus reference coordinate                |
       | site_vel_3d             | np.ndarray    | 3D site velocity                                         |
       | time                    | Time          | Parameter time given as TimeTable object                 |
       | vpe                     | np.ndarray    | Vertical Position Error of site position vs. reference   |
       |                         |               | position                                                 |
    """
    fields = {
        #"hpe": "meter",  # Recalculated based on site position and given reference coordinate
        #"vpe": "meter",  # Recalculated based on site position and given reference coordinate
        "site_vel_3d": "meter/second",
        "pdop": "",
        "num_satellite_available": "",
        "num_satellite_used": "",
    }

    # Initialize dataset
    dset = dataset.Dataset()
    if not self.data:
        # BUGFIX: the original message was missing the f-prefix, so the
        # "{self.file_path}" placeholder was written out literally.
        log.warn(f"No data in {self.file_path}.")
        return dset
    dset.num_obs = len(self.data["time"])

    # Add time field
    dset.add_time(
        "time",
        val=self.data["time"],
        scale="gps",
        fmt="datetime",
    )

    # Add float fields
    for field in fields.keys():
        dset.add_float(name=field, val=self.data[field], unit=fields[field])

    # Add site position field (latitude/longitude in radians, height in meters)
    dset.add_position(
        "site_pos",
        val=np.stack(
            (
                self.data["latitude"] * Unit.deg2rad,
                self.data["longitude"] * Unit.deg2rad,
                self.data["height"],
            ),
            axis=1,
        ),
        system="llh",
    )

    # Use either reference position from RINEX header or given argument as reference position
    if ref_pos is None:
        ref_pos = position.Position(
            np.repeat(
                np.array([[self.meta["pos_x"], self.meta["pos_y"], self.meta["pos_z"]]]),
                dset.num_obs,
                axis=0,
            ),
            system="trs",
        )
    else:
        ref_pos = position.Position(
            np.repeat(np.array([ref_pos]), dset.num_obs, axis=0), system="trs"
        )

    # Add relative position
    dset.add_position_delta(
        name="site_pos_vs_ref",
        val=(dset.site_pos.trs - ref_pos.trs).val,
        system="trs",
        ref_pos=ref_pos,
    )

    # Add HPE and VPE to dataset
    dset.add_float(
        "hpe",
        val=np.sqrt(dset.site_pos_vs_ref.enu.east**2 + dset.site_pos_vs_ref.enu.north**2),
        unit="meter",
    )
    dset.add_float("vpe", val=np.absolute(dset.site_pos_vs_ref.enu.up), unit="meter")

    return dset
def _generate_dataframes(dset: Dict[str, "Dataset"]) -> Dict[str, pd.core.frame.DataFrame]:
    """Generate dataframe based on station datasets

    The dataframe for each station in dictionary "dfs" has following columns:

        east:    East-coordinate in topocentric system
        north:   North-coordinate in topocentric system
        up:      Up-coordinate in topocentric system
        hpe:     horizontal position error
        vpe:     vertical position error
        pos_3d:  3D position error
        pdop:    position dilution of precision
        hdop:    horizontal dilution of precision
        vdop:    vertical dilution of precision

    Example for "dfs" dictionary:

        'hons':                date         hpe       vpe      east     north        up
                0      2019-03-01 00:00:00  0.301738  0.057244  0.113758  0.279472  0.057244
                1      2019-03-01 00:00:00  0.301738  0.057244  0.113758  0.279472  0.057244

        'krss':                date         hpe       vpe      east      north       up
                0      2019-03-01 00:00:00  0.710014  0.186791 -0.235267  0.669903  0.186791
                1      2019-03-01 00:00:00  0.710014  0.186791 -0.235267  0.669903  0.186791

    Example for "df_day" (daily 95th-percentile samples, one row per station per day):

                            pdop      hdop      vdop  ...       vpe    pos_3d station
            date                                      ...
            2020-07-01  2.523569  1.371987  2.135124  ...  0.752227  0.870759    krss

    Example for "df_month" (monthly 95th-percentile samples, "Jul-2020"-style index):

                          pdop      hdop      vdop  ...       vpe    pos_3d station
            Jul-2020  2.523569  1.371987  2.135124  ...  0.752227  0.870759    krss

    Example for "dfs_day_field" / "dfs_month_field" (per-field dataframes with stations
    as columns):

        'hpe':                 nabf      vegs      hons      krss
               date
               2019-03-01  1.368875  0.935687  1.136763  0.828754
               2019-03-02  0.924839  0.728280  0.911677  0.854832

    Args:
        dset: Dictionary with station name as keys and the belonging Dataset as value

    Returns:
        Tuple with following entries:

        | Element         | Description                                                                          |
        |-----------------|--------------------------------------------------------------------------------------|
        | dfs             | Dictionary with station name as keys and the belonging dataframe as value with       |
        |                 | following dataframe columns: east, north, up, hpe, vpe, pos_3d                       |
        | df_day          | Dataframe with daily entries with columns like date, station, hpe, vpe, ...          |
        | df_month        | Dataframe with monthly entries with columns like date, station, hpe, vpe, ...        |
        | dfs_day_field   | Dictionary with fields as keys (e.g. hpe, vpe) and the belonging dataframe as value  |
        |                 | with DAILY samples of 95th percentile and stations as columns.                       |
        | dfs_month_field | Dictionary with fields as keys (e.g. hpe, vpe) and the belonging dataframe as value  |
        |                 | with MONTHLY samples of 95th percentile and stations as columns.                     |
    """
    dsets = dset
    dfs = {}
    df_day = pd.DataFrame()
    dfs_day_field = {
        "hpe": pd.DataFrame(),
        "vpe": pd.DataFrame(),
        "pos_3d": pd.DataFrame(),
        "pdop": pd.DataFrame(),
        "hdop": pd.DataFrame(),
        "vdop": pd.DataFrame(),
    }
    df_month = pd.DataFrame()
    dfs_month_field = {
        "hpe": pd.DataFrame(),
        "vpe": pd.DataFrame(),
        "pos_3d": pd.DataFrame(),
        "pdop": pd.DataFrame(),
        "hdop": pd.DataFrame(),
        "vdop": pd.DataFrame(),
    }

    for station, dset in dsets.items():
        if dset.num_obs == 0:
            log.warn(f"Dataset '{station}' is empty.")
            continue

        # Determine topocentric coordinates (east, north, up)
        ref_pos = position.Position(
            np.repeat(
                np.array([dset.meta["pos_x"], dset.meta["pos_y"], dset.meta["pos_z"]])[None, :],
                dset.num_obs,
                axis=0,
            ),
            system="trs",
        )

        if not "enu" in dset.fields:
            dset.add_position_delta(
                name="enu",
                val=(dset.site_pos.trs - ref_pos).val,
                system="trs",
                ref_pos=ref_pos,
            )

        # TODO: Maybe it is not necessary to introduce enu, hpe and vpe to dataset
        #       Maybe better to introduce fields in estimate stage already.
        if not "hpe" in dset.fields:
            hpe = np.sqrt(dset.enu.enu.east**2 + dset.enu.enu.north**2)
            dset.add_float("hpe", val=hpe)

        if not "vpe" in dset.fields:
            vpe = np.absolute(dset.enu.enu.up)
            dset.add_float("vpe", val=vpe)

        if not "pos_3d" in dset.fields:
            pos_3d = np.sqrt(dset.enu.enu.east**2 + dset.enu.enu.north**2 + dset.enu.enu.up**2)
            dset.add_float("pos_3d", val=pos_3d)

        # Determine dataframe
        df = dset.as_dataframe(
            fields=["enu.enu", "time", "hpe", "vpe", "pos_3d", "pdop", "vdop", "hdop"]
        )
        df = df.rename(
            columns={"enu_enu_0": "east", "enu_enu_1": "north", "enu_enu_2": "up", "time": "date"}
        )

        if df.empty:
            continue
        else:
            # Save data in dictionaries
            dfs.update({station: df})

            # BUGFIX: resample(..., how=...) was removed in pandas 0.25; use
            # .resample(...).apply(...) instead (same 95th-percentile aggregation).
            df_day_tmp = df.set_index("date").resample("D").apply(
                lambda x: np.nanpercentile(x, q=95)
            )
            for field in dfs_day_field.keys():
                if dfs_day_field[field].empty:
                    dfs_day_field[field][station] = df_day_tmp[field]
                else:
                    dfs_day_field[field] = pd.concat(
                        [dfs_day_field[field], df_day_tmp[field]], axis=1
                    )
                    dfs_day_field[field] = dfs_day_field[field].rename(columns={field: station})

            df_day_tmp["station"] = np.repeat(station, df_day_tmp.shape[0])
            df_day = pd.concat([df_day, df_day_tmp], axis=0)

            df_month_tmp = df.set_index("date").resample("M").apply(
                lambda x: np.nanpercentile(x, q=95)
            )
            df_month_tmp.index = df_month_tmp.index.strftime("%b-%Y")
            for field in dfs_month_field.keys():
                dfs_month_field[field][station] = df_month_tmp[field]

            df_month_tmp["station"] = np.repeat(station, df_month_tmp.shape[0])
            df_month = pd.concat([df_month, df_month_tmp], axis=0)

    df_month.index.name = "date"

    return dfs, df_day, df_month, dfs_day_field, dfs_month_field
def _generate_dataframes(dset: Dict[str, "Dataset"]) -> Dict[str, pd.core.frame.DataFrame]:
    """Generate dataframe based on station datasets

    The dataframe for each station in dictionary "dfs" has following columns:

        east:    East-coordinate in topocentric system
        north:   North-coordinate in topocentric system
        up:      Up-coordinate in topocentric system
        hpe:     horizontal position error
        vpe:     vertical position error
        pos_3d:  3D position error
        pdop:    position dilution of precision
        hdop:    horizontal dilution of precision
        vdop:    vertical dilution of precision

    Example for "dfs" dictionary:

        'hons':              time_gps       hpe       vpe      east     north        up
                0      2019-03-01 00:00:00  0.301738  0.057244  0.113758  0.279472  0.057244
                1      2019-03-01 00:00:00  0.301738  0.057244  0.113758  0.279472  0.057244

        'krss':              time_gps       hpe       vpe      east      north       up
                0      2019-03-01 00:00:00  0.710014  0.186791 -0.235267  0.669903  0.186791
                1      2019-03-01 00:00:00  0.710014  0.186791 -0.235267  0.669903  0.186791

    Example for "dfs_day" dictionary for "mean" key:

        'mean':{
             'hpe':                  nabf      vegs      hons      krss
                    time_gps
                    2019-03-01   1.368875  0.935687  1.136763  0.828754
                    2019-03-02   0.924839  0.728280  0.911677  0.854832

             'vpe':                  nabf      vegs      hons      krss
                    time_gps
                    2019-03-01   1.715893  1.147265  1.600330  0.976541
                    2019-03-02   1.533437  1.307373  1.476295  1.136991
        }

    Example for "dfs_month" dictionary for "mean" key:

        'mean':{
            'hpe':               nabf      vegs      hons      krss
                    Mar-2019  1.186240  0.861718  1.095827  1.021354
                    Apr-2019  0.891947  0.850343  0.977908  0.971099

            'vpe':               nabf      vegs      hons      krss
                    Mar-2019  1.854684  1.291406  1.450466  1.225467
                    Apr-2019  1.964404  1.706507  1.687994  1.500742
        }

    Args:
        dset: Dictionary with station name as keys and the belonging Dataset as value

    Returns:
        Tuple with following entries:

        | Element   | Description                                                                          |
        |-----------|--------------------------------------------------------------------------------------|
        | dfs       | Dictionary with station name as keys and the belonging dataframe as value with       |
        |           | following dataframe columns: east, north, up, hpe, vpe, pos_3d                       |
        | dfs_day   | Dictionary with function type as keys ('mean', 'percentile', 'rms', 'std') and a     |
        |           | dictionary as values. The dictionary has fields as keys (e.g. hpe, vpe) and the      |
        |           | belonging dataframe as value with DAILY samples of 95th percentile and stations as   |
        |           | columns.                                                                             |
        | dfs_month | Dictionary with function type as keys ('mean', 'percentile', 'rms', 'std') and a     |
        |           | dictionary as values. The dictionary has fields as keys (e.g. hpe, vpe) and the      |
        |           | belonging dataframe as value with MONTHLY samples of 95th percentile and stations as |
        |           | columns.                                                                             |
    """
    dsets = dset
    dfs = {}
    # One empty dataframe per output field; deep-copied below so each statistic
    # ('mean', 'percentile', 'std', 'rms') gets an independent set.
    fields = {
        "east": pd.DataFrame(),
        "north": pd.DataFrame(),
        "up": pd.DataFrame(),
        "hpe": pd.DataFrame(),
        "vpe": pd.DataFrame(),
        "pos_3d": pd.DataFrame(),
        "pdop": pd.DataFrame(),
        "hdop": pd.DataFrame(),
        "vdop": pd.DataFrame(),
    }
    dfs_day = {
        "mean": copy.deepcopy(fields),
        "percentile": copy.deepcopy(fields),
        "std": copy.deepcopy(fields),
        "rms": copy.deepcopy(fields),
    }
    dfs_month = {
        "mean": copy.deepcopy(fields),
        "percentile": copy.deepcopy(fields),
        "std": copy.deepcopy(fields),
        "rms": copy.deepcopy(fields),
    }

    for station, dset in dsets.items():
        if dset.num_obs == 0:
            log.warn(f"Dataset '{station}' is empty.")
            continue

        # Determine topocentric coordinates (east, north, up)
        ref_pos = position.Position(
            np.repeat(
                np.array([dset.meta["pos_x"], dset.meta["pos_y"], dset.meta["pos_z"]])[None, :],
                dset.num_obs,
                axis=0,
            ),
            system="trs",
        )

        if not "enu" in dset.fields:
            dset.add_position_delta(
                name="enu",
                val=(dset.site_pos.trs - ref_pos).val,
                system="trs",
                ref_pos=ref_pos,
                write_level="operational",
            )

        # TODO: Maybe it is not necessary to introduce enu, hpe and vpe to dataset
        #       Maybe better to introduce fields in estimate stage already.
        if not "hpe" in dset.fields:
            hpe = np.sqrt(dset.enu.enu.east**2 + dset.enu.enu.north**2)
            dset.add_float(
                "hpe",
                val=hpe,
                write_level="operational",
            )

        if not "vpe" in dset.fields:
            vpe = np.absolute(dset.enu.enu.up)
            dset.add_float(
                "vpe",
                val=vpe,
                write_level="operational",
            )

        if not "pos_3d" in dset.fields:
            pos_3d = np.sqrt(dset.enu.enu.east**2 + dset.enu.enu.north**2 + dset.enu.enu.up**2)
            dset.add_float(
                "pos_3d",
                val=pos_3d,
                write_level="operational",
            )

        # Determine dataframe
        df = dset.as_dataframe(
            fields=["enu.enu", "time.gps", "hpe", "vpe", "pos_3d", "pdop", "vdop", "hdop"]
        )
        df = df.rename(columns={"enu_enu_0": "east", "enu_enu_1": "north", "enu_enu_2": "up"})

        if df.empty:
            continue
        else:
            # Save data in dictionaries
            dfs.update({station: df})

            for type_ in dfs_day.keys():
                # Daily samples aggregated with the statistic given by 'type_'
                # (delegated to the external _apply helper)
                df_day = _apply(df, "D", type_)
                for field in fields.keys():
                    if dfs_day[type_][field].empty:
                        # First station: the column can be assigned directly
                        dfs_day[type_][field][station] = df_day[field]
                    else:
                        # Subsequent stations: concatenate, then rename the new
                        # column from the field name to the station name
                        dfs_day[type_][field] = pd.concat(
                            [dfs_day[type_][field], df_day[field]], axis=1
                        )
                        dfs_day[type_][field] = dfs_day[type_][field].rename(
                            columns={field: station}
                        )

                # Monthly samples with a "Mon-YYYY" string index
                df_month = _apply(df, "M", type_)
                df_month.index = df_month.index.strftime("%b-%Y")
                for field in fields.keys():
                    dfs_month[type_][field][station] = df_month[field]

    return dfs, dfs_day, dfs_month
def gnss_position(dset: "Dataset") -> None:
    """Write GNSS position results

    Derives ENU coordinates, HPE/VPE and position sigmas relative to the
    reference position in ``dset.meta``, assembles one output row per epoch
    (or a single row for the first observation when the solution is not
    epochwise) and writes the result as a formatted text file.

    Args:
        dset: A dataset containing the data.
    """
    file_path = config.files.path("output_position", file_vars=dset.vars)

    # Add date field to dataset
    if "date" not in dset.fields:
        dset.add_text("date", val=[d.strftime("%Y/%m/%d %H:%M:%S") for d in dset.time.datetime])

    # Add ENU position to dataset
    ref_pos = position.Position(
        val=np.array([dset.meta["pos_x"], dset.meta["pos_y"], dset.meta["pos_z"]]), system="trs"
    )
    enu = (dset.site_pos.trs.pos - ref_pos).enu
    dset.add_float("site_pos_vs_ref_east", val=enu.east, unit="meter")
    dset.add_float("site_pos_vs_ref_north", val=enu.north, unit="meter")
    dset.add_float("site_pos_vs_ref_up", val=enu.up, unit="meter")

    # Add HPE and VPE to dataset
    dset.add_float("hpe", val=np.sqrt(enu.east ** 2 + enu.north ** 2), unit="meter")
    dset.add_float("vpe", val=np.absolute(enu.up), unit="meter")

    # Add standard deviation of site position coordinates
    dset.add_float("site_pos_sigma_x", val=np.sqrt(dset.estimate_cov_site_pos_xx), unit="meter")
    dset.add_float("site_pos_sigma_y", val=np.sqrt(dset.estimate_cov_site_pos_yy), unit="meter")
    dset.add_float("site_pos_sigma_z", val=np.sqrt(dset.estimate_cov_site_pos_zz), unit="meter")

    # Put together fields in an array as specified by the 'dtype' tuple list
    if config.tech.estimate_epochwise.bool:  # Epochwise estimation or over whole time period
        output_list = list()
        for epoch in dset.unique("time"):
            idx = dset.filter(time=epoch)

            # Append current epoch position solution to final output solution
            output_list.append(tuple([get_field(dset, f.field, f.attrs, f.unit)[idx][0] for f in FIELDS]))
    else:
        # Get position solution for first observation
        idx = np.squeeze(np.array(np.nonzero(dset.time.gps.mjd)) == 0)  # first observation -> TODO: Better solution?
        # BUGFIX: 'idx' was erroneously passed as an extra positional argument to
        # get_field(); the call now matches the signature used in the epochwise
        # branch above — selection is done via the boolean index instead.
        output_list = [tuple([get_field(dset, f.field, f.attrs, f.unit)[idx][0] for f in FIELDS])]

    output_array = np.array(output_list, dtype=[(f.name, f.dtype) for f in FIELDS])

    # Write to disk
    header = get_header(
        FIELDS,
        pgm_version=f"where {where.__version__}",
        run_by=util.get_user_info()["inst_abbreviation"] if "inst_abbreviation" in util.get_user_info() else "",
        summary="GNSS position results",
    )
    np.savetxt(
        file_path,
        output_array,
        fmt=tuple(f.format for f in FIELDS),
        header="\n".join(header),
        delimiter="",
        encoding="utf8",
    )