def read_data(self) -> None: """Read data from a data file and parse the contents """ # Get chain of parsers parsers_chain = iter(self.setup_parser()) parser = next(parsers_chain) # Pointing to first parser cache = dict(line_num=0) with files.open(self.file_path, mode="rt", encoding=self.file_encoding) as fid: # Get iterators for current and next line line_iter, next_line_iter = itertools.tee(fid) next(next_line_iter, None) # Iterate over all file lines including last line by using zip_longest for line, next_line in itertools.zip_longest(line_iter, next_line_iter): cache["line_num"] += 1 self.parse_line(line.rstrip(), cache, parser) # Skip to next parser if next_line is None or parser.end_marker(line.rstrip(), cache["line_num"], next_line): if parser.end_callback is not None: parser.end_callback(cache) cache = dict(line_num=0) try: parser = next(parsers_chain) except StopIteration: break
def read_data(self) -> None: """Read data from the data file """ # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----+----8----+----+----9----+----0----+----1--- # --- Residual Summary: # ------------------------------------------------------------------ # --- included residuals : 6082 ( 98.4% ) # --- deleted residuals : 96 ( 1.6% ) # --- DataType Status RMS (m) Max (m) Min (m) number (%) # --- IonoFreeC_1P_2P included 5.072441e-01 2.287646e+00 -1.699980e+00 3087 ( 99.9% ) # --- IonoFreeC_1P_2P deleted 2.334849e+01 3.292118e+01 2.549366e+00 2 ( 0.1% ) # --- # --- IonoFreeL_1P_2P included 7.227355e-03 2.717787e-02 -2.721378e-02 2995 ( 97.0% ) # --- IonoFreeL_1P_2P deleted 1.555886e+01 8.371774e+01 -7.268516e-02 94 ( 3.0% ) # ------------------------------------------------------------------ # # PPP Solution: XYZ DeltaXYZ(Sol-Nom) DeltaENV (meters) # TRO1 2102928.199489438 721619.5988837772 5958196.320616415 -1.819E-01 8.667E-02 1.660E-02 1.410E-01 1.407E-01 -3.445E-02 # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----+----8----+----+----9----+----0----+----1--- # --- Residual Summary: # ------------------------------------------------------------------ # --- No Residuals (or at least FilterResidualTracker has not been informed of any). # ------------------------------------------------------------------ # # PPP Solution: XYZ DeltaXYZ(Sol-Nom) DeltaENV (meters) # TRO1 2102928.381357 721619.512215 5958196.304021 0.000E+00 0.000E+00 0.000E+00 0.000E+00 0.000E+00 0.000E+00 with files.open(self.file_path, mode="rt", encoding=self.file_encoding) as fid: self._parse_file(fid)
def open(
    self,
    file_key: str,
    file_vars: Optional[Dict[str, str]] = None,
    create_dirs: bool = False,
    is_zipped: Optional[bool] = None,
    download_missing: bool = True,
    use_aliases: bool = True,
    **open_args: Any,
) -> Iterator:
    """Open a file based on information in a configuration

    Open a file based on file key which is looked up in the configuration.

    The method automatically handles reading from gzipped files if the filename is specified with the special
    {gz}-ending (including the curly braces) in the file list. In that case, the mode should be specified to be
    'rt' if the contents of the file should be treated as text. If both a zipped and an unzipped version is
    available, the zipped version is used. This can be overridden by specifying True or False for the
    is_zipped-parameter.

    This function behaves similar to the built-in open-function, and should typically be used with a context
    manager as follows:

    Example:
        with cfg.open('eopc04_iau', mode='rt') as fid:
            for line in fid:
                print(line.strip())

    Args:
        file_key:          String that is looked up in the configuration file list.
        file_vars:         Dict, extra variables used to replace variables in file name and path.
        create_dirs:       True or False, if True missing directories are created.
        is_zipped:         None, True, or False. Whether the file is zipped; None means detect from file list.
        download_missing:  Whether to try to download the file if it is missing (only applies for read modes).
        use_aliases:       Whether file path aliases should be considered when looking up the path.
        open_args:         All keyword arguments are passed on to open_path.

    Returns:
        File object representing the file.
    """
    # Downloading only makes sense when the file is opened for reading
    download_missing = download_missing and "r" in open_args.get("mode", "r")
    file_path = self.path(
        file_key, file_vars, is_zipped=is_zipped, download_missing=download_missing, use_aliases=use_aliases
    )

    # Fall back on the encoding configured for this file key, if any
    if "encoding" not in open_args:
        file_encoding = self.get("encoding", section=file_key, default="").str
        open_args["encoding"] = file_encoding or None

    mode = open_args.get("mode", "r")
    _log_file_open(file_path, description=file_key, mode=mode)
    # Note: the previous `try: ... except Exception: raise` wrapper was a
    # no-op re-raise and has been removed; exceptions propagate unchanged.
    with files.open(file_path, create_dirs=create_dirs, open_as_gzip=is_zipped, **open_args) as fid:
        yield fid
def read_data(self) -> None: """Read data from the data file """ # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----+----8----+----+--- # 634737600 {TRO1(1)-GPS63(1)} IonoFreeL_1W_2W -0.00474853620107751 28.6008 264.741 77.7496 254.441 # 634737600 {TRO1(1)-GPS51(1)} IonoFreeL_1W_2W 0.0312436036183499 28.4875 72.4179 77.7914 317.321 DELETED # 634737600 {TRO1(1)-GPS46(1)} IonoFreeL_1W_2W 0.00156999072351027 54.4754 253.075 81.8949 103.653 # 634737600 {TRO1(1)-GPS44(1)} IonoFreeL_1W_2W -0.00224204588448629 28.7255 325.576 78.0873 41.2581 with files.open(self.file_path, mode="rt", encoding=self.file_encoding) as fid: self._parse_file(fid)
def _parse_header(self) -> None:
    """Parse the header (first line) of the grid file

    The header defines the grid extent and resolution, e.g.:

        57.000000 72.000000 4.000000 32.000000 0.0050000 0.0100000

    The six values are stored in `self.meta` under the keys grid_lat_min,
    grid_lat_max, grid_lon_min, grid_lon_max, grid_increment_lat and
    grid_increment_lon.
    """
    with files.open(self.file_path, mode="rt", encoding=self.file_encoding) as fid:
        # Read only the first line (resolves the old TODO about using a loop to read a single line)
        line = next(fid, None)

    if line is None:
        # Empty file: leave meta untouched, matching the previous loop-based behavior
        return

    lat_min, lat_max, lon_min, lon_max, dlat, dlon = line.split()
    self.meta["grid_lat_min"] = float(lat_min)
    self.meta["grid_lat_max"] = float(lat_max)
    self.meta["grid_lon_min"] = float(lon_min)
    self.meta["grid_lon_max"] = float(lon_max)
    self.meta["grid_increment_lat"] = float(dlat)
    self.meta["grid_increment_lon"] = float(dlon)
def _parse_header(self) -> None:
    """Read the comment header and store the a priori station position

    Header lines are prefixed with "%"; reading stops at the first
    non-header line. A line of the form

        % Position XYZ : 3855263.3407 -5049731.9986 563040.4252

    is parsed into the meta entries "pos_x", "pos_y" and "pos_z" (floats).
    """
    with files.open(self.file_path, mode="rt", encoding=self.file_encoding) as fid:
        for header_line in fid:
            if not header_line.startswith("%"):
                break  # End of header section

            if header_line.startswith("% Position XYZ"):
                x, y, z = header_line.split(":")[1].split()
                self.meta.update(pos_x=float(x), pos_y=float(y), pos_z=float(z))
def get_rinex_file_version(file_path: pathlib.PosixPath) -> str:
    """Get RINEX file version for a given file path

    The version is taken as the first whitespace-separated token of the
    file's first line (the RINEX "VERSION / TYPE" header line).

    Args:
        file_path:  File path.

    Returns:
        RINEX file version
    """
    with files.open(file_path, mode="rt") as infile:
        try:
            # IndexError if the first line is empty or contains only whitespace
            version = infile.readline().split()[0]
        except IndexError:
            # NOTE(review): assumes log.fatal aborts execution; if it merely
            # logs and returns, `version` is unbound and the return below
            # raises UnboundLocalError -- confirm log.fatal semantics
            log.fatal(f"Could not find Rinex version in file {file_path}")
    return version
def _parse_header(self) -> None:
    """Extract reference frame and epoch from the file header

    Searches for the header line of the form

        LOCAL GEODETIC DATUM: <frame>   EPOCH: <YYYY-mm-dd> <HH:MM:SS>

    and stores the frame name under meta["reference_frame"] and the parsed
    epoch (a datetime object) under meta["epoch"]. Reading stops after the
    first matching line.
    """
    with files.open(self.file_path, mode="rt", encoding=self.file_encoding) as fid:
        for header_line in fid:
            if not header_line.startswith("LOCAL GEODETIC DATUM"):
                continue

            # Drop the field labels, then split into <frame> <date> <time>
            payload = header_line.replace("LOCAL GEODETIC DATUM:", "").replace("EPOCH:", "")
            tokens = payload.split()
            self.meta["reference_frame"] = tokens[0]
            self.meta["epoch"] = datetime.strptime(tokens[1] + tokens[2], "%Y-%m-%d%H:%M:%S")
            break
def read_data(self) -> None:
    """Read data from a Sinex file and parse the contents

    Works in two phases: first the whole Sinex file is read and the requested
    blocks are collected in `self._sinex`; afterwards each registered block
    parser is applied to its raw block, and any non-None result is stored in
    `self.data` under the block marker.
    """
    # Phase 1: collect the raw Sinex blocks into self._sinex
    with files.open(self.file_path, mode="rb") as fid:
        if self._header:
            # The header line is required to be the very first line of the file
            self.parse_header_line(next(fid))
        self.parse_blocks(fid)

    # Phase 2: run each block parser on its raw data and populate self.data
    for block in self.sinex_blocks:
        if not block.parser or block.marker not in self._sinex:
            continue
        parser_args = self._sinex.get("__params__", dict()).get(block.marker, ())
        parsed = block.parser(self._sinex.get(block.marker), *parser_args)
        if parsed is not None:
            self.data[block.marker] = parsed
def write_requirements(file_path: Union[str, pathlib.Path]) -> None:
    """Write requirements (python modules) to file for reproducibility.

    Only modules that have already been imported and that expose a
    `__version__` attribute (see PEP 396 - https://www.python.org/dev/peps/pep-0396/)
    are recorded. The python interpreter version is always included.

    Args:
        file_path:  File path.
    """
    # Snapshot sys.modules with list() so that imports triggered while reading
    # __version__ cannot mutate the dict during iteration
    versions = {name: getattr(module, "__version__", None) for name, module in list(sys.modules.items())}
    versions["python"] = platform.python_version()

    # One "name==version" line per module with a string version, sorted alphabetically
    lines = sorted(f"{name}=={version.strip()}" for name, version in versions.items() if isinstance(version, str))

    # Write to requirements-file
    with files.open(file_path, mode="w") as fid:
        fid.write("\n".join(lines) + "\n")
def read_data(self) -> None:
    """Read grid data from GRAVSOFT text file

    The header (first line) is handled by `self._parse_header`; every
    following non-empty line contributes whitespace-separated grid values
    that are accumulated in self.data["griddata"].
    """
    # Parse header, e.g.:
    #
    # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+-
    #    57.000000   72.000000    4.000000   32.000000    0.0050000    0.0100000
    self._parse_header()

    # Parse data, e.g.:
    #
    # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+-
    #  9999.999 9999.999 9999.999 9999.999 9999.999 9999.999 9999.999 9999.999
    #  9999.999 9999.999 9999.999 9999.999 9999.999 9999.999 9999.999 9999.999
    #  9999.999 9999.999 9999.999 9999.999 9999.999 9999.999 9999.999 9999.999
    with files.open(self.file_path, mode="r") as fid:
        for line_idx, line in enumerate(fid):
            if line_idx == 0 or len(line) <= 1:
                continue  # Skip the header line and empty lines
            self.data.setdefault("griddata", list()).extend(line.split())
def _parse_header(self) -> None:
    """Parse the troposphere settings from the "A PRIORI MODEL" header line

    The relevant fields are picked out of the header line by column index and
    stored in `self.meta`. Numeric model codes are translated to descriptive
    names via lookup tables; other fields are stored as-is. Reading stops
    after the first matching line.
    """
    # Column index in the header line -> meta key it is stored under
    idx_def = {
        3: "apriori_troposphere_model",
        6: "mapping_function",
        9: "gradient_model",
        12: "elevation_cutoff",
        #15: "tabular_intervall",
    }

    apriori_troposphere_model_def = {
        "-1": "Saastamoinen",
        "-2": "Hopfield (Remondi)",
        "-3": "Essen and Froome",
        "-4": "Marini-Murray (SLR)",
        "-5": "Saastamoinen with Niell dry mapping",
        "-6": "GPT with GMF dry+wet mapping",
        "-7": "ECMWF with VMF1 dry+wet mapping",
        "-8": "Mendes-Pavlis (SLR)",
        "-11": "Saastamoinen dry part only",
        "-12": "Hopfield dry part only",
        "-13": "Simplified Hopfield dry part only",
        "-15": "Saastamoinen dry with Niell dry mapping",
        "-16": "GPT dry with GMF dry mapping",
        "-17": "ECMWF dry with VMF1 dry mapping",
    }

    mapping_function_def = {
        "1": "1/cos(z)",
        "2": "Hopfield",
        "3": "Dry Niell",
        "4": "Wet Niell",
        "5": "Dry GMF",
        "6": "Wet GMT",
        "7": "Dry VMF1",
        "8": "Wet VMF1",
    }

    gradient_model_def = {
        "0": "No estimation",
        "1": "Tilting",
        "2": "Linear",
        "3": "TANZ from MacMillan (1995)",
        "4": "CHENHER from Chen and Herring (1997)",
    }

    # Meta key -> lookup table used to translate its raw numeric code
    # (fields without a table are stored verbatim)
    translations = dict(
        apriori_troposphere_model=apriori_troposphere_model_def,
        mapping_function=mapping_function_def,
        gradient_model=gradient_model_def,
    )

    with files.open(self.file_path, mode="rt", encoding=self.file_encoding) as fid:
        for line in fid:
            if "A PRIORI MODEL" not in line:
                continue
            fields = line.split()
            for idx, name in idx_def.items():
                table = translations.get(name)
                self.meta[name] = fields[idx] if table is None else table[fields[idx]]
            break
def download_xml(
    self,
    latitude: float,
    longitude: float,
    from_date: datetime,
    to_date: datetime,
    url: Optional[str] = None,
    reference_level: Optional[str] = "chart_datum",
) -> None:
    """Download XML file from url

    Builds a query URL from the given position, period and reference level,
    downloads the response with pycurl to `self.file_path`, and records the
    final URL in `self.meta["__url__"]`.

    Args:
        latitude:         Latitude of position in [deg]
        longitude:        Longitude of position in [deg]
        from_date:        Starting date of data period
        to_date:          Ending date of data period
        url:              URL to download from, if None use self.URL instead.
        reference_level:  Choose reference, which can be chart_datum, mean_sea_level or nn1954
    """
    # Map the public reference_level names to the API's refcode values
    reference_level_def = {
        "chart_datum": "cd",
        "mean_sea_level": "msl",
        "nn1954": "nn1954",
    }

    # Get URL
    url = self.URL if url is None else url

    try:
        args = dict(
            lat=latitude,
            lon=longitude,
            fromtime=from_date.strftime("%Y-%m-%dT%H:%M"),
            totime=to_date.strftime("%Y-%m-%dT%H:%M"),
            datatype="all",
            refcode=reference_level_def[reference_level],
            place="",
            file="",
            lang="nn",
            interval=10,
            dst=0,  # summer time is not used
            tzone=0,  # UTC
            tide_request="locationdata",
        )
    except AttributeError:
        # AttributeError is raised when from_date/to_date lack strftime (e.g. None).
        # NOTE(review): assumes log.fatal aborts execution; if it merely logs,
        # `args` is unbound and the f-string below raises NameError -- confirm
        log.fatal("Following arguments has to be set: latitude, longitude, from_date and to_date")

    url = f"{url}?{'&'.join([f'{k}={v}' for k, v in args.items()])}"
    print(f"Downloading {url} to {self.file_path}")

    # Read data from API to file path
    with files.open(self.file_path, mode="wb") as fid:
        c = pycurl.Curl()
        c.setopt(c.URL, url)
        c.setopt(c.WRITEDATA, fid)
        try:
            c.perform()
        finally:
            # Always release the curl handle, even if the transfer fails
            c.close()

    self.meta["__url__"] = url