def save(
        config: dict, file: FileOrPathLike, flat: OptBool = None,
        comment: OptStr = None) -> None:
    """Save configuration dictionary to INI-file.

    Args:
        config: Configuration dictionary
        file: String or :term:`path-like object` that represents to a
            writeable file in the directory structure of the system, or a
            :term:`file object` in write mode.
        flat: Determines if the desired INI format structure contains
            sections. By default sections are used, if the dictionary
            contains subdictionaries.
        comment: String containing comment lines, which are stored as
            initial '#' lines in the INI-file. By default no comment is
            written.

    """
    # Serialize the dictionary to INI formatted text first, so a broken
    # dictionary is rejected before the target file is touched
    try:
        text = encode(config, flat=flat, comment=comment)
    except Exception as err:
        raise ValueError("dictionary is not valid") from err

    # Dump the formatted text to the given file or path
    with textfile.openx(file, mode='w') as stream:
        stream.write(text)
def load(
        file: FileOrPathLike, structure: OptStrucDict = None,
        flat: OptBool = None) -> StrucDict:
    """Import configuration dictionary from INI file.

    Args:
        file: String or :term:`path-like object` that points to a readable
            file in the directory structure of the system, or a
            :term:`file object` in read mode.
        structure: Dictionary of dictionaries, which determines the
            structure of the configuration dictionary. If structure is
            None, the INI-file is completely imported and all values are
            interpreted as strings. If the structure is a dictionary of
            dictionaries, the keys of the outer dictionary describe valid
            section names by strings, that are interpreted as regular
            expressions. Thereupon, the keys of the respective inner
            dictionaries describe valid parameter names as strings, that
            are also interpreted as regular expressions. Finally the
            values of the inner dictionaries define the type of the
            parameters by their own type, e.g. str, int, float etc.
            Accepted types can be found in the documentation of the
            function `literal.decode`_.
        flat: Determines if the desired INI format structure contains
            sections or not. By default sections are used, if the first
            non empty, non comment line in the string identifies a
            section.

    Return:
        Structured configuration dictionary

    """
    # Slurp the whole INI text from the file-like or path-like object and
    # delegate the actual parsing to decode()
    with textfile.openx(file, mode='r') as stream:
        text = stream.read()
    return decode(text, structure=structure, flat=flat)
def save( config: dict, file: FileOrPathLike, flat: OptBool = None, comment: OptStr = None) -> None: """Save configuration dictionary to INI-file. Args: config: Configuration dictionary file: String or :term:`path-like object` that represents to a writeable file in the directory structure of the system, or a :term:`file object` in write mode. flat: Determines if the desired INI format structure contains sections. By default sections are used, if the dictionary contains subdictionaries. comment: String containing comment lines, which are stored as initial '#' lines in the INI-file. By default no comment is written. """ # Convert configuration dictionary to INI formated text try: text = encode(config, flat=flat, comment=comment) except Exception as err: raise ValueError("dictionary is not valid") from err # Write text to file with textfile.openx(file, mode='w') as fh: fh.write(text)
def load( file: FileOrPathLike, structure: OptStrucDict = None, flat: OptBool = None) -> StrucDict: """Import configuration dictionary from INI file. Args: file: String or :term:`path-like object` that points to a readable file in the directory structure of the system, or a :term:`file object` in read mode. structure: Dictionary of dictionaries, which determines the structure of the configuration dictionary. If structure is None, the INI-file is completely imported and all values are interpreted as strings. If the structure is a dictionary of dictionaries, the keys of the outer dictionary describe valid section names by strings, that are interpreted as regular expressions. Therupon, the keys of the respective inner dictionaries describe valid parameter names as strings, that are also interpreted as regular expressions. Finally the values of the inner dictionaries define the type of the parameters by their own type, e.g. str, int, float etc. Accepted types can be found in the documentation of the function `literal.decode`_. flat: Determines if the desired INI format structure contains sections or not. By default sections are used, if the first non empty, non comment line in the string identifies a section. Return: Structured configuration dictionary """ # Read configuration from file-like or path-like object with textfile.openx(file, mode='r') as fh: return decode(fh.read(), structure=structure, flat=flat)
def save(
        file: FileOrPathLike, data: NpArray, labels: OptStrList = None,
        comment: OptStr = None, delim: str = ',') -> None:
    """Save NumPy array to CSV-file.

    Args:
        file: String, :term:`path-like object` or :term:`file object` that
            points to a valid CSV-file in the directory structure of the
            system.
        data: :class:`numpy.ndarray` containing the data which is to be
            exported to a CSV-file.
        labels: List of strings with column names.
        comment: String, which is included in the CSV-file within initial
            '#' lines. By default no initial lines are created.
        delim: String containing CSV-delimiter. The default value is ','

    """
    # Build the file header from the comment lines and the column labels.
    # np.savetxt requires the 'header' argument to be a string, so fall
    # back to '' when neither a comment nor labels are given (previously
    # None was passed through, which raises a TypeError in np.savetxt).
    header = ''
    if isinstance(comment, str):
        header = '# ' + comment.replace('\n', '\n# ') + '\n\n'
    if isinstance(labels, list):
        header += delim.join(labels)

    # Get number of columns from the last entry in data.shape
    cols = data.shape[-1]

    # Column format: '%s' for the first column, which may hold row labels,
    # fixed-point floats for the remaining columns
    fmt = delim.join(['%s'] + ['%10.10f'] * (cols - 1))

    with textfile.openx(file, mode='w') as fh:
        np.savetxt(fh, data, fmt=fmt, header=header, comments='')
def _get_delim(self) -> OptStr:
    """Return the CSV delimiter, detecting it from the file if needed."""
    # Return delimiter if set manually
    if self._delim is not None:
        return self._delim

    # Initialize CSV-Sniffer with default values; 'preferred' restricts
    # detection to the configured candidate delimiters
    sniffer = csv.Sniffer()
    sniffer.preferred = self._delim_candidates
    delim: OptStr = None

    # Detect delimiter by sniffing a growing probe of content lines
    with textfile.openx(self._file, mode='r') as fd:
        size, probe = 0, ''
        for line in fd:
            # Check termination criteria: give up after collecting more
            # than the configured maximum number of probe lines
            if size > self._delim_maxcount:
                break
            # Check exclusion criteria: skip blank and comment lines
            strip = line.strip()
            if not strip or strip.startswith('#'):
                continue
            # Increase probe size; only start sniffing once the minimum
            # number of content lines has been accumulated
            probe += line
            size += 1
            if size <= self._delim_mincount:
                continue
            # Try to detect delimiter from probe using csv.Sniffer; on
            # failure keep collecting lines and retry with a larger probe
            try:
                dialect = sniffer.sniff(probe)
            except csv.Error:
                continue
            delim = dialect.delimiter
            break

    # None if no delimiter could be detected
    return delim
def _get_skiprows(self) -> int:
    """Return the number of leading rows to skip in the CSV-file.

    Counts the initial blank and '#'-comment lines; the count starts at 1,
    presumably to additionally skip the column header row — confirm
    against the callers.
    """
    count = 1
    with textfile.openx(self._file, mode='r') as stream:
        for row in stream:
            content = row.strip()
            # The first non-blank, non-comment line ends the preamble
            if content and not content.startswith('#'):
                break
            count += 1
    return count
def test_openx(self) -> None:
    """Check textfile.openx with both a path argument and a file object."""
    # Use the name of a NamedTemporaryFile to get a fresh, writable path
    filepath = Path(tempfile.NamedTemporaryFile().name + '.txt')

    # Case 1: openx is given a path-like object in write and read mode
    with self.subTest(file=filepath):
        with textfile.openx(filepath, mode='w') as fh:
            fh.write(self.text)
        if filepath.is_file():
            with textfile.openx(filepath, mode='r') as fh:
                text = fh.read()
            # Remove the file before the assertion so it is cleaned up
            # even when the round-trip content check fails
            filepath.unlink()
        self.assertTrue(text == self.text)

    # Case 2: openx is given an already opened file object; openx must
    # leave closing the underlying file to the caller
    file = filepath.open(mode='w')
    with self.subTest(file=file):
        with textfile.openx(file, mode='w') as fh:
            fh.write(self.text)
        if not file.closed:
            file.close()
        file = filepath.open(mode='r')
        with textfile.openx(file, mode='r') as fh:
            text = fh.read()
        if not file.closed:
            file.close()
        self.assertTrue(text == self.text)
def _get_rownames(self) -> OptList:
    """Return the list of row names, or None if no name column exists."""
    # Without a dedicated name column there are no row names
    namecol = self._get_namecol()
    if namecol is None:
        return None
    colname = self.colnames[namecol]

    # Load only the name column from the CSV-file as strings
    dtype = {'names': (colname, ), 'formats': ('<U12', )}
    with textfile.openx(self._file, mode='r') as stream:
        records = np.loadtxt(
            stream, skiprows=self._get_skiprows(),
            delimiter=self._get_delim(), usecols=(namecol, ), dtype=dtype)

    # Flatten the structured array into a plain list of name strings
    return [record[0] for record in records.flat]
def _get_rownames(self) -> OptList:
    """Return the list of row names, or None if no name column exists."""
    # Check type of 'cols': without a name column there are no row names
    lblcol = self._get_namecol()
    if lblcol is None:
        return None
    lbllbl = self.colnames[lblcol]

    # Import CSV-file to NumPy ndarray; only the name column is loaded,
    # as strings of up to 12 characters ('<U12')
    with textfile.openx(self._file, mode='r') as fh:
        rownames = np.loadtxt(fh,
            skiprows=self._get_skiprows(),
            delimiter=self._get_delim(),
            usecols=(lblcol, ),
            dtype={
                'names': (lbllbl, ),
                'formats': ('<U12', )
            })

    # Flatten the structured array to a plain list of name strings
    return [name[0] for name in rownames.flat]
def select(self, columns: OptStrTuple = None) -> OptNpArray: """Load numpy ndarray from CSV-file. Args: columns: List of column labels in CSV-file. By default the list of column labels is taken from the first content line in the CSV-file. Returns: :class:`numpy.ndarray` containing data from CSV-file, or None if the data could not be imported. """ # Check type of 'cols' check.has_opt_type("'columns'", columns, tuple) # Get column names and formats usecols = self._get_usecols(columns) colnames = self._get_colnames() names = tuple(colnames[colid] for colid in usecols) lblcol = self._get_namecol() if lblcol is None: formats = tuple(['<f8'] * len(usecols)) elif lblcol not in usecols: formats = tuple(['<U12'] + ['<f8'] * len(usecols)) names = ('label', ) + names usecols = (lblcol, ) + usecols else: lbllbl = colnames[lblcol] formats = tuple(['<U12'] + ['<f8'] * (len(usecols) - 1)) names = tuple(['label'] + [l for l in names if l != lbllbl]) usecols = tuple([lblcol] + [c for c in usecols if c != lblcol]) # Import data from CSV-file as numpy array with textfile.openx(self._file, mode='r') as fh: return np.loadtxt(fh, skiprows=self._get_skiprows(), delimiter=self._get_delim(), usecols=usecols, dtype={ 'names': names, 'formats': formats })
def select(self, columns: OptStrTuple = None) -> OptNpArray: """Load numpy ndarray from CSV-file. Args: columns: List of column labels in CSV-file. By default the list of column labels is taken from the first content line in the CSV-file. Returns: :class:`numpy.ndarray` containing data from CSV-file, or None if the data could not be imported. """ # Check type of 'cols' check.has_opt_type("'columns'", columns, tuple) # Get column names and formats usecols = self._get_usecols(columns) colnames = self._get_colnames() names = tuple(colnames[colid] for colid in usecols) lblcol = self._get_namecol() if lblcol is None: formats = tuple(['<f8'] * len(usecols)) elif lblcol not in usecols: formats = tuple(['<U12'] + ['<f8'] * len(usecols)) names = ('label', ) + names usecols = (lblcol, ) + usecols else: lbllbl = colnames[lblcol] formats = tuple(['<U12'] + ['<f8'] * (len(usecols) - 1)) names = tuple(['label'] + [l for l in names if l != lbllbl]) usecols = tuple([lblcol] + [c for c in usecols if c != lblcol]) # Import data from CSV-file as numpy array with textfile.openx(self._file, mode='r') as fh: return np.loadtxt(fh, skiprows=self._get_skiprows(), delimiter=self._get_delim(), usecols=usecols, dtype={'names': names, 'formats': formats})
def save(file: FileOrPathLike, data: NpArray, labels: OptStrList = None,
        comment: OptStr = None, delim: str = ',') -> None:
    """Save NumPy array to CSV-file.

    Args:
        file: String, :term:`path-like object` or :term:`file object` that
            points to a valid CSV-file in the directory structure of the
            system.
        data: :class:`numpy.ndarray` containing the data which is to be
            exported to a CSV-file.
        labels: List of strings with column names.
        comment: String, which is included in the CSV-file within initial
            '#' lines. By default no initial lines are created.
        delim: String containing CSV-delimiter. The default value is ','

    """
    # Build the file header from the comment lines and the column labels.
    # np.savetxt requires the 'header' argument to be a string, so fall
    # back to '' when neither a comment nor labels are given (previously
    # None was passed through, which raises a TypeError in np.savetxt).
    header = ''
    if isinstance(comment, str):
        header = '# ' + comment.replace('\n', '\n# ') + '\n\n'
    if isinstance(labels, list):
        header += delim.join(labels)

    # Get number of columns from the last entry in data.shape
    cols = data.shape[-1]

    # Column format: '%s' for the first column, which may hold row labels,
    # fixed-point floats for the remaining columns
    fmt = delim.join(['%s'] + ['%10.10f'] * (cols - 1))

    with textfile.openx(file, mode='w') as fh:
        np.savetxt(fh, data, fmt=fmt, header=header, comments='')