def get_module_name(frame: int = 0) -> str:
    """Get name of module, which calls this function.

    Args:
        frame: Frame index relative to the current frame in the callstack,
            which is identified with 0. Negative values consecutively identify
            previous modules within the callstack. Default: 0

    Returns:
        String with name of module.

    """
    # Check type of 'frame'
    check.has_type("argument 'frame'", frame, int)

    # Check value of 'frame'
    if frame > 0:
        raise ValueError(
            "'frame' is required to be a negative number or zero")

    # Traceback frames using inspect
    mname: str = ''
    cframe = inspect.currentframe()
    for _ in range(abs(frame) + 1):
        if cframe is None:
            break
        cframe = cframe.f_back
    if cframe is not None:
        mname = cframe.f_globals['__name__']

    return mname
def _get_bindict(self, obj: Group) -> dict:
    binddict = self.binddict
    if not binddict:
        return obj.__dict__
    check.has_attr(obj, binddict)
    check.has_type(binddict, getattr(obj, binddict), dict)
    return getattr(obj, binddict)
def addcols(
        base: NpRecArray, data: NpRecArray,
        cols: NpFields = None) -> NpRecArray:
    """Add columns from a source table to a target table.

    Wrapper function to numpy's `rec_append_fields`_.

    Args:
        base: Numpy record array with table like data
        data: Numpy record array storing the fields to add to the base.
        cols: String or sequence of strings corresponding to the names of the
            new columns. If cols is None, then all columns of the data table
            are appended. Default: None

    Returns:
        Numpy record array containing the base array, as well as the
        appended columns.

    """
    cols = cols or getattr(data, 'dtype').names
    check.has_type("'cols'", cols, (tuple, str))
    # Wrap a single column name in a list, so that it is not split into
    # individual characters by the subsequent list conversion
    if isinstance(cols, str):
        cols = [cols]
    else:
        cols = list(cols) # make cols mutable

    # Append fields
    return nprf.rec_append_fields(base, cols, [data[c] for c in cols])
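# Usage sketch (hedged): assuming numpy is available and this module is
# importable, appending a single column from one record array to another
# could look as follows. The array contents are illustrative only.
#
#   >>> import numpy as np
#   >>> base = np.rec.fromrecords([(1, 'a'), (2, 'b')], names='id,key')
#   >>> data = np.rec.fromrecords([(0.1,), (0.2,)], names='val')
#   >>> addcols(base, data, ('val', )).dtype.names
#   ('id', 'key', 'val')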
def get_caller_name(frame: int = 0) -> str:
    """Get name of the callable, which calls this function.

    Args:
        frame: Frame index relative to the current frame in the callstack,
            which is identified with 0. Negative values consecutively identify
            previous modules within the callstack. Default: 0

    Returns:
        String with name of the caller.

    """
    # Check type of 'frame'
    check.has_type("argument 'frame'", frame, int)

    # Check value of 'frame'
    if frame > 0:
        raise ValueError(
            "'frame' is required to be a negative number or zero")

    # Get name of caller using inspect
    stack = inspect.stack()[abs(frame - 1)]
    mname = inspect.getmodule(stack[0]).__name__
    fbase = stack[3]

    return '.'.join([mname, fbase])
def get_var(varname: str, *args: Any, **kwds: Any) -> OptStr:
    """Get environment or application variable.

    Environment variables comprise static and runtime properties of the
    operating system like 'username' or 'hostname'. Application variables in
    turn are intended to describe the application distribution by authorship
    information, bibliographic information, status, formal conditions and
    notes or warnings. For more information see :PEP:`345`.

    Args:
        varname: Name of environment variable. Typical application variable
            names are:
            'name': The name of the distribution
            'version': A string containing the distribution's version number
            'status': Development status of the distributed application.
                Typical values are 'Prototype', 'Development', or 'Production'
            'description': A longer description of the distribution that can
                run to several paragraphs.
            'keywords': A list of additional keywords to be used to assist
                searching for the distribution in a larger catalog.
            'url': A string containing the URL for the distribution's
                homepage.
            'license': Text indicating the license covering the distribution
            'copyright': Notice of statutorily prescribed form that informs
                users of the distribution to published copyright ownership.
            'author': A string containing the author's name at a minimum;
                additional contact information may be provided.
            'email': A string containing the author's e-mail address. It can
                contain a name and e-mail address, as described in :rfc:`822`.
            'maintainer': A string containing the maintainer's name at a
                minimum; additional contact information may be provided.
            'company': The company, which created or maintains the
                distribution.
            'organization': The organization, which created or maintains the
                distribution.
            'credits': List of strings, acknowledging further contributors,
                teams or supporting organizations.
        *args: Optional arguments that specify the application, as required
            by the function 'nemoa.base.env.update_vars'.
        **kwds: Optional keyword arguments that specify the application, as
            required by the function 'nemoa.base.env.update_vars'.

    Returns:
        String representing the value of the application variable.

    """
    # Check type of 'varname'
    check.has_type("'varname'", varname, str)

    # Update variables if not present or if optional arguments are given
    if '_vars' not in globals() or args or kwds:
        update_vars(*args, **kwds)
    appvars = globals().get('_vars', {})

    return appvars.get(varname, None)
def merge(*args: dict, mode: int = 1) -> dict:
    """Recursive right merge of dictionaries.

    Args:
        *args: Dictionaries with arbitrary hierarchy structures
        mode: Creation mode for the resulting dictionary:
            0: change rightmost dictionary
            1: create new dictionary by deepcopy
            2: create new dictionary by chain mapping

    Returns:
        Dictionary containing right merge of dictionaries.

    Examples:
        >>> merge({'a': 1}, {'a': 2, 'b': 2}, {'c': 3})
        {'a': 1, 'b': 2, 'c': 3}

    """
    # Check for trivial cases
    if not args:
        return {}
    if len(args) == 1:
        return args[0]

    # Check for chain mapping creation mode
    if mode == 2:
        import collections
        return dict(collections.ChainMap(*args))

    # Recursively right merge
    if len(args) == 2:
        d1, d2 = args[0], args[1]
    else:
        d1, d2 = args[0], merge(*args[1:], mode=mode)
        mode = 0

    # Check type of first and second argument
    check.has_type("first argument", d1, dict)
    check.has_type("second argument", d2, dict)

    # Create new dictionary
    if mode == 1:
        import copy
        d2 = copy.deepcopy(d2)

    # Right merge couple of dictionaries
    for k1, v1 in d1.items():
        if k1 not in d2:
            d2[k1] = v1
        elif isinstance(v1, dict):
            merge(v1, d2[k1], mode=0)
        else:
            d2[k1] = v1

    return d2
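# Usage sketch (hedged): the chain mapping mode (mode=2) is shallow, so nested
# dictionaries are taken as a whole from the leftmost argument, whereas the
# default mode merges them recursively. The values are illustrative only.
#
#   >>> merge({'a': {'x': 1}}, {'a': {'y': 2}})
#   {'a': {'y': 2, 'x': 1}}
#   >>> merge({'a': {'x': 1}}, {'a': {'y': 2}}, mode=2)
#   {'a': {'x': 1}}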
def as_datetime(text: str, fmt: OptStr = None) -> Date:
    """Convert text into a datetime object.

    Args:
        text: String representing a datetime.
        fmt: Optional format string, as accepted by
            :meth:`datetime.datetime.strptime`. By default the format
            '%Y-%m-%d %H:%M:%S.%f' is used.

    Returns:
        Value of the text as datetime.

    """
    # Check types of arguments
    check.has_type("first argument 'text'", text, str)

    fmt = fmt or '%Y-%m-%d %H:%M:%S.%f'
    return datetime.datetime.strptime(text, fmt)
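# Usage sketch (hedged): with the default format the text has to include
# microseconds, while a custom 'fmt' (honoured as above) can relax that.
# The values are illustrative only.
#
#   >>> as_datetime('2019-01-01 12:30:00.000000')
#   datetime.datetime(2019, 1, 1, 12, 30)
#   >>> as_datetime('2019-01-01', fmt='%Y-%m-%d')
#   datetime.datetime(2019, 1, 1, 0, 0)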
def as_path(text: str, expand: bool = True) -> Path:
    """Convert text into a Path.

    Args:
        text: String representing a path.
        expand: Boolean value, which determines if environment variables
            within the path are expanded.

    Returns:
        Value of the text as Path.

    """
    # Check types of arguments
    check.has_type("first argument 'text'", text, str)
    check.has_type("argument 'expand'", expand, bool)

    if expand:
        return env.expand(text)
    return Path(text)
def __set__(self, obj: Group, val: Any) -> None:
    """Bypass and type check set request."""
    if self._get_readonly(obj):
        raise ReadOnlyAttrError(obj, self.name)
    if self._is_remote(obj):
        self._set_remote(obj, val)
        return
    classinfo = self.classinfo
    if classinfo and not isinstance(val, type(self.default)):
        check.has_type(f"attribute '{self.name}'", val, classinfo)
    if callable(self.fset):
        self.fset(obj, val) # type: ignore
        return
    if isinstance(self.sset, str):
        getattr(obj, self.sset, void)(val)
        return
    binddict = self._get_bindict(obj)
    bindkey = self._get_bindkey(obj)
    binddict[bindkey] = val
def split_args(text: str) -> Tuple[str, tuple, dict]:
    """Split a function call in the function name, its arguments and keywords.

    Args:
        text: Function call given as valid Python code. Beware: Function
            definitions are not valid function calls.

    Returns:
        A tuple consisting of the function name as string, the arguments as
        tuple and the keywords as dictionary.

    """
    # Check type of 'text'
    check.has_type("first argument 'text'", text, str)

    # Get function name
    try:
        tree = ast.parse(text)
        func = getattr(getattr(getattr(tree.body[0], 'value'), 'func'), 'id')
    except (SyntaxError, AttributeError) as err:
        raise ValueError(f"'{text}' is not a valid function call") from err

    # Get tuple with arguments
    astargs = getattr(getattr(tree.body[0], 'value'), 'args')
    largs = []
    for astarg in astargs:
        typ = astarg._fields[0]
        val = getattr(astarg, typ)
        largs.append(val)
    args = tuple(largs)

    # Get dictionary with keywords
    astkwds = getattr(getattr(tree.body[0], 'value'), 'keywords')
    kwds = {}
    for astkw in astkwds:
        key = astkw.arg
        typ = astkw.value._fields[0]
        val = getattr(astkw.value, typ)
        kwds[key] = val

    return func, args, kwds
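# Usage sketch (hedged): for a simple call expression with literal arguments
# the function name, positional arguments and keyword arguments are returned
# as follows.
#
#   >>> split_args("f(1, 2, a=3)")
#   ('f', (1, 2), {'a': 3})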
def get_dir(dirname: str, *args: Any, **kwds: Any) -> Path:
    """Get application specific environmental directory by name.

    This function returns application specific system directories by platform
    independent names to allow platform independent storage for caching,
    logging, configuration and permanent data storage.

    Args:
        dirname: Environmental directory name. Allowed values are:

            :user_cache_dir: Cache directory of user
            :user_config_dir: Configuration directory of user
            :user_data_dir: Data directory of user
            :user_log_dir: Logging directory of user
            :site_config_dir: Site global configuration directory
            :site_data_dir: Site global data directory
            :site_package_dir: Site global package directory
            :package_dir: Current package directory
            :package_data_dir: Current package data directory

        *args: Optional arguments that specify the application, as required
            by the function 'nemoa.base.env.update_dirs'.
        **kwds: Optional keyword arguments that specify the application, as
            required by the function 'nemoa.base.env.update_dirs'.

    Returns:
        Path containing the location of the environmental directory. For
        unsupported directory names a ValueError is raised.

    """
    # Check type of 'dirname'
    check.has_type("argument 'dirname'", dirname, str)

    # Update directories if not present or if any optional arguments are given
    if '_dirs' not in globals() or args or kwds:
        update_dirs(*args, **kwds)
    dirs = globals().get('_dirs', {})

    # Check value of 'dirname'
    if dirname not in dirs:
        raise ValueError(f"directory name '{dirname}' is not valid")

    return dirs[dirname]
def decode(
        text: str, target: OptType = None, undef: OptStr = 'None',
        **kwds: Any) -> Any:
    """Decode literal text representation to object of given target type.

    Args:
        text: String representing the value of a given type in its respective
            syntax format. The standard format corresponds to the standard
            Python representation if available. Some types also accept
            further formats, which may use additional keywords.
        target: Target type, in which the text is to be converted.
        undef: Optional string, which represents an undefined value. If undef
            is a string, then any text that matches the string is decoded as
            None, independent from the given target type.
        **kwds: Supplementary parameters, that specify the encoding format of
            the target type.

    Returns:
        Value of the text in given target format or None.

    """
    # Check arguments
    check.has_type("'text'", text, str)
    check.has_opt_type("'target'", target, type)

    # Check for undefined value
    if text == undef:
        return None

    # If no target type is given, estimate type
    target = target or estimate(text) or str

    # Elementary literals
    if target == str:
        return text.strip().replace('\n', '')
    if target == bool:
        return text.lower().strip() == 'true'
    if target in [int, float, complex]:
        return target(text, **kwds)

    fname = 'as_' + target.__name__.lower()
    return this.call_attr(fname, text, **kwds)
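# Usage sketch (hedged): decoding with an explicit target type dispatches to
# the elementary converters above, while the string 'None' (the default for
# 'undef') always decodes to None. The values are illustrative only.
#
#   >>> decode('1.5', target=float)
#   1.5
#   >>> decode('true', target=bool)
#   True
#   >>> decode('None') is None
#   True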
def get_submodule(name: str, ref: OptModule = None) -> OptModule:
    """Get instance from the name of a submodule of the current module.

    Args:
        name: Name of submodule of given module.
        ref: Module reference. By default the current caller's module is used.

    Returns:
        Module reference of submodule or None, if the current module does not
        contain the given module name.

    """
    # Check type of 'name'
    check.has_type("argument 'name'", name, str)

    # Set default module to caller's module
    ref = ref or get_caller_module()

    # Get instance of submodule
    return entity.get_module(ref.__name__ + '.' + name)
def get_module(name: str) -> OptModule:
    """Get reference to module instance from a fully qualified module name.

    Args:
        name: Fully qualified name of module

    Returns:
        Module reference of the given module name or None, if the name does
        not point to a valid module.

    """
    # Check type of 'name'
    check.has_type("argument 'name'", name, str)

    # Try to import module using importlib
    module: OptModule = None
    try:
        module = importlib.import_module(name)
    except ModuleNotFoundError:
        return module

    return module
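# Usage sketch (hedged): importable names resolve to the module object,
# anything else resolves to None.
#
#   >>> get_module('os').__name__
#   'os'
#   >>> get_module('no.such.module') is None
#   True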
def _create_header(self, columns: FieldLike) -> None:
    # Check types of fieldlike column descriptors and convert them to field
    # descriptors, that are accepted by dataclasses.make_dataclass()
    fields: list = []
    for each in columns:
        if isinstance(each, str):
            fields.append(each)
            continue
        check.has_type(f"field {each}", each, tuple)
        check.has_size(f"field {each}", each, min_size=2, max_size=3)
        check.has_type("first arg", each[0], str)
        check.has_type("second arg", each[1], type)
        if len(each) == 2:
            fields.append(each)
            continue
        check.has_type("third arg", each[2], (Field, dict))
        if isinstance(each[2], Field):
            fields.append(each)
            continue
        field = dataclasses.field(**each[2])
        fields.append(each[:2] + (field, ))

    # Create record namespace with table hooks
    namespace = {
        '_create_row_id': self._create_row_id,
        '_delete_hook': self._remove_row_id,
        '_restore_hook': self._append_row_id,
        '_update_hook': self._update_row_diff,
        '_revoke_hook': self._remove_row_diff}

    # Create Record dataclass and constructor
    self._Record = dataclasses.make_dataclass(
        'Row', fields, bases=(Record, ), namespace=namespace)

    # Create slots
    self._Record.__slots__ = ['id', 'state'] + [
        field.name for field in dataclasses.fields(self._Record)]

    # Reset store, diff and index
    self._store = []
    self._diff = []
    self._index = []
def as_set(text: str, delim: str = ',') -> set:
    """Convert text into set.

    Args:
        text: String representing a set. Valid representations are:
            Python format: Allows elements of arbitrary types:
                Example: "{'a', 'b', 3}"
            Delimiter separated values (DSV): Allows string elements:
                Example: "a, b, c"
        delim: A string, which is used as delimiter for the separation of the
            text. This parameter is only used in the DSV format.

    Returns:
        Value of the text as set.

    """
    # Check types of arguments
    check.has_type("first argument 'text'", text, str)
    check.has_type("argument 'delim'", delim, str)

    # Return empty set if the string is blank
    if not text or not text.strip():
        return set()

    # Python standard format
    val = None
    if delim == ',':
        try:
            val = set(ast.literal_eval(text))
        except (SyntaxError, ValueError, Warning):
            pass
    if isinstance(val, set):
        return val

    # Delimited string format
    return {item.strip() for item in text.split(delim)}
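# Usage sketch (hedged): Python set literals are evaluated directly, anything
# else falls back to splitting on the delimiter.
#
#   >>> as_set("{'a', 'b', 3}") == {'a', 'b', 3}
#   True
#   >>> as_set("a, b, c") == {'a', 'b', 'c'}
#   True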
def __init__(self,
        fget: OptCallOrStr = None, fset: OptCallOrStr = None,
        fdel: OptCallOrStr = None, doc: OptStr = None,
        classinfo: OptClassInfo = None, readonly: bool = False,
        default: Any = None, default_factory: OptCallable = None,
        binddict: OptStr = None, bindkey: OptStr = None,
        remote: bool = False, inherit: bool = False,
        category: OptStr = None) -> None:
    """Initialize Attribute Descriptor."""
    # Initialize Property Class
    super_kwds: dict = {
        'fget': fget if callable(fget) else None,
        'fset': fset if callable(fset) else None,
        'fdel': fdel if callable(fdel) else None,
        'doc': doc}
    super().__init__(**super_kwds)

    # Check Types of Arguments
    check.has_opt_type("argument 'classinfo'", classinfo, (type, tuple))
    check.has_type("argument 'readonly'", readonly, bool)
    check.has_opt_type("argument 'binddict'", binddict, str)
    check.has_opt_type("argument 'bindkey'", bindkey, str)
    check.has_type("argument 'remote'", remote, bool)
    check.has_type("argument 'inherit'", inherit, bool)
    check.has_opt_type("argument 'category'", category, str)

    # Set Instance Attributes to Argument Values
    self.sget = fget if isinstance(fget, str) else None
    self.sset = fset if isinstance(fset, str) else None
    self.sdel = fdel if isinstance(fdel, str) else None
    self.classinfo = classinfo
    self.default = default
    self.default_factory = default_factory
    self.readonly = readonly
    self.binddict = binddict
    self.bindkey = bindkey
    self.remote = remote
    self.inherit = inherit
    self.category = category
def decode(
        text: str, structure: OptStrucDict = None,
        flat: OptBool = None) -> StrucDict:
    """Load configuration dictionary from INI-formatted text.

    Args:
        text: Text that describes a configuration in INI format.
        structure: Dictionary of dictionaries, which determines the structure
            of the configuration dictionary. If structure is None, the
            INI-file is completely imported and all values are interpreted as
            strings. If the structure is a dictionary of dictionaries, the
            keys of the outer dictionary describe valid section names by
            strings, that are interpreted as regular expressions. Thereupon,
            the keys of the respective inner dictionaries describe valid
            parameter names as strings, that are also interpreted as regular
            expressions. Finally the values of the inner dictionaries define
            the type of the parameters by their own type, e.g. str, int,
            float etc. Accepted types can be found in the documentation of
            the function :func:`~nemoa.base.literal.decode`.
        flat: Determines if the desired INI format structure contains
            sections or not. By default sections are used, if the first non
            empty, non comment line in the string identifies a section.

    Returns:
        Structured configuration dictionary.

    """
    # Check arguments
    check.has_type("first argument", text, str)

    # If the usage of sections is not defined by the argument 'flat' their
    # existence is determined from the given file structure. If the file
    # structure also is not given, it is determined by the first not blank and
    # non comment line in the text. If this line does not start with the
    # character '[', then the file structure is considered to be flat.
    if flat is None:
        if isinstance(structure, dict):
            flat = not any(isinstance(val, dict) for val in structure.values())
        else:
            flat = True
            with StringIO(text) as fh:
                line = ''
                for line in fh:
                    content = line.strip()
                    if content and not content.startswith('#'):
                        break
                flat = not line.lstrip().startswith('[')

    # For flat structured files a temporary [root] section is created and the
    # structure dictionary is embedded within the 'root' key of a wrapping
    # dictionary.
    if flat:
        text = '\n'.join(['[root]', text])
        if isinstance(structure, dict):
            structure = cast(SecDict, {'root': structure})

    # Parse inifile without literal decoding
    parser = ConfigParser()
    setattr(parser, 'optionxform', lambda key: key)
    parser.read_string(text)

    # Decode literals by using the structure dictionary
    config = parse(parser, structure=cast(SecDict, structure))

    # If structure is flat collapse the 'root' key
    return (config.get('root') or {}) if flat else config
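# Usage sketch (hedged, the literal decoding is delegated to the helper
# parse(), which is defined elsewhere in this module): without a structure
# argument all values are read back as strings, while a structure dictionary
# enforces the given types. The values are illustrative only.
#
#   >>> text = "[section]\nkey = 1\n"
#   >>> decode(text)
#   {'section': {'key': '1'}}
#   >>> decode(text, structure={'section': {'key': int}})
#   {'section': {'key': 1}}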
def validate(self) -> None:
    """Check types of fields."""
    fields = getattr(self, '__dataclass_fields__', {})
    for name, field in fields.items():
        value = getattr(self, name)
        check.has_type(f"field '{name}'", value, field.type)
def as_dict(text: str, delim: str = ',') -> dict:
    """Convert text into dictionary.

    Args:
        text: String representing a dictionary. Valid representations are:
            Python format: Allows keys and values of arbitrary types:
                Example: "{'a': 2, 1: True}"
            Delimiter separated expressions: Allow string keys and values:
                Example (Variant A): "<key> = <value><delim> ..."
                Example (Variant B): "'<key>': <value><delim> ..."
        delim: A string, which is used as delimiter for the separation of the
            text. This parameter is only used in the DSV format.

    Returns:
        Value of the text as dictionary.

    """
    # Check types of arguments
    check.has_type("first argument 'text'", text, str)
    check.has_type("argument 'delim'", delim, str)

    # Return empty dict if the string is blank
    if not text or not text.strip():
        return dict()

    Num = pp.Word(pp.nums + '.')
    Str = pp.quotedString
    Bool = pp.Or(pp.Word("True") | pp.Word("False"))
    Key = pp.Word(pp.alphas + "_", pp.alphanums + "_.")
    Val = pp.Or(Num | Str | Bool)

    # Try dictionary format "<key> = <value><delim> ..."
    Term = pp.Group(Key + '=' + Val)
    Terms = Term + pp.ZeroOrMore(delim + Term)
    try:
        items = Terms.parseString(text.strip('{}'))
    except pp.ParseException:
        items = None

    # Try dictionary format "'<key>': <value><delim> ..."
    if not items:
        Term = pp.Group(Str + ':' + Val)
        Terms = Term + pp.ZeroOrMore(delim + Term)
        try:
            items = Terms.parseString(text.strip('{}'))
        except pp.ParseException:
            return {}

    # Create dictionary from parsed terms
    d = {}
    for item in items:
        if len(item) == 1:
            # Skip bare delimiter tokens
            if item == delim:
                continue
            d[item] = True
            continue
        try:
            key, val = item[0].strip('\'\"'), ast.literal_eval(item[2])
        except (
                KeyError, NameError, TypeError, ValueError, SyntaxError,
                AttributeError):
            continue
        if isinstance(val, str):
            val = val.strip()
        d[key] = val

    return d
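# Usage sketch (hedged): both DSV variants parse into string keys with
# literally evaluated values. The values are illustrative only.
#
#   >>> as_dict("a = 1, b = 'two'")
#   {'a': 1, 'b': 'two'}
#   >>> as_dict("'a': 1, 'b': 'two'")
#   {'a': 1, 'b': 'two'}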
def distance(
        x: NpArrayLike, y: NpArrayLike, name: str = 'euclid',
        axes: NpAxes = 0, **kwds: Any) -> NpArray:
    """Calculate distance of two arrays along given axes.

    A vector distance function, also known as metric, is a function d(x, y),
    which quantifies the proximity of vectors in a vector space as
    non-negative real numbers. If the distance is zero, then the vectors are
    equivalent with respect to the distance function. Distance functions are
    often used as error, loss or risk functions, to evaluate statistical
    estimations.

    Args:
        x: Any sequence that can be interpreted as a numpy ndarray of
            arbitrary dimension. This includes nested lists, tuples, scalars
            and existing arrays.
        y: Any sequence that can be interpreted as a numpy ndarray with the
            same dimension, shape and datatypes as 'x'.
        name: Name of distance. Accepted values are:
            'minkowski': :term:`Minkowski distance`
                Remark: requires additional parameter 'p'
            'manhattan': :term:`Manhattan distance`
            'euclid': :term:`Euclidean distance` (default)
            'chebyshev': :term:`Chebyshev distance`
            'pmean': :term:`Power mean difference`
                Remark: requires additional parameter 'p'
            'amean': :term:`Mean absolute difference`
            'qmean': :term:`Quadratic mean difference`
        axes: Integer or tuple of integers, that identify the array axes,
            along which the function is evaluated. In a one-dimensional array
            the single axis has ID 0. In a two-dimensional array the axis
            with ID 0 is running across the rows and the axis with ID 1 is
            running across the columns. For the value None, the function is
            evaluated with respect to all axes of the array. The default
            value is 0, which is an evaluation with respect to the first axis
            in the array.
        **kwds: Parameters of the given distance or class of distances. The
            parameters are documented within the respective 'dist' functions.

    Returns:
        :class:`numpy.ndarray` of dimension dim(*x*) - len(*axes*).

    """
    # Try to create numpy arrays from 'x' and 'y'
    with contextlib.suppress(TypeError):
        if not isinstance(x, np.ndarray):
            x = np.array(x)
        if not isinstance(y, np.ndarray):
            y = np.array(y)

    # Check types of 'x', 'y' and 'axes'
    check.has_type("'x'", x, np.ndarray)
    check.has_type("'y'", y, np.ndarray)
    check.has_type("'axes'", axes, (int, tuple))

    # Check dimensions of 'x' and 'y'
    if x.shape != y.shape:
        raise ValueError("arrays 'x' and 'y' cannot be broadcast together")

    # Evaluate function
    fname = _DIST_PREFIX + name.lower()
    return this.call_attr(fname, x, y, axes=axes, **kwds)
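# Usage sketch (hedged, the individual distance functions are resolved via
# this.call_attr and live elsewhere in this module): for two flat sequences
# the default Euclidean distance along axis 0 should reduce to a scalar, e.g.
# for x = [1, 2] and y = [3, 4] the expected value is sqrt(8), about 2.828.
#
#   >>> float(distance([1, 2], [3, 4]))  # doctest: +SKIP
#   2.8284271247461903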