def apply_qn_regex(name: str, table_qn_regex: Pattern) -> Optional[Match]:
    return table_qn_regex.match(name)
def _find_first(cls, regex: Pattern, text: str) -> Optional[Match]:
    return next(regex.finditer(text), None)
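# A minimal usage sketch (not from the original source): next() over
# finditer() yields the first match lazily, or the None default when the
# pattern never matches. `cls` is unused by the body, so None is passed
# here purely for illustration.
import re

assert _find_first(None, re.compile(r"\d+"), "a1b22").group() == "1"
assert _find_first(None, re.compile(r"x"), "abc") is None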
def pattern_matches(s: str, pattern: Pattern = DEFAULT_PATTERN) -> bool:
    """https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS."""
    return bool(pattern.match(s))
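# DEFAULT_PATTERN is not shown above. A plausible ASCII-only approximation
# of the linked PostgreSQL identifier rules (a letter or underscore followed
# by letters, digits, underscores, or dollar signs) would be declared before
# pattern_matches so its default argument can bind to it:
import re

DEFAULT_PATTERN = re.compile(r"[A-Za-z_][A-Za-z0-9_$]*\Z")

assert DEFAULT_PATTERN.match("my_table")
assert not DEFAULT_PATTERN.match("1st_table")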
def fix_pkgconfig_prefix(self, itemlist: typing.List[str], oldprefix: typing.Pattern):
    ret = []
    for item in itemlist:
        ret.append(oldprefix.sub(self.package_folder, item))
    return ret
def principals_with(self, pattern: Pattern) -> List[str]:
    return [
        principal
        for principal in self.get_principal_list()
        if isinstance(principal, str) and pattern.match(principal)
    ]
def search_pat(pat: Pattern) -> Tuple[Optional[Match], int]:
    match = pat.search(target, curpos)
    return match, match.end() if match else len(target) + 1
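# `target` and `curpos` come from an enclosing scope that is not shown here;
# a minimal sketch of that context (the names are kept, the values invented):
import re

target = "alpha beta gamma"
curpos = 6  # resume scanning after "alpha "

m, end = search_pat(re.compile(r"beta"))
assert m is not None and end == 10  # "beta" spans indices 6..10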
def iter_all_matching(path: pathlib.Path, pattern: typ.Pattern) -> typ.Iterator[pathlib.Path]:
    return filter(lambda p: pattern.match(str(p)), iter_tree(path))
def _resolve_id_from_arg(arg_string: str, regex: typing.Pattern) -> hikari.Snowflake:
    if match := regex.match(arg_string):
        arg_string = match.group(1)
    # Convert whatever remains (the captured ID or the raw argument).
    return hikari.Snowflake(arg_string)
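# Usage sketch, assuming a Discord-style mention pattern (the concrete regex
# paired with this helper in the original code is not shown):
import re

MENTION_REGEX = re.compile(r"<@!?(\d+)>")
# _resolve_id_from_arg("<@!123456789>", MENTION_REGEX) -> hikari.Snowflake(123456789)
# _resolve_id_from_arg("123456789", MENTION_REGEX)     -> hikari.Snowflake(123456789)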
def matches(self, pattern: Pattern):
    return not pattern.pattern or pattern.search(self.raw_content)
def _accept(pattern: Pattern, negate: bool, values: Tuple[str, ...]) -> bool:
    return any(v and pattern.search(v) for v in values) != negate
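# The `!= negate` comparison acts as a logical XOR: with negate=False the
# call accepts when any value matches; with negate=True it accepts only
# when none do. A small demonstration:
import re

_pat = re.compile(r"err")
assert _accept(_pat, False, ("ok", "error")) is True
assert _accept(_pat, True, ("ok", "error")) is False
assert _accept(_pat, True, ("ok", "fine")) is True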
def extract_video_ids(regex: Pattern, s: str):
    """Find all video IDs from video URLs in a text string using a regex."""
    return [m.group(1) for m in regex.finditer(s)]
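# Usage sketch with a simplified YouTube URL pattern (an assumption; the
# regex actually used alongside this helper is not shown):
import re

_yt = re.compile(r"youtube\.com/watch\?v=([\w-]{11})")
assert extract_video_ids(_yt, "https://www.youtube.com/watch?v=dQw4w9WgXcQ") == ["dQw4w9WgXcQ"]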
def match(self, pattern: Pattern) -> Optional[Match]:
    self._match = pattern.match(self._line)
    return self._match
def csv_stack(dframe: pd.DataFrame, stackmatcher: Pattern, stackseparator: str, newcolumn: str) -> pd.DataFrame:
    """Reshape an incoming dataframe by stacking/pivoting.

    The dataframe object will be modified in-place.

    Args:
        dframe (pd.DataFrame): Data to reshape
        stackmatcher (Pattern): Regular expression that matches columns
            to be stacked.
        stackseparator (str): String to use for splitting column names
        newcolumn (str): Name of new column containing the latter part
            of the stacked column names.

    Returns:
        pd.DataFrame
    """
    if isinstance(stackmatcher, str):
        stackmatcher = re.compile(stackmatcher)
    if newcolumn in dframe:
        raise ValueError("Column name %s already exists in the data" % newcolumn)
    tuplecols = []
    dostack = False
    colstostack = 0
    logger.info(
        "Will stack columns matching '%s' with separator '%s'",
        stackmatcher,
        stackseparator,
    )
    logger.info("Name of new identifying column will be '%s'", newcolumn)
    nostackcolumnnames = []
    for col in dframe.columns:
        if stackmatcher.match(col):
            tuplecols.append(tuple(col.split(stackseparator)))
            colstostack += 1
            dostack = True
        else:
            tuplecols.append((col, ""))
            nostackcolumnnames.append(col)
    logger.info("Found %d out of %d columns to stack", colstostack, len(dframe.columns))

    if dostack:
        # Convert to MultiIndex columns
        dframe.columns = pd.MultiIndex.from_tuples(tuplecols, names=["", newcolumn])
        # Stack the MultiIndex columns; this will add a lot of rows to
        # our ensemble and condense the number of columns
        dframe = dframe.stack()
        # The values from non-MultiIndex columns must be propagated to
        # the rows that emerged from the stacking. If you use the
        # 'all' pivottype, you will get some intentional NaN values in
        # the MultiIndex columns.
        dframe[nostackcolumnnames] = dframe[nostackcolumnnames].fillna(method="ffill")
        dframe = dframe.reset_index()
        # Now we have rows that do not belong to any well; delete those rows
        dframe = dframe[dframe[newcolumn] != ""]
        # And delete a byproduct of our reshaping (this is the index
        # prior to stacking)
        del dframe["level_0"]
    return dframe.reset_index(drop=True)
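# Usage sketch: stacking per-well columns such as "WOPT:A-1" and "WOPT:A-2"
# into a single "WOPT" column keyed by a new "WELL" column. The column names
# and the module-level `logger` below are invented for illustration:
import logging
import re

import pandas as pd

logger = logging.getLogger(__name__)  # csv_stack expects a module-level logger

df = pd.DataFrame({
    "DATE": ["2020-01-01", "2020-02-01"],
    "WOPT:A-1": [10.0, 20.0],
    "WOPT:A-2": [5.0, 15.0],
})
stacked = csv_stack(df, re.compile(r"WOPT:"), ":", "WELL")
# Expect roughly the columns DATE, WELL and WOPT, one row per (date, well).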
def not_match_pattern(data: str, pattern: Pattern) -> bool:
    return not pattern.match(data)
def _is_file_matching_filter(pattern: Pattern, full_path: str):
    return pattern.match(full_path) is not None
def convert_python_source(source: str, rex: Pattern = re.compile(r"[uU]('.*?')")) -> str:
    # remove Unicode literal prefixes
    warnings.warn('convert_python_source() is deprecated.',
                  RemovedInSphinx40Warning, stacklevel=2)
    return rex.sub('\\1', source)
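# Demonstration (as comments, since calling the function emits a deprecation
# warning that requires Sphinx's RemovedInSphinx40Warning to be importable):
#
#     convert_python_source("s = u'text' + U'more'")
#     # -> "s = 'text' + 'more'"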
k = 0
cam = 0
cap.release()
cv2.destroyAllWindows()
testfile = urllib.URLopener()
from urllib2 import urlopen
import re
urlpath = urlopen("http://" + ip + ":8000/SEND/")
string = urlpath.read().decode('utf-8')
# the pattern actually creates duplicates in the list
pattern = re.compile(r'[\w\s,!@#$%^&*()=-]*[.][\w]{1,4}"')
filelist = pattern.findall(string)
print("Received files are:\n")
for filenames in filelist:
    print(filenames[:-1])
    fullfilenames = os.path.join(r"C:\VICINITY\RECIEVE", filenames[:-1])
    testfile.retrieve("http://" + ip + ":8000/SEND/" + filenames[:-1], fullfilenames)
print("Go to the Received folder to see the files.")
cam = 1
cap = cv2.VideoCapture(0)
# This fragment sits inside the main camera loop; the break below exits
# that enclosing (not shown) loop.
if cv2.waitKey(10) & 0xFF == ord('q'):
    break
def create_index(self, builder: Builder, group_entries: bool = True,
                 _fixre: Pattern = re.compile(r'(.*) ([(][^()]*[)])')
                 ) -> List[Tuple[str, List[Tuple[str, Any]]]]:
    """Create the real index from the collected index entries."""
    new: Dict[str, List] = {}

    def add_entry(word: str, subword: str, main: str, link: bool = True,
                  dic: Dict = new, key: str = None) -> None:
        # Force the word to be unicode if it's an ASCII bytestring.
        # This will solve problems with unicode normalization later.
        # For instance the RFC role will add bytestrings at the moment
        word = str(word)
        entry = dic.get(word)
        if not entry:
            dic[word] = entry = [[], {}, key]
        if subword:
            add_entry(subword, '', main, link=link, dic=entry[1], key=key)
        elif link:
            try:
                uri = builder.get_relative_uri('genindex', fn) + '#' + tid
            except NoUri:
                pass
            else:
                entry[0].append((main, uri))

    domain = cast(IndexDomain, self.env.get_domain('index'))
    for fn, entries in domain.entries.items():
        # new entry types must be listed in directives/other.py!
        for type, value, tid, main, index_key in entries:  # noqa: B007
            try:
                if type == 'single':
                    try:
                        entry, subentry = split_into(2, 'single', value)
                    except ValueError:
                        entry, = split_into(1, 'single', value)
                        subentry = ''
                    add_entry(entry, subentry, main, key=index_key)
                elif type == 'pair':
                    first, second = split_into(2, 'pair', value)
                    add_entry(first, second, main, key=index_key)
                    add_entry(second, first, main, key=index_key)
                elif type == 'triple':
                    first, second, third = split_into(3, 'triple', value)
                    add_entry(first, second + ' ' + third, main, key=index_key)
                    add_entry(second, third + ', ' + first, main, key=index_key)
                    add_entry(third, first + ' ' + second, main, key=index_key)
                elif type == 'see':
                    first, second = split_into(2, 'see', value)
                    add_entry(first, _('see %s') % second, None,
                              link=False, key=index_key)
                elif type == 'seealso':
                    first, second = split_into(2, 'see', value)
                    add_entry(first, _('see also %s') % second, None,
                              link=False, key=index_key)
                else:
                    logger.warning(__('unknown index entry type %r'), type, location=fn)
            except ValueError as err:
                logger.warning(str(err), location=fn)

    # sort the index entries for same keyword.
    def keyfunc0(entry: Tuple[str, str]) -> Tuple[bool, str]:
        main, uri = entry
        return (not main, uri)  # show main entries at first

    for indexentry in new.values():
        indexentry[0].sort(key=keyfunc0)
        for subentry in indexentry[1].values():
            subentry[0].sort(key=keyfunc0)  # type: ignore

    # sort the index entries
    def keyfunc(entry: Tuple[str, List]) -> Tuple[Tuple[int, str], str]:
        key, (void, void, category_key) = entry
        if category_key:
            # using specified category key to sort
            key = category_key
        lckey = unicodedata.normalize('NFD', key.lower())
        if lckey.startswith('\N{RIGHT-TO-LEFT MARK}'):
            lckey = lckey[1:]
        if lckey[0:1].isalpha() or lckey.startswith('_'):
            # put non-symbol characters at the following group (1)
            sortkey = (1, lckey)
        else:
            # put symbols at the front of the index (0)
            sortkey = (0, lckey)
        # ensure a deterministic order *within* letters by also sorting on
        # the entry itself
        return (sortkey, entry[0])

    newlist = sorted(new.items(), key=keyfunc)

    if group_entries:
        # fixup entries: transform
        #   func() (in module foo)
        #   func() (in module bar)
        # into
        #   func()
        #     (in module foo)
        #     (in module bar)
        oldkey = ''
        oldsubitems: Dict[str, List] = None
        i = 0
        while i < len(newlist):
            key, (targets, subitems, _key) = newlist[i]
            # cannot move if it has subitems; structure gets too complex
            if not subitems:
                m = _fixre.match(key)
                if m:
                    if oldkey == m.group(1):
                        # prefixes match: add entry as subitem of the
                        # previous entry
                        oldsubitems.setdefault(m.group(2), [[], {}, _key])[0].\
                            extend(targets)
                        del newlist[i]
                        continue
                    oldkey = m.group(1)
                else:
                    oldkey = key
            oldsubitems = subitems
            i += 1

    # sort the sub-index entries
    def keyfunc2(entry: Tuple[str, List]) -> str:
        key = unicodedata.normalize('NFD', entry[0].lower())
        if key.startswith('\N{RIGHT-TO-LEFT MARK}'):
            key = key[1:]
        if key[0:1].isalpha() or key.startswith('_'):
            key = chr(127) + key
        return key

    # group the entries by letter
    def keyfunc3(item: Tuple[str, List]) -> str:
        # hack: mutating the subitems dicts to a list in the keyfunc
        k, v = item
        v[1] = sorted(((si, se) for (si, (se, void, void)) in v[1].items()),
                      key=keyfunc2)
        if v[2] is None:
            # now calculate the key
            if k.startswith('\N{RIGHT-TO-LEFT MARK}'):
                k = k[1:]
            letter = unicodedata.normalize('NFD', k[0])[0].upper()
            if letter.isalpha() or letter == '_':
                return letter
            else:
                # get all other symbols under one heading
                return _('Symbols')
        else:
            return v[2]

    return [(key_, list(group)) for (key_, group) in groupby(newlist, keyfunc3)]
def latest_version(lines: List[str], regex: Pattern) -> Optional[str]:
    for line in lines:
        match = regex.search(line)
        if match:
            return match.groupdict()["version"]
    return None
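# Usage sketch; the pattern must define a named group "version" (the
# concrete regex used by the caller is not shown):
import re

_ver = re.compile(r'__version__\s*=\s*"(?P<version>[^"]+)"')
assert latest_version(['# header', '__version__ = "1.2.3"'], _ver) == "1.2.3"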
def __parse_stat_int(self, page: str, regexp: Pattern, group: str) -> int:
    match = regexp.search(page)
    if match:
        return int(match.group(group))
    return 0
def matches(self, field: str, pat: Pattern) -> bool:
    if field == 'id':
        return bool(pat.pattern == str(self.id))
    if field == 'title':
        return pat.search(self.name or self.title) is not None
    return False
def search(regex: Pattern, s: AnyStr) -> AnyStr:
    # Assumes the pattern always matches; raises AttributeError otherwise.
    return regex.search(s).group(1)
def actions_with(self, pattern: Pattern) -> List[str]:
    return [
        action
        for action in self.get_action_list()
        if isinstance(action, str) and pattern.match(action)
    ]
def _normalize(self, name: str, *, pattern: Pattern = RE_NORMALIZE,
               substitution: str = RE_NORMALIZE_SUBSTITUTION) -> str:
    return pattern.sub(substitution, name)
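# RE_NORMALIZE and RE_NORMALIZE_SUBSTITUTION are module-level constants that
# must be declared before this method; plausible values in the spirit of
# PEP 503 name normalization (an assumption, not the original definitions):
import re

RE_NORMALIZE = re.compile(r"[-_.]+")
RE_NORMALIZE_SUBSTITUTION = "-"
# _normalize(self, "My_Project..Name") would then return "My-Project-Name".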
def resources_with(self, pattern: Pattern) -> List[str]:
    return [
        resource
        for resource in self.get_resource_list()
        if isinstance(resource, str) and pattern.match(resource)
    ]
def consume(self, pattern: Pattern) -> Optional[Match]:
    matched = pattern.match(self.remain)
    if matched:
        self.step(len(matched.group(0)))
    return matched
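# A minimal self-contained sketch of the scanner this method belongs to
# (the class shape is an assumption: `remain` holds the unconsumed input
# and `step` advances past consumed characters):
import re
from typing import Match, Optional, Pattern

class _Scanner:
    def __init__(self, text: str) -> None:
        self.remain = text

    def step(self, n: int) -> None:
        self.remain = self.remain[n:]

    consume = consume  # reuse the function defined above as a method

s = _Scanner("123abc")
assert s.consume(re.compile(r"\d+")).group(0) == "123"
assert s.remain == "abc"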
def find_site_positions_regex(regex: Pattern, seq: str) -> List[int]:
    """Finds the start positions of all matches of the regex in the sequence."""
    positions = [m.start() for m in regex.finditer(seq.upper())]
    return positions
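# Usage sketch with the EcoRI recognition site GAATTC (positions are
# 0-based start indices; note that finditer does not report overlaps):
import re

assert find_site_positions_regex(re.compile("GAATTC"), "ccgaattcgg") == [2]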
def exact_match(regexp: Pattern, text: str, trim: bool):
    match = regexp.search(text)
    length = len(text.strip()) if trim else len(text)
    return ConditionalMatch(match, match and len(match.group()) == length)
def _matches(regex: Pattern, value: str) -> bool:
    return regex.match(value) is not None  # type: ignore  # caller ensures str
def find_by_contents(entries: set[FileEntry], pattern: Pattern) -> set[FileEntry]:
    return {e for e in entries if pattern.search(e.contents())}