def map_services(self, match: re.Match) -> str:
    """
    If it matches a host that we know about, change the domain for the
    alternative service. Some hosts need to be proxied instead (such as
    twitter pictures), so they're url encoded and appended to the proxy
    service.
    """
    host = match.group('host')
    dest = SERVICES.get(host)
    if dest is None:
        return match.group(0)
    destname, proxy = dest
    replaced = self.config.get(destname, section='services')
    result = replaced + match.group('rest')
    if proxy:
        url = urlquote(match.group(0))
        result = replaced + url
    # TODO: count parenthesis?
    # Removes a url-encoded comma ('%2C') at the end of a link.
    if result[-3:] == '%2C':
        result = result[:-3] + ','
    return result

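# A minimal usage sketch for map_services. Assumptions (not from the original
# project): the SERVICES table contents, the example domains, and the URL
# pattern below are illustrative; only the 'host'/'rest' group names and the
# (destname, proxy) tuple layout are inferred from the code above.
import re

SERVICES = {
    "twitter.com": ("nitter", False),       # plain domain swap
    "pbs.twimg.com": ("imageproxy", True),  # URL-encode the link and proxy it
}

URL_RE = re.compile(r"https?://(?P<host>[^/\s]+)(?P<rest>\S*)")

# A typical call site would then look something like:
#     text = URL_RE.sub(self.map_services, text)
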
def parens_replace(match: re.Match) -> str:
    opening = match.group(1)
    a = int(match.group(2))
    op = match.group(3)
    b = int(match.group(4))
    closing = match.group(5)
    return opening + str(calc(a, b, op)) + closing

def to_lower_identifier(input: re.Match) -> str:
    if len(input.groups()) == 2:
        return f"{input.group(1)} {input.group(2).lower()}"
    elif len(input.groups()) == 3:
        return f"{input.group(1)}{input.group(2).lower()}{input.group(3)}"
    else:
        raise Exception(f"Unexpected number of groups in {input}")

def _parse_in(self, match: re.Match):
    field = self.get_field(match.group(1))
    values = [
        self._parse_value(field, value.strip(' \'"'))
        for value in match.group(2).split(',')
    ]
    self.query = self.query.filter(field.in_(values))

def replImageResource(m: re.Match):
    numPattern = '%02d' if srcName.find('01_Introduction') != -1 else '%01d'
    imgName = re.sub(r'/image([\d]+)',
                     lambda x: numPattern % int(x[1]),
                     m.group(2))
    imgPath = './resources/' + imgName
    return m.group(1) + ('(%s)' % imgPath)

def _parse_runtime_str(matched: re.Match, rate: Framerate) -> fractions.Fraction:
    """
    _parse_runtime_str parses a runtime string (ex 01:00:00.6) into a fractional
    seconds value.
    """
    # We will always have a 'seconds' group.
    seconds: decimal.Decimal = decimal.Decimal(matched.group("seconds"))

    # Some timecodes may be abbreviated, so the other sections may or may not be
    # present. Gather them, then filter out empty sections.
    groups: List[Optional[str]] = [
        matched.group("section1"),
        matched.group("section2"),
    ]
    groups = [x for x in groups if x]

    # Work backwards to fill in the sections that are present, otherwise the value
    # is '0'.
    minutes = 0
    hours = 0
    if len(groups) >= 1:
        minutes = int(groups[-1])
    if len(groups) >= 2:
        hours = int(groups[-2])

    # Calculate the number of seconds as a decimal, then use our decimal parser to
    # arrive at the correct result.
    seconds = seconds + minutes * _SECONDS_PER_MINUTE + hours * _SECONDS_PER_HOUR
    if matched.group("negative"):
        seconds = -seconds

    return _parse_decimal(seconds, rate)

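# Usage sketch for _parse_runtime_str. The pattern below is an assumption about
# the runtime format (hh:mm:ss.s with the leading sections optional); only the
# group names 'negative', 'section1', 'section2' and 'seconds' are taken from
# the code above.
import re

_RUNTIME_RE = re.compile(
    r"(?P<negative>-)?"
    r"(?:(?P<section1>\d+):)?"
    r"(?:(?P<section2>\d+):)?"
    r"(?P<seconds>\d+(?:\.\d+)?)"
)

# matched = _RUNTIME_RE.fullmatch("01:00:00.6")
# _parse_runtime_str(matched, rate) then works with 1 hour, 0 minutes and 0.6
# seconds (3600.6 seconds in total) before handing off to _parse_decimal.
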
def ParseFromMatch(self, address_offset: int, cfa_sp_offset: int,
                   match: re.Match) -> Tuple[AddressUnwind, int]:
    # The group will be None if the outer non-capturing groups for the (\d+) and
    # (-\d+) expressions are not matched.
    new_cfa_sp_offset, ra_cfa_offset = (int(group) if group else None
                                        for group in match.groups()[:2])

    # Registers are pushed in reverse order by register number so are popped in
    # order. Sort them to ensure the proper order.
    registers = sorted([
        int(register)
        for register in self.register_regex.findall(match.group(3))
        # `UpdateSpAndOrPopRegisters` only supports popping of registers
        # r4 ~ r15. The ignored registers are translated to sp increments by
        # the following calculation on `sp_offset`.
        if int(register) in range(4, 16)
    ] +
        # Also pop lr (ra in breakpad terms) if it was stored.
        ([14] if ra_cfa_offset is not None else []))

    sp_offset = 0
    if new_cfa_sp_offset is not None:
        sp_offset = new_cfa_sp_offset - cfa_sp_offset
        assert sp_offset % 4 == 0
        if sp_offset >= len(registers) * 4:
            # Handles the sub sp, #constant case, and push instructions that push
            # caller-save registers r0-r3 which don't get encoded in the unwind
            # instructions. In the latter case we need to move the stack pointer
            # up to the first pushed register.
            sp_offset -= len(registers) * 4

    return AddressUnwind(address_offset,
                         UnwindType.UPDATE_SP_AND_OR_POP_REGISTERS,
                         sp_offset,
                         tuple(registers)), new_cfa_sp_offset or cfa_sp_offset

def replace(match: re.Match) -> str:
    if captured_sound(match):
        return match.group(0)
    else:
        return "<ruby><rb>{}</rb><rt>{}</rt></ruby>"\
            .format(match.group(1), match.group(2))

def _replace(s: str, m: re.Match, title_level: int = 5) -> str:
    lang = m.group("lang") or "base"
    mark = m.group("mark")
    title = m.group("title")
    body = m.group("body").replace("&lt;", "<").replace("&gt;", ">")

    attrs: List[str] = []
    if mark is not None:
        marks = mark.split(",")
        hl_lines = ",".join([f'"{x}"' for x in marks])
        attrs.append(f"hl_lines=[{hl_lines}]")
        attrs.append("linenos=table")

    block = f"""
```{lang} {{{",".join(attrs)}}}
{body}
```
"""
    if title is not None:
        heading = "#" * title_level
        block = f"{heading} {title}\n" + block

    return s[:m.start()] + block + s[m.end():]

def repl(matchobj: re.Match):
    var_name = matchobj.group(1)
    return (
        # Perform a replacement if the ``var_name`` is in ``vars_``.
        str(vars_[var_name]) if var_name in vars_
        # Otherwise, perform no replacement.
        else matchobj.group(0))

def _add_method(template: JavaTemplate, comment: JavaComment, match_: re.Match) -> None:
    method = match_.group(0).split('(')[0]
    if match := re.match(r'(\w+\s+)(\w+\s+)(.+)\s+(.*)', method):
        mod = match.group(1)
        return_type = match.group(3)
        name = match.group(4)
    elif match := re.match(r'(\w+\s+)(.+)\s+(.*)', method):
        mod = match.group(1)
        if mod.strip() == 'abstract':
            mod = 'package-private'
        return_type = match.group(2)
        name = match.group(3)
    elif match := re.match(r'(.+)\s+(.*)', method):
        mod = 'package-private'
        return_type = match.group(1)
        name = match.group(2)
    else:
        mod = 'package-private'
        return_type = 'null'
        name = method
    template.methods.append(
        JavaMethod(
            all_name=match_.group(0),
            mod=mod,
            return_type=return_type if return_type != 'void' else 'null',
            name=name,
            args=match_.group(5),
            comment=comment))

def _replace(self, match: re.Match, replacement: str, operation: Callable) -> str:
    groups = match.groupdict()
    if '%s' in replacement:
        token = groups.get('token')
        prefix = ''
        suffix = ''
        if token:
            # Looks like prefix/suffix groups matter only if token-group
            # was in a pattern
            prefix = groups.get('prefix', '')
            suffix = groups.get('suffix', '')
        else:
            token = match.group()
        alias = operation(token)
        replacement = replacement.replace('%s', prefix + alias + suffix)
    replacement = self._fix_replacement_groups(replacement)
    try:
        return match.expand(replacement)
    except IndexError as error:
        raise TransformerError(f'Invalid replacement: {error}')

def f(m: re.Match):
    from ..ft_global import user_vars

    word = m.group().lower()
    if word in user_vars and not isinstance(user_vars[word], Function):
        return str(user_vars[word])
    else:
        return m.group()

def _rm_dot(match: re.Match) -> str:
    """Strip a trailing '.' from the captured field, if present."""
    if match.group(1).strip()[-1] == '.':
        val = match.group(1).strip()[:-1]
    else:
        val = match.group(1)
    return f'{val}'

def _sub_resolver(match: re.Match):
    result = match.group(1)
    if len(match.group(3)) > 0:
        if len(result):
            result += ' '
        result += match.group(3)
    return result

def path_replacer(match: re.Match) -> str:
    kind = match.group("kind")
    source = match.group("path").strip("\"'")
    p = directory.joinpath(source).expanduser().absolute()
    value = json.dumps({"absolute": str(p), "original": source})
    return f"!lobotomy.inject_{kind} '{value}'"

def _non_eml_val(match: re.Match) -> str:
    """If we don't have an email in an email field."""
    val = match.group(1).split(':', 1)
    if val:
        val = f"{val[0]}: \n"
    else:
        val = match.group(1)
    return val

def replImageLinks(m: re.Match):
    imgPath = m.group(2)
    width = run('identify -format "%%w" %s'
                % path.join(path.dirname(targetName), imgPath))
    o = m.group(0)
    if int(width) > 600:
        o = '[%s](%s)' % (o, imgPath)
    return o

def is_valid_1(m: re.Match) -> bool:
    count = 0
    for char in m.group("password"):
        if char == m.group("letter"):
            count += 1
            if count > int(m.group("max")):
                return False
    return count >= int(m.group("min"))

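# Usage sketch for is_valid_1, assuming password-policy lines of the form
# "1-3 a: abcde"; the pattern is an assumption consistent with the groups the
# function reads ('min', 'max', 'letter', 'password').
import re

POLICY_RE = re.compile(r"(?P<min>\d+)-(?P<max>\d+) (?P<letter>\w): (?P<password>\w+)")

m = POLICY_RE.fullmatch("1-3 a: abcde")
assert m is not None and is_valid_1(m)  # 'a' occurs once, which is within 1-3
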
def map_to_query_variable(matched: re.Match):
    """ Map regular expression match to QueryVariable """
    lhs, rhs = matched.group(1), matched.group(2)
    return QueryVariable(
        str(lhs).strip(),
        str(rhs).strip() if rhs is not None else None)

def b32_to_sha1(match: re.Match) -> str:
    hash_ = match.group(1)
    if len(hash_) == 40:
        return match.group(0)
    elif len(hash_) == 32:
        return "urn:btih:" + base64.b32decode(hash_.upper()).hex()
    return match.group(0)

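# Usage sketch for b32_to_sha1. The magnet-link pattern below is an assumption;
# the function itself only needs group(1) to hold the infohash and group(0) to
# hold the full "urn:btih:..." span.
import base64
import re

BTIH_RE = re.compile(r"urn:btih:(\w+)")

link = "magnet:?xt=urn:btih:" + "C" * 32  # 32-char base32 infohash (illustrative)
print(BTIH_RE.sub(b32_to_sha1, link))     # infohash rewritten as 40-char hex sha1
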
async def cmd_do_execute(self, message: types.Message, regexp_command: re.Match):
    if not self.check_preconditions(message):
        return
    cmd = regexp_command.group(1)
    params = regexp_command.group(2)
    await self.execute_script(message, cmd, params)

def make_anchor_tag(match: re.Match) -> str:
    """Function applied to WIKI_LINK matches."""
    link = match.group(1)
    trail = match.group(3)
    anchor = match.group(2)
    if not anchor:
        anchor = link
    anchor += trail
    return anchor

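# Usage sketch for make_anchor_tag, assuming wiki-style links such as
# [[Page|label]]s. The WIKI_LINK pattern below is illustrative, not from the
# original source: group 1 is the page, group 2 an optional display anchor,
# group 3 a trailing word fragment.
import re

WIKI_LINK = re.compile(r"\[\[([^\]|]+)(?:\|([^\]]*))?\]\](\w*)")

print(WIKI_LINK.sub(make_anchor_tag, "See [[Dog|dog]]s and [[Cat]]s."))
# -> "See dogs and Cats."
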
def repl(target: discord.Member, match: re.Match):
    if attr := match.group(1):
        print(attr)
        if attr.startswith("_") or "." in attr:
            return str(target)
        try:
            return str(getattr(target, attr))
        except AttributeError:
            return str(target)
    # Fall back to the member itself if no attribute was captured.
    return str(target)

def _restore_tags(match: re.Match) -> str:
    tag, key = match.groups()
    tag = tag.lower()
    if tag == "cite":
        return f'<cite idref="{key}"/>'
    elif tag in _TAGS:
        return f'<db_xref db="{_TAGS[tag]}" dbkey="{key}"/>'
    elif tag not in ["mim", "pmid", "pubmed"]:
        logger.warning(match.group(0))

def _surround_char(match: re.Match, surrounded_by=" "):
    """Surround each regexp match with a given character.

    :param match: The Match object of a regexp.
    :param surrounded_by: the character used to surround the match.
    :return: str
    """
    # For a single-character match, centering in a width-3 field pads it on
    # both sides with `surrounded_by`, e.g. '+' -> ' + '.
    return match.group(0).center(3, surrounded_by)

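# Usage sketch for _surround_char: padding single-character operators with
# spaces. The operator pattern is illustrative, not from the original source.
import re

print(re.sub(r"[-+*/=]", _surround_char, "a+b=c"))  # -> "a + b = c"
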
def _a_conv(m: re.Match):
    text = m.group(2)
    url = m.group(1)
    if not url.startswith("http"):
        url = base + url
    if icode_links:
        return f"[`{text}`]({url})"
    else:
        return f"[{text}]({url})"

def ParseFromMatch(self, address_offset: int, cfa_sp_offset: int,
                   match: re.Match) -> Tuple[AddressUnwind, int]:
    register = int(match.group(1))
    new_cfa_sp_offset = int(match.group(2))
    sp_offset = new_cfa_sp_offset - cfa_sp_offset
    assert sp_offset % 4 == 0
    return AddressUnwind(address_offset,
                         UnwindType.RESTORE_SP_FROM_REGISTER,
                         sp_offset,
                         (register, )), new_cfa_sp_offset

def __replace_overline_chars(cls, match: re.Match) -> str:
    """
    Overline character replacer function.

    :param match: Match object for characters in parentheses
    :return: Unicode replacement string, with or without the titlo character
    """
    text = replace_chars(
        match.group(1).upper(), cls.OVERLINE_ASCII, cls.OVERLINE_UNICODE)
    return ("҇" + text) if match.group(1).islower() else text

def _validate_prerelease(capice_version: re.Match, model_version: re.Match):
    matches = ['minor', 'patch', 'prerelease']
    for m in matches:
        if capice_version.group(m) != model_version.group(m):
            raise ValueError(
                f'CAPICE {m} version {capice_version.string} does not match the model {m} '
                f'version {model_version.string} (should match for pre-releases)!'
            )

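# Usage sketch for _validate_prerelease. The semver pattern is an assumption;
# the function only requires the named groups 'minor', 'patch' and 'prerelease'.
import re

SEMVER_RE = re.compile(
    r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?:-(?P<prerelease>[\w.]+))?"
)

_validate_prerelease(SEMVER_RE.fullmatch("5.1.0-rc1"),
                     SEMVER_RE.fullmatch("5.1.0-rc1"))  # same pre-release: passes
# Comparing "5.1.0-rc1" against "5.1.0-rc2" would raise ValueError instead.
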