def infered_substitution_callback(match: re.Match) -> str:
    """Substitution callback: replace the match with capture group 2, upper-cased."""
    captured = match.group(2)
    return captured.upper()
def return_visible_part(match: re.Match):
    """Extract only the 'visible_name' named capture group from the match."""
    visible = match.group('visible_name')
    return visible
def _matches_absolute_time_spec(time: datetime.datetime, match: re.Match): time_from = datetime.datetime.fromisoformat(match.group(1)) time_to = datetime.datetime.fromisoformat(match.group(2)) return time_from <= time <= time_to
def _sub_globs(match: re.Match) -> str: if match.group(0) == '*': return '[^/]*' elif match.group(0) == '**/': return '(?:.*/)?' assert False, match
def _parse_endswith(self, match: re.Match):
    """Narrow `self.query` with an endswith filter built from the captured pair.

    Group 1 names the field, group 2 is the required suffix.
    """
    field_name, suffix = match.group(1), match.group(2)
    condition = self.get_field(field_name).endswith(suffix)
    self.query = self.query.filter(condition)
def substitute_handler(match: re.Match):
    """Replace a path-parameter placeholder with its URL-quoted value.

    Also records the parameter name in `consumed_params` so the caller can
    tell which params were substituted into the path.
    """
    param_name = match.group('param_name')
    consumed_params.add(param_name)
    value = str(path_params[param_name])
    # `quote` leaves "/" unescaped by default; pass safe='' so it IS escaped.
    return urllib.parse.quote(value, safe='')
def parse_param(match: re.Match) -> str:
    """Strip the 2-character prefix and 1-character suffix (e.g. '${' and '}')."""
    whole = match.group(0)
    return whole[2:-1]
def add_rule(name, match: re.Match):
    """Record a rule entry: rules[name][group 2] = int(group 1).

    Creates (or resets a falsy) per-name bucket in the module-level `rules`
    mapping before inserting.
    """
    existing = rules.get(name)
    if not existing:
        rules[name] = {}
    key = match.group(2)
    rules[name][key] = int(match.group(1))
def replacer(match: re.Match):
    """Collapse any match starting with '/' to a single space; keep others as-is."""
    matched = match.group(0)
    if not matched.startswith('/'):
        return matched
    # note: a space and not an empty string
    return " "
def _entities_from_regex_match(
    match: Match, domain: Domain, extractor_name: Optional[Text]
) -> List[Dict[Text, Any]]:
    """Extracts the optional entity information from the given pattern match.

    If no entities are specified or if the extraction fails, then an empty
    list is returned.

    Args:
        match: a match produced by `self.pattern`
        domain: the domain
        extractor_name: A extractor name which should be added for the entities

    Returns:
        some list of entities
    """
    entities_str = match.group(ENTITIES)
    if entities_str is None:
        # No entity payload was present in the matched line at all.
        return []

    try:
        parsed_entities = json.loads(entities_str)
        if not isinstance(parsed_entities, dict):
            # A valid JSON value that is not an object (e.g. a list or number)
            # is treated the same way as a decode failure below.
            raise ValueError(
                f"Parsed value isn't a json object "
                f"(instead parser found '{type(parsed_entities)}')"
            )
    except (JSONDecodeError, ValueError) as e:
        # Best-effort parsing: warn and continue with no entities rather than
        # failing the whole story parse.
        rasa.shared.utils.io.raise_warning(
            f"Failed to parse arguments in line '{match.string}'. "
            f"Failed to decode parameters as a json object (dict). "
            f"Make sure the intent is followed by a proper json object (dict). "
            f"Continuing without entities. "
            f"Error: {e}",
            docs=DOCS_URL_STORIES,
        )
        parsed_entities = dict()

    # validate the given entity types
    if domain:
        entity_types = set(parsed_entities.keys())
        unknown_entity_types = entity_types.difference(domain.entities)
        if unknown_entity_types:
            # Drop only the unknown entity types and keep the rest.
            rasa.shared.utils.io.raise_warning(
                f"Failed to parse arguments in line '{match.string}'. "
                f"Expected entities from {domain.entities} "
                f"but found {unknown_entity_types}. "
                f"Continuing without unknown entity types. ",
                docs=DOCS_URL_STORIES,
            )
            parsed_entities = {
                key: value
                for key, value in parsed_entities.items()
                if key not in unknown_entity_types
            }

    # convert them into the list of dictionaries that we expect
    entities: List[Dict[Text, Any]] = []
    default_properties = {}
    if extractor_name:
        default_properties = {EXTRACTOR: extractor_name}

    for entity_type, entity_values in parsed_entities.items():
        # A scalar value is normalized to a one-element list so both
        # `"entity": "v"` and `"entity": ["v1", "v2"]` payloads are accepted.
        if not isinstance(entity_values, list):
            entity_values = [entity_values]
        for entity_value in entity_values:
            # Start/end offsets point at the whole ENTITIES group in the
            # matched line, not at the individual value.
            entities.append(
                {
                    ENTITY_ATTRIBUTE_TYPE: entity_type,
                    ENTITY_ATTRIBUTE_VALUE: entity_value,
                    ENTITY_ATTRIBUTE_START: match.start(ENTITIES),
                    ENTITY_ATTRIBUTE_END: match.end(ENTITIES),
                    **default_properties,
                }
            )
    return entities
def pair_replace(match: re.Match) -> str:
    """Evaluate the captured '<a> <op> <b>' expression via `calc`, as a string."""
    left = int(match.group(1))
    right = int(match.group(3))
    operator = match.group(2)
    return str(calc(left, right, operator))
async def pixiv_follow(message: Message, regexp: Match):
    """Follow the pixiv user whose id was captured in group 1, then confirm."""
    uid = int(regexp.group(1))
    await pixiv.followUser(uid)
    await message.reply(f'Successfully followed user {uid}.')
def repl_translate_url(m: re.Match):
    """Rewrite the matched URL to its CDN equivalent via `get_cdn_url`."""
    original_url = m.group(0)
    return get_cdn_url(original_url)
def _parse_le(self, match: re.Match):
    """Narrow `self.query` with a 'field <= value' filter.

    Group 1 names the field; group 2 is the raw value, coerced via
    `self._parse_value` before comparison.
    """
    target_field = self.get_field(match.group(1))
    value = self._parse_value(target_field, match.group(2))
    self.query = self.query.filter(target_field <= value)
def snake_case_replacer(match: re.Match) -> str:
    """Insert an underscore between the two characters of the match."""
    pair = match.group(0)
    return f"{pair[0]}_{pair[1]}"
def output_inline_math(self, m: re.Match) -> str:
    """Render inline math from whichever alternative capture group matched.

    `or` is kept so group(2) is only consulted when group(1) is empty/None.
    """
    content = m.group(1) or m.group(2)
    return self.renderer.inline_math(content)
def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
    """Convert a matched numeric token to a number.

    If the 'floatpart' group matched, the whole token goes through
    `parse_float`; otherwise it is parsed as an int with base auto-detection
    (`int(..., 0)` accepts 0x/0o/0b prefixes).
    """
    token = match.group()
    is_float = bool(match.group("floatpart"))
    return parse_float(token) if is_float else int(token, 0)
def output_block_math(self, m: re.Match) -> str:
    """Render block math from group 1 or 2, falling back to an empty string."""
    content = m.group(1) or m.group(2) or ""
    return self.renderer.block_math(content)
def replace_wildcard_for_re(match: re.Match):
    """Turn a wildcard token into a named, non-greedy regex capture group.

    The name is the token with its first 2 and last 3 characters stripped.
    """
    token = match.group()
    name = token[2:-3]
    return "(?P<" + name + ">.*?)"
def output_latex_environment(self, m: re.Match) -> str:
    """Render a LaTeX environment: group 1 is its name, group 2 its body."""
    env_name = m.group(1)
    env_body = m.group(2)
    return self.renderer.latex_environment(env_name, env_body)
def _int_group(m: Match, index: int) -> int: g: AnyStr = m.group(index) return int(g) if g else None
def _sub_internal_link(self, m: re.Match) -> str: ref = m.group(1) return f'<a href="bword://{html.escape(ref)}">{ref}</a>'
def _subber(m: re.Match) -> str: return m.group(1)
def _parse_contains(self, match: re.Match):
    """Narrow `self.query` with a 'field contains value' filter.

    Group 1 names the field, group 2 is the raw substring to look for.
    """
    field_name = match.group(1)
    value = match.group(2)
    condition = self.get_field(field_name).contains(value)
    self.query = self.query.filter(condition)