Example No. 1
def passes_filter(self, account: re.Pattern, sender: re.Pattern,
                  conversation: re.Pattern, message: re.Pattern,
                  flags: re.Pattern):
    if account.fullmatch(self.account) is None:
        return False
    if sender.fullmatch(self.sender) is None:
        return False
    if conversation.fullmatch(self.conversation) is None:
        return False
    if message.fullmatch(self.message) is None:
        return False
    if flags.fullmatch(self.flags) is None:
        return False
    return True
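A minimal sketch of how this method could be exercised, assuming a message-like object that carries the account, sender, conversation, message and flags attributes (the enclosing class is not shown above):

import re
from types import SimpleNamespace

# Stand-in for the (unshown) object that owns passes_filter.
msg = SimpleNamespace(account="alice@example.com", sender="bob",
                      conversation="general", message="hello", flags="")

match_all = re.compile(r".*")
# All five patterns must fully match their fields for the filter to pass.
print(passes_filter(msg, match_all, re.compile(r"bob"),
                    match_all, match_all, match_all))  # True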
Example No. 2
def parse_skeleton_file_name(base_path: str, file_name: str,
                             matcher: re.Pattern):
    match = matcher.fullmatch(file_name)
    (setup_idx, camera_idx, subject_idx, replication_idx,
     action_label_idx) = match.groups()
    return SkeletonMetaData(os.path.join(base_path, file_name), int(setup_idx),
                            int(camera_idx), int(subject_idx),
                            int(replication_idx), int(action_label_idx))
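The pattern and the SkeletonMetaData type are not shown above; a plausible sketch, assuming NTU RGB+D style file names (SsssCcccPpppRrrrAaaa.skeleton) and a namedtuple stand-in:

import os
import re
from collections import namedtuple

# Hypothetical stand-in for the SkeletonMetaData class used above.
SkeletonMetaData = namedtuple(
    "SkeletonMetaData",
    ["path", "setup", "camera", "subject", "replication", "action_label"])

# Assumed layout: setup, camera, subject, replication, action indices.
matcher = re.compile(r"S(\d{3})C(\d{3})P(\d{3})R(\d{3})A(\d{3})\.skeleton")
meta = parse_skeleton_file_name("/data", "S001C002P003R002A013.skeleton", matcher)
print(meta.action_label)  # 13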
Example No. 3
def evaluate_line(
        line: str,
        regex: re.Pattern) -> Tuple[bool, Optional[list], Optional[str]]:
    m = regex.fullmatch(line.strip())
    policy = []
    if not isinstance(m, re.Match):
        return False, None, None
    for i in range(1, regex.groups + 1):
        policy.append(m.group(i))
    return True, policy, m.group(regex.groups)
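For illustration, with a hypothetical pattern whose groups describe a password-policy line (the actual pattern used by the original project is not shown):

import re

policy_re = re.compile(r"(\d+)-(\d+) ([a-z]): ([a-z]+)")
ok, policy, last_group = evaluate_line("1-3 a: abcde\n", policy_re)
print(ok, policy, last_group)  # True ['1', '3', 'a', 'abcde'] abcde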
Example No. 4
def main(account: str, start_time: datetime, pattern: Pattern,
         host: List[str]):
    """
    Export Datadog metric names.
    """

    exporter = MetricNamesExporter(account, start_time)
    exporter.hosts = host
    exporter.connect()
    exporter.export()
    for metric in filter(lambda m: pattern.fullmatch(m), exporter.metrics):
        print(metric)
Example No. 5
    def __init__(self, matchers: List[Matcher], filename: str,
                 pattern: re.Pattern):
        self.matches = []
        self.matchers = matchers

        m = pattern.fullmatch(filename)
        if m is None:
            raise ValueError("Filename did not match pattern.")
        if len(m.groups()) != len(matchers):
            raise IndexError("Not as many matches as matchers.")

        for i in range(len(matchers)):
            self.matches.append(Match(matchers[i], m, i))
Example No. 6
def match_target(
    name: str, pattern: re.Pattern, keys: List[str]
) -> Tuple[bool, Dict[str, str]]:
    """
    From a target name, attempt to match against a pattern and resolve a set
    of key names.
    """

    data: Dict[str, str] = defaultdict(str)
    result = pattern.fullmatch(name)

    if result is None:
        return False, data

    for idx, key in enumerate(keys):
        data[key] = result.group(1 + idx)

    return True, data
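A short usage sketch with an illustrative pattern and key list (not taken from the original project); each capture group is mapped to a key by position:

import re

# Hypothetical target scheme "<project>-<arch>".
pattern = re.compile(r"([a-z]+)-([a-z0-9_]+)")
ok, data = match_target("kernel-x86_64", pattern, ["project", "arch"])
print(ok, dict(data))  # True {'project': 'kernel', 'arch': 'x86_64'}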
Example No. 7
def regex_parse(regex: re.Pattern, content: str, groups: ty.List[int]):
    match = regex.fullmatch(content)
    if not match:
        raise ValueError()
    group_tuple = match.groups()
    return [group_tuple[i] for i in groups]
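For example, selecting a subset of capture groups by index (the pattern here is chosen purely for illustration):

import re

date_re = re.compile(r"(\d{4})-(\d{2})-(\d{2})")
# Indices are 0-based into match.groups(), unlike re.Match.group().
print(regex_parse(date_re, "2021-05-17", [0, 2]))  # ['2021', '17']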
Example No. 8
def _analyze_word_with_pos(ans: List[Analysis],
                           start_pos: str,
                           regex: re.Pattern,
                           lemma_idx: int,
                           word: str,
                           infl_pos: str = None,
                           lemma_pred=lambda l: True):
    if m := regex.fullmatch(word):
        parsed = list(m.groups())

        def rec(i: int, pos: str, obj: Analysis):
            if i >= len(parsed):
                return [obj]

            part = parsed[i]
            if not part:
                return rec(i + 1, pos, obj)

            if i < lemma_idx:
                part = part + "-"
                obj["PREFIX"] = part

            elif i > lemma_idx:
                part = "-" + part

                if pos == "v" and part in {
                        "-Daq", "-vo'", "-mo'", "-vaD", "-'e'"
                }:
                    pos = "n"

                if "SUFFIX" not in obj:
                    obj["SUFFIX"] = {}

                obj["SUFFIX"][SUFFIX_TYPES[(part, pos)]] = part
                j, m = _next_morphem(parsed, i)
                if m in {"be'", "qu'"}:
                    obj["SUFFIX"][SUFFIX_TYPES[(part, pos)]] += m
                    i = j

            else:
                obj["LEMMA"] = part

            new_pos = pos

            if pos == "v" and part in ["-ghach", "-wI'"]:
                new_pos = "n"

            if i == lemma_idx and infl_pos:
                new_pos = infl_pos

            if (part + ":" + pos) in WORD_INDEX:
                objs = []
                for entry in WORD_INDEX[part + ":" + pos]:
                    new_obj = copy.deepcopy(obj)
                    new_obj["PARTS"].append(entry.id)
                    if i == lemma_idx:
                        #new_obj["BOQWIZ"] = entry

                        new_obj["XPOS"] = _get_xpos(entry)
                        new_obj["BOQWIZ_POS"] = entry.part_of_speech
                        new_obj["BOQWIZ_ID"] = entry.id

                        if not lemma_pred(entry):
                            continue

                    objs += rec(i + 1, new_pos, new_obj)

                return objs

            else:
                obj["PARTS"].append(part)

                return rec(i + 1, new_pos, obj)

        start_obj: Analysis = {
            "WORD": word,
            "POS": start_pos.upper(),
            "XPOS": "UNK",
            "BOQWIZ_POS": "?",
            "BOQWIZ_ID": "?",
            "PARTS": [],
            "LEMMA": "",
        }

        objs = rec(0, start_pos, start_obj)

        ans += objs
Example No. 9
def process(data: str, reg: re.Pattern) -> str:
    result = 'NO'
    if reg.fullmatch(data):
        result = 'YES'
    return result
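Any compiled pattern can be passed in; the one below (even-length binary strings) is only illustrative:

import re

even_binary = re.compile(r"(?:[01]{2})*")
print(process("1010", even_binary))  # YES
print(process("101", even_binary))   # NO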
Example No. 10
def validate_re(pattern: re.Pattern, value: str) -> bool:
    return bool(pattern.fullmatch(value))
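Because fullmatch() only succeeds when the entire string matches, this check is stricter than one based on match(); for instance:

import re

hex_color = re.compile(r"#[0-9a-fA-F]{6}")
print(validate_re(hex_color, "#ff00aa"))   # True
print(validate_re(hex_color, "#ff00aa;"))  # False; a match()-based check would accept this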