# Example #1 (score: 0)
def remove_key_words(text: str, offsets: List[Offset],
                     rate: int) -> Tuple[str, List[Offset]]:
    """
    Modify text to remove some key words, making the learning harder and the model more robust.
    :param text: original paragraph as a string
    :param offsets: list of extracted offsets
    :param rate: chance as an integer between 1 and 100 that a key word is removed
    :return: a tuple (new_text, new_offsets), offsets recomputed on the new text
    """
    words_to_delete_offsets: List[Offset] = key_words_matcher.get_matches(
        text=text, tag="TO_DELETE")

    # nothing to delete, or no entity to re-locate afterwards -> no-op
    if (len(words_to_delete_offsets) == 0) or (len(offsets) == 0):
        return text, offsets

    # remember the surface form of each entity so it can be re-located
    # (and its type restored) once deletions have shifted the offsets
    detected_spans = dict()
    for offset in offsets:
        span_text = text[offset.start:offset.end]
        if len(span_text) > 0:
            detected_spans[span_text] = offset.type

    if len(detected_spans) == 0:
        return text, offsets

    original_content_offsets_matcher = AcoraMatcher(content=list(
        detected_spans.keys()),
                                                    ignore_case=False)

    cleaned_text = list()
    start_selection_offset = 0
    for offset in words_to_delete_offsets:
        if randint(1, 99) < rate:
            # "- 1" also removes the character (a space) just before the
            # keyword; max() guards against a negative slice end when the
            # keyword starts the paragraph (offset.start == 0), which would
            # otherwise keep the keyword and drop the last character
            cleaned_text.append(text[start_selection_offset:max(offset.start - 1, 0)])
            start_selection_offset = offset.end
        else:
            # keyword kept: copy the text up to and including the keyword
            cleaned_text.append(text[start_selection_offset:offset.end])
            start_selection_offset = offset.end

    cleaned_text.append(text[start_selection_offset:len(text)])

    cleaned_text = ''.join(cleaned_text)

    # re-locate the known entities inside the shortened text
    updated_offsets = original_content_offsets_matcher.get_matches(
        text=cleaned_text, tag="UNKNOWN")

    offsets_to_return = list()

    # restore original offset type name
    for offset in updated_offsets:
        span_text = cleaned_text[offset.start:offset.end]
        type_name = detected_spans[span_text]
        offsets_to_return.append(Offset(offset.start, offset.end, type_name))

    return cleaned_text, offsets_to_return
def complete_case_annotations(spacy_docs: List[Doc],
                              entity_typename: Dict[str, str]) -> List[Doc]:
    """
    Complete/Normalize annotations from the spacy tagger.

    :param spacy_docs: the spacy annotations
    :param entity_typename: the dictionary with each occurence type
    :returns: the updated spacy_annotions (for convenience only, as the update is inplace)
    """

    if len(spacy_docs) > 0:
        matcher = AcoraMatcher(content=list(entity_typename.keys()),
                               ignore_case=True)

        # pair each document's text with an (initially empty) offset list so
        # addresses can be searched over the whole block of paragraphs at once
        doc_text, empty_offsets = zip(*[(spacy_doc.text, [])
                                        for spacy_doc in spacy_docs])
        document_addresses_offsets = find_address_in_block_of_paragraphs(
            texts=list(doc_text), offsets=list(empty_offsets))

        for spacy_doc, doc_address_offset in zip(spacy_docs,
                                                 document_addresses_offsets):

            matches = matcher.get_matches(text=spacy_doc.text, tag="UNKNOWN")
            matcher_offsets = list()
            for offset in matches:
                span_text = spacy_doc.text[offset.start:offset.end]
                logger.debug(span_text)
                # replace the generic "UNKNOWN" tag with the real type name;
                # lookup is lower-cased because the matcher ignores case
                offset.type = entity_typename[span_text.lower()]
                matcher_offsets.append(offset)

            # merge matcher hits with the address offsets found above
            matcher_offsets_normalized = normalize_offsets(
                offsets=matcher_offsets + doc_address_offset)

            spacy_matcher_offset: List[Span] = list()
            for offset in matcher_offsets_normalized:
                # https://spacy.io/usage/linguistic-features#section-named-entities
                span_doc: Span = spacy_doc.char_span(offset.start,
                                                     offset.end,
                                                     label=offset.type)
                if span_doc is not None:
                    # span will be none if the word is incomplete
                    # (char offsets that do not align with token boundaries)
                    spacy_matcher_offset.append(span_doc)

                else:
                    logger.error(
                        f"ERROR char offset [{spacy_doc.text[offset.start:offset.end]}] "
                        f"from [{spacy_doc.text}]")

            # overwrite the doc entities in place with the normalized set
            spacy_doc.ents = spacy_matcher_offset  # all_offsets

    return spacy_docs
class CourtName:
    # shared across instances: names loaded from the open-data extraction
    court_names = set()
    matcher = None

    def __init__(self):
        """
        Build a matcher of French court names based on a list available in open data
        https://www.data.gouv.fr/fr/datasets/les-statistiques-par-juridiction/#_
        (the list has more data, the one store is an extraction)
        """
        court_list_path = get_config_default()["french_court_names"]

        with open(court_list_path) as court_file:
            for raw_line in court_file:
                stripped = raw_line.strip()
                if stripped:
                    self.court_names.add(stripped)
        # sanity check on the size of the open-data extraction
        assert len(self.court_names) > 1000
        self.matcher = AcoraMatcher(content=list(self.court_names),
                                    ignore_case=True)

    def get_matches(self, text: str) -> List[Offset]:
        """
        Find occurrences of French court names in a text.
        :param text: original text
        :return: list of offsets tagged COURT
        """
        return self.matcher.get_matches(text=text, tag="COURT")
class PostalCodeCity:
    matcher = None

    def __init__(self):
        """
        Build a matcher of postal code + city pairs read from a CSV-like file.
        """
        entries = list()
        source_path = get_config_default()["postal_code_city"]

        with open(source_path) as source:
            for raw_line in source:
                columns = raw_line.split(";")
                city_name = columns[1].strip()
                # very short city names are skipped
                if len(city_name) >= 3:
                    code = columns[2].strip()
                    # index both "75001 Paris" and "Paris (75001)" forms
                    entries.append(f"{code} {city_name}")
                    entries.append(f"{city_name} ({code})")
        assert len(entries) > 1000
        # drop the first entry — presumably built from the source file's
        # header row; TODO confirm against the data file
        entries.pop(0)
        self.matcher = AcoraMatcher(list(entries),
                                    ignore_case=True)

    def get_matches(self, text: str) -> List[Offset]:
        """
        Find occurrences of postal code and city names in a text.
        :param text: original text
        :return: list of offsets tagged ADDRESS
        """
        return self.matcher.get_matches(text=text, tag="ADDRESS")
    def __init__(self):
        """
        Build a matcher of French court names based on a list available in open data
        https://www.data.gouv.fr/fr/datasets/les-statistiques-par-juridiction/#_
        (the list has more data, the one store is an extraction)
        """
        # NOTE(review): this fragment duplicates CourtName.__init__ above and
        # looks like a stray paste; it populates self.court_names/self.matcher.
        config = get_config_default()
        file = config["french_court_names"]

        with open(file) as f1:
            for line in f1.readlines():
                clean_text = line.strip()
                # keep only non-empty lines
                if len(clean_text) > 0:
                    self.court_names.add(clean_text)
        # sanity check on the size of the open-data extraction
        assert len(self.court_names) > 1000
        self.matcher = AcoraMatcher(content=list(self.court_names),
                                    ignore_case=True)
 def __init__(self, path_trainset: str, threshold_occurrences: int,
              type_name_to_not_load: List[str], load_data: bool):
     """
     Build an Acora matcher based on the dict of frequent entities.

     :param path_trainset: path to a file storing the entity
     :param threshold_occurrences: minimum number of occurences of the entity
     :param type_name_to_not_load: type of entities that should not be loaded to avoid fake match
     :param load_data: boolean to decide if data should be loaded or not
     """
     if not load_data:
         # dummy content that should never appear in real text: the matcher
         # stays valid but effectively finds nothing
         self.matcher = AcoraMatcher(content=["!@#$%^&*()"],
                                     ignore_case=True)
         return

     self.frequent_entities_dict = self.__read_frequent_entities(
         path_trainset=path_trainset,
         threshold_occurrences=threshold_occurrences,
         type_name_to_not_load=type_name_to_not_load)
     self.matcher = AcoraMatcher(
         content=list(self.frequent_entities_dict.keys()),
         ignore_case=True)
    def __init__(self):
        """
        Build a matcher of first name based on a French names dictionary
        """
        # NOTE(review): this fragment duplicates PostalCodeCity.__init__ and
        # the docstring above looks copy-pasted — the code builds a postal
        # code / city matcher, not a first-name one.
        postal_code_city_list = list()
        config = get_config_default()
        file = config["postal_code_city"]

        with open(file) as f1:
            for line in f1.readlines():
                fields = line.split(";")
                city = fields[1].strip()
                # very short city names are skipped
                if len(city) >= 3:
                    postal_code = fields[2].strip()
                    # index both "75001 Paris" and "Paris (75001)" forms
                    postal_code_city_list.append(postal_code + " " + city)
                    postal_code_city_list.append(city + " (" + postal_code +
                                                 ")")
        assert len(postal_code_city_list) > 1000
        # drops the first entry — presumably built from the source file's
        # header row; TODO confirm against the data file
        postal_code_city_list.pop(0)
        self.matcher = AcoraMatcher(list(postal_code_city_list),
                                    ignore_case=True)
 def get_matcher_of_clerks_from_headers(self) -> AcoraMatcher:
     """
     Build a case-sensitive matcher over the clerk ('greffier') names
     found in the current header.

     :return: an AcoraMatcher over the clerk names (case-sensitive)
     """
     clerk_names = self.current_header['greffier']
     return AcoraMatcher(content=clerk_names, ignore_case=False)
# Example #9 (score: 0)
    def __init__(self, ignore_case: bool):
        """
        Build a matcher of first name based on a French names dictionary
        :type ignore_case: True to ignore case during matching
        :return: Acora matcher
        """
        # NOTE(review): this fragment duplicates FirstName.__init__ below;
        # 'firs_name' is a typo for 'first_names' (kept as-is here).
        config = get_config_default()

        file1 = config["first_name_dict_1"]
        file2 = config["first_name_dict_2"]

        firs_name = set()
        with open(file1) as f1:
            for line in f1.readlines():
                fields = line.split(";")
                # all names start with an upper-case letter and finish with a space
                text = fields[3].strip()
                if len(text) >= 4:
                    firs_name.add(text)

        with open(file2, encoding="ISO-8859-1") as f2:
            for line in f2.readlines():
                fields = line.split(";")
                text = fields[0].strip()
                if len(text) >= 4:
                    firs_name.add(get_title_case(text))

        # common words / place names that also exist as first names and
        # would generate false positives
        to_remove = [
            "Elle", "France", "Mercedes", "Paris", "Alger", "Oran", "Sans"
        ]

        for item_to_remove in to_remove:
            firs_name.remove(item_to_remove)

        self.first_name_dict = firs_name
        self.matcher = AcoraMatcher(content=list(self.first_name_dict),
                                    ignore_case=ignore_case)
    def get_matcher_of_partie_pm_from_headers(self) -> AcoraMatcher:
        """
        Build a case-sensitive matcher over the parties from the header
        that have no hidden (redacted) form.

        :return: an AcoraMatcher over those party names (case-sensitive)
        """
        full_names = (self.current_header['defendeur_fullname'] +
                      self.current_header['demandeur_fullname'])
        hidden_names = (self.current_header['defendeur_hidden'] +
                        self.current_header['demandeur_hidden'])

        # keep only the parties without a hidden counterpart
        names_to_match = [full for full, hidden in zip(full_names, hidden_names)
                          if hidden is None]

        return AcoraMatcher(content=names_to_match, ignore_case=False)
 def test_builder(cls, content: Dict[str, str]):
     """
     Build an instance of this object for tests.
     In particular, don't try to read saved data
     :param content: a dictionary of entities to load
     :return: an instance of FrequentEntities class
     """
     # build an empty instance (load_data=False skips any file reading) ...
     instance = FrequentEntities(path_trainset="",
                                 threshold_occurrences=0,
                                 load_data=False,
                                 type_name_to_not_load=[])
     # ... then inject the provided entities and rebuild the matcher on them
     instance.frequent_entities_dict = content
     instance.matcher = AcoraMatcher(content=list(content.keys()),
                                     ignore_case=True)
     return instance
    def get_matcher_of_partie_pp_from_headers(self) -> AcoraMatcher:
        """
        Build a case-sensitive matcher over the parties from the header
        that DO have a hidden (redacted) form.

        :return: an AcoraMatcher over those party names (case-sensitive)
        """
        # this way of init assure that the matcher doesn't expect binary data
        # this may happen if we load empty arrays through update function for instance
        full_names = (self.current_header['defendeur_fullname'] +
                      self.current_header['demandeur_fullname'])
        hidden_names = (self.current_header['defendeur_hidden'] +
                        self.current_header['demandeur_hidden'])

        # keep only the parties that have a hidden counterpart
        names_to_match = [full for full, hidden in zip(full_names, hidden_names)
                          if hidden is not None]

        return AcoraMatcher(content=names_to_match, ignore_case=False)
# Example #13 (score: 0)
def get_all_name_variation(texts: List[str], offsets: List[List[Offset]],
                           threshold_span_size: int) -> List[List[Offset]]:
    """
    Search for any variation of known entities
    :param texts: original text
    :param offsets: discovered offsets
    :param threshold_span_size: minimum size of a name (first / last) to be added to the list
    :return: discovered offsets
    """
    person_spans = list()
    org_spans = list()

    for current_offsets, text in zip(offsets, texts):
        for offset in current_offsets:
            span = text[offset.start:offset.end].strip()
            if not span:
                continue

            if offset.type == "PERS":
                # index the full name plus each sufficiently long part
                person_spans.append(span)
                first_name, last_name = get_first_last_name(span)
                first_name, last_name = first_name.strip(), last_name.strip()
                if len(first_name) > threshold_span_size:
                    person_spans.append(first_name)
                if len(last_name) > threshold_span_size:
                    person_spans.append(last_name)

            if offset.type == "ORGANIZATION":
                # index the full name plus its form without the company type
                org_spans.append(span)
                short_name = remove_org_type(span).strip()
                if short_name and short_name != span:
                    org_spans.append(short_name)

    person_matcher = AcoraMatcher(content=person_spans, ignore_case=True)
    org_matcher = AcoraMatcher(content=org_spans, ignore_case=True)

    # per paragraph: matcher hits of both kinds plus the original offsets
    return [person_matcher.get_matches(text=text, tag="PERS") +
            org_matcher.get_matches(text=text, tag="ORGANIZATION") +
            current_offsets
            for text, current_offsets in zip(texts, offsets)]
# Example #14 (score: 0)
                                        flags=regex.VERSION1
                                        | regex.IGNORECASE)


def remove_org_type(original_text: str) -> str:
    """
    Strip the corporation type from a company name.
    :param original_text: Name of company included its type
    :return: the cleaned string
    """
    without_type = remove_org_type_pattern.sub("", original_text)
    return without_type.strip()


# Civility / role key words searched (tag TO_DELETE) by remove_key_words();
# case-sensitive, so lower-case variants are listed explicitly.
key_words_matcher = AcoraMatcher(content=[
    "Monsieur", "Madame", "Mme", "monsieur", "madame", "la société", "Me",
    "Maitre", "Maître"
],
                                 ignore_case=False)


def remove_key_words(text: str, offsets: List[Offset],
                     rate: int) -> Tuple[str, List[Offset]]:
    """
    Modify text to remove some key words, making the learning harder and the model more robust.
    :param text: original paragraph as a string
    :param offsets: list of extracted offsets
    :param rate: chance as an integer between 1 and 100 that a key word is removed
    :return: a tuple (new_text, offsets)
    """
    # NOTE(review): this copy of remove_key_words is truncated — it computes
    # the matches below but never uses them and implicitly returns None,
    # contradicting the documented Tuple return. Likely a paste artifact;
    # the complete implementation appears earlier in this file.
    words_to_delete_offsets: List[Offset] = key_words_matcher.get_matches(
        text=text, tag="TO_DELETE")
class FrequentEntities(object):
    # AcoraMatcher over frequent entity surface forms (set in __init__)
    matcher = None
    # maps lower-cased entity text -> entity type name (set when load_data)
    frequent_entities_dict = None

    def __init__(self, path_trainset: str, threshold_occurrences: int,
                 type_name_to_not_load: List[str], load_data: bool):
        """
        Build an Acora matcher based on the dict of frequent entities.

        :param path_trainset: path to a file storing the entity
        :param threshold_occurrences: minimum number of occurences of the entity
        :param type_name_to_not_load: type of entities that should not be loaded to avoid fake match
        :param load_data: boolean to decide if data should be loaded or not
        """
        if load_data:
            self.frequent_entities_dict = self.__read_frequent_entities(
                path_trainset=path_trainset,
                threshold_occurrences=threshold_occurrences,
                type_name_to_not_load=type_name_to_not_load)
            self.matcher = AcoraMatcher(content=list(
                self.frequent_entities_dict.keys()),
                                        ignore_case=True)
        else:
            # dummy content that should never appear in real text: keeps the
            # matcher usable while effectively disabling it
            self.matcher = AcoraMatcher(content=["!@#$%^&*()"],
                                        ignore_case=True)

    @classmethod
    def test_builder(cls, content: Dict[str, str]):
        """
        Build an instance of this object for tests.
        In particular, don't try to read saved data
        :param content: a dictionary of entities to load
        :return: an instance of FrequentEntities class
        """
        instance = FrequentEntities(path_trainset="",
                                    threshold_occurrences=0,
                                    load_data=False,
                                    type_name_to_not_load=[])
        # inject the provided entities and rebuild the matcher on them
        instance.frequent_entities_dict = content
        instance.matcher = AcoraMatcher(content=list(content.keys()),
                                        ignore_case=True)
        return instance

    @staticmethod
    def __read_frequent_entities(
            path_trainset: str, threshold_occurrences: int,
            type_name_to_not_load: List[str]) -> Dict[str, str]:
        """
        Analyze recognized entities and return those over a defined threshold in a dict entity -> type_name
        """
        try:
            with open(path_trainset, 'rb') as f:
                data = pickle.load(file=f)

            def get_default_dict_value() -> Dict[str, set]:
                # one empty case-id set per known entity type
                return dict([(token, set()) for token in entity_types])

            # entity text (lower-cased) -> {type_name: set of case ids}
            exhaustive_dict = dict()

            for case_id, text, entities in data:
                for start_offset, end_offset, type_name in entities:
                    entity_span: str = text[start_offset:end_offset].lower()
                    # build the default lazily, only for unseen spans
                    # (dict.get(k, default) would construct it on every call)
                    current_count = exhaustive_dict.get(entity_span)
                    if current_count is None:
                        current_count = get_default_dict_value()
                        exhaustive_dict[entity_span] = current_count
                    # count distinct cases, not raw occurrences
                    current_count[type_name].add(case_id)

            final_list = list()

            for entity_span, dict_counts in exhaustive_dict.items():

                # keep the type under which the span was seen in most cases
                max_count = 0
                max_type_name = None
                for type_name, case_ids in dict_counts.items():
                    current_count = len(case_ids)
                    if current_count > max_count:
                        max_type_name = type_name
                        max_count = current_count

                if (max_count > threshold_occurrences) and \
                        (len(entity_span) > 3) \
                        and (max_type_name not in type_name_to_not_load):
                    final_list.append((entity_span, max_type_name))

            return dict(final_list)

        except Exception:
            # best-effort: a missing or unreadable train set only disables
            # frequent-entity matching (was a bare `except:`, which also
            # swallowed KeyboardInterrupt / SystemExit)
            warnings.warn("Empty dict of frequent entities", Warning)
            return dict()

    def get_matches(self, text: str) -> List[Offset]:
        """
        Find matches of frequent entities in the provided text
        :param text: original text
        :return: a list of offsets
        """
        match_results = self.matcher.findall(text)
        entities = list()
        for match_text, start_offset in match_results:
            end_offset = start_offset + len(match_text)
            entity_span = text[start_offset:end_offset]

            # end_offset is one character after the end of the selection,
            # so it can be equal to the last character offset of the text + 1
            last_char_ok = (end_offset
                            == len(text)) or (not text[end_offset].isalnum())

            # the match must start on a word boundary and begin with an
            # upper-case letter or a digit
            first_char_ok = (start_offset == 0 or not text[start_offset - 1].isalnum()) and \
                            (text[start_offset].isupper() or text[start_offset].isdecimal())

            if first_char_ok and last_char_ok:
                type_name = self.frequent_entities_dict[entity_span.lower()]
                entities.append(Offset(start_offset, end_offset, type_name))

        return entities
# Example #16 (score: 0)
class FirstName:
    # set of known first names loaded from the dictionaries (set in __init__)
    first_name_dict = None
    matcher = None

    def __init__(self, ignore_case: bool):
        """
        Build a matcher of first name based on a French names dictionary
        :param ignore_case: True to ignore case during matching
        """
        config = get_config_default()

        file1 = config["first_name_dict_1"]
        file2 = config["first_name_dict_2"]

        first_names = set()
        with open(file1) as f1:
            for line in f1.readlines():
                fields = line.split(";")
                # all names start with an upper-case letter and finish with a space
                text = fields[3].strip()
                if len(text) >= 4:
                    first_names.add(text)

        with open(file2, encoding="ISO-8859-1") as f2:
            for line in f2.readlines():
                fields = line.split(";")
                text = fields[0].strip()
                if len(text) >= 4:
                    first_names.add(get_title_case(text))

        # common words / place names that also exist as first names and
        # would generate false positives
        to_remove = [
            "Elle", "France", "Mercedes", "Paris", "Alger", "Oran", "Sans"
        ]

        for item_to_remove in to_remove:
            # discard() (unlike remove()) does not raise KeyError when a
            # dictionary file happens not to contain the entry
            first_names.discard(item_to_remove)

        self.first_name_dict = first_names
        self.matcher = AcoraMatcher(content=list(self.first_name_dict),
                                    ignore_case=ignore_case)

    def get_matches(self, text: str) -> List[Offset]:
        """
        Find match of first name in a text
        :param text: original text
        :return: list of offsets
        """
        offsets = self.matcher.get_matches(text=text, tag="PERS")
        # dictionary entries include a trailing space, so shift the end
        # offset back by 1 to exclude it from the reported span
        results = [
            Offset(offset.start, offset.end - 1, offset.type)
            for offset in offsets
        ]
        return results

    def contain_first_names(self, text: str) -> bool:
        """
        Check if a text contains a known first name
        :param text: original text
        :return: True if it contains a first name
        """
        matches = self.get_matches(text=text)
        for offset in matches:
            # accept the match only when it ends the text or is followed
            # (after the matched trailing space) by a non-letter character
            if (offset.end
                    == len(text) - 1) or (not text[offset.end + 1].isalpha()):
                return True

        return False