    def test_should_be_able_to_set_tag(self):
        doc = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[E.note(TOKEN_1)]))
        lines = _get_all_lines(doc)
        tokens = list(doc.get_tokens_of_line(lines[0]))
        token = tokens[0]
        doc.set_tag(token, TAG_1)
        assert doc.get_tag(token) == TAG_1

    def test_should_be_able_to_set_tag_with_attribute(self):
        doc = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[E.note(TOKEN_1)]),
            tag_to_tei_path_mapping={TAG_1: 'div[@tag="tag1"]'})
        lines = _get_all_lines(doc)
        tokens = list(doc.get_tokens_of_line(lines[0]))
        token = tokens[0]
        doc.set_tag(token, TAG_1)
        assert doc.get_tag(token) == TAG_1
    def annotate(
        self, structured_document: GrobidTrainingTeiStructuredDocument
    ) -> GrobidTrainingTeiStructuredDocument:
        # find tokens that look like line numbers, using the configured thresholds
        line_number_tokens = iter_find_line_number_tokens(
            structured_document,
            min_line_number=self.config.min_line_number,
            max_line_number_gap=self.config.max_line_number_gap,
            line_number_ratio_threshold=self.config.line_number_ratio_threshold
        )
        # mark each detected line-number token with the configured tag
        for token in line_number_tokens:
            structured_document.set_tag(token, self.config.tag)
        return structured_document
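
# A minimal sketch of the `config` object that annotate() above relies on: it only
# needs the three thresholds forwarded to iter_find_line_number_tokens() and the tag
# to apply. The class name, default values and the 'line_no' tag below are assumptions
# for illustration only, not the library's actual API.
from dataclasses import dataclass


@dataclass
class LineNumberAnnotatorConfig:  # hypothetical name
    # thresholds forwarded as-is to iter_find_line_number_tokens(...)
    min_line_number: int = 10
    max_line_number_gap: int = 5
    line_number_ratio_threshold: float = 0.7
    # tag applied to every detected line-number token
    tag: str = 'line_no'

# usage sketch, assuming `annotator` exposes the annotate() method shown above:
#     annotator.config = LineNumberAnnotatorConfig()
#     structured_document = annotator.annotate(structured_document)
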
    def test_should_be_able_get_root_with_updated_single_token_tag(self):
        doc = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[E.note(TOKEN_1)]))
        lines = _get_all_lines(doc)
        tokens = list(doc.get_tokens_of_line(lines[0]))
        token = tokens[0]
        doc.set_tag(token, TAG_1)
        root = doc.root
        front = root.find('./text/front')
        child_elements = list(front)
        assert [c.tag for c in child_elements] == [TAG_1]
        assert [c.text for c in child_elements] == [TOKEN_1]

    def test_should_be_able_get_root_with_updated_single_token_tag_with_attribute(
            self):
        doc = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[E.note(TOKEN_1)]),
            tag_to_tei_path_mapping={TAG_1: 'div[@tag="tag1"]'})
        lines = _get_all_lines(doc)
        tokens = list(doc.get_tokens_of_line(lines[0]))
        token = tokens[0]
        doc.set_tag(token, TAG_1)
        root = doc.root
        front = root.find('./text/front')
        child_elements = list(front)
        assert [c.tag for c in child_elements] == ['div']
        assert [c.attrib for c in child_elements] == [{'tag': 'tag1'}]
        assert [c.text for c in child_elements] == [TOKEN_1]
    def test_should_not_return_preserved_tag_as_tag_and_update_preserved_tag(
            self):
        original_tei_xml = _tei(front_items=[E.note(TOKEN_1)])
        LOGGER.debug('original tei xml: %s', _to_xml(original_tei_xml))
        doc = GrobidTrainingTeiStructuredDocument(original_tei_xml,
                                                  preserve_tags=True,
                                                  tag_to_tei_path_mapping={})
        LOGGER.debug('doc: %s', doc)
        lines = _get_all_lines(doc)
        token1 = list(doc.get_tokens_of_line(lines[0]))[0]
        assert not doc.get_tag(token1)
        doc.set_tag(token1, TAG_1)

        root = doc.root
        front = root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        assert _to_xml(front) == (
            '<front><{tag1}>{token1}</{tag1}></front>'.format(token1=TOKEN_1,
                                                              tag1=TAG_1))
    def test_should_preserve_space_after_lb_in_updated_root(self):
        doc = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[TOKEN_1,
                              E(TeiTagNames.LB), ' ' + TOKEN_2]))
        lines = _get_all_lines(doc)

        line1_tokens = list(doc.get_tokens_of_line(lines[0]))
        doc.set_tag(line1_tokens[0], TAG_1)

        line2_tokens = list(doc.get_tokens_of_line(lines[1]))
        doc.set_tag(line2_tokens[-1], TAG_1)

        root = doc.root
        front = root.find('./text/front')
        child_elements = list(front)
        assert [c.tag for c in child_elements] == [TAG_1]
        assert _to_xml(child_elements[0]) == (
            '<{tag1}>{token1}<{lb}/> {token2}</{tag1}>'.format(
                tag1=TAG_1, token1=TOKEN_1, token2=TOKEN_2, lb=TeiTagNames.LB))
def _simple_document_with_tagged_token_lines(
    lines: List[List[Tuple[str, str]]]
) -> GrobidTrainingTeiStructuredDocument:
    # build a TEI document containing one text line, followed by a line break,
    # for each input line of (tag, token) pairs
    tei_items = []
    for line in lines:
        tei_items.append(' '.join(token for _, token in line))
        tei_items.append(E.lb())
    doc = GrobidTrainingTeiStructuredDocument(
        _tei(tei_items), container_node_path=DEFAULT_CONTAINER_NODE_PATH)
    doc_lines = [
        line for page in doc.get_pages()
        for line in doc.get_lines_of_page(page)
    ]
    # apply the requested tag (if any) to the corresponding token of the parsed document
    for line, doc_line in zip(lines, doc_lines):
        for (tag, token), doc_token in zip(line,
                                           doc.get_tokens_of_line(doc_line)):
            assert token == doc.get_text(doc_token)
            if tag:
                doc.set_tag(doc_token, tag)
    return doc
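
# Short usage sketch for the helper above (illustrative, not one of the original
# tests): each inner list is one line of the document and each tuple is a
# (tag, token) pair, where a falsy tag leaves the token untagged. TAG_1, TOKEN_1
# etc. are the module's existing test constants; the combination below is an
# assumption for demonstration.
def test_simple_document_with_tagged_token_lines_usage_sketch():
    doc = _simple_document_with_tagged_token_lines([
        [(TAG_1, TOKEN_1), (TAG_1, TOKEN_2)],  # first line: both tokens tagged TAG_1
        [('', TOKEN_3)]                        # second line: token left untagged
    ])
    doc_lines = [
        line for page in doc.get_pages()
        for line in doc.get_lines_of_page(page)
    ]
    first_token = list(doc.get_tokens_of_line(doc_lines[0]))[0]
    assert doc.get_tag(first_token) == TAG_1
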
    def test_should_not_include_line_feed_in_tag_if_previous_token_has_different_tag(
            self):
        original_root = _tei(front_items=[TOKEN_1, '\n ' + TOKEN_2])
        LOGGER.debug('original_root: %s', _to_xml(original_root))
        doc = GrobidTrainingTeiStructuredDocument(original_root)
        lines = _get_all_lines(doc)

        line1_tokens = list(doc.get_all_tokens_of_line(lines[0]))
        space_tokens = [t for t in line1_tokens if isinstance(t, TeiSpace)]
        assert space_tokens
        for token in space_tokens:
            doc.set_tag(token, TAG_1)
        doc.set_tag(line1_tokens[0], TAG_1)
        doc.set_tag(line1_tokens[-1], TAG_2)

        LOGGER.debug('line1_tokens: %s', line1_tokens)

        root = doc.root
        front = root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        assert _to_xml(front) == (
            '<front><{tag1}>{token1}</{tag1}>'
            '\n <{tag2}>{token2}</{tag2}></front>'.format(tag1=TAG_1,
                                                          tag2=TAG_2,
                                                          token1=TOKEN_1,
                                                          token2=TOKEN_2))
    def test_should_remove_untagged_including_line_feed(self):
        doc = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[
                TOKEN_1,
                E(TeiTagNames.LB), ' ' + TOKEN_2 + ' ' + TOKEN_3
            ]))
        lines = _get_all_lines(doc)

        line1_tokens = list(doc.get_tokens_of_line(lines[0]))
        doc.set_tag(line1_tokens[0], None)

        line2_tokens = list(doc.get_tokens_of_line(lines[1]))
        doc.set_tag(line2_tokens[-1], TAG_1)

        doc.remove_all_untagged()

        root = doc.root
        front = root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        assert _to_xml(front) == (
            '<front><{tag1}>{token3}</{tag1}></front>'.format(tag1=TAG_1,
                                                              token3=TOKEN_3))
    def test_should_not_include_space_in_tag_if_previous_token_has_different_tag(
            self):
        doc = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[TOKEN_1,
                              E(TeiTagNames.LB), ' ' + TOKEN_2]))
        lines = _get_all_lines(doc)

        line1_tokens = list(doc.get_tokens_of_line(lines[0]))
        doc.set_tag(line1_tokens[0], TAG_1)

        line2_tokens = list(doc.get_tokens_of_line(lines[1]))
        doc.set_tag(line2_tokens[-1], TAG_2)

        root = doc.root
        front = root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        assert _to_xml(front) == ('<front><{tag1}>{token1}<{lb}/></{tag1}>'
                                  ' <{tag2}>{token2}</{tag2}></front>'.format(
                                      tag1=TAG_1,
                                      tag2=TAG_2,
                                      token1=TOKEN_1,
                                      token2=TOKEN_2,
                                      lb=TeiTagNames.LB))