def test_should_be_able_to_set_tag(self):
     """Setting a tag on a token makes get_tag return that tag."""
     document = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1)]))
     first_line = _get_all_lines(document)[0]
     first_token = list(document.get_tokens_of_line(first_line))[0]
     document.set_tag(first_token, TAG_1)
     assert document.get_tag(first_token) == TAG_1
 def test_should_be_able_to_set_tag_with_attribute(self):
     """A tag mapped to a TEI path with an attribute still round-trips via get_tag."""
     document = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1)]),
         tag_to_tei_path_mapping={TAG_1: 'div[@tag="tag1"]'})
     first_line = _get_all_lines(document)[0]
     first_token = list(document.get_tokens_of_line(first_line))[0]
     document.set_tag(first_token, TAG_1)
     assert document.get_tag(first_token) == TAG_1
# Code example #3 (score: 0)
def _get_document_tagged_token_lines(
        doc: GrobidTrainingTeiStructuredDocument
) -> List[List[Tuple[str, str]]]:
    """Collect, for every line of every page, the (tag, text) pair of each token."""
    tagged_token_lines = []
    for page in doc.get_pages():
        for line in doc.get_lines_of_page(page):
            tagged_token_lines.append([
                (doc.get_tag(token), doc.get_text(token))
                for token in doc.get_tokens_of_line(line)
            ])
    LOGGER.debug('document_tagged_token_lines: %s',
                 tagged_token_lines)
    return tagged_token_lines
    def test_should_preserve_space_after_lb_in_updated_root(self):
        """The space following an <lb/> must survive when both lines get the same tag."""
        document = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[TOKEN_1,
                              E(TeiTagNames.LB), ' ' + TOKEN_2]))
        all_lines = _get_all_lines(document)

        # Tag the first token of line 1 and the last token of line 2 identically.
        document.set_tag(
            list(document.get_tokens_of_line(all_lines[0]))[0], TAG_1)
        document.set_tag(
            list(document.get_tokens_of_line(all_lines[1]))[-1], TAG_1)

        front_element = document.root.find('./text/front')
        children = list(front_element)
        assert [child.tag for child in children] == [TAG_1]
        expected_xml = '<{tag1}>{token1}<{lb}/> {token2}</{tag1}>'.format(
            tag1=TAG_1, token1=TOKEN_1, token2=TOKEN_2, lb=TeiTagNames.LB)
        assert _to_xml(children[0]) == expected_xml
 def test_should_be_able_get_root_with_updated_single_token_tag(self):
     """Tagging a single token is reflected in the serialized root element."""
     document = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1)]))
     first_line = _get_all_lines(document)[0]
     first_token = list(document.get_tokens_of_line(first_line))[0]
     document.set_tag(first_token, TAG_1)
     front_element = document.root.find('./text/front')
     children = list(front_element)
     assert [child.tag for child in children] == [TAG_1]
     assert [child.text for child in children] == [TOKEN_1]
    def test_should_remove_untagged_including_line_feed(self):
        """remove_all_untagged drops untagged tokens together with the line break."""
        document = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[
                TOKEN_1,
                E(TeiTagNames.LB), ' ' + TOKEN_2 + ' ' + TOKEN_3
            ]))
        all_lines = _get_all_lines(document)

        # Explicitly untag the first token; tag only the very last token.
        document.set_tag(
            list(document.get_tokens_of_line(all_lines[0]))[0], None)
        document.set_tag(
            list(document.get_tokens_of_line(all_lines[1]))[-1], TAG_1)

        document.remove_all_untagged()

        front_element = document.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front_element))
        expected_xml = '<front><{tag1}>{token3}</{tag1}></front>'.format(
            tag1=TAG_1, token3=TOKEN_3)
        assert _to_xml(front_element) == expected_xml
    def test_should_not_include_space_in_tag_if_previous_token_has_different_tag(
            self):
        """The separating space stays outside both elements when the tags differ."""
        document = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[TOKEN_1,
                              E(TeiTagNames.LB), ' ' + TOKEN_2]))
        all_lines = _get_all_lines(document)

        document.set_tag(
            list(document.get_tokens_of_line(all_lines[0]))[0], TAG_1)
        document.set_tag(
            list(document.get_tokens_of_line(all_lines[1]))[-1], TAG_2)

        front_element = document.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front_element))
        expected_xml = ('<front><{tag1}>{token1}<{lb}/></{tag1}>'
                        ' <{tag2}>{token2}</{tag2}></front>').format(
                            tag1=TAG_1,
                            tag2=TAG_2,
                            token1=TOKEN_1,
                            token2=TOKEN_2,
                            lb=TeiTagNames.LB)
        assert _to_xml(front_element) == expected_xml
 def test_should_be_able_get_root_with_updated_single_token_tag_with_attribute(
         self):
     """A tag mapped to 'div[@tag=...]' serializes as that element with the attribute."""
     document = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1)]),
         tag_to_tei_path_mapping={TAG_1: 'div[@tag="tag1"]'})
     first_line = _get_all_lines(document)[0]
     first_token = list(document.get_tokens_of_line(first_line))[0]
     document.set_tag(first_token, TAG_1)
     front_element = document.root.find('./text/front')
     children = list(front_element)
     assert [child.tag for child in children] == ['div']
     assert [child.attrib for child in children] == [{'tag': 'tag1'}]
     assert [child.text for child in children] == [TOKEN_1]
    def test_should_not_return_preserved_tag_as_tag_and_update_preserved_tag(
            self):
        """Preserved tags are invisible to get_tag and are replaced by set_tag."""
        original_tei_xml = _tei(front_items=[E.note(TOKEN_1)])
        LOGGER.debug('original tei xml: %s', _to_xml(original_tei_xml))
        document = GrobidTrainingTeiStructuredDocument(original_tei_xml,
                                                       preserve_tags=True,
                                                       tag_to_tei_path_mapping={})
        LOGGER.debug('doc: %s', document)
        first_line = _get_all_lines(document)[0]
        first_token = list(document.get_tokens_of_line(first_line))[0]
        # The preserved <note> tag must not be reported as the token's tag.
        assert not document.get_tag(first_token)
        document.set_tag(first_token, TAG_1)

        front_element = document.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front_element))
        expected_xml = '<front><{tag1}>{token1}</{tag1}></front>'.format(
            token1=TOKEN_1, tag1=TAG_1)
        assert _to_xml(front_element) == expected_xml
# Code example #10 (score: 0)
def _simple_document_with_tagged_token_lines(
        lines: List[List[Tuple[str,
                               str]]]) -> GrobidTrainingTeiStructuredDocument:
    """Build a structured document from (tag, token) lines and apply the given tags."""
    tei_items = []
    for tagged_line in lines:
        # One space-joined text node per line, each followed by a line break.
        tei_items.append(' '.join(token for _, token in tagged_line))
        tei_items.append(E.lb())
    doc = GrobidTrainingTeiStructuredDocument(
        _tei(tei_items), container_node_path=DEFAULT_CONTAINER_NODE_PATH)
    doc_lines = [
        doc_line
        for page in doc.get_pages()
        for doc_line in doc.get_lines_of_page(page)
    ]
    for tagged_line, doc_line in zip(lines, doc_lines):
        doc_tokens = doc.get_tokens_of_line(doc_line)
        for (tag, token), doc_token in zip(tagged_line, doc_tokens):
            # Sanity check: document tokens must line up with the input tokens.
            assert token == doc.get_text(doc_token)
            if tag:
                doc.set_tag(doc_token, tag)
    return doc
# Code example #11 (score: 0)
def iter_first_tokens_of_lines(
        structured_document: GrobidTrainingTeiStructuredDocument, lines: list):
    """Yield the first token of each line, skipping lines without tokens."""
    for current_line in lines:
        tokens = structured_document.get_tokens_of_line(current_line)
        if not tokens:
            continue
        yield tokens[0]