def test_should_find_one_line_with_one_token_at_front_level(self):
     """A single token inside a front-level note is exposed as one one-token line."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1)]),
         container_node_path='text/front')
     doc_lines = _get_all_lines(structured_doc)
     assert _get_token_texts_for_lines(structured_doc, doc_lines) == [[TOKEN_1]]
     note_element = structured_doc.root.find('./text/front/note')
     assert note_element.text == TOKEN_1
 def test_should_set_empty_token_whitespace(self):
     """Sub-tokens split from an adjacent pair like '1.' carry no trailing whitespace."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note('1.')]))
     doc_lines = _get_all_lines(structured_doc)
     assert _get_token_texts_for_lines(structured_doc, doc_lines) == [['1', '.']]
     line_tokens = structured_doc.get_all_tokens_of_line(doc_lines[0])
     first_token = line_tokens[0]
     assert first_token.whitespace == ''
    def test_should_not_include_line_feed_in_tag_if_previous_token_has_different_tag(
            self):
        """A line feed between differently-tagged tokens must stay outside both tags."""
        original_root = _tei(front_items=[TOKEN_1, '\n ' + TOKEN_2])
        LOGGER.debug('original_root: %s', _to_xml(original_root))
        doc = GrobidTrainingTeiStructuredDocument(original_root)
        first_line = _get_all_lines(doc)[0]

        tokens_of_line1 = list(doc.get_all_tokens_of_line(first_line))
        whitespace_tokens = [
            token for token in tokens_of_line1 if isinstance(token, TeiSpace)
        ]
        assert whitespace_tokens
        # tag the whitespace the same as the first token; the trailing token differs
        for whitespace_token in whitespace_tokens:
            doc.set_tag(whitespace_token, TAG_1)
        doc.set_tag(tokens_of_line1[0], TAG_1)
        doc.set_tag(tokens_of_line1[-1], TAG_2)

        LOGGER.debug('tokens_of_line1: %s', tokens_of_line1)

        front = doc.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        expected_xml = (
            '<front><{tag1}>{token1}</{tag1}>'
            '\n <{tag2}>{token2}</{tag2}></front>'
        ).format(tag1=TAG_1, tag2=TAG_2, token1=TOKEN_1, token2=TOKEN_2)
        assert _to_xml(front) == expected_xml
 def test_should_be_able_to_set_tag(self):
     """set_tag followed by get_tag round-trips the tag value on a token."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1)]))
     first_line = _get_all_lines(structured_doc)[0]
     first_token = list(structured_doc.get_tokens_of_line(first_line))[0]
     structured_doc.set_tag(first_token, TAG_1)
     assert structured_doc.get_tag(first_token) == TAG_1
 def test_should_be_able_to_set_tag_with_attribute(self):
     """Tag round-trip also works when the tag maps to a TEI path with an attribute."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1)]),
         tag_to_tei_path_mapping={TAG_1: 'div[@tag="tag1"]'})
     first_line = _get_all_lines(structured_doc)[0]
     first_token = list(structured_doc.get_tokens_of_line(first_line))[0]
     structured_doc.set_tag(first_token, TAG_1)
     assert structured_doc.get_tag(first_token) == TAG_1
 def test_should_find_empty_first_line_and_text_outside_semantic_element(
         self):
     """Text around an lb plus note content splits into two token lines."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[TOKEN_1,
                           _tei_lb(), TOKEN_2,
                           E.note(TOKEN_3)]))
     doc_lines = _get_all_lines(structured_doc)
     assert _get_token_texts_for_lines(structured_doc, doc_lines) == [
         [TOKEN_1],
         [TOKEN_2, TOKEN_3]
     ]
 def test_should_be_able_get_root_with_updated_single_token_tag(self):
     """root reflects a newly set tag as an element wrapping the token text."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1)]))
     first_line = _get_all_lines(structured_doc)[0]
     first_token = list(structured_doc.get_tokens_of_line(first_line))[0]
     structured_doc.set_tag(first_token, TAG_1)
     front = structured_doc.root.find('./text/front')
     children = list(front)
     assert [child.tag for child in children] == [TAG_1]
     assert [child.text for child in children] == [TOKEN_1]
    def test_should_reverse_map_tags(self):
        """Preserved TEI paths are reverse-mapped back to their tag names."""
        tag_to_tei_path_mapping = {TAG_1: 'docTitle/titlePart'}
        original_tei_xml = _tei(front_items=[E.docTitle(E.titlePart(TOKEN_1))])
        LOGGER.debug('original tei xml: %s', _to_xml(original_tei_xml))
        doc = GrobidTrainingTeiStructuredDocument(
            original_tei_xml,
            tag_to_tei_path_mapping=tag_to_tei_path_mapping,
            preserve_tags=True)
        LOGGER.debug('doc: %s', doc)

        tag_values_by_line = [
            [
                doc.get_tag_or_preserved_tag_value(token)
                for token in doc.get_all_tokens_of_line(line)
            ]
            for line in _get_all_lines(doc)
        ]
        assert tag_values_by_line == [[TAG_1]]
    def test_should_preserve_existing_tag(self):
        """With preserve_tags, existing markup survives a root round-trip unchanged."""
        original_tei_xml = _tei(
            front_items=[E.docTitle(E.titlePart(TOKEN_1)), TOKEN_2])
        LOGGER.debug('original tei xml: %s', _to_xml(original_tei_xml))
        doc = GrobidTrainingTeiStructuredDocument(original_tei_xml,
                                                  preserve_tags=True,
                                                  tag_to_tei_path_mapping={})
        LOGGER.debug('doc: %s', doc)

        front = doc.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        expected_xml = (
            '<front><docTitle><titlePart>{token1}</titlePart></docTitle>{token2}</front>'
        ).format(token1=TOKEN_1, token2=TOKEN_2)
        assert _to_xml(front) == expected_xml
 def test_should_be_able_get_root_with_updated_single_token_tag_with_attribute(
         self):
     """A mapped tag with an attribute selector renders as a div with that attribute."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1)]),
         tag_to_tei_path_mapping={TAG_1: 'div[@tag="tag1"]'})
     first_line = _get_all_lines(structured_doc)[0]
     first_token = list(structured_doc.get_tokens_of_line(first_line))[0]
     structured_doc.set_tag(first_token, TAG_1)
     front = structured_doc.root.find('./text/front')
     children = list(front)
     assert [child.tag for child in children] == ['div']
     assert [child.attrib for child in children] == [{'tag': 'tag1'}]
     assert [child.text for child in children] == [TOKEN_1]
    def test_should_preserve_separation_between_existing_tag(self):
        """Two sibling byline/affiliation elements remain separate after round-trip."""
        original_tei_xml = _tei(front_items=[
            E.byline(E.affiliation(TOKEN_1)),
            E.byline(E.affiliation(TOKEN_2))
        ])
        LOGGER.debug('original tei xml: %s', _to_xml(original_tei_xml))
        doc = GrobidTrainingTeiStructuredDocument(original_tei_xml,
                                                  preserve_tags=True,
                                                  tag_to_tei_path_mapping={})
        LOGGER.debug('doc: %s', doc)

        front = doc.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        affiliation_elements = front.xpath('./byline/affiliation')
        assert [element.text
                for element in affiliation_elements] == [TOKEN_1, TOKEN_2]
    def test_should_preserve_existing_tag_with_attrib(self):
        """An existing element matching a mapped path with an attribute is preserved."""
        original_tei_xml = _tei(
            front_items=[E.div(TOKEN_1, {'tag': TAG_1}), TOKEN_2])
        LOGGER.debug('original tei xml: %s', _to_xml(original_tei_xml))
        doc = GrobidTrainingTeiStructuredDocument(
            original_tei_xml,
            preserve_tags=True,
            tag_to_tei_path_mapping={TAG_1: 'div[@tag="tag1"]'})
        LOGGER.debug('doc: %s', doc)

        front = doc.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        expected_xml = (
            '<front><div tag="{TAG_1}">{token1}</div>{token2}</front>'
        ).format(token1=TOKEN_1, token2=TOKEN_2, TAG_1=TAG_1)
        assert _to_xml(front) == expected_xml
    def test_should_preserve_existing_nested_tag(self):
        """Nested children of a preserved mapped element are kept in place."""
        original_tei_xml = _tei(
            front_items=[E.div(E.label(TOKEN_1), TOKEN_2), TOKEN_3])
        LOGGER.debug('original tei xml: %s', _to_xml(original_tei_xml))
        doc = GrobidTrainingTeiStructuredDocument(
            original_tei_xml,
            preserve_tags=True,
            tag_to_tei_path_mapping={TAG_1: 'div'})
        LOGGER.debug('doc: %s', doc)

        front = doc.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        expected_xml = (
            '<front><div><label>{token1}</label>{token2}</div>{token3}</front>'
        ).format(token1=TOKEN_1, token2=TOKEN_2, token3=TOKEN_3)
        assert _to_xml(front) == expected_xml
    def test_should_preserve_space_after_lb_in_updated_root(self):
        """When both lines share one tag, the lb and the following space stay inside it."""
        doc = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[TOKEN_1,
                              E(TeiTagNames.LB), ' ' + TOKEN_2]))
        doc_lines = _get_all_lines(doc)

        tokens_of_line1 = list(doc.get_tokens_of_line(doc_lines[0]))
        doc.set_tag(tokens_of_line1[0], TAG_1)

        tokens_of_line2 = list(doc.get_tokens_of_line(doc_lines[1]))
        doc.set_tag(tokens_of_line2[-1], TAG_1)

        front = doc.root.find('./text/front')
        children = list(front)
        assert [child.tag for child in children] == [TAG_1]
        expected_xml = '<{tag1}>{token1}<{lb}/> {token2}</{tag1}>'.format(
            tag1=TAG_1, token1=TOKEN_1, token2=TOKEN_2, lb=TeiTagNames.LB)
        assert _to_xml(children[0]) == expected_xml
    def test_should_not_return_preserved_tag_as_tag_and_update_preserved_tag(
            self):
        """A preserved tag is not reported by get_tag and can be overwritten."""
        original_tei_xml = _tei(front_items=[E.note(TOKEN_1)])
        LOGGER.debug('original tei xml: %s', _to_xml(original_tei_xml))
        doc = GrobidTrainingTeiStructuredDocument(original_tei_xml,
                                                  preserve_tags=True,
                                                  tag_to_tei_path_mapping={})
        LOGGER.debug('doc: %s', doc)
        first_line = _get_all_lines(doc)[0]
        first_token = list(doc.get_tokens_of_line(first_line))[0]
        # the preserved tag must not surface via get_tag ...
        assert not doc.get_tag(first_token)
        # ... and setting a new tag must replace it in the output
        doc.set_tag(first_token, TAG_1)

        front = doc.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        expected_xml = '<front><{tag1}>{token1}</{tag1}></front>'.format(
            token1=TOKEN_1, tag1=TAG_1)
        assert _to_xml(front) == expected_xml
# --- Esempio n. 16 (scraper artifact: non-code separator between pasted examples) ---
def _simple_document_with_tagged_token_lines(
        lines: List[List[Tuple[str,
                               str]]]) -> GrobidTrainingTeiStructuredDocument:
    """Build a structured document from lines of (tag, token) pairs and apply the tags.

    Each inner list is one line; tokens are joined with spaces and separated
    by lb elements. Empty tags are skipped (token stays untagged).
    """
    tei_items = []
    for tagged_line in lines:
        tei_items.append(' '.join(token for _, token in tagged_line))
        tei_items.append(E.lb())
    doc = GrobidTrainingTeiStructuredDocument(
        _tei(tei_items), container_node_path=DEFAULT_CONTAINER_NODE_PATH)
    doc_lines = [
        doc_line for page in doc.get_pages()
        for doc_line in doc.get_lines_of_page(page)
    ]
    for tagged_line, doc_line in zip(lines, doc_lines):
        doc_tokens = doc.get_tokens_of_line(doc_line)
        for (tag, token), doc_token in zip(tagged_line, doc_tokens):
            # sanity check: document tokenization must match the input tokens
            assert token == doc.get_text(doc_token)
            if tag:
                doc.set_tag(doc_token, tag)
    return doc
    def test_should_remove_untagged_including_line_feed(self):
        """remove_all_untagged drops untagged tokens together with their line feed."""
        doc = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[
                TOKEN_1,
                E(TeiTagNames.LB), ' ' + TOKEN_2 + ' ' + TOKEN_3
            ]))
        doc_lines = _get_all_lines(doc)

        tokens_of_line1 = list(doc.get_tokens_of_line(doc_lines[0]))
        doc.set_tag(tokens_of_line1[0], None)

        tokens_of_line2 = list(doc.get_tokens_of_line(doc_lines[1]))
        doc.set_tag(tokens_of_line2[-1], TAG_1)

        doc.remove_all_untagged()

        front = doc.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        expected_xml = '<front><{tag1}>{token3}</{tag1}></front>'.format(
            tag1=TAG_1, token3=TOKEN_3)
        assert _to_xml(front) == expected_xml
    def test_should_not_include_space_in_tag_if_previous_token_has_different_tag(
            self):
        """A space after an lb stays outside both tags when neighbouring tags differ."""
        doc = GrobidTrainingTeiStructuredDocument(
            _tei(front_items=[TOKEN_1,
                              E(TeiTagNames.LB), ' ' + TOKEN_2]))
        doc_lines = _get_all_lines(doc)

        tokens_of_line1 = list(doc.get_tokens_of_line(doc_lines[0]))
        doc.set_tag(tokens_of_line1[0], TAG_1)

        tokens_of_line2 = list(doc.get_tokens_of_line(doc_lines[1]))
        doc.set_tag(tokens_of_line2[-1], TAG_2)

        front = doc.root.find('./text/front')
        LOGGER.debug('xml: %s', _to_xml(front))
        expected_xml = (
            '<front><{tag1}>{token1}<{lb}/></{tag1}>'
            ' <{tag2}>{token2}</{tag2}></front>'
        ).format(tag1=TAG_1,
                 tag2=TAG_2,
                 token1=TOKEN_1,
                 token2=TOKEN_2,
                 lb=TeiTagNames.LB)
        assert _to_xml(front) == expected_xml
# --- Esempio n. 19 (scraper artifact: non-code separator between pasted examples) ---
 def test_should_not_fail_on_empty_document(self):
     """Annotating an empty TEI document with TextLineNumberAnnotator must not raise."""
     empty_doc = GrobidTrainingTeiStructuredDocument(_tei())
     TextLineNumberAnnotator().annotate(empty_doc)
 def test_should_find_two_lines_separated_by_lb_element(self):
     """An lb element inside a note splits the tokens into two lines."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1, _tei_lb(), TOKEN_2)]))
     doc_lines = _get_all_lines(structured_doc)
     assert _get_token_texts_for_lines(structured_doc, doc_lines) == [
         [TOKEN_1], [TOKEN_2]
     ]
 def test_should_find_text_outside_semantic_element_at_the_end(self):
     """Trailing text after a note ending with an lb becomes its own line."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(TOKEN_1, E(TeiTagNames.LB)), TOKEN_2]))
     doc_lines = _get_all_lines(structured_doc)
     assert _get_token_texts_for_lines(structured_doc, doc_lines) == [
         [TOKEN_1], [TOKEN_2]
     ]
 def test_should_find_text_in_nested_top_level_element(self):
     """Tokens nested inside docTitle/titlePart are still discovered."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.docTitle(E.titlePart(TOKEN_1))]))
     doc_lines = _get_all_lines(structured_doc)
     assert _get_token_texts_for_lines(structured_doc, doc_lines) == [[TOKEN_1]]
 def test_should_split_words_as_separate_tokens(self):
     """Whitespace-separated words in one text node become separate tokens."""
     structured_doc = GrobidTrainingTeiStructuredDocument(
         _tei(front_items=[E.note(' '.join([TOKEN_1, TOKEN_2]))]))
     doc_lines = _get_all_lines(structured_doc)
     assert _get_token_texts_for_lines(structured_doc, doc_lines) == [
         [TOKEN_1, TOKEN_2]
     ]
def _structured_document_with_title(title=TITLE_1):
    """Return a structured document whose TEI source contains the given title."""
    tei_root = _tei_with_title(title)
    return GrobidTrainingTeiStructuredDocument(tei_root)
 def test_should_return_root_as_pages(self):
     """With no front items, the root element itself is the document's only page."""
     tei_root = _tei(front_items=[])
     structured_doc = GrobidTrainingTeiStructuredDocument(tei_root)
     assert list(structured_doc.get_pages()) == [tei_root]
def _structured_document_with_sub_elements(*args, **kwargs):
    """Build a structured document from TEI with sub-elements using the shared tag mapping."""
    tei_root = _tei_with_sub_elements(*args, **kwargs)
    return GrobidTrainingTeiStructuredDocument(
        tei_root, tag_to_tei_path_mapping=TAG_TO_TEI_PATH_MAPPING)
# --- Esempio n. 27 (scraper artifact: non-code separator between pasted examples) ---
 def test_should_not_fail_on_empty_document(self):
     """Annotating an empty TEI document with SegmentationAnnotator must not raise."""
     empty_doc = GrobidTrainingTeiStructuredDocument(_tei())
     SegmentationAnnotator(DEFAULT_CONFIG).annotate(empty_doc)