Example #1
def test_compress_context_simple():
    tokenized = [
        tokens.Verb(tokens.Verb.PUT, active=True),
        #  part 9876, subpart A
        tokens.Context(['9876', 'Subpart:A']),
        #  section 12
        tokens.Context([None, None, '12']),
        #  12(f)(4)
        tokens.Paragraph.make(paragraphs=['f', '4']),
        #  12(g)
        tokens.Context([None, None, None, 'g']),
        #  12(g)(1)
        tokens.Paragraph.make(paragraphs=[None, '1']),
    ]
    converted, final_ctx = amdparser.compress_context(tokenized, [])
    assert converted == [
        tokens.Verb(tokens.Verb.PUT, active=True),
        tokens.Paragraph.make(part='9876',
                              subpart='A',
                              section='12',
                              paragraphs=['f', '4']),
        tokens.Paragraph.make(part='9876',
                              subpart='A',
                              section='12',
                              paragraphs=['g', '1']),
    ]
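    # the final context mirrors the last citation seen: part 9876,
    # Subpart A, section 12, paragraph (g)(1)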
    assert ['9876', 'Subpart:A', '12', 'g', '1'] == final_ctx
Example #2
def test_compress_context_initial_context(self):
    tokenized = [tokens.Paragraph(paragraph='q')]
    converted, _ = amdparser.compress_context(tokenized,
                                              ['111', None, '12'])
    self.assertEqual(
        converted,
        [tokens.Paragraph(part='111', section='12', paragraph='q')])
Example #3
def test_compress_context_initial_context(self):
    tokenized = [tokens.Paragraph(paragraph='q')]
    converted, _ = amdparser.compress_context(
        tokenized, ['111', None, '12'])
    self.assertEqual(
        converted,
        [tokens.Paragraph(part='111', section='12', paragraph='q')])
Example #4
def test_compress_context_interpretations():
    tokenized = [
        tokens.Context(['123', 'Interpretations']),
        tokens.Paragraph.make(section='12', paragraphs=['a', '2', 'iii']),
        tokens.Paragraph.make(is_interp=True, paragraphs=[None, '3', 'v']),
        tokens.Context([None, 'Appendix:R']),
        tokens.Paragraph.make(is_interp=True, paragraphs=[None, '5'])
    ]
    converted, _ = amdparser.compress_context(tokenized, [])
    assert converted == [
        tokens.Paragraph.make(part='123', is_interp=True, section='12',
                              paragraphs=['(a)(2)(iii)', '3', 'v']),
        #   None because we are missing a layer
        tokens.Paragraph.make(part='123', is_interp=True, section='Appendix:R',
                              paragraphs=[None, '5'])
    ]
Example #5
def test_compress_context_interpretations():
    tokenized = [
        tokens.Context(['123', 'Interpretations']),
        tokens.Paragraph.make(section='12', paragraphs=['a', '2', 'iii']),
        tokens.Paragraph.make(is_interp=True, paragraphs=[None, '3', 'v']),
        tokens.Context([None, 'Appendix:R']),
        tokens.Paragraph.make(is_interp=True, paragraphs=[None, '5'])
    ]
    converted, _ = amdparser.compress_context(tokenized, [])
    assert converted == [
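        # the earlier regulation paragraph 12(a)(2)(iii) becomes the first
        # level of the interpretation paragraph, rendered as '(a)(2)(iii)'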
        tokens.Paragraph.make(part='123',
                              is_interp=True,
                              section='12',
                              paragraphs=['(a)(2)(iii)', '3', 'v']),
        #   None because we are missing a layer
        tokens.Paragraph.make(part='123',
                              is_interp=True,
                              section='Appendix:R',
                              paragraphs=[None, '5'])
    ]
Example #6
def test_compress_context_simple():
    tokenized = [
        tokens.Verb(tokens.Verb.PUT, active=True),
        #  part 9876, subpart A
        tokens.Context(['9876', 'Subpart:A']),
        #  section 12
        tokens.Context([None, None, '12']),
        #  12(f)(4)
        tokens.Paragraph.make(paragraphs=['f', '4']),
        #  12(g)
        tokens.Context([None, None, None, 'g']),
        #  12(g)(1)
        tokens.Paragraph.make(paragraphs=[None, '1']),
    ]
    converted, final_ctx = amdparser.compress_context(tokenized, [])
    assert converted == [
        tokens.Verb(tokens.Verb.PUT, active=True),
        tokens.Paragraph.make(part='9876', subpart='A', section='12',
                              paragraphs=['f', '4']),
        tokens.Paragraph.make(part='9876', subpart='A', section='12',
                              paragraphs=['g', '1']),
    ]
    assert ['9876', 'Subpart:A', '12', 'g', '1'] == final_ctx
Example #7
def test_compress_context_initial_context():
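    # the token carries only a paragraph level, so part and section are
    # filled in from the initial context ['111', None, '12']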
    tokenized = [tokens.Paragraph.make(paragraph='q')]
    converted, _ = amdparser.compress_context(tokenized, ['111', None, '12'])
    assert converted == [
        tokens.Paragraph.make(part='111', section='12', paragraph='q')
    ]
Example #8
def test_compress_context_initial_context():
    tokenized = [tokens.Paragraph.make(paragraph='q')]
    converted, _ = amdparser.compress_context(
        tokenized, ['111', None, '12'])
    assert converted == [
        tokens.Paragraph.make(part='111', section='12', paragraph='q')]