Example #1
def test_switch_part_context():
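    # switch_part_context clears the context when the tokens name a different
    # part, and preserves it when they stay in the same part or name no part.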
    initial_context = ['105', '2']

    tokenized = [
        tokens.Paragraph.make(part='203', sub='2', section='x'),
        tokens.Verb(tokens.Verb.DESIGNATE, True)
    ]

    assert amdparser.switch_part_context(tokenized, initial_context) == []

    tokenized = [
        tokens.Paragraph.make(part='105', sub='4', section='j',
                              paragraph='iv'),
        tokens.Verb(tokens.Verb.DESIGNATE, True)
    ]

    assert initial_context == amdparser.switch_part_context(
        tokenized, initial_context)

    tokenized = [
        tokens.Context(['', '4', 'j', 'iv']),
        tokens.Verb(tokens.Verb.DESIGNATE, True)
    ]

    assert initial_context == amdparser.switch_part_context(
        tokenized, initial_context)
Example #2
def test_compress_context_in_tokenlists():
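    # Partial Paragraphs inside a TokenList should inherit part, section, and
    # paragraph levels from the Paragraph that precedes them in the list.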
    tokenized = [
        tokens.Context(['123', 'Interpretations']),
        tokens.Paragraph.make(part='123', section='23', paragraph='a'),
        tokens.Verb(tokens.Verb.PUT, True),
        tokens.TokenList([
            tokens.Verb(tokens.Verb.POST, True),
            tokens.Paragraph.make(part='123',
                                  section='23',
                                  paragraphs=['a', '1']),
            tokens.Paragraph.make(paragraphs=[None, None, 'i']),
            tokens.Paragraph.make(section='23', paragraph='b')
        ])
    ]
    assert amdparser.compress_context_in_tokenlists(tokenized) == [
        tokens.Context(['123', 'Interpretations']),
        tokens.Paragraph.make(part='123', section='23', paragraph='a'),
        tokens.Verb(tokens.Verb.PUT, True),
        tokens.TokenList([
            tokens.Verb(tokens.Verb.POST, True),
            tokens.Paragraph.make(part='123',
                                  section='23',
                                  paragraphs=['a', '1']),
            tokens.Paragraph.make(part='123',
                                  section='23',
                                  paragraphs=['a', '1', 'i']),
            tokens.Paragraph.make(part='123', section='23', paragraph='b')
        ])
    ]
Example #3
def test_compress_context_simple():
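    # Context tokens merge into the Paragraphs that follow them; the returned
    # context reflects the last paragraph produced.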
    tokenized = [
        tokens.Verb(tokens.Verb.PUT, active=True),
        #  part 9876, subpart A
        tokens.Context(['9876', 'Subpart:A']),
        #  section 12
        tokens.Context([None, None, '12']),
        #  12(f)(4)
        tokens.Paragraph.make(paragraphs=['f', '4']),
        #  12(g)
        tokens.Context([None, None, None, 'g']),
        #  12(g)(1)
        tokens.Paragraph.make(paragraphs=[None, '1']),
    ]
    converted, final_ctx = amdparser.compress_context(tokenized, [])
    assert converted == [
        tokens.Verb(tokens.Verb.PUT, active=True),
        tokens.Paragraph.make(part='9876',
                              subpart='A',
                              section='12',
                              paragraphs=['f', '4']),
        tokens.Paragraph.make(part='9876',
                              subpart='A',
                              section='12',
                              paragraphs=['g', '1']),
    ]
    assert ['9876', 'Subpart:A', '12', 'g', '1'] == final_ctx
Example #4
    def test_switch_context(self):
        initial_context = ['105', '2']

        tokenized = [
            tokens.Paragraph(['203', '2', 'x']),
            tokens.Verb(tokens.Verb.DESIGNATE, True)
        ]

        self.assertEqual(switch_context(tokenized, initial_context), [])

        tokenized = [
            tokens.Paragraph(['105', '4', 'j', 'iv']),
            tokens.Verb(tokens.Verb.DESIGNATE, True)
        ]

        self.assertEqual(switch_context(tokenized, initial_context),
                         initial_context)

        tokenized = [
            tokens.Context(['', '4', 'j', 'iv']),
            tokens.Verb(tokens.Verb.DESIGNATE, True)
        ]

        self.assertEqual(switch_context(tokenized, initial_context),
                         initial_context)
Example #5
    def test_compress_context_in_tokenlists(self):
        tokenized = [
            tokens.Context(['123', 'Interpretations']),
            tokens.Paragraph(part='123', section='23', paragraph='a'),
            tokens.Verb(tokens.Verb.PUT, True),
            tokens.TokenList([
                tokens.Verb(tokens.Verb.POST, True),
                tokens.Paragraph(part='123',
                                 section='23',
                                 paragraphs=['a', '1']),
                tokens.Paragraph(paragraphs=[None, None, 'i']),
                tokens.Paragraph(section='23', paragraph='b')
            ])
        ]
        converted = amdparser.compress_context_in_tokenlists(tokenized)
        self.assertEqual(converted, [
            tokens.Context(['123', 'Interpretations']),
            tokens.Paragraph(part='123', section='23', paragraph='a'),
            tokens.Verb(tokens.Verb.PUT, True),
            tokens.TokenList([
                tokens.Verb(tokens.Verb.POST, True),
                tokens.Paragraph(
                    part='123', section='23', paragraphs=['a', '1']),
                tokens.Paragraph(
                    part='123', section='23', paragraphs=['a', '1', 'i']),
                tokens.Paragraph(part='123', section='23', paragraph='b')
            ])
        ])
Example #6
    def test_switch_part_context(self):
        initial_context = ['105', '2']

        tokenized = [
            tokens.Paragraph(part='203', sub='2', section='x'),
            tokens.Verb(tokens.Verb.DESIGNATE, True)
        ]

        self.assertEqual(
            amdparser.switch_part_context(tokenized, initial_context), [])

        tokenized = [
            tokens.Paragraph(part='105', sub='4', section='j', paragraph='iv'),
            tokens.Verb(tokens.Verb.DESIGNATE, True)
        ]

        self.assertEqual(
            amdparser.switch_part_context(tokenized, initial_context),
            initial_context)

        tokenized = [
            tokens.Context(['', '4', 'j', 'iv']),
            tokens.Verb(tokens.Verb.DESIGNATE, True)
        ]

        self.assertEqual(
            amdparser.switch_part_context(tokenized, initial_context),
            initial_context)
Example #7
    def test_insert_in_order(self):
        text = ('11. [label:1234-123-p123456789] is removed. '
                '[insert-in-order] [label:1234-123-p987654321]')
        result = parse_text(text)
        self.assertEqual(result, [
            tokens.Paragraph(part='1234', sub='123', section='p123456789'),
            tokens.Verb(tokens.Verb.DELETE, active=False, and_prefix=False),
            tokens.Verb(tokens.Verb.INSERT, active=True, and_prefix=False),
            tokens.Paragraph(part='1234', sub='123', section='p987654321')
        ])
Example #8
    def test_is_designate_token(self):
        class Noun(tokens.Token):
            def __init__(self, noun):
                self.noun = noun

        token = tokens.Verb(tokens.Verb.DESIGNATE, True)
        self.assertTrue(is_designate_token(token))

        token = tokens.Verb(tokens.Verb.MOVE, True)
        self.assertFalse(is_designate_token(token))

        token = Noun('TABLE')
        self.assertFalse(is_designate_token(token))
Example #9
def test_switch_passive3():
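    # Each passive verb becomes active and moves in front of the context it
    # refers back to.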
    tokenized = [
        tokens.Context(['1']),
        tokens.Verb(tokens.Verb.MOVE, active=False),
        tokens.Context(['2']),
        tokens.Context(['3']),
        tokens.Verb(tokens.Verb.PUT, active=False)
    ]
    assert amdparser.switch_passive(tokenized) == [
        tokens.Verb(tokens.Verb.MOVE, active=True),
        tokens.Context(['1']),
        tokens.Context(['2']),
        tokens.Verb(tokens.Verb.PUT, active=True),
        tokens.Context(['3']),
    ]
Example #10
def test_context_to_paragraph():
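    # Contexts that follow a verb become Paragraphs, except those marked
    # certain=True, which stay Contexts.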
    tokenized = [
        tokens.Context(['1']),
        tokens.Verb(tokens.Verb.PUT, active=True),
        tokens.Context(['2']),
        tokens.Context(['3'], certain=True),
        tokens.Context(['4'])
    ]
    assert amdparser.context_to_paragraph(tokenized) == [
        tokens.Context(['1']),
        tokens.Verb(tokens.Verb.PUT, active=True),
        tokens.Paragraph.make(part='2'),
        tokens.Context(['3'], certain=True),
        tokens.Paragraph.make(part='4')
    ]
Example #11
    def test_reserving(self):
        text = "Section 105.32 is amended by"
        text += " removing and reserving paragraph (b)(2)"

        result = parse_text(text)
        reserve_token = tokens.Verb(tokens.Verb.RESERVE, active=True)
        self.assertTrue(reserve_token in result)
Example #12
    def test_example3(self):
        text = "6. Add subpart B to read as follows:"
        result = parse_text(text)
        self.assertEqual(result, [
            tokens.Verb(tokens.Verb.POST, active=True),
            tokens.Context([None, 'Subpart:B'], certain=False)
        ])
Example #13
    def test_context_to_paragraph(self):
        tokenized = [
            tokens.Context(['1']),
            tokens.Verb(tokens.Verb.PUT, active=True),
            tokens.Context(['2']),
            tokens.Context(['3'], certain=True),
            tokens.Context(['4'])
        ]
        converted = context_to_paragraph(tokenized)
        self.assertEqual(converted, [
            tokens.Context(['1']),
            tokens.Verb(tokens.Verb.PUT, active=True),
            tokens.Paragraph(['2']),
            tokens.Context(['3'], certain=True),
            tokens.Paragraph(['4'])
        ])
Example #14
    def test_context_to_paragraph_exceptions(self):
        tokenized = [
            tokens.Verb(tokens.Verb.PUT, active=True),
            tokens.Context(['2']),
            tokens.Paragraph(['3'])
        ]
        converted = context_to_paragraph(tokenized)
        self.assertEqual(tokenized, converted)

        tokenized = [
            tokens.Verb(tokens.Verb.PUT, active=True),
            tokens.Context(['2']),
            tokens.TokenList([tokens.Paragraph(['3'])])
        ]
        converted = context_to_paragraph(tokenized)
        self.assertEqual(tokenized, converted)
Example #15
    def test_contains_one_designate_token(self):
        tokenized = self.list_of_tokens()
        self.assertTrue(contains_one_designate_token(tokenized))

        designate_token_2 = tokens.Verb(tokens.Verb.DESIGNATE, True)
        tokenized.append(designate_token_2)
        self.assertFalse(contains_one_designate_token(tokenized))
Example #16
    def test_deal_with_subpart_adds_no_subpart(self):
        designate_token = tokens.Verb(tokens.Verb.DESIGNATE, True)
        token_list = self.paragraph_token_list()
        tokenized = [designate_token, token_list]

        toks, subpart_added = deal_with_subpart_adds(tokenized)
        self.assertFalse(subpart_added)
Example #17
def test_multiple_moves_paragraphs_on_either_side_of_a_move():
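    # Bare Paragraphs around a passive MOVE are left unchanged.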
    tokenized = [
        tokens.Paragraph.make(part='444', sub='1'),
        tokens.Verb(tokens.Verb.MOVE, active=False),
        tokens.Paragraph.make(part='444', sub='3')
    ]
    assert tokenized == amdparser.multiple_moves(tokenized)
Example #18
    def test_example11(self):
        text = u"Amend § 1005.36 to revise the section heading and "
        text += "paragraphs (a) and (b), and to add paragraph (d) to read "
        text += "as follows:"
        result = parse_text(text)
        self.assertEqual(result, [
            tokens.Context(['1005', None, '36']),
            tokens.Verb(tokens.Verb.PUT, active=True),
            tokens.Paragraph([], field=tokens.Paragraph.HEADING_FIELD),
            tokens.AndToken(),
            tokens.TokenList([tokens.Paragraph(paragraph='a'),
                              tokens.Paragraph(paragraph='b')]),
            tokens.AndToken(),
            tokens.Verb(tokens.Verb.POST, active=True),
            tokens.Paragraph(paragraph='d'),
        ])
Example #19
def test_subpart_designation_no_subpart():
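    # Designating a list of plain paragraphs does not amount to adding a
    # subpart.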
    designate_token = tokens.Verb(tokens.Verb.DESIGNATE, True)
    token_list = _paragraph_token_list()
    tokenized = [designate_token, token_list]

    toks, subpart_added = amdparser.subpart_designation(tokenized)
    assert not subpart_added
Example #20
    def test_reserving(self):
        text = "Section 105.32 is amended by"
        text += " removing and reserving paragraph (b)(2)"

        result = [m[0] for m, _, _ in token_patterns.scanString(text)]
        reserve_token = tokens.Verb(tokens.Verb.RESERVE, active=True)
        self.assertTrue(reserve_token in result)
Example #21
def test_switch_passive1():
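    # With an already-active verb there is nothing to rearrange.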
    tokenized = [
        tokens.Context(['1']),
        tokens.Verb(tokens.Verb.PUT, active=True),
        tokens.Context(['2'])
    ]
    assert tokenized == amdparser.switch_passive(tokenized)
Example #22
def test_context_to_paragraph_exceptions2():
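    # The TokenList after the verb already supplies a Paragraph, so the
    # trailing Context is not converted.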
    tokenized = [
        tokens.Verb(tokens.Verb.PUT, active=True),
        tokens.Context(['2']),
        tokens.TokenList([tokens.Paragraph.make(part='3')])
    ]
    assert tokenized == amdparser.context_to_paragraph(tokenized)
Example #23
    def test_example1(self):
        text = u"In § 9876.1, revise paragraph (b) to read as follows"
        result = parse_text(text)
        self.assertEqual(result, [
            tokens.Context(['9876', None, '1'], certain=True),
            tokens.Verb(tokens.Verb.PUT, active=True),
            tokens.Paragraph([None, None, None, 'b'])
        ])
Example #24
    def test_example_23(self):
        text = "comment 33(c)-5 is redesignated comment 33(c)-6 and revised"

        result = parse_text(text)
        self.assertEqual(4, len(result))
        old, redes, new, revised = result
        self.assertEqual(revised, tokens.Verb(tokens.Verb.PUT, active=False,
                                              and_prefix=True))
Example #25
    def test_example_35(self):
        text = "5. Section 100.94 is added to subpart C to read as follows:"
        result = parse_text(text)
        self.assertEqual(result, [
            tokens.Context(['100', None, '94'], certain=False),
            tokens.Verb(tokens.Verb.POST, active=False, and_prefix=False),
            tokens.Context([None, 'Subpart:C'], certain=True)
        ])
Example #26
def test_separate_tokenlist():
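    # TokenLists are flattened so their members sit at the top level of the
    # token stream.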
    tokenized = [
        tokens.Context(['1']),
        tokens.TokenList([
            tokens.Verb(tokens.Verb.MOVE, active=True),
            tokens.Context([None, '2'])
        ]),
        tokens.Paragraph.make(sub='3'),
        tokens.TokenList([tokens.Paragraph.make(section='b')])
    ]
    assert amdparser.separate_tokenlist(tokenized) == [
        tokens.Context(['1']),
        tokens.Verb(tokens.Verb.MOVE, active=True),
        tokens.Context([None, '2']),
        tokens.Paragraph.make(sub='3'),
        tokens.Paragraph.make(section='b')
    ]
Example #27
    def test_example14(self):
        text = "and removing paragraph (c)(5) to read as follows:"
        result = parse_text(text)
        self.assertEqual(result, [
            tokens.AndToken(),
            tokens.Verb(tokens.Verb.DELETE, active=True),
            tokens.Paragraph([None, None, None, 'c', '5'])
        ])
Example #28
def test_remove_false_deletes():
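    # The amendment text deletes punctuation, not a paragraph, so the DELETE
    # is a false positive and all tokens are dropped.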
    tokenized = [
        tokens.Paragraph.make(part='444'),
        tokens.Verb(tokens.Verb.DELETE, active=True)
    ]

    text = "Remove the semi-colong at the end of paragraph 444"
    new_tokenized = amdparser.remove_false_deletes(tokenized, text)
    assert new_tokenized == []
Example #29
    def test_remove_false_deletes(self):
        tokenized = [
            tokens.Paragraph(['444']),
            tokens.Verb(tokens.Verb.DELETE, active=True)
        ]

        text = "Remove the semi-colong at the end of paragraph 444"
        new_tokenized = remove_false_deletes(tokenized, text)
        self.assertEqual([], new_tokenized)
Example #30
    def test_separate_tokenlist(self):
        tokenized = [
            tokens.Context(['1']),
            tokens.TokenList([
                tokens.Verb(tokens.Verb.MOVE, active=True),
                tokens.Context([None, '2'])
            ]),
            tokens.Paragraph([None, '3']),
            tokens.TokenList([tokens.Paragraph([None, None, 'b'])])
        ]
        converted = separate_tokenlist(tokenized)
        self.assertEqual(converted, [
            tokens.Context(['1']),
            tokens.Verb(tokens.Verb.MOVE, active=True),
            tokens.Context([None, '2']),
            tokens.Paragraph([None, '3']),
            tokens.Paragraph([None, None, 'b'])
        ])