Code example #1
0
 def test_compress_context_interpretations(self):
     """Interpretation contexts should fold into the paragraphs that follow."""
     stream = [
         tokens.Context(['123', 'Interpretations']),
         tokens.Paragraph(section='12', paragraphs=['a', '2', 'iii']),
         tokens.Paragraph(is_interp=True, paragraphs=[None, '3', 'v']),
         tokens.Context([None, 'Appendix:R']),
         tokens.Paragraph(is_interp=True, paragraphs=[None, '5']),
     ]
     result, _ = diff.compress_context(stream, [])
     expected = [
         tokens.Paragraph(part='123', is_interp=True, section='12',
                          paragraphs=['(a)(2)(iii)', '3', 'v']),
         # None appears because one layer of the label is missing
         tokens.Paragraph(part='123', is_interp=True, section='Appendix:R',
                          paragraphs=[None, '5']),
     ]
     self.assertEqual(result, expected)
Code example #2
0
 def test_compress_context_interpretations(self):
     """Interpretation contexts should fold into the paragraphs that follow."""
     stream = [
         tokens.Context(["123", "Interpretations"]),
         tokens.Paragraph([None, None, "12", "a", "2", "iii"]),
         tokens.Paragraph([None, "Interpretations", None, None, "3", "v"]),
         tokens.Context([None, "Appendix:R"]),
         tokens.Paragraph([None, "Interpretations", None, None, "5"]),
     ]
     result, _ = diff.compress_context(stream, [])
     expected = [
         tokens.Paragraph(
             ["123", "Interpretations", "12", "(a)(2)(iii)", "3", "v"]),
         # None appears because one layer of the label is missing
         tokens.Paragraph(
             ["123", "Interpretations", "Appendix:R", None, "5"]),
     ]
     self.assertEqual(result, expected)
Code example #3
0
 def test_compress_context_interpretations(self):
     """Interpretation contexts should fold into the paragraphs that follow."""
     stream = [
         tokens.Context(['123', 'Interpretations']),
         tokens.Paragraph([None, None, '12', 'a', '2', 'iii']),
         tokens.Paragraph([None, 'Interpretations', None, None, '3', 'v']),
         tokens.Context([None, 'Appendix:R']),
         tokens.Paragraph([None, 'Interpretations', None, None, '5']),
     ]
     result, _ = diff.compress_context(stream, [])
     expected = [
         tokens.Paragraph(
             ['123', 'Interpretations', '12', '(a)(2)(iii)', '3', 'v']),
         # None appears because one layer of the label is missing
         tokens.Paragraph(
             ['123', 'Interpretations', 'Appendix:R', None, '5']),
     ]
     self.assertEqual(result, expected)
Code example #4
0
 def test_compress_context_simple(self):
     """Earlier Context tokens fill the gaps in later Paragraph tokens."""
     stream = [
         tokens.Verb(tokens.Verb.PUT, active=True),
         tokens.Context(['9876', 'Subpart:A']),            # part 9876, subpart A
         tokens.Context([None, None, '12']),               # section 12
         tokens.Paragraph([None, None, None, 'f', '4']),   # 12(f)(4)
         tokens.Context([None, None, None, 'g']),          # 12(g)
         tokens.Paragraph([None, None, None, None, '1']),  # 12(g)(1)
     ]
     result, final_ctx = diff.compress_context(stream, [])
     expected = [
         tokens.Verb(tokens.Verb.PUT, active=True),
         tokens.Paragraph(['9876', 'Subpart:A', '12', 'f', '4']),
         tokens.Paragraph(['9876', 'Subpart:A', '12', 'g', '1']),
     ]
     self.assertEqual(result, expected)
     # The context that remains after the last paragraph is also returned
     self.assertEqual(['9876', 'Subpart:A', '12', 'g', '1'], final_ctx)
Code example #5
0
 def test_compress_context_simple(self):
     """Earlier Context tokens fill the gaps in later Paragraph tokens."""
     stream = [
         tokens.Verb(tokens.Verb.PUT, active=True),
         tokens.Context(['9876', 'Subpart:A']),     # part 9876, subpart A
         tokens.Context([None, None, '12']),        # section 12
         tokens.Paragraph(paragraphs=['f', '4']),   # 12(f)(4)
         tokens.Context([None, None, None, 'g']),   # 12(g)
         tokens.Paragraph(paragraphs=[None, '1']),  # 12(g)(1)
     ]
     result, final_ctx = diff.compress_context(stream, [])
     expected = [
         tokens.Verb(tokens.Verb.PUT, active=True),
         tokens.Paragraph(part='9876', subpart='A', section='12',
                          paragraphs=['f', '4']),
         tokens.Paragraph(part='9876', subpart='A', section='12',
                          paragraphs=['g', '1']),
     ]
     self.assertEqual(result, expected)
     # The context that remains after the last paragraph is also returned
     self.assertEqual(['9876', 'Subpart:A', '12', 'g', '1'], final_ctx)
Code example #6
0
 def test_compress_context_simple(self):
     """Earlier Context tokens fill the gaps in later Paragraph tokens."""
     stream = [
         tokens.Verb(tokens.Verb.PUT, active=True),
         tokens.Context(["9876", "Subpart:A"]),            # part 9876, subpart A
         tokens.Context([None, None, "12"]),               # section 12
         tokens.Paragraph([None, None, None, "f", "4"]),   # 12(f)(4)
         tokens.Context([None, None, None, "g"]),          # 12(g)
         tokens.Paragraph([None, None, None, None, "1"]),  # 12(g)(1)
     ]
     result, final_ctx = diff.compress_context(stream, [])
     expected = [
         tokens.Verb(tokens.Verb.PUT, active=True),
         tokens.Paragraph(["9876", "Subpart:A", "12", "f", "4"]),
         tokens.Paragraph(["9876", "Subpart:A", "12", "g", "1"]),
     ]
     self.assertEqual(result, expected)
     # The context that remains after the last paragraph is also returned
     self.assertEqual(["9876", "Subpart:A", "12", "g", "1"], final_ctx)
Code example #7
0
 def test_compress_context_initial_context(self):
     """A caller-supplied initial context seeds the compression."""
     stream = [tokens.Paragraph([None, None, None, "q"])]
     result, _ = diff.compress_context(stream, ["111", None, "12"])
     expected = [tokens.Paragraph(["111", None, "12", "q"])]
     self.assertEqual(result, expected)
Code example #8
0
 def test_compress_context_initial_context(self):
     """A caller-supplied initial context seeds the compression."""
     stream = [tokens.Paragraph(paragraph='q')]
     result, _ = diff.compress_context(stream, ['111', None, '12'])
     expected = [tokens.Paragraph(part='111', section='12', paragraph='q')]
     self.assertEqual(result, expected)
Code example #9
0
 def test_compress_context_initial_context(self):
     """A caller-supplied initial context seeds the compression."""
     stream = [tokens.Paragraph([None, None, None, 'q'])]
     result, _ = diff.compress_context(stream, ['111', None, '12'])
     expected = [tokens.Paragraph(['111', None, '12', 'q'])]
     self.assertEqual(result, expected)