Code example #1
File: test_tokenizer.py  Project: agustinhenze/dlt
    def test_one_paragraph_one_token(self):
        tokenizer = Tokenizer()
        data = "Files: foobar.foo"
        tokenized_data = tokenizer.get_paragraphs(data.splitlines(True))
        self.assertEqual(len(tokenized_data), 1)
        first_paragraph = tokenized_data[0]
        self.assertEqual(len(first_paragraph), 1)
        first_field = list(first_paragraph)[0]
        self.assertEqual(first_field.line_number, 1)
        self.assertEqual(first_field.name, "Files")
        self.assertEqual(list(first_field)[0], " foobar.foo")
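From these assertions, get_paragraphs appears to return a list of paragraph objects: each paragraph iterates over field objects exposing name and line_number, and iterating a field yields its raw value lines with the leading space intact (and a trailing newline whenever the source line had one, as example #2 shows).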
Code example #2
File: test_tokenizer.py  Project: agustinhenze/dlt
    def test_two_paragraph_one_token(self):
        tokenizer = Tokenizer()
        data = """Files: foobar.foo

Copyright: Foo Bar <*****@*****.**>"""
        tokenized_data = tokenizer.get_paragraphs(data.splitlines(True))
        self.assertEqual(len(tokenized_data), 2)
        first_paragraph = tokenized_data[0]
        self.assertEqual(len(first_paragraph), 1)
        first_field = list(first_paragraph)[0]
        self.assertEqual(first_field.line_number, 1)
        self.assertEqual(first_field.name, "Files")
        self.assertEqual(list(first_field)[0], " foobar.foo\n")
Code example #3
File: test_coverage.py  Project: agustinhenze/dlt
    def setUp(self):
        self.tokenizer = Tokenizer()
        self.parser = Parser()
        self.test_dir = mkdtemp()
        self.debian_dir = os.path.join(self.test_dir, "debian")
        self.copyright_file_path = os.path.join(self.debian_dir, "copyright")
        os.makedirs(self.debian_dir)
Code example #4
File: test_coverage.py  Project: agustinhenze/dlt
class CoverageTest(unittest.TestCase):
    def setUp(self):
        self.tokenizer = Tokenizer()
        self.parser = Parser()
        self.test_dir = mkdtemp()
        self.debian_dir = os.path.join(self.test_dir, "debian")
        self.copyright_file_path = os.path.join(self.debian_dir, "copyright")
        os.makedirs(self.debian_dir)

    def fake_file(self, filename, dir=None):
        if dir is None:
            dir = self.test_dir
        with open(os.path.join(dir, filename), 'w'):
            pass

    def get_paragraphs(self, txt):
        with open(self.copyright_file_path, 'w') as f:
            f.write("".join(txt))
        paragraphs = self.tokenizer.get_paragraphs(txt)
        self.parser._guess_types(paragraphs)
        self.parser.process(paragraphs)
        return paragraphs

    def test_test(self):
        self.fake_file("foobar.foo")
        self.fake_file("sara.sa", self.debian_dir)
        paragraphs = self.get_paragraphs(two_fp_without_header)
        coverage = Coverage(paragraphs, self.test_dir)
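test_test is effectively a smoke test: it plants empty files in both the temp root and the debian/ subdirectory, parses the fixture, and constructs a Coverage from the paragraphs plus the package root, without asserting on the result. two_fp_without_header is presumably a fixture string defined elsewhere in test_coverage.py; the excerpt does not show it.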
Code example #5
File: test_tokenizer.py  Project: agustinhenze/dlt
    def test_two_paragraph_three_tokens(self):
        tokenizer = Tokenizer()
        data = """

Files: foobar.foo
Copyright: Foo Bar <*****@*****.**>
License: Beerware


Files: sara.sa
Copyright: Sara Sa <*****@*****.**>
License: Vaporware

"""
        tokenized_data = tokenizer.get_paragraphs(data.splitlines(True))
        self.assertEqual(len(tokenized_data), 2)
        first_paragraph = tokenized_data[0]
        self.assertEqual(len(first_paragraph), 3)
        first_field = list(first_paragraph)[0]
        self.assertEqual(first_field.line_number, 3)
        self.assertEqual(first_field.name, "Files")
        self.assertEqual(list(first_field)[0], " foobar.foo\n")
        second_field = list(first_paragraph)[1]
        self.assertEqual(second_field.line_number, 4)
        self.assertEqual(second_field.name, "Copyright")
        self.assertEqual(list(second_field)[0], " Foo Bar <*****@*****.**>\n")
        third_field = list(first_paragraph)[2]
        self.assertEqual(third_field.line_number, 5)
        self.assertEqual(third_field.name, "License")
        self.assertEqual(list(third_field)[0], " Beerware\n")

        second_paragraph = tokenized_data[1]
        self.assertEqual(len(second_paragraph), 3)
        first_field = list(second_paragraph)[0]
        self.assertEqual(first_field.line_number, 8)
        self.assertEqual(first_field.name, "Files")
        self.assertEqual(list(first_field)[0], " sara.sa\n")
        second_field = list(second_paragraph)[1]
        self.assertEqual(second_field.line_number, 9)
        self.assertEqual(second_field.name, "Copyright")
        self.assertEqual(list(second_field)[0], " Sara Sa <*****@*****.**>\n")
        third_field = list(second_paragraph)[2]
        self.assertEqual(third_field.line_number, 10)
        self.assertEqual(third_field.name, "License")
        self.assertEqual(list(third_field)[0], " Vaporware\n")
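Note that line_number is 1-based and counts the leading blank lines of the input: the first paragraph's Files field lands on line 3, and the second paragraph's begins on line 8, after the two blank separator lines.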
Code example #6
File: test_rules.py  Project: agustinhenze/dlt
class RuleTest(unittest.TestCase):
    def setUp(self):
        self.tokenizer = Tokenizer()
        self.parser = Parser()

    def get_paragraphs(self, txt):
        paragraphs = self.tokenizer.get_paragraphs(txt)
        self.parser._guess_types(paragraphs)
        return paragraphs

    def print_messages(self, rule):
        for message in rule.messages:
            self.parser._print_message(message)
Code example #7
File: test_rules.py  Project: agustinhenze/dlt
    def setUp(self):
        self.tokenizer = Tokenizer()
        self.parser = Parser()
Code example #8
File: test_parser.py  Project: agustinhenze/dlt
    def test_default_init(self):
        tokenizer = Tokenizer()
        paragraphs = tokenizer.get_paragraphs(two_fp_with_invalid_field)
        parser = Parser()
        self.assertFalse(parser.process(paragraphs))
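Putting the pieces together, here is a minimal end-to-end sketch built only from the calls the excerpts above demonstrate. The import paths are assumptions (no excerpt shows them), and _guess_types is a private helper that the tests call directly:

# Hypothetical import paths; the excerpts above never show them.
from dlt.tokenizer import Tokenizer
from dlt.parser import Parser
from dlt.coverage import Coverage

with open("debian/copyright") as f:
    lines = f.readlines()

tokenizer = Tokenizer()
paragraphs = tokenizer.get_paragraphs(lines)  # list of field paragraphs

parser = Parser()
parser._guess_types(paragraphs)  # annotate field types, as the tests do
ok = parser.process(paragraphs)  # False when a field is invalid (example #8)

coverage = Coverage(paragraphs, ".")  # Files stanzas vs. files under the package root (example #4)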