Example 1
    def test_neuter_encoding_declaration(self):
        for source in ENCODING_DECLARATION_SOURCES:
            neutered = neuter_encoding_declaration(source.decode("ascii"))
            neutered = neutered.encode("ascii")
            self.assertEqual(
                source_encoding(neutered),
                DEF_ENCODING,
                "Wrong encoding in %r" % neutered
            )
Example 2
    def test_one_encoding_declaration(self):
        input_src = textwrap.dedent(u"""\
            # -*- coding: utf-16 -*-
            # Just a comment.
            # -*- coding: ascii -*-
            """)
        expected_src = textwrap.dedent(u"""\
            # (deleted declaration) -*-
            # Just a comment.
            # -*- coding: ascii -*-
            """)
        output_src = neuter_encoding_declaration(input_src)
        self.assertEqual(expected_src, output_src)
Example 3
    def test_two_encoding_declarations(self):
        input_src = textwrap.dedent("""\
            # -*- coding: ascii -*-
            # -*- coding: utf-8 -*-
            # -*- coding: utf-16 -*-
            """)
        expected_src = textwrap.dedent("""\
            # (deleted declaration) -*-
            # (deleted declaration) -*-
            # -*- coding: utf-16 -*-
            """)
        output_src = neuter_encoding_declaration(input_src)
        assert expected_src == output_src
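
The expected outputs above imply a line-preserving rewrite that only defuses a coding declaration found on the first two lines (per PEP 263) and leaves later lines untouched. A minimal sketch consistent with that behaviour; the regex and the helper body are illustrations inferred from these tests, not necessarily coverage.py's actual implementation:

    import re

    # A PEP 263 style coding declaration in a comment line.
    COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)

    def neuter_encoding_declaration(source):
        """Return `source` with any coding declaration defused, keeping the line count."""
        if COOKIE_RE.search(source):
            source_lines = source.splitlines(True)
            # Only the first two lines can carry a declaration.
            for lineno in range(min(2, len(source_lines))):
                source_lines[lineno] = COOKIE_RE.sub("# (deleted declaration)", source_lines[lineno])
            source = "".join(source_lines)
        return source

Rewriting within the matching line, rather than deleting it, is what lets the later round-trip tests assert that the line count is unchanged and that exactly the expected number of lines differ.
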
Example 4
    def __init__(self, text, statements, multiline):
        self.root_node = ast.parse(neuter_encoding_declaration(text))
        self.statements = set(multiline.get(l, l) for l in statements)
        self.multiline = multiline

        self.arcs = set()

        # A map from arc pairs to a pair of sentence fragments:
        #     (startmsg, endmsg).
        # For an arc from line 17, they should be usable like:
        #    "Line 17 {endmsg}, because {startmsg}"
        self.missing_arc_fragments = collections.defaultdict(list)
        self.block_stack = []

        self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
Example 5
    def __init__(self, text, statements, multiline):
        self.root_node = ast.parse(neuter_encoding_declaration(text))
        # TODO: I think this is happening in too many places.
        self.statements = set(multiline.get(l, l) for l in statements)
        self.multiline = multiline

        if int(os.environ.get("COVERAGE_ASTDUMP", 0)):      # pragma: debugging
            # Dump the AST so that failing tests have helpful output.
            print("Statements: {}".format(self.statements))
            print("Multiline map: {}".format(self.multiline))
            ast_dump(self.root_node)

        self.arcs = self.arcs_to_return = set()
        if int(os.environ.get("COVERAGE_TRACK_ARCS", 0)):   # pragma: debugging
            self.arcs = SetSpy(self.arcs)
        self.block_stack = []
Example 6
    def test_neuter_encoding_declaration(self):
        for lines_diff_expected, source, _ in ENCODING_DECLARATION_SOURCES:
            neutered = neuter_encoding_declaration(source.decode("ascii"))
            neutered = neutered.encode("ascii")

            # The neutered source should have the same number of lines.
            source_lines = source.splitlines()
            neutered_lines = neutered.splitlines()
            assert len(source_lines) == len(neutered_lines)

            # Only one of the lines should be different.
            lines_different = sum(
                int(nline != sline) for nline, sline in zip(neutered_lines, source_lines)
            )
            assert lines_diff_expected == lines_different

            # The neutered source will be detected as having no encoding
            # declaration.
            assert source_encoding(neutered) == DEF_ENCODING, "Wrong encoding in %r" % neutered
Example 7
    def __init__(self, text, statements, multiline):
        self.root_node = ast.parse(neuter_encoding_declaration(text))
        # TODO: I think this is happening in too many places.
        self.statements = set(multiline.get(l, l) for l in statements)
        self.multiline = multiline

        if int(os.environ.get("COVERAGE_ASTDUMP", 0)):  # pragma: debugging
            # Dump the AST so that failing tests have helpful output.
            print("Statements: {}".format(self.statements))
            print("Multiline map: {}".format(self.multiline))
            ast_dump(self.root_node)

        self.arcs = set()

        # A map from arc pairs to a pair of sentence fragments: (startmsg, endmsg).
        # For an arc from line 17, they should be usable like:
        #    "Line 17 {endmsg}, because {startmsg}"
        self.missing_arc_fragments = collections.defaultdict(list)
        self.block_stack = []

        self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
Example 8
    def test_neuter_encoding_declaration(self):
        for lines_diff_expected, source in ENCODING_DECLARATION_SOURCES:
            neutered = neuter_encoding_declaration(source.decode("ascii"))
            neutered = neutered.encode("ascii")

            # The neutered source should have the same number of lines.
            source_lines = source.splitlines()
            neutered_lines = neutered.splitlines()
            self.assertEqual(len(source_lines), len(neutered_lines))

            # Only one of the lines should be different.
            lines_different = sum(
                int(nline != sline) for nline, sline in zip(neutered_lines, source_lines)
            )
            self.assertEqual(lines_diff_expected, lines_different)

            # The neutered source will be detected as having no encoding
            # declaration.
            self.assertEqual(
                source_encoding(neutered),
                DEF_ENCODING,
                "Wrong encoding in %r" % neutered
            )
Example 9
    def __init__(self, text, statements, multiline):
        self.root_node = ast.parse(neuter_encoding_declaration(text))
        # TODO: I think this is happening in too many places.
        self.statements = {multiline.get(l, l) for l in statements}
        self.multiline = multiline

        if AST_DUMP:  # pragma: debugging
            # Dump the AST so that failing tests have helpful output.
            print(f"Statements: {self.statements}")
            print(f"Multiline map: {self.multiline}")
            ast_dump(self.root_node)

        self.arcs = set()

        # A map from arc pairs to a list of pairs of sentence fragments:
        #   { (start, end): [(startmsg, endmsg), ...], }
        #
        # For an arc from line 17, they should be usable like:
        #    "Line 17 {endmsg}, because {startmsg}"
        self.missing_arc_fragments = collections.defaultdict(list)
        self.block_stack = []

        # $set_env.py: COVERAGE_TRACK_ARCS - Trace every arc added while parsing code.
        self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
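
All of the constructor snippets neuter the declaration before handing the text to ast.parse, presumably because the source has already been decoded to text, so a leftover declaration for a different encoding no longer describes it (and on Python 2, compiling a unicode string that still carries one raises a SyntaxError). A hypothetical call site, with the file name assumed for illustration:

    import ast

    # Hypothetical driver: read already-decoded source text, defuse any stale
    # declaration (using the sketch above or coverage.py's own helper), then
    # parse it the way the constructors above do.
    with open("example_module.py", encoding="utf-8") as f:   # path is illustrative
        text = f.read()
    tree = ast.parse(neuter_encoding_declaration(text))
    print(len(tree.body), "top-level statements")
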
Example 10
    def test_neuter_encoding_declaration(self):
        for source in ENCODING_DECLARATION_SOURCES:
            neutered = neuter_encoding_declaration(source.decode("ascii"))
            neutered = neutered.encode("ascii")
            self.assertEqual(source_encoding(neutered), DEF_ENCODING,
                             "Wrong encoding in %r" % neutered)