def test_get_line(self):
        """Test that the context generator maps character positions to the
        proper 1-based line numbers; positions past the end of the data
        resolve to the last line.
        """

        # Use a context manager so the file handle is closed deterministically
        # instead of leaking until garbage collection.
        with open("tests/resources/contextgenerator/data.txt") as fixture:
            d = fixture.read()
        c = ContextGenerator(d)

        eq_(c.get_line(30), 3)
        eq_(c.get_line(11), 2)
        # A position far beyond the data clamps to the final line (11).
        eq_(c.get_line(10000), 11)
# Example #2
    def test_get_line(self):
        """Test that the context generator maps character positions to the
        proper 1-based line numbers; positions past the end of the data
        resolve to the last line.
        """

        # Use a context manager so the file handle is closed deterministically
        # instead of leaking until garbage collection.
        with open("tests/resources/contextgenerator/data.txt") as fixture:
            d = fixture.read()
        c = ContextGenerator(d)

        eq_(c.get_line(30), 3)
        eq_(c.get_line(11), 2)
        # A position far beyond the data clamps to the final line (11).
        eq_(c.get_line(10000), 11)
# Example #3
    def test_get_context_trimming_inverse(self):
        """
        Tests that surrounding lines are trimmed properly; the error line is
        ignored if it is less than 140 characters.
        """

        # Close the file promptly via a context manager instead of relying
        # on garbage collection (resource-leak fix).
        with open("tests/resources/contextgenerator/longdata.txt") as fixture:
            d = fixture.read()
        c = ContextGenerator(d)

        trimmed = c.get_context(line=6, column=0)

        # The middle entry is the error line itself: short, so untrimmed.
        eq_(trimmed[1], "This line should be entirely visible.")
        # The neighboring long lines should have had their "X" padding
        # trimmed away at the near edges.
        assert trimmed[0][0] != "X"
        assert trimmed[2][-1] != "X"
# Example #4
    def test_get_context_trimming(self):
        """
        Test that contexts are generated properly when lines are >140
        characters.
        """

        # Close the file promptly via a context manager instead of relying
        # on garbage collection (resource-leak fix).
        with open("tests/resources/contextgenerator/longdata.txt") as fixture:
            d = fixture.read()
        c = ContextGenerator(d)

        trimmed = c.get_context(line=2, column=89)
        # Expected lengths of the (previous, error, next) context lines.
        proper_lengths = (140, 148, 140)

        # Iterate the named tuple rather than a duplicated literal list;
        # proper_lengths was previously defined but never used.
        for i, length in enumerate(proper_lengths):
            eq_(len(trimmed[i]), length)
    def test_get_context_trimming_inverse(self):
        """
        Tests that surrounding lines are trimmed properly; the error line is
        ignored if it is less than 140 characters.
        """

        # Close the file promptly via a context manager instead of relying
        # on garbage collection (resource-leak fix).
        with open("tests/resources/contextgenerator/longdata.txt") as fixture:
            d = fixture.read()
        c = ContextGenerator(d)

        trimmed = c.get_context(line=6, column=0)

        # The middle entry is the error line itself: short, so untrimmed.
        eq_(trimmed[1], "This line should be entirely visible.")
        # The neighboring long lines should have had their "X" padding
        # trimmed away at the near edges.
        assert trimmed[0][0] != "X"
        assert trimmed[2][-1] != "X"
    def test_get_context_trimming(self):
        """
        Test that contexts are generated properly when lines are >140
        characters.
        """

        # Close the file promptly via a context manager instead of relying
        # on garbage collection (resource-leak fix).
        with open("tests/resources/contextgenerator/longdata.txt") as fixture:
            d = fixture.read()
        c = ContextGenerator(d)

        trimmed = c.get_context(line=2, column=89)
        # Expected lengths of the (previous, error, next) context lines.
        proper_lengths = (140, 148, 140)

        # Iterate the named tuple rather than a duplicated literal list;
        # proper_lengths was previously defined but never used.
        for i, length in enumerate(proper_lengths):
            eq_(len(trimmed[i]), length)
    def test_json_constructs(self):
        """This tests some of the internal JSON stuff so we don't break zamboni."""

        self.err.warning((), "Context test",
                         context=ContextGenerator("x\ny\nz\n"),
                         line=2, column=0)
        self.err.notice((), "none")
        self.err.notice((), "line", line=1)
        self.err.notice((), "column", column=0)
        self.err.notice((), "line column", line=1, column=1)

        results = self.get_json_results()

        assert "messages" in results

        # Every warning must carry the context that was attached above.
        warnings = [msg for msg in results["messages"]
                    if msg["type"] == "warning"]
        assert all(msg["context"] == ["x", "y", "z"] for msg in warnings), \
            "Warning had wrong context."

        notices = (msg for msg in results["messages"]
                   if msg["type"] == "notice")
        for msg in notices:
            # Notices whose message body mentions "line" must report a
            # positive integer line number; otherwise line must be None.
            if "line" in msg["message"]:
                assert msg["line"] is not None
                assert isinstance(msg["line"], int)
                assert msg["line"] > 0
            else:
                assert msg["line"] is None

            # Columns are zero-based, so any value greater than -1 is valid.
            if "column" in msg["message"]:
                assert msg["column"] is not None
                assert isinstance(msg["column"], int)
                assert msg["column"] > -1
            else:
                assert msg["column"] is None
def test_css_file(err, filename, data, line_start=1):
    """Parse and test a whole CSS file.

    Builds a context generator from the raw data, strips unparseable
    characters, tokenizes the result, and runs the CSS test suite over the
    tokens. Tokenizer failures are reported as a warning rather than
    aborting validation.
    """

    tokenizer = cssutils.tokenize2.Tokenizer()
    # Build the context from the *unmodified* data so reported positions
    # line up with the original file contents.
    context = ContextGenerator(data)

    # Strip control characters and non-ASCII bytes before tokenizing.
    data = "".join(c for c in data if 8 < ord(c) < 127)

    token_generator = tokenizer.tokenize(data)

    try:
        _run_css_tests(err,
                       tokens=token_generator,
                       filename=filename,
                       line_start=line_start - 1,
                       context=context)
    except Exception:  # pragma: no cover
        # tokenize() returns a generator, so parse errors surface here while
        # the tokens are consumed, not at the tokenize() call itself.
        # Catch Exception rather than using a bare `except:`, which would
        # also swallow KeyboardInterrupt/SystemExit.
        err.warning(
            ("testcases_markup_csstester", "test_css_file", "could_not_parse"),
            "Could not parse CSS file",
            "CSS file could not be parsed by the tokenizer.", filename)
        return
# Example #9
    def test_load_data(self):
        """Test that data is loaded properly into the CG."""

        source = """abc
        def
        ghi"""
        generator = ContextGenerator(source)

        # Three input lines should yield exactly three stored lines.
        eq_(len(generator.data), 3)

        # Spot-check the first two lines; if these are stored correctly,
        # the remaining line is by the same mechanism.
        eq_(generator.data[0].strip(), "abc")
        eq_(generator.data[1].strip(), "def")
# Example #10
    def process(self, filename, data, extension="html"):
        """Processes data by splitting it into individual lines, then
        incrementally feeding each line into the parser, increasing the
        value of the line number with each line. CDATA sections are
        diverted to an internal buffer instead of being fed to the parser.
        """

        self.line = 0
        self.filename = filename
        self.extension = extension.lower()

        # Messages already emitted for this file; used to avoid duplicates.
        self.reported = set()

        self.context = ContextGenerator(data)

        lines = data.split("\n")

        # True while we are inside a <![CDATA[ ... ]]> section.
        # (The unused `pline` counter from the original has been removed.)
        buffering = False
        for line in lines:
            self.line += 1

            search_line = line
            while True:
                # If a CDATA element is found, push it and its contents to the
                # buffer. Push everything previous to it to the parser.
                if "<![CDATA[" in search_line and not buffering:
                    # Find the CDATA element.
                    cdatapos = search_line.find("<![CDATA[")

                    # If the element isn't at the start of the line, pass
                    # everything before it to the parser.
                    if cdatapos:
                        self._feed_parser(search_line[:cdatapos])
                    # Collect the rest of the line to send it to the buffer.
                    search_line = search_line[cdatapos:]
                    buffering = True
                    # Re-scan: the same line may also close the CDATA section.
                    continue

                elif "]]>" in search_line and buffering:
                    # If we find the end element on the line being scanned,
                    # buffer everything up to the end of it, and let the rest
                    # of the line pass through for further processing.
                    # NOTE(review): text after "]]>" is fed to the parser
                    # as-is, even if it opens another CDATA on the same line
                    # — presumably acceptable for real-world input; confirm.
                    end_cdatapos = search_line.find("]]>") + 3
                    self._save_to_buffer(search_line[:end_cdatapos])
                    search_line = search_line[end_cdatapos:]
                    buffering = False
                break

            if buffering:
                self._save_to_buffer(search_line + "\n")
            else:
                self._feed_parser(search_line)
# Example #11
    def test_get_context(self):
        """Test that contexts are generated properly."""

        # Close the file promptly via a context manager instead of relying
        # on garbage collection (resource-leak fix).
        with open("tests/resources/contextgenerator/data.txt") as fixture:
            d = fixture.read()
        c = ContextGenerator(d)

        c_start = c.get_context(line=1, column=0)
        c_end = c.get_context(line=11, column=0)

        # Contexts are always length 3; missing neighbors are None at the
        # first and last lines of the file.
        eq_(len(c_start), 3)
        eq_(c_start[0], None)
        eq_(len(c_end), 3)
        eq_(c_end[2], None)

        eq_(c_start[1], "0123456789")
        eq_(c_end[0], "9012345678")
        eq_(c_end[1], "")

        # A line in the middle has both neighbors populated.
        c_mid = c.get_context(line=5)
        eq_(len(c_mid), 3)
        eq_(c_mid[0], "3456789012")
        eq_(c_mid[2], "5678901234")
    def test_get_context(self):
        """Test that contexts are generated properly."""

        # Close the file promptly via a context manager instead of relying
        # on garbage collection (resource-leak fix).
        with open("tests/resources/contextgenerator/data.txt") as fixture:
            d = fixture.read()
        c = ContextGenerator(d)

        c_start = c.get_context(line=1, column=0)
        c_end = c.get_context(line=11, column=0)

        # Contexts are always length 3; missing neighbors are None at the
        # first and last lines of the file.
        eq_(len(c_start), 3)
        eq_(c_start[0], None)
        eq_(len(c_end), 3)
        eq_(c_end[2], None)

        eq_(c_start[1], "0123456789")
        eq_(c_end[0], "9012345678")
        eq_(c_end[1], "")

        # A line in the middle has both neighbors populated.
        c_mid = c.get_context(line=5)
        eq_(len(c_mid), 3)
        eq_(c_mid[0], "3456789012")
        eq_(c_mid[2], "5678901234")
# Example #13
def get_tree(code, err=None, filename=None, shell=None):
    """Retrieve the parse tree for a JS snippet.

    Args:
        code: The JavaScript source to parse; falsy input short-circuits.
        err: Error bundle used to report problems. NOTE(review): assumed
            non-None whenever parsing can fail — the except branch
            dereferences it unconditionally; confirm with callers.
        filename: Name of the file being validated, for error reporting.
        shell: Optional path to a JS shell; falls back to
            SPIDERMONKEY_INSTALLATION when not given.

    Returns:
        The parse tree from _get_tree, or None when `code` is empty or a
        JSReflectException was raised (after reporting through `err`).
    """

    if not code:
        return None

    try:
        return _get_tree(code, shell or SPIDERMONKEY_INSTALLATION)
    except JSReflectException as exc:
        # Strip the quote characters Spidermonkey wraps its messages in.
        str_exc = str(exc).strip("'\"")
        if "SyntaxError" in str_exc or "ReferenceError" in str_exc:
            # Compile-time problems in the add-on's own JS: report as a
            # warning with a context built from the original source.
            err.warning(err_id=("testcases_scripting", "test_js_file",
                                "syntax_error"),
                        warning="JavaScript Compile-Time Error",
                        description=[
                            "A compile-time error in the JavaScript halted "
                            "validation of that file.",
                            "Message: %s" % str_exc.split(":", 1)[-1].strip()
                        ],
                        filename=filename,
                        line=exc.line,
                        context=ContextGenerator(code))
        elif "InternalError: too much recursion" in str_exc:
            # Spidermonkey hit its recursion limit; the file is not
            # necessarily broken, so only emit a notice.
            err.notice(
                err_id=("testcases_scripting", "test_js_file",
                        "recursion_error"),
                notice="JS too deeply nested for validation",
                description="A JS file was encountered that could not be "
                "valiated due to limitations with Spidermonkey. "
                "It should be manually inspected.",
                filename=filename)
        else:
            # Any other reflection failure is a hard error.
            err.error(err_id=("testcases_scripting", "test_js_file",
                              "retrieving_tree"),
                      error="JS reflection error prevented validation",
                      description=[
                          "An error in the JavaScript file prevented it "
                          "from being properly read by the Spidermonkey JS "
                          "engine.",
                          str(exc)
                      ],
                      filename=filename)
# Example #14
 def run(data, expectation, line=2):
     # Drop empty lines from the input before building the context.
     nonblank = [chunk for chunk in data.split('\n') if chunk]
     data = '\n'.join(nonblank)
     # Build the context generator and verify the context it produces.
     generator = ContextGenerator(data)
     eq_(generator.get_context(line), expectation)
 def run(data, expectation, line=2):
     # Drop empty lines from the input before building the context.
     nonblank = [chunk for chunk in data.split('\n') if chunk]
     data = '\n'.join(nonblank)
     # Build the context generator and verify the context it produces.
     generator = ContextGenerator(data)
     eq_(generator.get_context(line), expectation)