def test_split_lines_3():
    """Edge cases for `split_lines`: inputs that end with a newline."""
    # A trailing newline produces a final empty line.
    result = list(split_lines([(Token.A, 'line1\nline2\n')]))
    assert result == [
        [(Token.A, 'line1')],
        [(Token.A, 'line2')],
        [(Token.A, '')],
    ]

    # A lone newline: an empty first line, then an empty token on line two.
    result = list(split_lines([(Token.A, '\n')]))
    assert result == [
        [],
        [(Token.A, '')],
    ]

    # The empty string still yields exactly one (empty) line.
    result = list(split_lines([(Token.A, '')]))
    assert result == [
        [(Token.A, '')],
    ]
Example #2
0
def test_split_lines_3():
    """Edge cases for `split_lines`: inputs that end with a newline."""
    # Case 1: trailing newline yields an extra, empty final line.
    tokens = [(Token.A, 'line1\nline2\n')]
    expected = [
        [(Token.A, 'line1')],
        [(Token.A, 'line2')],
        [(Token.A, '')],
    ]
    assert list(split_lines(tokens)) == expected

    # Case 2: a newline by itself -> empty first line, empty token after.
    tokens = [(Token.A, '\n')]
    expected = [
        [],
        [(Token.A, '')],
    ]
    assert list(split_lines(tokens)) == expected

    # Case 3: empty input text still produces one line.
    tokens = [(Token.A, '')]
    expected = [
        [(Token.A, '')],
    ]
    assert list(split_lines(tokens)) == expected
Example #3
0
    def _print_lines_2(self, lines, start, end, breaks=(), frame=None):
        """
        Similar to `Pdb._print_lines`, except that this takes all the lines
        of the given file as input, it uses Pygments for the highlighting,
        it does slicing, and it prints everything in color.

        :param lines: All lines of the file, as a sequence of strings.
        :param start: First line number to print (1-based, inclusive).
        :param end: Last line number to print (1-based, inclusive).
        :param breaks: Line numbers that carry a breakpoint marker.
        :param frame: Current frame, if any; its ``f_lineno`` is marked as
            the current line.
        """
        # Line number to mark as "current", or -1 when there is no frame.
        # (The original also assigned an `exc_lineno` here; it was never
        # read anywhere, so the dead assignment has been removed.)
        current_lineno = frame.f_lineno if frame else -1

        # Highlight everything. (Highlighting works much better from the
        # beginning of the file.)
        all_tokens = python_lexer.get_tokens(''.join(lines))

        # Slice lines. `start`/`end` are 1-based, hence the `start - 1`.
        token_lines = list(split_lines(all_tokens))[start - 1:end]

        # Add left margin. (Numbers + 'B' or '->'.)
        def add_margin(lineno, tokens):
            is_break = lineno in breaks
            is_current_line = lineno == current_lineno

            return get_line_prefix_tokens(is_break, is_current_line) \
                + [(Token.LineNumber, str(lineno).rjust(3) + ' ')] \
                + tokens + [(Token, '\n')]

        # Print each line with its margin prepended.
        for i, tokens in enumerate(token_lines):
            self.cli.print_tokens(add_margin(i + start, tokens))
Example #4
0
    def lex_document(self, cli, document):
        """
        Lex the PDB input line.

        If the first word is a known PDB command (shortcut or documented
        command), highlight it with the `PdbCommand` token once the input
        has been accepted; otherwise delegate to the Python lexer.

        Returns a callable mapping a 0-based line number to a token list.
        """
        parts = document.text.split(None, 1)
        first_word = parts[0] if parts else ''

        # When the first word is a PDB command:
        # (Membership test directly on the dicts — `.keys()` is redundant.)
        if first_word in shortcuts or first_word in commands_with_help:
            # PDB:
            if cli.is_done:
                tokens = [
                    (Token.PdbCommand, ' %s ' % first_word),
                    (Token, ' '),
                    (Token, parts[1] if len(parts) > 1 else ''),
                ]
            else:
                tokens = [(Token.Text, document.text)]

            token_lines = list(split_lines(tokens))

            def get_line(lineno):
                # Return an empty token list for out-of-range line numbers,
                # consistent with the other `lex_document` implementations.
                try:
                    return token_lines[lineno]
                except IndexError:
                    return []

            return get_line

        # Otherwise, highlight as Python code.
        else:
            return self.python_lexer.lex_document(cli, document)
Example #5
0
    def _print_lines_2(self, lines, start, end, breaks=(), frame=None):
        """
        Similar to `Pdb._print_lines`, except that this takes all the lines
        of the given file as input, it uses Pygments for the highlighting,
        it does slicing, and it prints everything in color.

        :param lines: All lines of the file, as a sequence of strings.
        :param start: First line number to print (1-based, inclusive).
        :param end: Last line number to print (1-based, inclusive).
        :param breaks: Line numbers that carry a breakpoint marker.
        :param frame: Current frame, if any; its ``f_lineno`` is marked as
            the current line.
        """
        # Line number to mark as "current"; -1 means none. (The dead
        # `exc_lineno = -1` assignment, which was never read, is removed.)
        if frame:
            current_lineno = frame.f_lineno
        else:
            current_lineno = -1

        # Highlight everything. (Highlighting works much better from the
        # beginning of the file.)
        all_tokens = python_lexer.get_tokens(''.join(lines))

        # Slice lines; `start`/`end` are 1-based, hence `start - 1`.
        sliced = list(split_lines(all_tokens))[start - 1:end]

        # Add left margin. (Numbers + 'B' or '->'.)
        def add_margin(lineno, tokens):
            is_break = lineno in breaks
            is_current_line = lineno == current_lineno

            return get_line_prefix_tokens(is_break, is_current_line) \
                + [(Token.LineNumber, str(lineno).rjust(3) + ' ')] \
                + tokens + [(Token, '\n')]

        for i, tokens in enumerate(sliced):
            self.cli.print_tokens(add_margin(i + start, tokens))
    def test_split_lines(self):
        """Three '\\n'-separated pieces come back as three token lists."""
        tokens = [(Token.A, 'line1\nline2\nline3')]
        expected = [
            [(Token.A, 'line1')],
            [(Token.A, 'line2')],
            [(Token.A, 'line3')],
        ]
        self.assertEqual(list(split_lines(tokens)), expected)
def test_split_lines():
    """Three '\\n'-separated pieces come back as three token lists."""
    result = list(split_lines([(Token.A, 'line1\nline2\nline3')]))
    assert result == [[(Token.A, 'line1')],
                      [(Token.A, 'line2')],
                      [(Token.A, 'line3')]]
    def test_split_lines(self):
        """A multi-line token is split into one token list per line."""
        result = list(split_lines([(Token.A, 'line1\nline2\nline3')]))
        self.assertEqual(result, [
            [(Token.A, 'line1')],
            [(Token.A, 'line2')],
            [(Token.A, 'line3')],
        ])
Example #9
0
def test_split_lines():
    """A multi-line token is split into one token list per line."""
    tokens = [(Token.A, 'line1\nline2\nline3')]
    expected = [
        [(Token.A, 'line1')],
        [(Token.A, 'line2')],
        [(Token.A, 'line3')],
    ]
    assert list(split_lines(tokens)) == expected
Example #10
0
    def lex_document(self, cli, document):
        """
        Tokenize `document`, split the tokens into lines, and return a
        callable that maps a line number to that line's token list.

        Out-of-range line numbers yield an empty list. (Negative numbers
        deliberately fall through to Python's negative indexing.)
        """
        token_lines = list(split_lines(self._get_tokens(cli, document.text)))

        def get_line(lineno):
            try:
                return token_lines[lineno]
            except IndexError:
                return []

        return get_line
Example #11
0
    def lex_document(self, cli, document):
        """
        Return a line-number -> token-list function for `document`.

        The whole text is tokenized once up front; line numbers beyond the
        last line produce an empty token list.
        """
        per_line = list(split_lines(self._get_tokens(cli, document.text)))

        def get_line(lineno):
            try:
                return per_line[lineno]
            except IndexError:
                return []

        return get_line
Example #12
0
    def lex_document(self, cli, document):
        """
        Lex the PDB input line.

        If the first word is a known PDB command (shortcut or documented
        command), highlight it with the `PdbCommand` token once the input
        has been accepted; otherwise delegate to the Python lexer.

        Returns a callable mapping a 0-based line number to a token list.
        """
        parts = document.text.split(None, 1)
        first_word = parts[0] if parts else ''

        # When the first word is a PDB command:
        # (Membership test directly on the dicts — `.keys()` is redundant.)
        if first_word in shortcuts or first_word in commands_with_help:
            # PDB:
            if cli.is_done:
                tokens = [
                    (Token.PdbCommand, ' %s ' % first_word),
                    (Token, ' '),
                    (Token, parts[1] if len(parts) > 1 else ''),
                ]
            else:
                tokens = [(Token.Text, document.text)]

            token_lines = list(split_lines(tokens))

            def get_line(lineno):
                # Return an empty token list for out-of-range line numbers,
                # consistent with the other `lex_document` implementations.
                try:
                    return token_lines[lineno]
                except IndexError:
                    return []

            return get_line

        # Otherwise, highlight as Python code.
        else:
            return self.python_lexer.lex_document(cli, document)