Code example #1
    def append(self, string):  # type: (str) -> None
        pos = self._stream.tell()
        self._stream.seek(0, SEEK_END)
        self._stream.write(encode(string))
        self._stream.seek(pos)
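The `append` method writes to the end of the underlying stream while leaving the current read position untouched. A minimal runnable sketch of the kind of class this could belong to, assuming `_stream` is an in-memory `io.BytesIO` and `encode` is a plain str-to-UTF-8 helper (the class name and the `encode` helper are assumptions, not part of the original):

import io
from io import SEEK_END


def encode(text):
    # Assumed helper: encode str to UTF-8 bytes.
    return text.encode("utf-8")


class BufferedStream(object):  # hypothetical name for the enclosing class
    def __init__(self):
        self._stream = io.BytesIO()

    def append(self, string):  # type: (str) -> None
        # Remember the current position, write at the very end, then
        # restore the position so in-progress reads are unaffected.
        pos = self._stream.tell()
        self._stream.seek(0, SEEK_END)
        self._stream.write(encode(string))
        self._stream.seek(pos)

With this sketch, calling append() twice and then reading from position 0 returns both pieces in order, because the read cursor is never moved.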
Code example #2
def readline():
    return encode(formatter.remove_format(decode(
        source_io.readline())))
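The nested `readline` function exists because `tokenize.tokenize` repeatedly calls a readline-style callable that must return one line of bytes per call; the snippet decodes the raw line, strips formatting markup with the formatter, and re-encodes the result. A small standalone illustration of that contract (the sample source and the absence of a formatter are assumptions):

import io
import tokenize

source = "x = 1  # a comment\n"
source_io = io.BytesIO(source.encode("utf-8"))

# tokenize.tokenize() calls the readline argument repeatedly and expects
# bytes back, one line at a time.
for token in tokenize.tokenize(source_io.readline):
    print(token.type, token.string)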
Code example #3
    def set(self, string):  # type: (str) -> None
        self.clear()

        self._stream.write(encode(string))
        self._stream.seek(0)
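`set` replaces the whole content of the stream and rewinds it so the next read starts at the beginning. Continuing the hypothetical `BufferedStream` sketch from example #1; `clear` is not shown in the original, so the truncate-and-rewind body below is an assumption:

    def clear(self):  # type: () -> None
        # Assumed implementation: drop all content and rewind.
        self._stream.truncate(0)
        self._stream.seek(0)

    def set(self, string):  # type: (str) -> None
        # Replace the current content, then rewind so reads start
        # from the beginning of the new content.
        self.clear()
        self._stream.write(encode(string))
        self._stream.seek(0)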
Code example #4
    def split_to_lines(self, source):
        lines = []
        current_line = 1
        current_col = 0
        buffer = ""
        current_type = None
        source_io = io.BytesIO(encode(source))
        formatter = PlainFormatter()

        def readline():
            return encode(formatter.remove_format(decode(
                source_io.readline())))

        tokens = tokenize.tokenize(readline)
        line = ""
        for token_info in tokens:
            token_type, token_string, start, end, _ = token_info
            lineno = start[0]
            if lineno == 0:
                # Encoding line
                continue

            if token_type == tokenize.ENDMARKER:
                # End of source
                lines.append(line)
                break

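            # The token begins on a later source line: pad any skipped
            # blank lines with empty strings, then flush the buffered text
            # for the line that just ended.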
            if lineno > current_line:
                diff = lineno - current_line
                if diff > 1:
                    lines += [""] * (diff - 1)

                line += "<{}>{}</>".format(self._theme[current_type],
                                           buffer.rstrip("\n"))

                # New line
                lines.append(line)
                line = ""
                current_line = lineno
                current_col = 0
                buffer = ""

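            # Pick the highlight style for this token.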
            if token_string in self.KEYWORDS:
                new_type = self.TOKEN_KEYWORD
            elif token_string in self.BUILTINS or token_string == "self":
                new_type = self.TOKEN_BUILTIN
            elif token_type == tokenize.STRING:
                new_type = self.TOKEN_STRING
            elif token_type == tokenize.NUMBER:
                new_type = self.TOKEN_NUMBER
            elif token_type == tokenize.COMMENT:
                new_type = self.TOKEN_COMMENT
            elif token_type == tokenize.OP:
                new_type = self.TOKEN_OP
            elif token_type == tokenize.NEWLINE:
                continue
            else:
                new_type = self.TOKEN_DEFAULT

            if current_type is None:
                current_type = new_type

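            # Copy the raw source text (usually whitespace) between the
            # previous token and the start of this one.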
            if start[1] > current_col:
                buffer += token_info.line[current_col:start[1]]

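            # The style changed: flush the buffered run using the previous
            # token type's style before starting a new run.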
            if current_type != new_type:
                line += "<{}>{}</>".format(self._theme[current_type], buffer)
                buffer = ""
                current_type = new_type

            if lineno < end[0]:
                # The token spans multiple lines
                lines.append(line)
                token_lines = token_string.split("\n")
                for l in token_lines[1:-1]:
                    lines.append("<{}>{}</>".format(self._theme[current_type],
                                                    l))

                current_line = end[0]
                buffer = token_lines[-1][:end[1]]
                line = ""
                continue

            buffer += token_string
            current_col = end[1]
            current_line = lineno

        return lines
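`split_to_lines` walks the token stream and rebuilds the source line by line, wrapping each run of same-styled text in `<style>...</>` markup taken from the theme. The `lineno < end[0]` branch exists because a single token (a triple-quoted string, for example) can span several source lines, yet `tokenize` reports it only once with distinct start and end rows, so its text has to be re-split per line. A small standalone check of that behaviour (the sample source is an assumption):

import io
import tokenize

source = 's = """first\nsecond\nthird"""\ny = 2\n'
readline = io.BytesIO(source.encode("utf-8")).readline

for tok in tokenize.tokenize(readline):
    if tok.type == tokenize.STRING:
        # The triple-quoted string is one token whose start and end rows
        # differ: (1, 4) and (3, 8) for this sample.
        print(tok.start, tok.end)
        print(tok.string.split("\n"))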