Example #1
0
    def apply_transformation(self, cli, document, tokens):
        """
        Re-tokenize the leading ``self._len_before`` and trailing
        ``self._len_after`` characters as ``Token.History.ExistingInput``,
        leaving the tokens in between untouched.

        Returns a ``Transformation`` wrapping the (exploded) token list.
        """
        # Nothing marked as pre-existing input: pass the tokens through.
        if not (self._len_before or self._len_after):
            return Transformation(document, tokens)

        tokens = explode_tokens(tokens)
        split_at = len(tokens) - self._len_after

        head_text = ''.join(text for _, text in tokens[:self._len_before])
        tail_text = ''.join(text for _, text in tokens[split_at:])

        combined = (
            [(Token.History.ExistingInput, head_text)] +
            tokens[self._len_before:split_at] +
            [(Token.History.ExistingInput, tail_text)])
        return Transformation(document=document, tokens=explode_tokens(combined))
Example #2
0
 def read_chunk(self):
     """
     Pull the next chunk from ``self.generator`` and return it as a list
     of exploded (token, text) tuples.  When the generator is exhausted,
     set the EOF flag and return an empty list.
     """
     try:
         chunk = next(self.generator)
     except StopIteration:
         self._eof = True
         return []
     else:
         return explode_tokens(chunk)
Example #3
0
 def first_input_line(cli):
     """
     Return the (token, char) pairs that follow the last newline of the
     prompt tokens — i.e. the prompt's final line.
     """
     collected = []
     # Walk backwards and stop at the first newline we hit.
     for token, char in reversed(explode_tokens(get_prompt_tokens(cli))):
         if char == '\n':
             break
         collected.append((token, char))
     collected.reverse()
     return collected
Example #4
0
 def first_input_line(cli):
     """
     Return the final line of the prompt as (token, char) tuples.

     Scans the exploded prompt tokens from the end and stops at the
     first newline encountered.
     """
     pairs = explode_tokens(get_prompt_tokens(cli))
     line = []
     index = len(pairs) - 1
     while index >= 0:
         token, char = pairs[index]
         if char == '\n':
             break
         line.insert(0, (token, char))
         index -= 1
     return line
Example #5
0
 def before(cli):
     """
     Return the (token, char) pairs of the prompt that precede its last
     newline.  Newline characters themselves are never included; an empty
     list is returned when the prompt contains no newline at all.
     """
     past_last_newline = False
     collected = []
     for token, char in reversed(explode_tokens(get_prompt_tokens(cli))):
         if char == '\n':
             past_last_newline = True
         elif past_last_newline:
             collected.append((token, char))
     collected.reverse()
     return collected
Example #6
0
 def before(cli):
     """
     Collect the (token, char) pairs located before the last newline of
     the prompt, skipping every newline character itself.  Yields an
     empty list when the prompt has no newline.
     """
     exploded = explode_tokens(get_prompt_tokens(cli))
     # Locate the last newline with a forward scan.
     last_nl = -1
     for position, (_, char) in enumerate(exploded):
         if char == '\n':
             last_nl = position
     if last_nl < 0:
         return []
     # Everything before that newline, minus any interior newlines.
     return [(token, char) for token, char in exploded[:last_nl]
             if char != '\n']
Example #7
0
    def apply_transformation(self, cli, document, lineno, source_to_display, tokens):
        """
        Highlight reported errors on this display line: for every error
        whose ``lineno`` matches, retag the tokens in the error's column
        range as ``Token.FlakesError``.
        """
        errors = self.editor_buffer.report_errors
        if errors:
            for err in errors:
                if err.lineno != lineno:
                    continue
                tokens = explode_tokens(tokens)
                for column in range(err.start_column, err.end_column):
                    # Guard against errors extending past the line's end.
                    if column < len(tokens):
                        tokens[column] = (Token.FlakesError, tokens[column][1])

        return Transformation(tokens)
Example #8
0
 def read_chunk(self):
     """
     One-shot reader: the first call returns ``self.text`` as exploded
     (token, char) tuples; every later call returns an empty list.
     """
     if not self._read:
         self._read = True
         return explode_tokens([(Token, self.text)])
     return []