def apply_transformation(self, cli, document, lineno, source_to_display, tokens):
    """
    Render lines that fall outside the selected history region with the
    ``Token.History.ExistingInput`` token; lines inside the region keep
    their original tokens.
    """
    first = self._lines_before
    last = first + len(self.history_mapping.selected_lines)

    if first <= lineno < last:
        # Line belongs to the selection: pass tokens through unchanged.
        return Transformation(tokens=tokens)

    # Collapse the whole line into a single "existing input" token.
    line_text = token_list_to_text(tokens)
    return Transformation(tokens=[(Token.History.ExistingInput, line_text)])
def apply_transformation(self, cli, document, lineno, source_to_display, tokens):
    """
    Highlight search matches on this line by replacing the token of every
    matched character with ``Token.SearchMatch`` (or
    ``Token.SearchMatch.Current`` when the cursor is inside the match).
    """
    needle = self._get_search_text(cli)

    # Nothing to highlight when there is no search text or when the input
    # is being returned.
    if not needle or cli.is_returning:
        return Transformation(tokens)

    line_text = token_list_to_text(tokens)
    tokens = explode_tokens(tokens)
    flags = re.IGNORECASE if cli.is_ignoring_case else 0

    # Display column of the cursor, if the cursor is on this line.
    cursor_column = None
    if document.cursor_position_row == lineno:
        cursor_column = source_to_display(document.cursor_position_col)

    for match in re.finditer(re.escape(needle), line_text, flags=flags):
        cursor_inside = (cursor_column is not None and
                         match.start() <= cursor_column < match.end())
        token = (Token.SearchMatch.Current if cursor_inside
                 else Token.SearchMatch)

        for pos in range(match.start(), match.end()):
            tokens[pos] = (token, tokens[pos][1])

    return Transformation(tokens)
def apply_transformation(self, cli, document, lineno, source_to_display, tokens):
    """
    Highlight search matches on this line by *appending*
    ``Token.SearchMatch`` (or ``Token.SearchMatch.Current`` for the match
    under the cursor) to each matched character's token, so the existing
    styling of the character is preserved.
    """
    search_text = self._get_search_text(cli)

    # Build the token suffixes once, outside the loops.
    searchmatch_current_token = (':', ) + Token.SearchMatch.Current
    searchmatch_token = (':', ) + Token.SearchMatch

    if search_text and not cli.is_returning:
        # For each search match, replace the Token.
        line_text = token_list_to_text(tokens)
        tokens = explode_tokens(tokens)
        flags = re.IGNORECASE if cli.is_ignoring_case else 0

        # Get the display column of the cursor, when it is on this line.
        if document.cursor_position_row == lineno:
            cursor_column = source_to_display(document.cursor_position_col)
        else:
            cursor_column = None

        for match in re.finditer(re.escape(search_text), line_text, flags=flags):
            if cursor_column is not None:
                on_cursor = match.start() <= cursor_column < match.end()
            else:
                on_cursor = False

            for i in range(match.start(), match.end()):
                old_token, text = tokens[i]
                # Fix: reuse the unpacked `text` instead of re-indexing
                # `tokens[i][1]` (the unpacked value was previously unused).
                if on_cursor:
                    tokens[i] = (old_token + searchmatch_current_token, text)
                else:
                    tokens[i] = (old_token + searchmatch_token, text)

    return Transformation(tokens)
def apply_transformation(self, cli, document, tokens):
    """
    Insert ``self.get_tokens(cli)`` before the buffer content and shift the
    source/display position mappings by the length of the inserted prefix.
    """
    prefix = self.get_tokens(cli)
    offset = token_list_len(prefix)

    new_document = document.insert_before(token_list_to_text(prefix))

    return Transformation(
        document=new_document,
        tokens=prefix + tokens,
        source_to_display=lambda pos: pos + offset,
        display_to_source=lambda pos: pos - offset)
def apply_transformation(self, cli, document, tokens):
    """
    Prepend the tokens from ``self.get_tokens(cli)`` to the buffer text,
    adjusting cursor-position translation accordingly.
    """
    before = self.get_tokens(cli)
    shift = token_list_len(before)

    # Named translation functions instead of inline lambdas.
    def to_display(index):
        return index + shift

    def to_source(index):
        return index - shift

    return Transformation(
        document=document.insert_before(token_list_to_text(before)),
        tokens=before + tokens,
        source_to_display=to_display,
        display_to_source=to_source)
def apply_transformation(self, cli, document, lineno, source_to_display, tokens):
    """
    Make leading whitespace visible: replace every leading space character
    with the configured (token, char) pair, stopping at the first
    non-space character.
    """
    # Only act when the line actually starts with a space.
    if tokens and token_list_to_text(tokens).startswith(' '):
        replacement = (self.token, self.get_char(cli))
        tokens = explode_tokens(tokens)

        # Walk through the tokens from the start of the line.
        for index, (_, char) in enumerate(tokens):
            if char != ' ':
                break
            tokens[index] = replacement

    return Transformation(tokens)
def apply_transformation(self, cli, document, tokens):
    """
    Show the appropriate prompt before the buffer text: the incremental
    search prompt while searching, the repeat-argument prompt while a
    numeric argument is active, otherwise the regular prompt tokens.
    """
    if cli.is_searching:
        prompt = _get_isearch_tokens(cli)
    elif cli.input_processor.arg is not None:
        prompt = _get_arg_tokens(cli)
    else:
        prompt = self.get_tokens(cli)

    # Shift source/display positions by the length of the inserted prompt.
    offset = token_list_len(prompt)

    return Transformation(
        document=document.insert_before(token_list_to_text(prompt)),
        tokens=prompt + tokens,
        source_to_display=lambda pos: pos + offset,
        display_to_source=lambda pos: pos - offset)
def apply_transformation(self, cli, document, tokens):
    """
    Append ``self.get_tokens(cli)`` after the buffer content.

    Fix: ``self.get_tokens(cli)`` was called twice; if the token source is
    dynamic, the text inserted into the document and the tokens appended to
    the display could disagree. Call it once and reuse the result.
    """
    tokens_after = self.get_tokens(cli)
    return Transformation(
        document=document.insert_after(token_list_to_text(tokens_after)),
        tokens=tokens + tokens_after)
def apply_transformation(self, cli, document, tokens):
    """
    Append ``self.get_tokens(cli)`` after the buffer content.

    Fix: the original called ``self.get_tokens(cli)`` a second time when
    building the token list; a non-deterministic token source would make
    the inserted document text and displayed tokens inconsistent, and the
    second call is redundant work. Reuse the first result.
    """
    tokens_after = self.get_tokens(cli)
    return Transformation(
        document=document.insert_after(token_list_to_text(tokens_after)),
        tokens=tokens + tokens_after)