def get_completions(self, document, complete_event):
    """Yield one Completion per search hit, replacing the entire input."""
    offset = -document.cursor_position
    for hit in self._search(document.text):
        yield Completion(hit, offset)
def test_cte_qualified_columns(completer, text):
    """A CTE-qualified reference should complete to the 'foo' column."""
    expected = {Completion('foo', 0, display_meta='column')}
    assert expected == result_set(completer, text)
def get_completions(self, document, complete_event):
    """Complete az-shell input.

    Three phases:
      1. normalize the input (strip optional leading 'az', scope symbols,
         prepend the default command if one is set);
      2. walk the words: '-'/'--' words complete against the current
         command's parameters, other words descend the command tree;
      3. if the last word(s) form a parameter, complete its value from
         the argument's ``choices`` or argparse-style ``completer``.

    Fixes vs. original:
      * the innermost TypeError handler printed ``TypeError.message`` —
        an attribute the TypeError *class* does not have, so the handler
        itself raised AttributeError; it now prints the caught instance.
      * a duplicated ``if text_before_cursor.split():`` guard removed.
    """
    text_before_cursor = document.text_before_cursor
    command = ""
    is_command = True
    branch = self.command_tree
    not_command = False
    if len(text_before_cursor.split()) > 0\
            and text_before_cursor.split()[0] == 'az':  # remove optional az
        text_before_cursor = ' '.join(text_before_cursor.split()[1:])
    # Strip scope-selection symbols so they don't confuse tree matching.
    if SELECT_SYMBOL['default'] in text_before_cursor:
        text_before_cursor = text_before_cursor.replace(
            SELECT_SYMBOL['default'], "")
    if SELECT_SYMBOL['undefault'] in text_before_cursor:
        text_before_cursor = text_before_cursor.replace(
            SELECT_SYMBOL['undefault'], "")
    if default_command():
        # print(DEFAULT_COMMAND)
        text_before_cursor = default_command() + ' ' + text_before_cursor

    if text_before_cursor.split():
        for words in text_before_cursor.split():
            if words.startswith("-") and not words.startswith("--"):
                # Short-form parameter: suggest the command's short options.
                is_command = False
                if self.has_parameters(command):
                    for param in self.get_param(command):
                        if self.validate_param_completion(param, words, text_before_cursor)\
                                and not param.startswith("--"):
                            yield Completion(param, -len(words), display_meta=\
                                self.get_param_description(
                                    command + " " + str(param)).replace('\n', ''))
            elif words.startswith("--"):
                # Long-form parameter.
                is_command = False
                if self.has_parameters(command):  # Everything should, map to empty list
                    for param in self.get_param(command):
                        if self.validate_param_completion(
                                param, words, text_before_cursor):
                            yield Completion(param, -len(words),\
                                display_meta=self.get_param_description(
                                    command + " " + str(param)).replace('\n', ''))
            else:
                # Plain word: extend the command name and walk the tree.
                if is_command:
                    if command:
                        command += " " + str(words)
                    else:
                        command += str(words)
                try:
                    if branch.has_child(words):
                        branch = branch.get_child(words, branch.children)
                    elif text_before_cursor.split()[0] in SELECT_SYMBOL.values():
                        # print('help')
                        not_command = True
                except ValueError:
                    continue  # do something
        if branch.children is not None and not not_command:
            # Suggest sub-commands matching the partial last word.
            for kid in branch.children:
                if kid.data.lower().startswith(
                        text_before_cursor.split()[-1].lower()):
                    yield Completion(str(kid.data),
                                     -len(text_before_cursor.split()[-1]))

    if not text_before_cursor:
        # Empty input: offer every top-level command.
        if branch.children is not None:
            for com in branch.children:
                yield Completion(com.data)
    elif text_before_cursor[-1].isspace():
        # Word just finished: offer all children of the current branch.
        if branch is not self.command_tree:
            for com in branch.children:
                yield Completion(com.data)

    # Determine whether the cursor is on a parameter (or a value that
    # follows one), and capture the partial value typed so far.
    is_param = False
    started_param = False
    prefix = ""
    param = ""
    if text_before_cursor.split():
        param = text_before_cursor.split()[-1]
        if param.startswith("-"):
            is_param = True
        elif len(text_before_cursor.split()) > 2 and text_before_cursor.split()[-2]\
                and text_before_cursor.split()[-2].startswith('-'):
            is_param = True
            param = text_before_cursor.split()[-2]
            started_param = True
            prefix = text_before_cursor.split()[-1]

    arg_name = ""
    if command in self.cmdtab:
        if is_param:
            # Map the typed option string back to its argument name.
            for arg in self.cmdtab[command].arguments:
                for name in self.cmdtab[command].arguments[arg].options_list:
                    if name == param:
                        arg_name = arg
                        break
                if arg_name:
                    break
            if arg_name and (text_before_cursor.split()[-1].startswith('-') or\
                             text_before_cursor.split()[-2].startswith('-')):
                # Static choices, if the argument declares any.
                try:
                    for choice in self.cmdtab[command].arguments[arg_name].choices:
                        if started_param:
                            if choice.lower().startswith(prefix.lower())\
                                    and choice not in text_before_cursor.split():
                                yield Completion(choice, -len(prefix))
                        else:
                            yield Completion(choice, -len(prefix))
                except TypeError:  # choices may be None (not iterable)
                    pass
                if self.cmdtab[command].arguments[arg_name].completer:
                    # Completer callables come in three historical shapes;
                    # fall through the signatures until one works.
                    try:
                        for comp in self.cmdtab[command].\
                                arguments[arg_name].completer(prefix=prefix, action=None,\
                                parser=self.parser, parsed_args=None):
                            if started_param:
                                if comp.lower().startswith(prefix.lower())\
                                        and comp not in text_before_cursor.split():
                                    yield Completion(comp, -len(prefix))
                            else:
                                yield Completion(comp, -len(prefix))
                    except TypeError:
                        try:
                            for comp in self.cmdtab[command].\
                                    arguments[arg_name].completer(prefix):
                                if started_param:
                                    if comp.lower().startswith(prefix.lower())\
                                            and comp not in text_before_cursor.split():
                                        yield Completion(comp, -len(prefix))
                                else:
                                    yield Completion(comp, -len(prefix))
                        except TypeError:
                            try:
                                for comp in self.cmdtab[command].\
                                        arguments[arg_name].completer():
                                    if started_param:
                                        if comp.lower().startswith(prefix.lower())\
                                                and comp not in text_before_cursor.split():
                                            yield Completion(comp, -len(prefix))
                                    else:
                                        yield Completion(comp, -len(prefix))
                            except TypeError as err:
                                # BUG FIX: was ``TypeError.message`` (invalid).
                                print("TypeError: " + str(err))
def get_completions(self, document, complete_event):
    """Offer the mapped suggestion for the word before the cursor, if any."""
    word = document.get_word_before_cursor()
    if word in suggestions:
        yield Completion(suggestions[word], start_position=-len(word))
def get_completions(self, document, complete_event):
    # Shell completer: completes command names, module ("liner") paths
    # after `use`/`info`, and variable names after `set`.
    buffer = document.text.lower()
    line = document.text.lower().split()
    # show all commands
    if not line:
        for i in self.commands:
            yield Completion(i, start_position=-1 * len(document.text), display=i)
    else:
        cmd = line[0].strip()
        if cmd in self.commands:
            if buffer.startswith("use") or buffer.startswith("info"):
                if len(line) > 1:  # if any part of a liner is after the command
                    result = []
                    # Search for liners starts with the part typed
                    for l in self.liners:
                        if l.startswith(line[1]):
                            result.append(l)
                    # If no liners found, search for liners that contains the word typed ;)
                    # Example: `use iis` (tab) would become `use windows/powershell/get_iis_config`
                    if len(result) == 0:
                        for l in self.liners:
                            if line[1] in l:
                                result.append(l)
                    # All liners should start with command typed because the line would be overwritten
                    if len(result):
                        for i in range(len(result)):
                            if buffer.startswith("use"):
                                result[i] = "use " + result[i]
                            else:
                                result[i] = "info " + result[i]
                    for l in result:
                        yield Completion(l, -document.cursor_position, display=l)
                else:
                    # If no liner typed, return all liners
                    for l in self.liners:
                        yield Completion(l, -document.cursor_position, display=l)
            elif buffer.startswith("set"):
                # No need for comments, it's the same logic as the above...
                if len(line) > 1:
                    result = []
                    for v in self.variables:
                        if v.startswith(line[1]):
                            result.append("set " + v)
                    if len(result) == 0:
                        result = [v for v in self.variables if line[1] in v]
                    for v in result:
                        yield Completion(v, -document.cursor_position, display=v)
                else:
                    for v in self.variables:
                        yield Completion(v, -document.cursor_position, display=v)
        else:
            # Partial command name: prefix-match against known commands.
            result = []
            for c in self.commands:
                if c.startswith(cmd):
                    result.append(c)
            if len(result) == 0:
                for i in reversed(
                        range(1, 5)
                ):  # Fixing typos to return matches if there's no matches :D
                    result.extend([
                        c for c in self.commands
                        if (c[:i] == cmd[:i] and c not in result)
                    ])
                    if len(result) > 0:
                        result = sorted(result)
                        break
            for c in result:
                yield Completion(c, start_position=-document.cursor_position, display=c)
def get_completions(self, document, complete_event):
    """Complete a JavaScript expression in the REPL.

    Lexes the text before the cursor, reconstructs the object expression
    left of the last '.', evaluates a property-enumeration snippet on the
    target, and yields matching property names. '%'-prefixed input is
    completed from the magic-command table instead.

    Fix vs. original: the integer+'.' token-merge loop deleted items from
    ``tokens`` while iterating a ``range`` computed from the *original*
    length, which could raise IndexError after a merge; it now uses an
    index loop that re-checks the live length.
    """
    prefix = document.text_before_cursor
    magic = len(prefix) > 0 and prefix[0] == '%' and not any(
        map(lambda c: c.isspace(), prefix))
    tokens = list(self._lexer.get_tokens(prefix))[:-1]

    # 0.toString() is invalid syntax,
    # but pygments doesn't seem to know that
    i = 0
    while i < len(tokens) - 1:
        if tokens[i][0] == Token.Literal.Number.Integer \
                and tokens[i + 1][0] == Token.Punctuation and tokens[i + 1][1] == '.':
            tokens[i] = (Token.Literal.Number.Float,
                         tokens[i][1] + tokens[i + 1][1])
            del tokens[i + 1]
        i += 1

    # Walk tokens right-to-left, splitting the expression at the last '.'.
    before_dot = ''
    after_dot = ''
    encountered_dot = False
    for t in tokens[::-1]:
        if t[0] in Token.Name.subtypes:
            before_dot = t[1] + before_dot
        elif t[0] == Token.Punctuation and t[1] == '.':
            before_dot = '.' + before_dot
            if not encountered_dot:
                encountered_dot = True
                after_dot = before_dot[1:]
                before_dot = ''
        else:
            if encountered_dot:
                # The value/contents of the string, number or array doesn't matter,
                # so we just use the simplest value with that type
                if t[0] in Token.Literal.String.subtypes:
                    before_dot = '""' + before_dot
                elif t[0] in Token.Literal.Number.subtypes:
                    before_dot = '0.0' + before_dot
                elif t[0] == Token.Punctuation and t[1] == ']':
                    before_dot = '[]' + before_dot
            break

    try:
        if encountered_dot:
            if before_dot == "" or before_dot.endswith("."):
                return
            # Enumerate own properties of the target and its prototype.
            for key in self._get_keys("""\
                    (() => {
                        let o;
                        try {
                            o = """ + before_dot + """;
                        } catch (e) {
                            return [];
                        }
                        if (o === undefined || o === null)
                            return [];
                        let k = Object.getOwnPropertyNames(o);
                        let p;
                        if (typeof o !== 'object')
                            p = o.__proto__;
                        else
                            p = Object.getPrototypeOf(o);
                        if (p !== null && p !== undefined)
                            k = k.concat(Object.getOwnPropertyNames(p));
                        return k;
                    })();"""):
                if self._pattern_matches(after_dot, key):
                    yield Completion(key, -len(after_dot))
        else:
            if magic:
                keys = self._repl._magic_command_args.keys()
            else:
                keys = self._get_keys("Object.getOwnPropertyNames(this)")
            for key in keys:
                # Hide underscore-prefixed names unless the user typed something.
                if not self._pattern_matches(before_dot, key) or (
                        key.startswith('_') and before_dot == ''):
                    continue
                yield Completion(key, -len(before_dot))
    except frida.InvalidOperationError:
        pass
    except frida.OperationCancelledError:
        pass
    except Exception as e:
        self._repl._print(e)
def _get_item_lookup_completions(
    self,
    document: Document,
    complete_event: CompleteEvent,
    temp_locals: Dict[str, Any],
) -> Iterable[Completion]:
    """
    Complete dictionary keys (and list/tuple indices) for an
    ``obj[<partial key>`` expression before the cursor.
    """

    def abbr_meta(text: str) -> str:
        "Abbreviate meta text, make sure it fits on one line."
        # Take first line, if multiple lines.
        if len(text) > 20:
            text = text[:20] + "..."
        if "\n" in text:
            text = text.split("\n", 1)[0] + "..."
        return text

    match = self.item_lookup_pattern.search(document.text_before_cursor)
    if match is not None:
        object_var, key = match.groups()

        # Do lookup of `object_var` in the context.
        result = self._lookup(object_var, temp_locals)

        # If this object is a dictionary, complete the keys.
        if isinstance(result, dict):
            # Try to evaluate the key. The user may still be typing a
            # quoted string, so also try with a closing '"' or "'"
            # appended; first candidate that parses wins.
            key_obj = key
            for k in [key, key + '"', key + "'"]:
                try:
                    key_obj = ast.literal_eval(k)
                except (SyntaxError, ValueError):
                    continue
                else:
                    break

            for k in result:
                if str(k).startswith(str(key_obj)):
                    try:
                        k_repr = self._do_repr(k)
                        yield Completion(
                            k_repr + "]",
                            -len(key),
                            display=f"[{k_repr}]",
                            display_meta=abbr_meta(self._do_repr(result[k])),
                        )
                    except ReprFailedError:
                        pass

        # Complete list/tuple index keys.
        elif isinstance(result, (list, tuple)):
            if not key or key.isdigit():
                # Cap at 1000 indices to keep completion responsive.
                for k in range(min(len(result), 1000)):
                    if str(k).startswith(key):
                        try:
                            k_repr = self._do_repr(k)
                            yield Completion(
                                k_repr + "]",
                                -len(key),
                                display=f"[{k_repr}]",
                                display_meta=abbr_meta(self._do_repr(result[k])),
                            )
                        except ReprFailedError:
                            pass
def get_completions(self, document, complete_event):
    """Fuzzy-match the WORD before the cursor against the known word list."""
    word = document.get_word_before_cursor(WORD=True)
    start = -len(word)
    for candidate in fuzzyfinder(word, self.words):
        yield Completion(candidate, start_position=start)
def test_select_keyword_completion(completer, complete_event):
    """Typing 'SEL' should complete to the SELECT keyword."""
    doc = Document(text="SEL", cursor_position=len("SEL"))
    completions = completer.get_completions(doc, complete_event)
    assert list(completions) == [Completion(text="SELECT", start_position=-3)]
def set_completion(self, filename, file_type):
    """Build a Completion for *filename* from type-specific text and kwargs."""
    return Completion(
        self._set_completion(file_type, filename),
        **self._set_completion_kwargs(file_type, filename)
    )
def test_special_name_completion(completer, complete_event):
    """The special command '\\d' should complete to '\\dt'."""
    doc = Document(text='\\d', cursor_position=len('\\d'))
    result = completer.get_completions(doc, complete_event)
    assert result == [Completion(text='\\dt', start_position=-2)]
def get_completions(self, document, complete_event):
    # Menu-aware completer: completes context-specific values
    # (connections, loadables, options, listeners, sessions) first, then
    # falls back to command names and context names.
    word_before_cursor = document.get_word_before_cursor()
    try:
        cmd_line = list(map(lambda s: s.lower(), shlex.split(document.current_line)))
    except ValueError:
        # shlex can raise on unbalanced quotes; skip context completion.
        pass
    else:
        if len(cmd_line):
            if self.cli_menu.current_context.name == 'teamservers':
                if cmd_line[0] in self.cli_menu.current_context._cmd_registry:
                    for conn in self.cli_menu.current_context.connections:
                        if conn.alias.startswith(word_before_cursor):
                            yield Completion(conn.alias, -len(word_before_cursor))
            if self.cli_menu.teamservers.selected:
                if cmd_line[0] == 'use':
                    for loadable in self.cli_menu.current_context.available:
                        if word_before_cursor in loadable:
                            # Apperently document.get_word_before_cursor() breaks if there's a forward slash in the command line ?
                            try:
                                yield Completion(loadable, -len(cmd_line[1]))
                            except IndexError:
                                yield Completion(loadable, -len(word_before_cursor))
                    return
                if hasattr(self.cli_menu.current_context, 'selected') and self.cli_menu.current_context.selected:
                    if cmd_line[0] == 'set':
                        if len(cmd_line) >= 2 and cmd_line[1] == 'bindip':
                            for ip in self.cli_menu.teamservers.selected.stats.IPS:
                                if ip.startswith(word_before_cursor):
                                    yield Completion(ip, -len(word_before_cursor))
                            return
                        for option in self.cli_menu.current_context.selected['options'].keys():
                            if option.lower().startswith(word_before_cursor.lower()):
                                yield Completion(option, -len(word_before_cursor))
                        return
                    elif cmd_line[0] == 'generate':
                        for listener in self.cli_menu.teamservers.selected.stats.LISTENERS.keys():
                            if listener.startswith(word_before_cursor):
                                yield Completion(listener, -len(word_before_cursor))
                        return
                    elif cmd_line[0] in ['run', 'info', 'sleep', 'kill', 'jitter', 'checkin', 'rename']:
                        for session in self.cli_menu.teamservers.selected.stats.SESSIONS.values():
                            if session['alias'].startswith(word_before_cursor):
                                yield Completion(session['alias'], -len(word_before_cursor))
                        return
    # Fallbacks: current context's commands, other context names, and —
    # outside 'main' — the top-level command registry.
    if hasattr(self.cli_menu.current_context, "_cmd_registry"):
        for cmd in self.cli_menu.current_context._cmd_registry:
            if cmd.startswith(word_before_cursor):
                yield Completion(cmd, -len(word_before_cursor))
    for ctx in self.cli_menu.get_context():
        if ctx.name.startswith(word_before_cursor) and ctx.name is not self.cli_menu.current_context.name:
            yield Completion(ctx.name, -len(word_before_cursor))
    if self.cli_menu.current_context.name != 'main':
        for cmd in self.cli_menu._cmd_registry:
            if cmd.startswith(word_before_cursor):
                yield Completion(cmd, -len(word_before_cursor))
#https://stackoverflow.com/questions/46528473/how-to-reuse-completions-from-pathcompleter-in-prompt-toolkit
# NOTE(review): the stray closing triple-quote below appears to terminate a
# string/comment block whose opening is not visible in this chunk — confirm.
"""
def get_completions(self, document, complete_event):
    """Complete console input of the forms:

      1) COMMAND VALUE       e.g. ``create my-project``
      2) COMMAND KEY VALUE   e.g. ``set LHOST 192.168.1.1``

    Fixes vs. original: the bare ``except:`` around token unpacking is
    narrowed to ``ValueError`` (the error actually raised when more than
    three tokens are present), and the unused local ``it`` was removed.
    """
    # first, tokenize document.text and initialize some shortcut variables
    d = document.text
    tokens = self.console._get_tokens(d)
    l = len(tokens)
    ts = len(d) - len(d.rstrip(" "))  # trailing spaces
    try:
        cmd, t1, t2 = tokens + [None] * (3 - l)
    except ValueError:
        # occurs when l > 3 ; no need to complete anything as it
        # corresponds to an invalid command
        return
    bc = len(document.text_before_cursor)
    o1 = len(cmd) + 1 - bc if cmd else 0
    o2 = len(cmd) + len(t1 or "") + 2 - bc if cmd and t2 else 0
    cmds = {k: v for k, v in self.console.commands.items()}
    c = cmds[cmd]._instance if cmd in cmds else None
    nargs = len(c.args) if c is not None else 0
    # then handle tokens ;
    # when no token is provided, just yield the list of available commands
    if l == 0:
        for x in _filter_sort(cmds.keys(), sort=True):
            yield Completion(x, start_position=0)
    # when one token is provided, handle format:
    #   [PARTIAL_]COMMAND ...
    elif l == 1:
        # when a partial token is provided, yield the list of valid commands
        # NOTE(review): 'c not in cmds' compares a command *instance* against
        # string keys and so is always True; 'cmd not in cmds' may have been
        # intended — confirm before changing.
        if ts == 0 and c not in cmds:
            for x in _filter_sort(cmds, cmd, True):
                yield Completion(x, start_position=-bc)
        # when a valid command is provided, yield the list of valid keys or
        # values, depending on the type of command
        elif ts > 0 and c is not None:
            if nargs == 1:  # COMMAND VALUE
                # e.g. set ---> ["WORKSPACE", ...]
                for x in _filter_sort(c._complete_values(), sort=True):
                    yield Completion(x, start_position=0)
            elif nargs == 2:  # COMMAND KEY VALUE
                for x in _filter_sort(c._complete_keys(), sort=True):
                    yield Completion(x, start_position=0)
    # when two tokens are provided, handle format:
    #   COMMAND [PARTIAL_](KEY ...|VALUE)
    elif l == 2 and c is not None:
        # when a partial value token is given, yield the list of valid ones
        # e.g. select my-pro ---> ["my-project", ...]
        if nargs == 1 and ts == 0:
            for x in _filter_sort(c._complete_values(), t1, True):
                yield Completion(x, start_position=o1)
        # when a partial key token is given, yield the list of valid ones
        # e.g. set W ---> ["WORKSPACE"]
        elif nargs == 2 and ts == 0:
            for x in _filter_sort(c._complete_keys(), t1, True):
                yield Completion(x, start_position=o1)
        # when a valid key token is given, yield the list of values
        # e.g. set WORKSPACE ---> ["/home/user/...", "..."]
        elif nargs == 2 and ts > 0 and t1 in c._complete_keys():
            for x in _filter_sort(c._complete_values(t1), sort=True):
                yield Completion(x, start_position=0)
    # when three tokens are provided, handle format:
    #   COMMAND KEY [PARTIAL_]VALUE
    elif l == 3 and c is not None and t1 in c._complete_keys():
        if nargs == 2 and ts == 0:
            for x in _filter_sort(c._complete_values(t1), sort=True):
                for y in _filter_sort(x, t2):
                    yield Completion(y, start_position=o2)
def get_completions(
    self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
    # Path completer with quote handling and environment-variable
    # expansion (%VAR% and $VAR/${VAR} forms).
    text = document.text_before_cursor
    _text = text
    try:
        # NOTE(review): after str.split the name 'text' holds a *list*,
        # so the following membership test and .split() on it behave
        # oddly and rely on the bare except below to recover — confirm
        # the intended quote handling.
        if "'" in text:
            text = text.split("'")
        if '"' in text:
            text = text.split('"')
        else:
            text = text.split()
        if _text.endswith(" "):
            text = ""
        else:
            text = text[-1]
    except:
        text = document.text_before_cursor

    def expandvars(string, default=None, skip_escaped=False):
        """Expand environment variables of form $var and ${var}.
        If parameter 'skip_escaped' is True, all escaped variable references
        (i.e. preceded by backslashes) are skipped.
        Unknown variables are set to 'default'. If 'default' is None,
        they are left unchanged.
        """
        def replace_var(m):
            return os.environ.get(m.group(2) or m.group(1),
                                  m.group(0) if default is None else default)
        reVar = (r'(?<!\\)' if skip_escaped else '') + r'\$(\w+|\{([^}]*)\})'
        return re.sub(reVar, replace_var, string)

    # Windows-style %VAR% expansion; ignored if the variable is unset.
    if text.find("%") != -1:
        try:
            spl = text.split("%")[1]
            env = os.environ[spl]
            text = text.replace(f"%{spl}%", env)
        except:
            pass
    text = expandvars(text)

    # Complete only when we have at least the minimal input length,
    # otherwise, we can too many results and autocompletion will become too
    # heavy.
    if len(text) < self.min_input_len:
        return

    try:
        # Do tilde expansion.
        if self.expanduser:
            text = os.path.expanduser(text)

        # Directories where to look.
        dirname = os.path.dirname(text)
        if dirname:
            directories = [
                os.path.dirname(os.path.join(p, text)) for p in self.get_paths()
            ]
        else:
            directories = self.get_paths()

        # Start of current file.
        prefix = os.path.basename(text)

        # Get all filenames.
        filenames = []
        for directory in directories:
            # Look for matches in this directory.
            if os.path.isdir(directory):
                for filename in os.listdir(directory):
                    if filename.startswith(prefix):
                        filenames.append((directory, filename))

        # Sort
        filenames = sorted(filenames, key=lambda k: k[1])

        # Yield them.
        for directory, filename in filenames:
            completion = filename[len(prefix):]
            full_name = os.path.join(directory, filename)

            if os.path.isdir(full_name):
                # For directories, add a slash to the filename.
                # (We don't add them to the `completion`. Users can type it
                # to trigger the autocompletion themselves.)
                filename = filename + "\\"
            elif self.only_directories:
                continue

            if not self.file_filter(full_name):
                continue

            yield Completion(completion, 0, display=filename)
    except OSError:
        pass
def get_completions(self, word_before_cursor, context, option=None):
    """Yield commands whose name starts with the typed prefix,
    using each command's description as the meta text."""
    start = -len(word_before_cursor)
    for name, command in self.list:
        if not name.startswith(word_before_cursor):
            continue
        yield Completion(name, start, display_meta=command.description)
def get_completions(self, document, complete_event):
    """Complete the word before the cursor against the known keywords."""
    prefix = document.get_word_before_cursor()
    offset = -len(prefix)
    for kw in self.keywords:
        if kw.startswith(prefix):
            yield Completion(kw, offset)
def find_matches(self, text, collection, mode='fuzzy', meta=None,
                 meta_collection=None):
    """Find completion matches for the given text.

    Given the user's input text and a collection of available
    completions, find completions matching the last word of the
    text.

    `mode` can be either 'fuzzy', or 'keyword'
        'fuzzy': fuzzy matching, ties broken by name prevalence
        'keyword': start-only matching, ties broken by keyword prevalence

    yields prompt_toolkit Completion instances for any matches found
    in the collection of available completions.
    """
    text = last_word(text, include='most_punctuations').lower()
    text_len = len(text)

    if text and text[0] == '"':
        # text starts with double quote; user is manually escaping a name
        # Match on everything that follows the double-quote. Note that
        # text_len is calculated before removing the quote, so the
        # Completion.position value is correct
        text = text[1:]

    if mode == 'fuzzy':
        fuzzy = True
        priority_func = self.prioritizer.name_count
    else:
        fuzzy = False
        priority_func = self.prioritizer.keyword_count

    # Construct a `_match` function for either fuzzy or non-fuzzy matching
    # The match function returns a 2-tuple used for sorting the matches,
    # or None if the item doesn't match
    # Note: higher priority values mean more important, so use negative
    # signs to flip the direction of the tuple
    if fuzzy:
        regex = '.*?'.join(map(re.escape, text))
        pat = re.compile('(%s)' % regex)

        def _match(item):
            r = pat.search(self.unescape_name(item))
            if r:
                return -len(r.group()), -r.start()
    else:
        match_end_limit = len(text)

        def _match(item):
            match_point = item.lower().find(text, 0, match_end_limit)
            if match_point >= 0:
                # Use negative infinity to force keywords to sort after all
                # fuzzy matches
                return -float('Infinity'), -match_point

    # Lexical order of items in the collection, used for tiebreaking items
    # with the same match group length and start position. In Python,
    # 'user' < 'users', i.e. the "lower" value comes first. Since we use
    # *higher* priority to mean "more important," we need to flip Python's
    # usual position ranking, hence -position.
    lexical_order = dict([
        (name, -position) for position, name in enumerate(sorted(collection))
    ])

    if meta_collection:
        # Each possible completion in the collection has a corresponding
        # meta-display string
        collection = zip(collection, meta_collection)
    else:
        # All completions have an identical meta
        collection = zip(collection, itertools.repeat(meta))

    matches = []
    for item, meta in collection:
        sort_key = _match(item)
        if sort_key:
            if meta and len(meta) > 50:
                # Truncate meta-text to 50 characters, if necessary
                meta = meta[:47] + u'...'
            priority = sort_key, priority_func(item), lexical_order[item]
            matches.append(
                Match(completion=Completion(item, -text_len, display_meta=meta),
                      priority=priority))
    return matches
def test_select_keyword_completion(completer, complete_event):
    """'SEL' should offer exactly the SELECT keyword."""
    doc = Document(text='SEL', cursor_position=len('SEL'))
    result = completer.get_completions(doc, complete_event)
    assert result == [Completion(text='SELECT', start_position=-3)]
def get_completions( self, document: Document, complete_event: CompleteEvent ) -> Iterable[Completion]: """ Get Python completions. """ # Do dictionary key completions. if self.get_enable_dictionary_completion(): has_dict_completions = False for c in self.dictionary_completer.get_completions( document, complete_event ): if c.text not in "[.": # If we get the [ or . completion, still include the other # completions. has_dict_completions = True yield c if has_dict_completions: return # Do Path completions (if there were no dictionary completions). if complete_event.completion_requested or self._complete_path_while_typing( document ): for c in self._path_completer.get_completions(document, complete_event): yield c # If we are inside a string, Don't do Jedi completion. if self._path_completer_grammar.match(document.text_before_cursor): return # Do Jedi Python completions. if complete_event.completion_requested or self._complete_python_while_typing( document ): script = get_jedi_script_from_document( document, self.get_locals(), self.get_globals() ) if script: try: jedi_completions = script.complete( column=document.cursor_position_col, line=document.cursor_position_row + 1, ) except TypeError: # Issue #9: bad syntax causes completions() to fail in jedi. # https://github.com/jonathanslenders/python-prompt-toolkit/issues/9 pass except UnicodeDecodeError: # Issue #43: UnicodeDecodeError on OpenBSD # https://github.com/jonathanslenders/python-prompt-toolkit/issues/43 pass except AttributeError: # Jedi issue #513: https://github.com/davidhalter/jedi/issues/513 pass except ValueError: # Jedi issue: "ValueError: invalid \x escape" pass except KeyError: # Jedi issue: "KeyError: u'a_lambda'." # https://github.com/jonathanslenders/ptpython/issues/89 pass except IOError: # Jedi issue: "IOError: No such file or directory." 
# https://github.com/jonathanslenders/ptpython/issues/71 pass except AssertionError: # In jedi.parser.__init__.py: 227, in remove_last_newline, # the assertion "newline.value.endswith('\n')" can fail. pass except SystemError: # In jedi.api.helpers.py: 144, in get_stack_at_position # raise SystemError("This really shouldn't happen. There's a bug in Jedi.") pass except NotImplementedError: # See: https://github.com/jonathanslenders/ptpython/issues/223 pass except Exception: # Supress all other Jedi exceptions. pass else: for jc in jedi_completions: if jc.type == "function": suffix = "()" else: suffix = "" yield Completion( jc.name_with_symbols, len(jc.complete) - len(jc.name_with_symbols), display=jc.name_with_symbols + suffix, display_meta=jc.type, style=_get_style_for_name(jc.name_with_symbols), )
def match_completions(cur_word, word_dict):
    """Fuzzy-match *cur_word* against the keys of *word_dict*,
    using each value as the meta description."""
    offset = -len(cur_word)
    for match in fuzzyfinder(cur_word, word_dict.keys()):
        yield Completion(match, offset, display_meta=word_dict.get(match, ''))
def get_completions(self, document, complete_event):
    # Context-sensitive completer for the Dimensions DSL: inspects the
    # current line to pick which grammar candidates (G.*) apply, then
    # yields them filtered by the word under the cursor.
    # NOTE(review): in the collapsed source the 'word = ...' assignment
    # appeared commented out, but 'word' is used throughout — it must be
    # live code; confirm against the upstream file.
    word = document.get_word_before_cursor(WORD=True)
    line = document.current_line_before_cursor
    line_minus_current = line.replace(word, "").strip()
    source = line_search_subject(line)
    # DEBUG
    # click.secho("\nAutocomplete running..", dim=True)
    # click.secho("WORD=" + word, dim=True)
    # click.secho("LINE=" + line, dim=True)
    # click.secho("LINE_MINUS_CURRENT=" + line_minus_current, dim=True)
    # line_minus_current = line
    candidates = []
    if not line:
        # no letter, all empty
        candidates = G.allowed_starts_dsl_query()
    elif len(line_minus_current) == 0:
        # single word, beginning of line
        candidates = G.allowed_starts()
    elif line_minus_current and word.endswith("."):
        # dotted entity access, e.g. `researchers.`
        entity_facet = line_last_word(line).replace(".", "")
        entity = G.entity_type_for_source_facet(source, entity_facet)
        candidates = G.fields_for_entity_from_source_facet(
            source, entity_facet)
    elif in_categories_search(line):
        this_category = in_categories_search(line)
        # print(this_category)
        candidates = G.categories(this_category)
    elif in_square_brackets(line):
        # https://docs.dimensions.ai/dsl/language.html#return-specific-fields
        # search publications for "bmw" return journal[id + title]"
        test_return_obj = line_last_return_subject(line)
        if test_return_obj == source:
            # print("*" + test_return_obj + "*")
            candidates = G.fields_for_source(test_return_obj)
        elif test_return_obj in G.facets_for_source(source):
            entity = G.entity_type_for_source_facet(source, test_return_obj)
            candidates = G.fields_for_entity_from_source_facet(
                source, test_return_obj)
    elif line_last_word(line_minus_current) in [".docs"]:
        candidates = G.sources() + G.entities()
    elif line_last_word(line_minus_current) in ["describe"]:
        candidates = G.allowed_starts("describe")
    elif line_last_word(line_minus_current) in ["search"]:
        candidates = G.sources()
    elif line_last_word(line_minus_current) in ["return"]:
        if source in G.sources():
            candidates = G.facets_for_source(source) + [source]
    elif line_search_subject_is_valid(line_minus_current):
        candidates = G.lang_after_search()
    elif line_return_subject_is_valid(line_minus_current):
        test_return_obj = line_last_return_subject(line)
        if test_return_obj == source:
            candidates = [
                x for x in G.lang_after_return() if x != 'aggregate'
            ]
        else:
            candidates = G.lang_after_return()
    elif line_last_word(line_minus_current) == "in":
        candidates = G.search_fields_for_source(source)
    elif line_last_word(line_minus_current) in [
            "where", "and", "or", "not"
    ]:
        candidates = G.filters_for_source(source)
    elif line_filter_is_partial(line_minus_current):
        candidates = G.lang_filter_operators()
    elif line_for_text_search_inner(line_minus_current):
        candidates = G.lang_text_operators()
    elif line_last_two_words(line_minus_current).startswith("limit"):
        if line_last_return_subject(line) == source:
            candidates = G.lang_after_limit()
    elif line_last_word(line_minus_current) == "aggregate":
        # aggr. can be used only when returning facets!
        return_object = line_search_return(line)
        if return_object in G.facets_for_source(source):
            candidates = G.metrics_for_source(source)
    elif line_last_two_words(line_minus_current) == "sort by":
        return_object = line_search_return(line)
        if return_object in G.sources():
            # if source, can sort by fields FIXME
            candidates = G.fields_for_source(source) + ['relevance']
        elif return_object in G.facets_for_source(source):
            # if facet, can sort by aggregrates metrics if available, otherwise count
            aggreg_object = line_search_aggregates(line)
            if aggreg_object:
                candidates = [aggreg_object]
            else:
                candidates = ['count']
    elif line_last_three_words(line_minus_current).startswith("sort by"):
        candidates = G.lang_after_sort_by()
    # IMP following two must go last
    elif line_filter_is_complete(line_minus_current):
        candidates = G.lang_after_filter()
    elif line_for_text_is_complete(line_minus_current):
        candidates = G.lang_after_for_text()
    # finally
    else:
        candidates = []
    #
    # now build the candidates list
    #
    if line_minus_current and word.endswith("."):
        # print("***" + str(candidates) + "***")
        candidates = sorted([word + x for x in candidates])
        for keyword in candidates:
            yield Completion(
                keyword,
                start_position=-len(word),
                display=keyword.replace(word, ""),
                display_meta=build_help_string(keyword.replace(word, ""),
                                               entity=entity),
            )
    elif in_square_brackets(line):
        # print("***" + str(word) + "***")
        candidates = sorted(candidates)
        for keyword in candidates:
            # inside `[...]` the word may be a '+'-joined field list; match
            # only the fragment after the last '+' (or after the '[').
            if word.rfind("+") > 0:
                word = word[word.find("+") + 1:]
            else:
                word = word[word.find("[") + 1:]
            if keyword.startswith(word):
                yield Completion(
                    keyword,
                    start_position=-len(word),
                    display=keyword,
                    display_meta=build_help_string(keyword, source=source),
                )
    elif in_categories_search(line):
        candidates = sorted([word + x for x in candidates])
        for keyword in candidates:
            yield Completion(
                keyword,
                start_position=-len(word),
                display=keyword.replace(word, ""),
                display_meta=build_help_string(keyword),
            )
    else:
        candidates = sorted(candidates)
        for keyword in candidates:
            if keyword.startswith(word):
                yield Completion(
                    keyword,
                    start_position=-len(word),
                    display=keyword,
                    display_meta=build_help_string(keyword, source=source),
                )
def _subcommands(self, p_word_before_cursor):
    """Yield subcommand completions matching the typed prefix, sorted."""
    offset = -len(p_word_before_cursor)
    for name in sorted(_SUBCOMMAND_MAP):
        if name.startswith(p_word_before_cursor):
            yield Completion(name, offset)
def complete_commands(self, last_word):
    """Yield a Completion for every available command matching *last_word*."""
    offset = -len(last_word)
    for candidate in self.filter_words(self.commands, last_word):
        yield Completion(candidate, offset)
def _projects(self, p_word_before_cursor):
    """Complete '+project' tags; the word's leading character (the '+')
    is stripped before prefix-matching against known projects."""
    prefix = p_word_before_cursor[1:]
    offset = -len(p_word_before_cursor)
    for name in self.todolist.projects():
        if name.startswith(prefix):
            yield Completion("+" + name, offset)
def get_column_matches(self, suggestion, word_before_cursor):
    """Return column-name Matches for a Column suggestion.

    Args:
        suggestion: a Column suggestion; reads table_refs, local_tables,
            qualifiable, require_last_table and context.
        word_before_cursor: the text of the word being completed.

    Returns:
        A list of Match namedtuples from self.find_matches, or a single
        Match that expands '*' into an explicit column list.
    """
    tables = suggestion.table_refs
    # Whether to prefix each column with its table, driven by the
    # qualify_columns setting ('always' / 'never' /
    # 'if_more_than_one_table').
    do_qualify = suggestion.qualifiable and {
        'always': True,
        'never': False,
        'if_more_than_one_table': len(tables) > 1
    }[self.qualify_columns]

    def qualify(col, tbl):
        # 'tbl.col' when qualifying, otherwise just the cased column name.
        return (tbl + '.' + self.case(col)) if do_qualify else self.case(col)

    self.logger.debug("Completion column scope: %r", tables)
    scoped_cols = self.populate_scoped_cols2(
        tables, suggestion.local_tables)

    def make_cand(name, ref):
        # Candidate whose synonyms include a generated alias so that an
        # abbreviation can still match the full column name.
        synonyms = (name, generate_alias(self.case(name)))
        return Candidate(qualify(name, ref), 0, 'column', synonyms)

    def flat_cols():
        # Flatten {table: [columns]} into one candidate list.
        return [make_cand(c.name, t.ref)
                for t, cols in scoped_cols.items() for c in cols]

    if suggestion.require_last_table:
        # require_last_table is used for 'tbl1 JOIN tbl2 USING (...' which
        # should suggest only columns that appear in the last table and
        # one more
        ltbl = tables[-1].ref
        other_tbl_cols = set(
            c.name for t, cs in scoped_cols.items()
            if t.ref != ltbl for c in cs)
        scoped_cols = {
            t: [col for col in cols if col.name in other_tbl_cols]
            for t, cols in scoped_cols.items() if t.ref == ltbl
        }
    lastword = last_word(word_before_cursor, include='most_punctuations')
    if lastword == '*':
        # Expand '*' into an explicit comma-separated column list.
        if suggestion.context == 'insert':
            def filter_col(col):
                # In INSERT context, drop columns whose default matches one
                # of the configured skip patterns (e.g. auto-ids).
                if not col.has_default:
                    return True
                return not any(
                    p.match(col.default)
                    for p in self.insert_col_skip_patterns
                )
            scoped_cols = {
                t: [col for col in cols if filter_col(col)]
                for t, cols in scoped_cols.items()
            }
        if self.asterisk_column_order == 'alphabetic':
            for cols in scoped_cols.values():
                cols.sort(key=operator.attrgetter('name'))
        if lastword != word_before_cursor \
                and len(tables) == 1 \
                and word_before_cursor[-len(lastword) - 1] == '.':
            # User typed x.*; replicate "x." for all columns except the
            # first, which gets the original (as we only replace the "*")
            sep = ', ' + word_before_cursor[:-1]
            collist = sep.join(self.case(c.completion) for c in flat_cols())
        else:
            collist = ', '.join(qualify(c.name, t.ref)
                                for t, cs in scoped_cols.items()
                                for c in cs)
        return [Match(
            completion=Completion(
                collist, -1, display_meta='columns', display='*'
            ),
            priority=(1, 1, 1)
        )]
    return self.find_matches(word_before_cursor, flat_cols(),
                             meta='column')
def _contexts(self, p_word_before_cursor):
    """Yield '@context' Completions for contexts matching the text after '@'."""
    prefix = p_word_before_cursor[1:]
    offset = -len(p_word_before_cursor)
    for name in self.todolist.contexts():
        if name.startswith(prefix):
            yield Completion("@" + name, offset)
def test_keyword_after_alter(completer):
    """After 'ALTER TABLE <tbl> ALTER ', the COLUMN keyword is suggested."""
    completions = result_set(completer, 'ALTER TABLE users ALTER ')
    wanted = Completion('COLUMN', start_position=0, display_meta='keyword')
    assert wanted in set(completions)
def get_completions(self, doc, complete_event):
    """Yields the completions for doc.

    Args:
        doc: A Document instance containing the shell command line to
            complete.
        complete_event: The CompleteEvent that triggered this completion.

    Yields:
        Completion instances for doc.
    """
    tokens = lexer.GetShellTokens(doc.text_before_cursor)
    if not tokens:
        return
    # Completion is always relative to the 'gcloud' CLI tree; prepend a
    # synthetic 'gcloud' token when the user omitted it.
    if tokens[0].value != 'gcloud':
        gcloud_token = lexer.ShellToken(
            'gcloud', lex=lexer.ShellTokenType.ARG, start=0, end=0)
        tokens = ([gcloud_token] + tokens)
    node = self.root  # current position in the CLI tree
    info = None       # tree entry of the last recognized command/group
    last = ''
    path = []         # chain of entries from the root to `node`
    i = 0

    # Autocomplete commands and groups after spaces.
    if doc.text_before_cursor and doc.text_before_cursor[-1].isspace():
        for completion in CompleteCommandGroups(tokens):
            yield Completion(completion)
        return

    # If there is a terminator, do not complete.
    for token in tokens:
        if token.lex == lexer.ShellTokenType.TERMINATOR:
            return

    # Traverse the cli tree.
    while i < len(tokens):
        token = tokens[i]
        if token.lex == lexer.ShellTokenType.ARG and token.value.startswith('-'):
            # A flag: only remember it when it is the token being typed.
            if i == len(tokens) - 1:
                last = token.value
        elif token.value in node:
            # A known command/group: descend into its subcommands.
            info = node[token.value]
            path.append(info)
            node = info.get('commands', {})
        else:
            break
        i += 1

    last = tokens[-1].value
    offset = -len(last)

    # Check for flags.
    if last.startswith('-') and info:
        # Collect all flags of current command and parents into node.
        node = info.get('flags', {}).copy()
        for info in path:
            node.update(info.get('flags', {}))
        value = last.find('=')
        if value > 0:
            if doc.text_before_cursor[-1].isspace():
                return
            name = last[:value]
        else:
            name = last
        if name in node:
            info = node[name]
            if info.get('type', None) != 'bool':
                choices = info.get('choices', None)
                if choices:
                    # A flag with static choices.
                    # NOTE(review): `prefix` is built but never used below;
                    # looks like the yield was meant to use it — confirm.
                    prefix = last
                    if value < 0:
                        prefix += '='
                        offset -= 1
                    for choice in choices:
                        yield Completion(name + '=' + choice, offset)
            return

    # Check for subcommands.
    for choice in sorted(node):
        if choice.startswith(last):
            yield Completion(choice, offset)
def find_matches(self, text, collection, mode='fuzzy', meta=None):
    """Find completion matches for the given text.

    Given the user's input text and a collection of available
    completions, find completions matching the last word of the text.

    `collection` can be either a list of strings or a list of Candidate
    namedtuples.
    `mode` can be either 'fuzzy' or 'strict':
        'fuzzy': fuzzy matching, ties broken by name prevalance
        'strict': start-only matching, ties broken by keyword prevalance

    Returns a list of Match namedtuples wrapping prompt_toolkit
    Completion instances for any matches found in the collection of
    available completions.
    """
    if not collection:
        return []
    # Suggestion types in descending display priority.
    prio_order = [
        'keyword', 'function', 'view', 'table', 'datatype', 'database',
        'schema', 'column', 'table alias', 'join', 'name join', 'fk join'
    ]
    type_priority = prio_order.index(meta) if meta in prio_order else -1
    text = last_word(text, include='most_punctuations').lower()
    text_len = len(text)
    if text and text[0] == '"':
        # text starts with double quote; user is manually escaping a name
        # Match on everything that follows the double-quote. Note that
        # text_len is calculated before removing the quote, so the
        # Completion.position value is correct
        text = text[1:]

    if mode == 'fuzzy':
        fuzzy = True
        priority_func = self.prioritizer.name_count
    else:
        fuzzy = False
        priority_func = self.prioritizer.keyword_count

    # Construct a `_match` function for either fuzzy or non-fuzzy matching
    # The match function returns a 2-tuple used for sorting the matches,
    # or None if the item doesn't match
    # Note: higher priority values mean more important, so use negative
    # signs to flip the direction of the tuple
    if fuzzy:
        regex = '.*?'.join(map(re.escape, text))
        pat = re.compile('(%s)' % regex)

        def _match(item):
            if item.lower()[:len(text) + 1] in (text, text + ' '):
                # Exact match of first word in suggestion
                # This is to get exact alias matches to the top
                # E.g. for input `e`, 'Entries E' should be on top
                # (before e.g. `EndUsers EU`)
                return float('Infinity'), -1
            r = pat.search(self.unescape_name(item.lower()))
            if r:
                return -len(r.group()), -r.start()
    else:
        match_end_limit = len(text)

        def _match(item):
            match_point = item.lower().find(text, 0, match_end_limit)
            if match_point >= 0:
                # Use negative infinity to force keywords to sort after all
                # fuzzy matches
                return -float('Infinity'), -match_point

    matches = []
    for cand in collection:
        if isinstance(cand, _Candidate):
            item, prio, display_meta, synonyms, prio2, display = cand
            if display_meta is None:
                display_meta = meta
            syn_matches = (_match(x) for x in synonyms)
            # Nones need to be removed to avoid max() crashing in Python 3
            syn_matches = [m for m in syn_matches if m]
            sort_key = max(syn_matches) if syn_matches else None
        else:
            # Plain string candidate: no custom priorities, display as-is.
            item, display_meta, prio, prio2, display = cand, meta, 0, 0, cand
            sort_key = _match(cand)
        if sort_key:
            if display_meta and len(display_meta) > 50:
                # Truncate meta-text to 50 characters, if necessary
                display_meta = display_meta[:47] + u'...'

            # Lexical order of items in the collection, used for
            # tiebreaking items with the same match group length and start
            # position. Since we use *higher* priority to mean "more
            # important," we use -ord(c) to prioritize "aa" > "ab" and end
            # with 1 to prioritize shorter strings (ie "user" > "users").
            # We first do a case-insensitive sort and then a
            # case-sensitive one as a tie breaker.
            # We also use the unescape_name to make sure quoted names have
            # the same priority as unquoted names.
            lexical_priority = (
                tuple(0 if c in (' _') else -ord(c)
                      for c in self.unescape_name(item.lower())) +
                (1, ) + tuple(c for c in item))

            item = self.case(item)
            display = self.case(display)
            priority = (sort_key, type_priority, prio, priority_func(item),
                        prio2, lexical_priority)
            item = decode(item)
            display_meta = decode(display_meta)
            display = decode(display)
            matches.append(
                Match(completion=Completion(text=item,
                                            start_position=-text_len,
                                            display_meta=display_meta,
                                            display=display),
                      priority=priority))
    return matches
def find_matches(text, collection):
    """Yield Completions for items that start with the last word of text.

    Matching is tried against the word as typed plus its upper- and
    lower-cased forms, so e.g. 'sel' matches both 'select' and 'SELECT'.

    Args:
        text: the full input text; only its last word is matched.
        collection: iterable of candidate completion strings.

    Yields:
        prompt_toolkit Completion instances, in sorted candidate order.
    """
    text = last_word(text, include='most_punctuations')
    # str.startswith accepts a tuple of prefixes: one call replaces the
    # former chain of three `or`-ed startswith tests.
    prefixes = (text, text.upper(), text.lower())
    for item in sorted(collection):
        if item.startswith(prefixes):
            yield Completion(item, -len(text))