def gather_candidates(self, context):
    """Collect completion candidates from lines near the cursor and
    from the ':changes' list.

    Candidates are tagged in the popup menu: 'A' (above), 'C'
    (changes), 'B' (below).
    """
    line = context['position'][1]
    candidates = []

    # Lines above the cursor, nearest first.
    words = parse_buffer_pattern(
        reversed(getlines(self.vim, max([1, line - LINES_ABOVE]), line)),
        context['keyword_patterns'])
    candidates += [{'word': x, 'menu': 'A'} for x in words]

    # Grab ':changes' command output.  Skip the first two lines — they
    # are the header ("change line  col text" plus the '>' marker row);
    # previously they leaked into the candidate set.
    p = re.compile(r'[\s\d]+')
    lines = set()
    for change_line in [
            x[p.search(x).span()[1]:]
            for x in self.vim.call('execute', 'changes').split('\n')[2:]
            if p.search(x)]:
        if change_line and change_line != '-invalid-':
            lines.add(change_line)
    words = parse_buffer_pattern(lines, context['keyword_patterns'])
    candidates += [{'word': x, 'menu': 'C'} for x in words]

    # Lines below the cursor.
    words = parse_buffer_pattern(
        getlines(self.vim, line, line + LINES_BELOW),
        context['keyword_patterns'])
    candidates += [{'word': x, 'menu': 'B'} for x in words]

    return candidates
def gather_candidates(self, context):
    """Collect completion candidates from lines near the cursor and
    from the ':changes' list, filtered by the current completion
    prefix.

    Candidates are tagged in the popup menu: 'A' (above), 'C'
    (changes), 'B' (below).
    """
    line = context['position'][1]
    candidates = []

    # Lines above the cursor, nearest first.
    words = parse_buffer_pattern(
        reversed(getlines(self.vim, max([1, line - LINES_ABOVE]), line)),
        context['keyword_patterns'],
        context['complete_str'])
    candidates += [{'word': x, 'menu': 'A'} for x in words]

    # Grab ':changes' command output.  Skip the first two lines — they
    # are the header of the listing; previously they leaked into the
    # candidate set.
    p = re.compile(r'[\s\d]+')
    lines = set()
    for change_line in [
            x[p.search(x).span()[1]:]
            for x in self.vim.call('execute', 'changes').split('\n')[2:]
            if p.search(x)]:
        if change_line and change_line != '-invalid-':
            lines.add(change_line)
    words = parse_buffer_pattern(lines,
                                 context['keyword_patterns'],
                                 context['complete_str'])
    candidates += [{'word': x, 'menu': 'C'} for x in words]

    # Lines below the cursor.
    words = parse_buffer_pattern(
        getlines(self.vim, line, line + LINES_BELOW),
        context['keyword_patterns'],
        context['complete_str'])
    candidates += [{'word': x, 'menu': 'B'} for x in words]

    return candidates
def gather_candidates(self, context):
    """Gather gocode completion candidates, optionally grouped by the
    configured class order (``self.sort_class``).
    """
    # If enabled self.cgo, and matched self.cgo_complete_pattern pattern
    if self.cgo and self.cgo_complete_pattern.search(context['input']):
        return self.cgo_completion(getlines(self.vim))

    # Try the cache first; fall back to asking gocode directly.
    result = self.get_cache(context, getlines(self.vim))
    if result is None:
        bufname = self.vim.current.buffer.name
        if not os.path.isfile(bufname):
            # Unsaved buffer: gocode needs some file path, use a temp name.
            bufname = self.vim.call('tempname')
        result = self.get_complete_result(context, getlines(self.vim),
                                          bufname)

    try:
        if result[1][0]['class'] == 'PANIC':
            error(self.vim, 'gocode panicked')
            return []

        if self.sort_class:
            # Buckets in the user-requested class order.
            class_dict = OrderedDict((x, []) for x in self.sort_class)

        out = []
        sep = ' '
        for complete in result[1]:
            word = complete['name']
            info = complete['type']
            _class = complete['class']
            # Drop the first ' func' so the abbr reads "name(args)…".
            abbr = str(word + sep + info).replace(' func', '', 1)
            kind = _class
            if _class == 'package' and self.package_dot:
                word += '.'
            # Prepend '*' when completing right after a '*' in the input.
            if self.pointer and \
                    str(context['input']
                        [context['complete_position']:]) == '*':
                word = '*' + word
            candidates = dict(word=word, abbr=abbr, kind=kind, info=info,
                              dup=1)
            if not self.sort_class or _class == 'import':
                out.append(candidates)
            elif _class in class_dict.keys():
                class_dict[_class].append(candidates)

        if self.sort_class:
            # Emit buckets in sort_class order.
            for v in class_dict.values():
                out += v

        return out
    except Exception:
        # Best-effort: malformed gocode output yields no candidates.
        return []
def gather_candidates(self, context):
    """Gather gocode completion candidates, optionally grouped by the
    configured class order (``self.sort_class``).
    """
    # If enabled self.cgo, and matched self.cgo_complete_pattern pattern
    if self.cgo and self.cgo_complete_pattern.search(context["input"]):
        return self.cgo_completion(getlines(self.vim))

    # cgo-only mode: never fall through to gocode.
    if self.cgo_only:
        return []

    bufname = self.vim.current.buffer.name
    if not os.path.isfile(bufname):
        # Unsaved buffer: gocode needs some file path, use a temp name.
        bufname = self.vim.call("tempname")
    result = self.get_complete_result(context, getlines(self.vim), bufname)

    try:
        if result[1][0]["class"] == "PANIC":
            self.print_error("gocode panicked")
            return []

        if self.sort_class:
            # Buckets in the user-requested class order.
            class_dict = OrderedDict((x, []) for x in self.sort_class)

        out = []
        sep = " "
        for complete in result[1]:
            word = complete["name"]
            info = complete["type"]
            _class = complete["class"]
            # Drop the first ' func' so the abbr reads "name(args)…".
            abbr = str(word + sep + info).replace(" func", "", 1)
            kind = _class
            if _class == "package" and self.package_dot:
                word += "."
            # Prepend '*' when completing right after a '*' in the input.
            if (self.pointer and str(
                    context["input"][context["complete_position"]:]) == "*"):
                word = "*" + word
            candidates = dict(word=word, abbr=abbr, kind=kind, info=info,
                              dup=1)
            if not self.sort_class or _class == "import":
                out.append(candidates)
            elif _class in class_dict.keys():
                class_dict[_class].append(candidates)

        if self.sort_class:
            # Emit buckets in sort_class order.
            for v in class_dict.values():
                out += v

        return out
    except Exception:
        # Best-effort: malformed gocode output yields no candidates.
        return []
def gather_candidates(self, context):
    """Gather gocode completion candidates, optionally grouped by the
    configured class order (``self.sort_class``).
    """
    # If enabled self.cgo, and matched self.cgo_complete_pattern pattern
    if self.cgo and self.cgo_complete_pattern.search(context['input']):
        return self.cgo_completion(getlines(self.vim))

    # Try the cache first; fall back to asking gocode directly.
    result = self.get_cache(context, getlines(self.vim))
    if result is None:
        bufname = self.vim.current.buffer.name
        if not os.path.isfile(bufname):
            # Unsaved buffer: gocode needs some file path, use a temp name.
            bufname = self.vim.call('tempname')
        result = self.get_complete_result(
            context, getlines(self.vim), bufname)

    try:
        if result[1][0]['class'] == 'PANIC':
            error(self.vim, 'gocode panicked')
            return []

        if self.sort_class:
            # Buckets in the user-requested class order.
            class_dict = OrderedDict((x, []) for x in self.sort_class)

        out = []
        sep = ' '
        for complete in result[1]:
            word = complete['name']
            info = complete['type']
            _class = complete['class']
            # Drop the first ' func' so the abbr reads "name(args)…".
            abbr = str(word + sep + info).replace(' func', '', 1)
            kind = _class
            if _class == 'package' and self.package_dot:
                word += '.'
            # Prepend '*' when completing right after a '*' in the input.
            if self.pointer and \
                    str(context['input']
                        [context['complete_position']:]) == '*':
                word = '*' + word
            candidates = dict(
                word=word, abbr=abbr, kind=kind, info=info, dup=1
            )
            if not self.sort_class or _class == 'import':
                out.append(candidates)
            elif _class in class_dict.keys():
                class_dict[_class].append(candidates)

        if self.sort_class:
            # Emit buckets in sort_class order.
            for v in class_dict.values():
                out += v

        return out
    except Exception:
        # Best-effort: malformed gocode output yields no candidates.
        return []
def __getitem__(self, idx):
    """Return one buffer line (integer index) or a list of lines
    (slice).

    Negative indexes count from the end of the buffer (-1 is the last
    line, -2 the one before, and so on).  Omitted slice bounds, as in
    ``buffer[:]``, span the whole buffer.
    """
    if isinstance(idx, slice):
        first = self.adjust_index(idx.start, 0)
        last = self.adjust_index(idx.stop, -1)
        return getlines(self.vim, first, last)
    pos = self.adjust_index(idx)
    return getlines(self.vim, pos, pos)
def gather_candidates(self, context):
    """Complete words that appear in the buffer directly after the
    configured prefix, excluding the word currently being typed."""
    pattern = r'(?<=' + re.escape(self._prefix) + r')\w+'
    current = context['complete_str']
    words = parse_buffer_pattern(getlines(self.vim), pattern)
    return [{'word': w} for w in words if w != current]
def gather_candidates(self, context):
    """Ask the helper process for completions at the cursor position.

    On any failure the exception text is returned as the only
    candidate, matching the original best-effort behavior.
    """
    try:
        request = {
            "Row": context['position'][1],
            "Col": context['complete_position'] - 1,
            "Line": context['input'],
            "FilePath": self.filePath,
            "Source": '\n'.join(getlines(self.vim)),
            "Init": 'false'
        }
        self.util.send(json.dumps(request))
        replies = self.util.read()
        results = []
        for payload in (json.loads(s) for s in replies):
            # 'info' arrives as a list of line lists; flatten then join.
            info_text = '\n'.join(
                functools.reduce(lambda a, b: a + b, payload['info']))
            results.append({"word": payload['word'], "info": info_text})
        return results
    except Exception as e:
        return [str(e)]
def gather_candidates(self, context):
    """Complete PowerShell cmdlet names, or — when the word being
    completed starts with '-' — parameter names of a cmdlet already
    present on the line.

    The cmdlets suggested last time are remembered so a subsequent
    '-' completion knows which parameter lists to search.
    """
    current = context['complete_str']
    complete_position = context['complete_position']
    line = context['position'][1]
    line_text = getlines(self.vim, line, line)[0]
    candidates = []
    if complete_position != 0 and line_text[complete_position - 1] == '-':
        # Parameter completion: requires a previous cmdlet suggestion.
        if not self.previous_cmdlet_suggestions:
            return []
        for cmdlet in self.previous_cmdlet_suggestions:
            # re.escape: cmdlet names are data, not regex syntax.
            # Previously the name was interpolated unescaped, so any
            # regex metacharacter in it would corrupt the pattern.
            if re.findall(r'\b{}\b'.format(re.escape(cmdlet)), line_text):
                params = self.cmdlets_to_params[cmdlet]
                candidates = [
                    p for p in params if current.lower() in p.lower()
                ]
    else:
        # Cmdlet-name completion (case-insensitive substring match).
        candidates = [
            c for c in self.cmdlets if current.lower() in c.lower()
        ]
        self.previous_cmdlet_suggestions = candidates
    if not candidates:
        return []
    return [dict(word=c, abbr=c, info='', dup=1) for c in candidates]
def gather_candidates(self, context):
    """Ask deopletefs for completions at the cursor.

    Returns a single error-string candidate when the backend is not
    initialized or the request fails.
    """
    try:
        # Truthiness instead of the '== False' comparison anti-idiom.
        if not self.standby:
            return ['=== can not initialize deopletefs ===']
        post_data = {
            "Row": context['position'][1],
            "Col": context['complete_position'] - 1,
            "Line": context['input'],
            "FilePath": self.filePath,
            "Source": '\n'.join(getlines(self.vim)),
            "Init": 'false'
        }
        self.util.send(json.dumps(post_data))
        # The reply is a single base64-encoded blob of newline-separated
        # JSON records.
        s = (self.util.read())[0]
        s = base64.b64decode(s)
        s = s.decode(encoding='utf-8')
        lst = s.split("\n")
        return [{
            "word": json_data['word'],
            "info": '\n'.join(
                functools.reduce(lambda a, b: a + b, json_data['info']))
        } for json_data in [json.loads(s) for s in lst]]
    except Exception as e:
        return [str(e)]
def gather_candidates(self, context):
    """Request suggestions from the completion server for the current
    cursor position and convert them to deoplete candidates."""
    if not self.start_server():
        return []

    line = context['position'][1] - 1  # server expects 0-based lines
    column = context['complete_position']
    text = '\n'.join(getlines(self.vim)).encode(self.encoding)
    filename = context['bufpath']
    workspace = self.find_workspace_directory(context['bufpath'])

    result = self.client.suggest(text=text, line=line, column=column,
                                 filename=filename, workspace=workspace)
    if result['status'] != 'ok':
        self.print_error(result)
        return []

    # Previously assigned to an unused local ('output') while the
    # comprehension re-read result['suggestions']; use one binding.
    suggestions = result['suggestions']
    return [
        {
            'word': cand['insert'],
            'kind': cand['kind'],
            'dup': 1,
            'abbr': self.build_abbr(cand),  # in popup menu instead of 'word'
            'info': cand['label'],          # in preview window
            'menu': cand['detail'],         # after 'word' or 'abbr'
        }
        for cand in suggestions
    ]
def on_event(self, context):
    """Handle deoplete events: initialize deopletefs on 'Init'.

    Every other event ('BufWritePost' included) was an explicit no-op
    before; the dead elif/else branches are replaced by an early
    return.
    """
    if context['event'] != 'Init':
        return

    start = time.time()
    self.vim.command("echo '*** deopletefs initializing... ***'")
    # Block until the backend produces its first reply.
    self.util.read()
    elapsed_time = time.time() - start
    self.vim.command("echo 'finish initialize! ( time : " +
                     str(round(elapsed_time, 6)) + " s )'")

    post_data = {
        "Row": -9999,  # dummy row
        "Col": 1,
        "Line": '',
        "FilePath": self.filePath,
        "Source": '\n'.join(getlines(self.vim)),
        "Init": 'real_init'
    }
    self.util.send(json.dumps(post_data))
def gather_candidates(self, context: UserContext) -> Candidates:
    """Collect candidates from lines near the cursor and from the
    ':changes' list, tagged with the configured menu marks."""
    line = context['position'][1]
    candidates: Candidates = []

    # lines above
    words = parse_buffer_pattern(
        reversed(
            getlines(self.vim, max([1, line - self.vars['range_above']]),
                     line)),
        context['keyword_pattern'],
    )
    candidates += [{
        'word': x,
        'menu': self.vars['mark_above']
    } for x in words]

    # grab ':changes' command output
    p = re.compile(r'[\s\d]+')
    lines = set()
    # [2:] skips the listing's header lines.
    for change_line in [
            x[p.search(x).span()[1]:]  # type: ignore
            for x in self.vim.call('execute', 'changes').split('\n')[2:]
            if p.search(x)
    ]:
        if change_line and change_line != '-invalid-':
            lines.add(change_line)
    words = parse_buffer_pattern(lines, context['keyword_pattern'])
    candidates += [{
        'word': x,
        'menu': self.vars['mark_changes']
    } for x in words]

    # lines below
    words = parse_buffer_pattern(
        getlines(self.vim, line, line + self.vars['range_below']),
        context['keyword_pattern'],
    )
    candidates += [{
        'word': x,
        'menu': self.vars['mark_below']
    } for x in words]

    return candidates
def gather_candidates(self, context):
    """Gather words from the lines above/below the cursor and from the
    ':changes' list, tagged with the configured menu marks."""
    row = context['position'][1]
    pattern = context['keyword_pattern']
    out = []

    # Words on the lines above the cursor, nearest line first.
    above = getlines(
        self.vim, max([1, row - self.vars['range_above']]), row)
    for word in parse_buffer_pattern(reversed(above), pattern):
        out.append({'word': word, 'menu': self.vars['mark_above']})

    # Words from the ':changes' output; the two header lines are
    # skipped and '-invalid-' entries dropped.
    leading = re.compile(r'[\s\d]+')
    texts = set()
    for entry in self.vim.call('execute', 'changes').split('\n')[2:]:
        m = leading.search(entry)
        if not m:
            continue
        text = entry[m.span()[1]:]
        if text and text != '-invalid-':
            texts.add(text)
    for word in parse_buffer_pattern(texts, pattern):
        out.append({'word': word, 'menu': self.vars['mark_changes']})

    # Words on the lines below the cursor.
    below = getlines(self.vim, row, row + self.vars['range_below'])
    for word in parse_buffer_pattern(below, pattern):
        out.append({'word': word, 'menu': self.vars['mark_below']})

    return out
def __iter__(self):
    """Iterate over the buffer's lines.

    All lines are fetched locally in one call before iteration starts,
    so the loop costs a single API round-trip rather than one call per
    line.
    """
    yield from getlines(self.vim)
def _get_response(self, context):
    """Send an Autocomplete request built from the text surrounding the
    cursor (up to 'line_limit' lines each way) and return the reply."""
    limit = self.get_var('line_limit')
    _, line, col, _ = self.vim.call('getpos', '.')
    last_line = self.vim.call('line', '$')

    before_line = max(1, line - limit)
    before_lines = getlines(self.vim, before_line, line)
    # Trim the cursor line to the text before the cursor.
    before_lines[-1] = before_lines[-1][:col - 1]

    after_line = min(last_line, line + limit)
    after_lines = getlines(self.vim, line, after_line)
    # Trim the cursor line to the text after the cursor.
    after_lines[0] = after_lines[0][col:]

    return self._request(
        'Autocomplete',
        filename=context['bufpath'],
        before='\n'.join(before_lines),
        after='\n'.join(after_lines),
        region_includes_beginning=(before_line == 1),
        region_includes_end=(after_line == last_line),
        max_num_results=self.get_var('max_num_results'),
    )
def on_event(self, context: UserContext) -> None:
    """Rebuild the keyword cache: map each keyword to the set of line
    numbers (within LINES_MAX of the cursor) where it occurs."""
    self._cache = {}
    start = max([1, context['position'][1] - LINES_MAX])
    for offset, text in enumerate(
            getlines(self.vim, start, start + LINES_MAX)):
        row = start + offset
        for match in re.finditer(context['keyword_pattern'], text):
            word = match.group(0)
            self._cache.setdefault(word, set()).add(row)
def on_event(self, context):
    """Recount keyword occurrences in the window of lines around the
    cursor and store the frequencies in the cache."""
    row = context['position'][1]
    window = getlines(self.vim,
                      max([1, row - LINES_ABOVE]),
                      row + LINES_BELOW)
    counts = {}
    for match in re.finditer(context['keyword_pattern'],
                             '\n'.join(window)):
        word = match.group(0)
        counts[word] = counts.get(word, 0) + 1
    self._cache = counts
def __make_cache(self, context, bufnr):
    """Cache keyword candidates for the given buffer number.

    Undecodable buffer content is ignored (best effort), matching the
    original behavior of returning an empty list in that case.
    """
    try:
        words = parse_buffer_pattern(getlines(self.vim),
                                     context['keyword_patterns'],
                                     context['complete_str'])
        self.__buffers[bufnr] = {
            'bufnr': bufnr,
            'filetype': self.vim.current.buffer.options['filetype'],
            'candidates': words,
        }
    except UnicodeDecodeError:
        return []
def __make_cache(self, context, bufnr):
    """Cache keyword candidates for the given buffer number, keyed by
    bufnr and annotated with the context's filetype."""
    try:
        words = parse_buffer_pattern(getlines(self.vim),
                                     context['keyword_patterns'],
                                     context['complete_str'])
    except UnicodeDecodeError:
        # Undecodable buffer: skip caching (best effort).
        return []
    self.__buffers[bufnr] = {
        'bufnr': bufnr,
        'filetype': context['filetype'],
        'candidates': words,
    }
def __make_cache(self, context):
    """Create or incrementally update the candidate cache for the
    current buffer."""
    try:
        if (context['bufnr'] in self.__buffers
                and context['event'] != 'BufWritePost'
                and len(self.vim.current.buffer) > self.__max_lines):
            # Large, already-cached buffer: only reparse a ±500-line
            # window around the cursor and merge (deduplicated) into
            # the existing candidates.
            line = context['position'][1]
            buffer = self.__buffers[context['bufnr']]
            buffer['candidates'] += parse_buffer_pattern(
                getlines(self.vim, max([1, line-500]), line+500),
                context['keyword_patterns'],
                context['complete_str'])
            buffer['candidates'] = list(set(buffer['candidates']))
        else:
            # Small buffer, first sighting, or a write: rebuild from
            # the whole buffer.
            self.__buffers[context['bufnr']] = {
                'filetype': context['filetype'],
                'candidates': parse_buffer_pattern(
                    getlines(self.vim),
                    context['keyword_patterns'],
                    context['complete_str'])
            }
    except UnicodeDecodeError:
        # Undecodable buffer content: skip caching (best effort).
        return []
def __make_cache(self, context):
    """Create or incrementally update the candidate cache for the
    current buffer."""
    try:
        if (context['bufnr'] in self.__buffers
                and context['event'] != 'BufWritePost'
                and len(self.vim.current.buffer) > self.__max_lines):
            # Large, already-cached buffer: only reparse a ±500-line
            # window around the cursor and merge (deduplicated) into
            # the existing candidates.
            line = context['position'][1]
            buffer = self.__buffers[context['bufnr']]
            buffer['candidates'] += parse_buffer_pattern(
                getlines(self.vim, max([1, line - 500]), line + 500),
                context['keyword_patterns'],
                context['complete_str'])
            buffer['candidates'] = list(set(buffer['candidates']))
        else:
            # Small buffer, first sighting, or a write: rebuild from
            # the whole buffer.
            self.__buffers[context['bufnr']] = {
                'filetype': context['filetype'],
                'candidates': parse_buffer_pattern(getlines(self.vim),
                                                   context['keyword_patterns'],
                                                   context['complete_str'])
            }
    except UnicodeDecodeError:
        # Undecodable buffer content: skip caching (best effort).
        return []
def gather_candidates(self, context):
    """Request completions from the TabNine process for the text
    surrounding the cursor (up to 1000 lines each way)."""
    LINE_LIMIT = 1000
    _, line, col, _ = context['position']
    last_line = self.vim.call('line', '$')

    before_line = max(1, line - LINE_LIMIT)
    before_lines = getlines(self.vim, before_line, line)
    # Trim the cursor line to the text before the cursor.
    before_lines[-1] = before_lines[-1][:col - 1]

    after_line = min(last_line, line + LINE_LIMIT)
    after_lines = getlines(self.vim, line, after_line)
    # Trim the cursor line to the text after the cursor.
    after_lines[0] = after_lines[0][col:]

    response = self.request(
        'Autocomplete',
        filename=context['bufpath'],
        before='\n'.join(before_lines),
        after='\n'.join(after_lines),
        region_includes_beginning=(before_line == 1),
        region_includes_end=(after_line == last_line),
        max_num_results=10,
    )
    if response is None:
        return []
    if response['promotional_message']:
        self.print(' '.join(response['promotional_message']))

    candidates = []
    self.debug(repr(response))
    for result in response['results']:
        candidate = {}
        word = result['result']
        prefix_to_substitute = result['prefix_to_substitute']
        candidate['word'] = word
        # Strip the suffix TabNine asks us to substitute so the
        # inserted text composes with what is already in the buffer;
        # the full word is still shown via 'abbr'.
        if word.endswith(prefix_to_substitute):
            candidate['word'] = word[:len(word) - len(prefix_to_substitute)]
        candidate['abbr'] = word
        candidates.append(candidate)
    self.debug(repr(candidates))
    return candidates
def __make_cache(self, context, bufnr):
    """Build the candidate cache for a buffer, with words sorted
    case-insensitively."""
    try:
        words = parse_buffer_pattern(getlines(self.vim),
                                     context['keyword_patterns'])
        self.__buffers[bufnr] = {
            'bufnr': bufnr,
            'filetype': self.vim.current.buffer.options['filetype'],
            'candidates': [{'word': w}
                           for w in sorted(words, key=str.lower)],
        }
    except UnicodeDecodeError:
        # Undecodable buffer: skip caching (best effort).
        return []
def _make_cache(self, context, bufnr):
    """Build the candidate cache for a buffer, with words sorted
    case-insensitively."""
    try:
        words = sorted(
            parse_buffer_pattern(getlines(self.vim),
                                 context['keyword_patterns']),
            key=str.lower)
        self._buffers[bufnr] = {
            'bufnr': bufnr,
            'filetype': self.vim.eval('&l:filetype'),
            'candidates': [{'word': w} for w in words],
        }
    except UnicodeDecodeError:
        # Undecodable buffer: skip caching (best effort).
        return []
def on_init(self, context):
    """Build the completion trigger pattern and start/initialize the
    deopletefs backend."""
    ### input pattern
    # All patterns are raw strings now: several previously relied on
    # invalid escape sequences ('\s', '\<', '\(' …) in plain strings,
    # which raise DeprecationWarning and will become errors in future
    # Python versions.  The resulting pattern text is unchanged.
    dotHints = [r"(\(|<|[a-zA-Z]|\"|\[)*(?<=(\)|>|[a-zA-Z0-9]|\"|\]))\."]
    oneWordHints = [
        r"^[a-zA-Z]$", r"\s*[a-zA-Z]$", r"typeof\<[a-zA-Z]$",
        r"(\(\))[a-zA-Z]$", r"(\<|\>)[a-zA-Z]$", r"(\[|\])[a-zA-Z]$"
    ]
    attributeHints = [r"\[<[a-zA-Z]*"]
    self.input_pattern = '|'.join(dotHints + oneWordHints + attributeHints)

    ### initialize of deopletefs
    self.standby = False
    self.filePath = expand(
        self.vim.eval("substitute( expand('%:p') , '\#', '\\#' , 'g' )"))
    fsc_path = expand(
        re.split('rplugin', __file__)[0] + expand('bin/deopletefs.exe'))

    post_data = {
        "Row": -9999,  # dummy row
        "Col": 1,
        "Line": '',
        "FilePath": self.filePath,
        "Source": '\n'.join(getlines(self.vim)),
        "Init": 'true'
    }

    self.util = Util(fsc_path, 20)
    self.util.send(json.dumps(post_data))

    start = time.time()
    self.vim.command("echo '*** deopletefs initializing... ***'")
    if str(self.util.read()) != '':
        self.standby = True
        elapsed_time = time.time() - start
        self.vim.command("echo '*** finish initialize! *** ( time : " +
                         str(round(elapsed_time, 6)) + " s )'")
    else:
        elapsed_time = time.time() - start
        self.vim.command(
            "echo '*** Sorry! Please Re-open file! *** ( time : " +
            str(round(elapsed_time, 6)) + " s )'")
def gather_candidates(self, context):
    """Produce jedi completion candidates for the cursor position."""
    if not self._jedi:
        return []

    # Re-create the jedi environment if the configured interpreter
    # changed since the last call.
    python_path = None
    if 'deoplete#sources#jedi#python_path' in context['vars']:
        python_path = context['vars'][
            'deoplete#sources#jedi#python_path']
    if python_path != self._python_path:
        self.set_env(python_path)

    line = context['position'][1]
    col = bytepos2charpos(
        context['encoding'], context['input'],
        context['complete_position'])

    buf = self.vim.current.buffer
    filename = str(buf.name)

    # Only use source if buffer is modified, to skip transferring, joining,
    # and splitting the buffer lines unnecessarily.
    modified = buf.options['modified']
    if not modified and os.path.exists(filename):
        source = None
    else:
        source = '\n'.join(getlines(self.vim))

    # Bail out if the cursor moved since the request was made — the
    # results would no longer match the buffer state.
    if (line != self.vim.call('line', '.')
            or context['complete_position'] >= self.vim.call('col', '$')):
        return []

    self.debug('Line: %r, Col: %r, Filename: %r, modified: %r', line, col,
               filename, modified)

    script = self.get_script(source, line, col, filename,
                             environment=self._env)

    try:
        completions = self.get_completions(script)
    except BaseException:
        # Swallow jedi errors only when configured to; otherwise
        # propagate for debugging.
        if not self.ignore_errors:
            raise
        return []

    return self.finalize_completions(completions)
def gather_candidates(self, context):
    """Run clang's -code-completion-at over the buffer and parse the
    results; completions are memoized per completing word.

    Returns [] when clang is unavailable or times out.
    """
    if not self.executable_clang:
        return []

    # Serve from cache before doing any buffer work (previously the
    # whole buffer was joined and encoded even on a cache hit).
    if self.completing_word in self.cache:
        return self.cache[self.completing_word]

    line = context['position'][1]
    column = context['complete_position'] + 1
    lang = 'c++' if context['filetype'] == 'cpp' else 'c'
    buf = '\n'.join(getlines(self.vim)).encode(self.encoding)

    args = [
        self.vars['clang_binary'],
        '-x', lang, '-fsyntax-only',
        '-Xclang', '-code-completion-macros',
        '-Xclang', '-code-completion-at=-:{}:{}'.format(line, column),
        '-',
        '-I', os.path.dirname(context['bufpath']),
    ]
    args += self._args

    try:
        proc = subprocess.Popen(args=args, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.DEVNULL,
                                cwd=self.run_dir)
        # stderr is discarded; the second communicate() value was an
        # unused local before (as were 'bufnr' and the exception name).
        result, _ = proc.communicate(buf, timeout=10)
        result = result.decode(self.encoding)
    except subprocess.TimeoutExpired:
        proc.kill()
        rv = []
    else:
        rv = self._parse_lines(result.splitlines())
    finally:
        self.cache[self.completing_word] = rv
    return rv
def _make_cache(self, context: UserContext) -> None:
    """Cache sorted keyword candidates for the current buffer, unless
    the buffer exceeds the configured byte limit."""
    # Bufsize check: byte offset of the line past the end, minus one.
    size = self.vim.call('line2byte', self.vim.call('line', '$') + 1) - 1
    if size > self._limit:
        return
    try:
        words = sorted(
            parse_buffer_pattern(getlines(self.vim),
                                 context['keyword_pattern']),
            key=str.lower)
        self._buffers[context['bufnr']] = {
            'bufnr': context['bufnr'],
            'filetype': self.get_buf_option('filetype'),
            'candidates': [{'word': w} for w in words],
        }
    except UnicodeDecodeError:
        # Undecodable buffer: skip caching (best effort).
        return
def _make_cache(self, context):
    """Cache sorted keyword candidates for the current buffer, unless
    the buffer is larger than the configured byte limit."""
    # Byte size of the whole buffer.
    last_byte = self.vim.call('line2byte', self.vim.call('line', '$') + 1)
    if last_byte - 1 > self._limit:
        return
    bufnr = context['bufnr']
    try:
        candidates = [
            {'word': w}
            for w in sorted(parse_buffer_pattern(getlines(self.vim),
                                                 context['keyword_pattern']),
                            key=str.lower)
        ]
    except UnicodeDecodeError:
        # Undecodable buffer: skip caching (best effort).
        return []
    self._buffers[bufnr] = {
        'bufnr': bufnr,
        'filetype': self.get_buf_option('filetype'),
        'candidates': candidates,
    }
def gather_candidates(self, context):
    """Run clang's -code-completion-at over the buffer and parse the
    results.

    Returns [] when clang is unavailable or the process times out.
    """
    if not self.executable_clang:
        return []

    if not self.run_dir:
        self.run_dir = context['cwd']

    line = context['position'][1]
    column = context['complete_position'] + 1
    lang = lang_for_ft.get(context['filetype'], 'c')
    buf = '\n'.join(getlines(self.vim)).encode(self.encoding)

    args = [
        self.get_var('clang_binary'),
        '-x', lang, '-fsyntax-only',
        '-Xclang', '-code-completion-macros',
        '-Xclang', '-code-completion-at=-:{}:{}'.format(line, column),
        '-',
        '-I', os.path.dirname(context['bufpath']),
    ]
    args += self._args

    try:
        proc = subprocess.Popen(args=args, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.DEVNULL,
                                cwd=self.run_dir)
        # stderr is discarded; the second communicate() value was an
        # unused local ('errs') before, as was the exception name.
        result, _ = proc.communicate(buf, timeout=10)
        result = result.decode(self.encoding)
    except subprocess.TimeoutExpired:
        proc.kill()
        return []

    return self._parse_lines(result.splitlines())
def _gather_candidates(self, context):
    """Complete GraphQL fields for the type at the current query path.

    The path is derived from the buffer text up to the cursor; an
    implicit 'query' root is inserted when the first segment is
    neither 'query' nor 'mutation'.
    """
    line = context['position'][1]
    lines = getlines(self.vim, 0, line)
    content = '\n'.join(lines)
    path = get_query_path(content, context['position'])
    # Bug fix: the original compared with "is not 'query'", i.e.
    # string *identity* rather than equality, which is unreliable and
    # a SyntaxWarning on modern Python.
    if len(path) > 0 and path[0].lower() not in ('query', 'mutation'):
        path.insert(0, 'query')
    schema = self.vim.vars.get('graphiql#interface#current_schema')
    types = {}
    if schema is not None:
        types = json.loads(schema)
    current_type = get_type_from_path(path, types)
    results = []
    for k, v in types[current_type].items():
        results.append({'word': k, 'kind': v})
    return results
def _make_cache(self, context):
    """Load (or rebuild) the DB metadata cache and refresh the table
    aliases parsed from the current buffer.

    The pickled cache is reused when it exists and is less than a day
    old; otherwise the database is re-queried via _cache_db().
    """
    # gather databases
    file_exists = os.path.isfile(CACHE_PICKLE)
    file_created_ago = 0
    if file_exists:
        file_created_ago = (
            datetime.datetime.now()
            - datetime.datetime.fromtimestamp(
                os.stat(CACHE_PICKLE).st_ctime)
        ).days

    if file_exists and file_created_ago < 1:
        self.vim.command("echo 'file already exists'")
        with open(CACHE_PICKLE, "rb") as f:
            self._cache = pickle.load(f)
    else:
        # NOTE: a commented-out experiment that ran this in a thread
        # was removed; the rebuild runs synchronously.
        self._cache_db()

    # gather aliases from FROM/JOIN clauses in the buffer
    alias_hits = parse_buffer_pattern(
        getlines(self.vim),
        r"(FROM|JOIN|from|join)\s+(\w+)[.](\w+)([.](\w*))?\s+(\w*)"
    )

    # clear existing aliases before re-adding the current ones
    for table in self._cache["tables"]:
        self._cache["tables"][table]["aliases"] = []

    for alias_hit in alias_hits:
        # Three-part names put the table in group 4, two-part in group 2.
        table = alias_hit[4].upper() or alias_hit[2].upper()
        alias = alias_hit[5].upper()
        if table not in self._cache["tables"]:
            continue
        if alias not in self._cache["tables"][table]["aliases"]:
            self._cache["tables"][table]["aliases"].append(alias)
def _make_cache(self, context):
    """Cache sorted keyword candidates for the current buffer, using
    the filetype-specific keyword pattern.  Oversized buffers are
    skipped."""
    # Bufsize check: byte size of the whole buffer.
    total_bytes = self.vim.call(
        'line2byte', self.vim.call('line', '$') + 1) - 1
    if total_bytes > self._limit:
        return

    keyword_pattern = self.vim.call(
        'deoplete#util#get_keyword_pattern',
        context['filetype'], self.keyword_patterns)

    try:
        words = parse_buffer_pattern(getlines(self.vim), keyword_pattern)
        self._buffers[context['bufnr']] = {
            'bufnr': context['bufnr'],
            'filetype': self.vim.eval('&l:filetype'),
            'candidates': [
                {'word': w} for w in sorted(words, key=str.lower)
            ],
        }
    except UnicodeDecodeError:
        # Undecodable buffer: skip caching (best effort).
        return []
def _make_cache(self, context):
    """Cache sorted keyword candidates for the current buffer, using
    the filetype-specific keyword pattern.  Oversized buffers are
    skipped."""
    # Bufsize check: byte size of the whole buffer.
    size = self.vim.call('line2byte', self.vim.call('line', '$') + 1) - 1
    if size > self._limit:
        return

    pattern = self.vim.call('deoplete#util#get_keyword_pattern',
                            context['filetype'],
                            self.keyword_patterns)
    bufnr = context['bufnr']
    try:
        sorted_words = sorted(
            parse_buffer_pattern(getlines(self.vim), pattern),
            key=str.lower)
        self._buffers[bufnr] = {
            'bufnr': bufnr,
            'filetype': self.vim.eval('&l:filetype'),
            'candidates': [{'word': w} for w in sorted_words],
        }
    except UnicodeDecodeError:
        # Undecodable buffer: skip caching (best effort).
        return []
def on_init(self, context):
    """Launch the deopletefs helper process with a dummy init request."""
    # Temporary .fsx path derived from the current file ('#' escaped
    # for the Vim expression).
    self.filePath = expand(
        self.vim.eval(
            "substitute( expand('%:p:r') . '_deoplete-fsharp_temporary_file.fsx' , '\#', '\\#' , 'g' )"
        ))
    fsc_path = expand(
        re.split('rplugin', __file__)[0] +
        expand('ftplugin/bin_deopletefs/deopletefs.exe'))

    init_request = {
        "Row": -9999,   # dummy row
        "Col": -9999,   # dummy col
        "Line": '',     # dummy line
        "FilePath": self.filePath,
        "Source": '\n'.join(getlines(self.vim)),
        "Init": 'dummy_init'
    }

    self.util = Util(fsc_path, 20, json.dumps(init_request))
    self.util.start()
def gather_candidates(self, context):
    """Return jedi completions for the cursor, served from a worker
    cache.

    The worker process fills the cache asynchronously; when no valid
    cached entry exists the request is queued, waited on briefly, and
    otherwise deferred via deoplete's async mechanism
    (context['is_async'] / self._async_keys).
    """
    refresh_boilerplate = False
    if not self.boilerplate:
        bp = cache.retrieve(('boilerplate~', ))
        if bp:
            self.boilerplate = bp.completions[:]
            refresh_boilerplate = True
        else:
            # This should be the first time any completion happened, so
            # `wait` will be True.
            worker.work_queue.put(
                (('boilerplate~', ), [], '', 1, 0, '', None))

    line = context['position'][1]
    col = context['complete_position']
    buf = self.vim.current.buffer
    src = getlines(self.vim)

    extra_modules = []
    cache_key = None
    cached = None
    refresh = True
    wait = False

    # Inclusion filters for the results
    filters = []

    if re.match('^\s*(from|import)\s+', context['input']) \
            and not re.match('^\s*from\s+\S+\s+', context['input']):
        # If starting an import, only show module results
        filters.append('module')

    cache_key, extra_modules = cache.cache_context(buf.name, context, src,
                                                   self.extra_path)
    cached = cache.retrieve(cache_key)
    if cached and not cached.refresh:
        modules = cached.modules
        if all([filename in modules for filename in extra_modules]) \
                and all([utils.file_mtime(filename) == mtime
                         for filename, mtime in modules.items()]):
            # The cache is still valid
            refresh = False

    if cache_key and (cache_key[-1] in ('dot', 'vars', 'import', 'import~')
                      or (cached and cache_key[-1] == 'package'
                          and not len(cached.modules))):
        # Always refresh scoped variables and module imports. Additionally
        # refresh cached items that did not have associated module files.
        refresh = True

    # Extra options to pass to the server.
    options = {
        'cwd': context.get('cwd'),
        'extra_path': self.extra_path,
        'runtimepath': context.get('runtimepath'),
    }

    if (not cached or refresh) and cache_key and cache_key[-1] == 'package':
        # Create a synthetic completion for a module import as a fallback.
        synthetic_src = ['import {0}; {0}.'.format(cache_key[0])]
        options.update({
            'synthetic': {
                'src': synthetic_src,
                'line': 1,
                'col': len(synthetic_src[0]),
            }
        })

    if not cached:
        wait = True

    # Note: This waits a very short amount of time to give the server or
    # cache a chance to reply.  If there's no reply during this period,
    # empty results are returned and we defer to deoplete's async refresh.
    # The current requests's async status is tracked in `_async_keys`.
    # If the async cache result is older than 5 seconds, the completion
    # request goes back to the default behavior of attempting to refresh as
    # needed by the `refresh` and `wait` variables above.
    self.debug('Key: %r, Refresh: %r, Wait: %r, Async: %r', cache_key,
               refresh, wait, cache_key in self._async_keys)
    context['is_async'] = cache_key in self._async_keys
    if context['is_async']:
        if not cached:
            self.debug('[async] waiting for completions: %r', cache_key)
            return []
        else:
            self._async_keys.remove(cache_key)
            context['is_async'] = False
            if time.time() - cached.time < 5:
                self.debug('[async] finished: %r', cache_key)
                return self.finalize_cached(cache_key, filters, cached)
            else:
                self.debug('[async] outdated: %r', cache_key)

    if cache_key and (not cached or refresh):
        n = time.time()
        wait_complete = False
        worker.work_queue.put(
            (cache_key, extra_modules, '\n'.join(src), line, col,
             str(buf.name), options))
        while wait and time.time() - n < 0.25:
            cached = cache.retrieve(cache_key)
            if cached and cached.time >= n:
                self.debug('Got updated cache, stopped waiting.')
                wait_complete = True
                break
            time.sleep(0.01)

        if wait and not wait_complete:
            self._async_keys.add(cache_key)
            context['is_async'] = True
            self.debug('[async] deferred: %r', cache_key)
            return []

    if refresh_boilerplate:
        # This should only occur the first time completions happen.
        # Refresh the boilerplate to ensure it's always up to date (just in
        # case).
        self.debug('Refreshing boilerplate')
        worker.work_queue.put((('boilerplate~', ), [], '', 1, 0, '', None))

    return self.finalize_cached(cache_key, filters, cached)
def set_cache(self):
    # Snapshot the buffer's lines so later reads avoid extra API calls.
    self.cache = getlines(self.vim)
def gather_candidates(self, context):
    """Return jedi completions for the cursor, served from a worker
    cache.

    The worker process fills the cache asynchronously; when no valid
    cached entry exists the request is queued, waited on briefly, and
    otherwise deferred via deoplete's async mechanism
    (context['is_async'] / self._async_keys).
    """
    refresh_boilerplate = False
    if not self.boilerplate:
        bp = cache.retrieve(('boilerplate~',))
        if bp:
            self.boilerplate = bp.completions[:]
            refresh_boilerplate = True
        else:
            # This should be the first time any completion happened, so
            # `wait` will be True.
            worker.work_queue.put((('boilerplate~',), [], '', 1, 0, '',
                                   None))

    line = context['position'][1]
    col = context['complete_position']
    buf = self.vim.current.buffer
    src = getlines(self.vim)

    extra_modules = []
    cache_key = None
    cached = None
    refresh = True
    wait = False

    # Inclusion filters for the results
    filters = []

    if re.match('^\s*(from|import)\s+', context['input']) \
            and not re.match('^\s*from\s+\S+\s+', context['input']):
        # If starting an import, only show module results
        filters.append('module')

    cache_key, extra_modules = cache.cache_context(buf.name, context, src,
                                                   self.extra_path)
    cached = cache.retrieve(cache_key)
    if cached and not cached.refresh:
        modules = cached.modules
        if all([filename in modules for filename in extra_modules]) \
                and all([utils.file_mtime(filename) == mtime
                         for filename, mtime in modules.items()]):
            # The cache is still valid
            refresh = False

    if cache_key and (cache_key[-1] in ('dot', 'vars', 'import', 'import~')
                      or (cached and cache_key[-1] == 'package'
                          and not len(cached.modules))):
        # Always refresh scoped variables and module imports. Additionally
        # refresh cached items that did not have associated module files.
        refresh = True

    # Extra options to pass to the server.
    options = {
        'cwd': context.get('cwd'),
        'extra_path': self.extra_path,
        'runtimepath': context.get('runtimepath'),
    }

    if (not cached or refresh) and cache_key and cache_key[-1] == 'package':
        # Create a synthetic completion for a module import as a fallback.
        synthetic_src = ['import {0}; {0}.'.format(cache_key[0])]
        options.update({
            'synthetic': {
                'src': synthetic_src,
                'line': 1,
                'col': len(synthetic_src[0]),
            }
        })

    if not cached:
        wait = True

    # Note: This waits a very short amount of time to give the server or
    # cache a chance to reply.  If there's no reply during this period,
    # empty results are returned and we defer to deoplete's async refresh.
    # The current requests's async status is tracked in `_async_keys`.
    # If the async cache result is older than 5 seconds, the completion
    # request goes back to the default behavior of attempting to refresh as
    # needed by the `refresh` and `wait` variables above.
    self.debug('Key: %r, Refresh: %r, Wait: %r, Async: %r', cache_key,
               refresh, wait, cache_key in self._async_keys)
    context['is_async'] = cache_key in self._async_keys
    if context['is_async']:
        if not cached:
            self.debug('[async] waiting for completions: %r', cache_key)
            return []
        else:
            self._async_keys.remove(cache_key)
            context['is_async'] = False
            if time.time() - cached.time < 5:
                self.debug('[async] finished: %r', cache_key)
                return self.finalize_cached(cache_key, filters, cached)
            else:
                self.debug('[async] outdated: %r', cache_key)

    if cache_key and (not cached or refresh):
        n = time.time()
        wait_complete = False
        worker.work_queue.put((cache_key, extra_modules, '\n'.join(src),
                               line, col, str(buf.name), options))
        while wait and time.time() - n < 0.25:
            cached = cache.retrieve(cache_key)
            if cached and cached.time >= n:
                self.debug('Got updated cache, stopped waiting.')
                wait_complete = True
                break
            time.sleep(0.01)

        if wait and not wait_complete:
            self._async_keys.add(cache_key)
            context['is_async'] = True
            self.debug('[async] deferred: %r', cache_key)
            return []

    if refresh_boilerplate:
        # This should only occur the first time completions happen.
        # Refresh the boilerplate to ensure it's always up to date (just in
        # case).
        self.debug('Refreshing boilerplate')
        worker.work_queue.put((('boilerplate~',), [], '', 1, 0, '', None))

    return self.finalize_cached(cache_key, filters, cached)
def gather_candidates(self, context):
    """Run clang (-cc1 code completion) over the buffer and return
    parsed candidates, reusing recent results where possible."""
    self.darwin_version = 0
    if 'clang2_include' in context:
        # Separate code path for completing #include file names.
        return list(self.gather_includes(context))
    input = context['input']
    filetype = context.get('filetype', '')
    complete_str = context['complete_str']
    min_length = context['vars'].get(
        'deoplete#auto_complete_start_length', 2)
    pattern = context.get('clang2_pattern')
    length_exemption = pattern and re.search(pattern + r'$', input)

    if not length_exemption and len(complete_str) < min_length:
        # Since the source doesn't have a global pattern, its our
        # responsibility to honor the user settings.
        return []

    pos = context['complete_position']
    line = context['position'][1]
    last_input = self.last.get('input', '')
    same_line = self.last.get('line', 0) == line

    # Completions from clang will include all results that are relevant to
    # a delimiter position--not just the current word completion.  This
    # means the results can be reused to drastically reduce the completion
    # time.
    if same_line and self.last.get('col', 0) == pos:
        self.debug('Reusing previous completions')
        return self.last.get('completions', [])

    # Additionally, if the completion is happeing in a position that will
    # result in completions for the current scope, reuse it.
    scope_pos = re.search(r'(?:\s+|(?:[\[\(:])\s*|@)$', input)

    # Check objc where spaces can be significant.
    scope_reuse = (filetype not in ('objc', 'objcpp')
                   or (input != last_input
                       and input.rstrip() == last_input.rstrip()))

    if scope_reuse and same_line and scope_pos and self.scope_completions:
        self.debug('Reusing scope completions')
        return self.scope_completions

    buf = self.nvim.current.buffer
    # src = buf[:]
    src = getlines(self.nvim)
    max_lines = context['vars'].get(
        'deoplete#sources#clang#preproc_max_lines', 50)
    if max_lines:
        # Scan upward from the cursor for preprocessor conditionals;
        # blank out an enclosing '#ifdef' line so clang still completes
        # inside an inactive block.
        for i, l in enumerate(reversed(src[max(0, line-max_lines):line])):
            l = l.lstrip()
            if l.startswith('#'):
                l = l.lstrip('# ')
                if l.startswith('ifdef'):
                    self.debug('Ignoring preproc line %d', line - i)
                    src[line - i - 1] = ''
                    break
                elif l.startswith('endif'):
                    self.debug('Stopped preproc search on line %d',
                               line - i)
                    break

    code_flags = [
        '-code-completion-macros',
        '-code-completion-patterns',
        # '-code-completion-brief-comments', - Not very useful atm.
        '-code-completion-at=-:%d:%d' % (line, pos+1),
    ]

    cmd, flags = self.build_flags(context)
    cmd, flags = self.generate_pch(context, cmd, flags)

    completions = []
    cmd = (['-cc1', '-fsyntax-only'] + cmd + code_flags + flags
           + ['-O0', '-w'] + ['-'])

    # clang emits 'COMPLETION:' records; multi-line 'Pattern :' records
    # are accumulated in `pattern` until the next record starts.
    pattern = ''
    for item in self.call_clang(src, cmd, cwd=os.path.dirname(buf.name)):
        if item.startswith('COMPLETION:'):
            if pattern:
                completions.append(self.parse_completion(pattern))
                pattern = ''
            item = item[11:].strip()
            if item.startswith('Pattern :'):
                pattern = item
                continue
            completions.append(self.parse_completion(item))
        elif pattern:
            pattern += item

    if pattern:
        completions.append(self.parse_completion(pattern))

    self.last = {
        'input': input,
        'line': line,
        'col': pos,
        'completions': completions,
    }

    if scope_pos:
        self.scope_completions = completions
    return completions