def parse(self, base):
    """Return cached filetype candidates whose abbr starts with the token.

    Candidates for the current filetype are gathered once and memoized in
    ``_cache``; an empty list is cached when gathering fails so the
    expensive call is not retried.
    """
    if not self.ft or not base or base.endswith((' ', '\t')):
        return []
    if self.ft not in _cache:
        try:
            _cache[self.ft] = self._get_candidates()
        except Exception:
            _cache[self.ft] = []
    token = to_unicode(
        to_bytes(base, get_encoding())[self.start_column():], 'utf-8')
    if len(token) < self.get_option('min_chars'):
        return []
    btoken = token.encode('utf-8')
    matches = [dict(entry) for entry in _cache[self.ft]
               if entry['abbr'].startswith(btoken)]
    # Offset is in bytes of the buffer's encoding, up to the token start.
    offset = len(to_bytes(base[:-len(token)], get_encoding()))
    for entry in matches:
        entry['word'] = entry['abbr']
        entry['offset'] = offset
    return matches
def parse(self, base):
    """Collect up to LIMIT distinct buffer tokens matching the identifier
    under the cursor, ordered by (factor, token)."""
    found = word.search(base)
    if not found:
        return []
    ident = found.group()
    if len(ident) < self.get_option('min_chars'):
        return []
    token_store.parse_buffers(ident)
    seen = set()
    for tok, weight in token_store.search(ident):
        if tok == ident:
            continue
        seen.add((tok, weight))
        if len(seen) >= LIMIT:
            break
    # NOTE: base class Completor expects the offset in nr of bytes in the
    # buffer's encoding (Completor.start_column will also be in nr of bytes)
    enc = get_current_buffer_encoding()
    offset = len(to_bytes(base, enc)) - len(to_bytes(ident, enc))
    ordered = sorted(seen, key=lambda pair: (pair[1], pair[0]))
    return [{'word': tok, 'menu': '[ID]', 'offset': offset}
            for tok, _ in ordered]
def gen_entry(self, base):
    """Yield ``(entry, score)`` pairs from necovim for *base*.

    Entries whose word does not fuzzily match *base* (``test_subseq``
    returns ``None``) are skipped.
    """
    gather = vim.Function('necovim#gather_candidates')
    enc = get_encoding()
    entries = gather(to_bytes(self.input_data, enc), to_bytes(base, enc))
    for entry in entries:
        rank = test_subseq(base, to_unicode(entry[b'word'], 'utf-8'))
        if rank is not None:
            yield entry, rank
def on_complete(self, items):
    """Turn racer ``MATCH`` output lines into completion dicts.

    Lines inside comments/strings and doc-comment contexts are ignored,
    as are entries whose spec merely repeats what was already typed.
    """
    if self.is_comment_or_string() or '///' in self.input_data:
        return []
    typed = to_bytes(self.input_data, get_encoding())
    result = []
    for line in items:
        if not line.startswith(b'MATCH'):
            continue
        fields = line.split(b',')
        if len(fields) < 6:
            continue
        word = fields[0][6:]
        if fields[4].lower() == b'module':
            detail = b'mod'
        else:
            detail = b', '.join(fields[5:])
        if detail.startswith(b'pub '):
            detail = detail[4:]
        if detail.startswith(typed):
            continue
        result.append({'word': word, 'menu': detail, 'dup': 0})
    return result
def parse(self, items):
    """Parse clang ``COMPLETION:`` output lines into completion dicts,
    keeping only entries that start with the prefix under the cursor."""
    m = trigger.search(self.input_data)
    if m:
        _, prefix = m.groups()
    else:
        m = word_patten.search(self.input_data)
        if not m:
            return []
        prefix = m.group()
    prefix = to_bytes(prefix)
    completions = []
    for line in items:
        if not line.startswith(b'COMPLETION:'):
            continue
        fields = [f.strip() for f in line.split(b':')]
        if len(fields) < 2:
            continue
        if not fields[1].startswith(prefix):
            continue
        entry = {'word': fields[1], 'dup': 1, 'menu': ''}
        if len(fields) > 2:
            if fields[1] == b'Pattern':
                # 'Pattern : <word> <rest>' — first word completes,
                # the remainder becomes the menu text.
                pieces = fields[2].split(b' ', 1)
                entry['word'] = pieces[0]
                if len(pieces) > 1:
                    entry['menu'] = pieces[1]
            else:
                entry['menu'] = b':'.join(fields[2:])
            entry['menu'] = sanitize(entry['menu'])
        completions.append(entry)
    return completions
def parse(self, base):
    """Ask neoinclude for include-file candidates at the cursor position.

    Returns an empty list when no completion position is reported or when
    the candidate conversion fails.
    """
    if not self.ft or not base:
        return []
    logger.info('start neoinclude parse: %s', base)
    data = to_bytes(self.input_data, get_encoding())
    position_of = vim.Function(
        'neoinclude#file_include#get_complete_position')
    if position_of(data) == -1:
        return []
    files_of = vim.Function(
        'neoinclude#file_include#get_include_files')
    try:
        candidates = [
            {'word': f[b'word'], 'dup': 1,
             'menu': b'[include]', 'kind': f[b'kind']}
            for f in files_of(data)[:]
        ]
    except TypeError as e:
        logger.exception(e)
        candidates = []
    logger.info(candidates)
    return candidates
def parse(self, base):
    """Invoke the buffer's omnifunc and normalize its results.

    The cursor is saved up front and restored in ``finally`` because an
    omnifunc may move it.  Plain (non-dict) results are wrapped as
    ``{'word': ...}`` and every entry gets a byte-based 'offset'.
    """
    pattern = self.trigger_cache.get(self.ft)
    if not pattern or not pattern.search(base):
        return []
    saved_cursor = self.cursor
    try:
        name = vim.current.buffer.options['omnifunc']
        logger.info('omnifunc: %s', name)
        if not name:
            return []
        func = vim.Function(name)
        begin = func(1, '')
        col = self.start_column()
        logger.info('start: %s,%s', begin, col)
        # Only trust the omnifunc when its start agrees with ours.
        if begin < 0 or begin != col:
            return []
        results = func(0, to_bytes(base, get_encoding())[col:])
        for i, item in enumerate(results):
            if not isinstance(item, dict):
                results[i] = {'word': item}
            results[i]['offset'] = col
        return results
    except (vim.error, ValueError, KeyboardInterrupt):
        return []
    finally:
        self.cursor = saved_cursor
def on_complete(self, items):
    """Build completion entries from racer ``MATCH`` output lines."""
    if self.is_comment_or_string() or '///' in self.input_data:
        return []
    prefix = to_bytes(self.input_data, get_encoding())
    out = []
    for raw in items:
        if not raw.startswith(b'MATCH'):
            continue
        cols = raw.split(b',')
        if len(cols) < 6:
            continue
        word = cols[0][6:]
        detail = (b'mod' if cols[4].lower() == b'module'
                  else b', '.join(cols[5:]))
        if detail.startswith(b'pub '):
            detail = detail[4:]
        # Skip entries whose spec is already fully typed.
        if detail.startswith(prefix):
            continue
        out.append({'word': word, 'menu': detail, 'dup': 0})
    return out
def parse(self, base):
    """Call the buffer's omnifunc and return its raw candidate list.

    Returns ``[]`` when the trigger does not match, no omnifunc is set,
    or the reported start column disagrees with ours.
    """
    pattern = self.trigger_cache.get(self.ft)
    if not pattern or not pattern.search(base):
        return []
    try:
        name = vim.current.buffer.options['omnifunc']
        if not name:
            return []
        omni = vim.Function(name)
        begin = omni(1, '')
        col = self.start_column()
        if begin < 0 or begin != col:
            return []
        return omni(0, to_bytes(base, get_encoding())[col:])
    except (vim.error, ValueError, KeyboardInterrupt):
        return []
def on_complete(self, items):
    """Parse clang completion output into completion dicts.

    :param items: List<bytes> — raw ``COMPLETION: ...`` lines from clang.

    Entries are filtered by the prefix under the cursor; snippet
    placeholders are stripped unless ``disable_placeholders`` is set.
    """
    match = trigger.search(self.input_data)
    if match:
        _, prefix = match.groups()
    else:
        match = word_patten.search(self.input_data)
        if not match:
            return []
        prefix = match.group()
    prefix = to_bytes(prefix)
    res = []
    for item in items:
        logger.info(item)
        if not item.startswith(b'COMPLETION:'):
            continue
        parts = [e.strip() for e in item.split(b':')]
        if len(parts) < 2:
            continue
        data = {'word': parts[1], 'dup': 1, 'menu': b''}
        if parts[1] == b'Pattern':
            # FIX: a truncated 'Pattern' line with no payload previously
            # raised IndexError on parts[2]; skip it instead (the sibling
            # parser applies the same len(parts) > 2 guard).
            if len(parts) < 3:
                continue
            data['word'] = get_word(parts[2])
            data['menu'] = parts[2]
        else:
            data['menu'] = b':'.join(parts[2:])
        func_sig = sanitize(data['menu'])
        data['abbr'] = data['word']
        if self.disable_placeholders != 1 and data['menu']:
            data['word'] = strip_optional(data['menu'])
        else:
            data['word'] = strip_tag(data['word'])
        data['menu'] = func_sig
        # Show function signature in the preview window
        # data['info'] = func_sig
        if data['word'].startswith(prefix):
            res.append(data)
    return res
def parse(self, base):
    """Gather vimscript completions from necovim for *base*.

    Candidates are scored and sorted, and each word is trimmed by the
    byte distance between the keyword match start and the completion
    start column.
    """
    if not self.ft or not base:
        return []
    logger.info('start necovim parse: %s', base)
    try:
        match = self.trigger.search(base)
    except TypeError as e:
        logger.exception(e)
        match = None
    if not match:
        logger.info('no matches')
        return []
    kw = match.group()
    if len(kw) < self.get_option('min_chars'):
        return []
    # itertools.chain() around a single generator was a no-op; islice
    # still caps the number of scored entries at LIMIT.
    items = list(itertools.islice(self.gen_entry(kw), LIMIT))
    items.sort(key=lambda x: x[1])
    index = len(to_bytes(self.cursor_line[:match.start()], get_encoding()))
    prefix = max(self.start_column() - index, 0)
    ret = []
    for item, _ in items:
        ret.append({
            'word': item[b'word'][prefix:],
            'abbr': item[b'word'],
            # FIX: key was misspelled 'dub'; vim's complete-items key is
            # 'dup' (allow duplicate words), as used by the sibling
            # completors in this file.
            'dup': 1,
            'menu': '[vim]'
        })
    return ret
def _gen_archive(self):
    """Serialize the current buffer as ``filename\\nbyte_count\\ncontent``.

    Returns '' when the buffer has no unsaved modifications.
    """
    if not vim.current.buffer.options['modified']:
        return ''
    text = '\n'.join(vim.current.buffer[:])
    size = len(to_bytes(text, get_encoding()))
    return '\n'.join([self.filename, str(size), text])