def parse(self, base):
    """Return cached candidates for this filetype whose abbr starts with the typed token.

    Each returned dict is a copy of the cached entry, given a 'word' equal to
    its 'abbr' and an 'offset' (byte offset of the token within *base*).
    Returns [] when there is no filetype, no input, trailing whitespace, or
    the token is shorter than the 'min_chars' option.
    """
    if not self.ft or not base or base.endswith((' ', '\t')):
        return []
    # Lazily fill the per-filetype candidate cache; a failed fetch caches [].
    if self.ft not in _cache:
        try:
            _cache[self.ft] = self._get_candidates()
        except Exception:
            _cache[self.ft] = []
    # Slice the token out of the typed line at the completion start column
    # (byte-based), then bring it back to unicode for the length check.
    token = to_unicode(
        to_bytes(base, get_encoding())[self.start_column():], 'utf-8')
    if len(token) < self.get_option('min_chars'):
        return []
    needle = token.encode('utf-8')
    matched = [dict(entry) for entry in _cache[self.ft]
               if entry['abbr'].startswith(needle)]
    # Byte length of everything typed before the token.
    offset = len(to_bytes(base[:-len(token)], get_encoding()))
    for entry in matched:
        entry['word'] = entry['abbr']
        entry['offset'] = offset
    return matched
def gen_entry(self, base):
    """Yield (candidate, score) pairs from necovim's gathered candidates.

    Candidates whose b'word' does not fuzzily match *base* (test_subseq
    returns None) are skipped.
    """
    gather = vim.Function('necovim#gather_candidates')
    enc = get_encoding()
    for entry in gather(to_bytes(self.input_data, enc), to_bytes(base, enc)):
        score = test_subseq(base, to_unicode(entry[b'word'], 'utf-8'))
        if score is not None:
            yield entry, score
def on_complete(self, items):
    """Convert racer 'MATCH' output lines into vim completion dicts.

    Skips completion inside comments/strings and doc-comment lines ('///').
    Entries whose type spec merely echoes the already-typed input are dropped.
    """
    if self.is_comment_or_string() or '///' in self.input_data:
        return []
    typed = to_bytes(self.input_data, get_encoding())
    results = []
    for line in items:
        if not line.startswith(b'MATCH'):
            continue
        fields = line.split(b',')
        if len(fields) < 6:
            continue
        # fields[0] is b'MATCH <name>'; strip the 6-byte prefix.
        name = fields[0][6:]
        if fields[4].lower() == b'module':
            spec = b'mod'
        else:
            spec = b', '.join(fields[5:])
        if spec.startswith(b'pub '):
            spec = spec[4:]
        if spec.startswith(typed):
            continue
        results.append({'word': name, 'menu': spec, 'dup': 0})
    return results
def parse(self, base):
    """Build file-include completion candidates via neoinclude.

    Returns [] when there is no filetype/input or neoinclude reports no
    completion position on the current line.
    """
    if not (self.ft and base):
        return []
    logger.info('start neoinclude parse: %s', base)
    data = to_bytes(self.input_data, get_encoding())
    position_fn = vim.Function(
        'neoinclude#file_include#get_complete_position')
    if position_fn(data) == -1:
        return []
    files_fn = vim.Function('neoinclude#file_include#get_include_files')
    try:
        candidates = [
            {
                'word': entry[b'word'],
                'dup': 1,
                'menu': b'[include]',
                'kind': entry[b'kind'],
            }
            for entry in files_fn(data)[:]
        ]
    except TypeError as e:
        # neoinclude occasionally returns something non-iterable; degrade to [].
        logger.exception(e)
        candidates = []
    logger.info(candidates)
    return candidates
def parse(self, base):
    """Complete via the buffer's 'omnifunc', tagging each result with 'offset'.

    Returns a list of vim completion dicts, or [] when the filetype's trigger
    does not match, the omnifunc is unset, or the omnifunc's reported start
    column disagrees with our own.
    """
    trigger = self.trigger_cache.get(self.ft)
    if not trigger or not trigger.search(base):
        return []
    # Save the cursor: the omnifunc calls below may move it; restored in finally.
    cursor = self.cursor
    try:
        func_name = vim.current.buffer.options['omnifunc']
        logger.info('omnifunc: %s', func_name)
        if not func_name:
            return []
        omnifunc = vim.Function(func_name)
        # First call (findstart=1) asks the omnifunc for its start column.
        start = omnifunc(1, '')
        codepoint = self.start_column()
        logger.info('start: %s,%s', start, codepoint)
        # Only trust results when the omnifunc agrees with our start column.
        if start < 0 or start != codepoint:
            return []
        # Second call (findstart=0) fetches matches for the text after start.
        res = omnifunc(0, to_bytes(base, get_encoding())[codepoint:])
        for i, e in enumerate(res):
            # Omnifuncs may return bare strings; normalize to dicts in place.
            if not isinstance(e, dict):
                res[i] = {'word': e}
            res[i]['offset'] = codepoint
        return res
    except (vim.error, ValueError, KeyboardInterrupt):
        return []
    finally:
        # Undo any cursor movement the omnifunc performed.
        self.cursor = cursor
def on_complete(self, items):
    """Turn racer 'MATCH' lines into vim completion dicts.

    Inactive inside comments/strings and on doc-comment ('///') lines.
    """
    if self.is_comment_or_string() or '///' in self.input_data:
        return []
    prefix = to_bytes(self.input_data, get_encoding())
    completions = []
    for raw in items:
        if not raw.startswith(b'MATCH'):
            continue
        cols = raw.split(b',')
        if len(cols) < 6:
            continue
        # Strip the b'MATCH ' prefix to recover the completion word.
        word = cols[0][6:]
        detail = b'mod' if cols[4].lower() == b'module' else b', '.join(cols[5:])
        if detail.startswith(b'pub '):
            detail = detail[4:]
        # Drop entries whose detail merely restates what was typed.
        if not detail.startswith(prefix):
            completions.append({'word': word, 'menu': detail, 'dup': 0})
    return completions
def _bytes(data):
    """Recursively encode str values to bytes; containers are mutated in place.

    bytes pass through untouched, str is encoded with the configured encoding,
    lists and dicts (including dict keys) are converted in place and returned.
    Anything else is returned unchanged.
    """
    # Imported lazily to avoid a circular import at module load time.
    from completor import get_encoding

    if isinstance(data, bytes):
        return data
    if isinstance(data, str):
        return data.encode(get_encoding())
    if isinstance(data, list):
        data[:] = [_bytes(item) for item in data]
    elif isinstance(data, dict):
        # Snapshot keys first: we pop/insert while converting.
        for key in list(data):
            data[_bytes(key)] = _bytes(data.pop(key))
    return data
def parse(self, base):
    """Complete via the buffer's 'omnifunc'; [] on mismatch or vim errors."""
    pattern = self.trigger_cache.get(self.ft)
    if not pattern or not pattern.search(base):
        return []
    try:
        name = vim.current.buffer.options['omnifunc']
        if not name:
            return []
        func = vim.Function(name)
        # findstart call first, then our own column — keep this order.
        reported = func(1, '')
        column = self.start_column()
        if reported < 0 or reported != column:
            return []
        return func(0, to_bytes(base, get_encoding())[column:])
    except (vim.error, ValueError, KeyboardInterrupt):
        return []
def parse(self, base):
    """Parse necovim completion candidates matching *base*.

    Returns vim completion dicts sorted by fuzzy-match score; each 'word' is
    trimmed so it begins at this completer's start column. Returns [] when
    there is no filetype/input, the trigger does not match, or the matched
    keyword is shorter than the 'min_chars' option.
    """
    if not self.ft or not base:
        return []
    logger.info('start necovim parse: %s', base)
    try:
        match = self.trigger.search(base)
    except TypeError as e:
        logger.exception(e)
        match = None
    if not match:
        logger.info('no matches')
        return []
    kw = match.group()
    if len(kw) < self.get_option('min_chars'):
        return []
    # Cap candidate generation at LIMIT before scoring/sorting.
    # (Dropped the previous no-op itertools.chain() around the single iterable.)
    items = list(itertools.islice(self.gen_entry(kw), LIMIT))
    items.sort(key=lambda x: x[1])
    # Convert the match's character index into a byte index on the cursor line.
    index = len(to_bytes(self.cursor_line[:match.start()], get_encoding()))
    prefix = max(self.start_column() - index, 0)
    ret = []
    for item, _ in items:
        ret.append({
            'word': item[b'word'][prefix:],
            'abbr': item[b'word'],
            # Bug fix: the key was misspelled 'dub', so vim ignored the
            # intended duplicate-allowing flag ('dup', see :help complete-items).
            'dup': 1,
            'menu': '[vim]'
        })
    return ret
def _gen_archive(self):
    """Serialize the unsaved buffer as 'filename\\nbyte_length\\ncontent'.

    Returns '' when the buffer has no unsaved modifications.
    """
    if not vim.current.buffer.options['modified']:
        return ''
    text = '\n'.join(vim.current.buffer[:])
    size = len(to_bytes(text, get_encoding()))
    return '{}\n{}\n{}'.format(self.filename, size, text)