def test_uniq_list_dict():
    """uniq_list_dict drops later duplicate dicts, keeping first-seen order."""
    candidates = [
        {'abbr': 'word', 'word': 'foobar'},
        {'word': 'bar'},
        {'word': 'foobar', 'abbr': 'word'},
        {'word': 'baz'},
    ]
    expected = [
        {'word': 'foobar', 'abbr': 'word'},
        {'word': 'bar'},
        {'word': 'baz'},
    ]
    assert util.uniq_list_dict(candidates) == expected
def _update_result(self, result, context_input, next_input):
    """Refresh, filter and decorate one source's result in place.

    Returns the result when candidates remain after filtering,
    otherwise None.
    """
    source = result['source']

    # Pull in anything a still-running async source has produced.
    if result['is_async']:
        self._gather_async_results(result, source)
    if not result['candidates']:
        return None

    # Work on a private copy of the source context.
    ctx = copy.deepcopy(result['context'])
    ctx['input'] = context_input
    ctx['next_input'] = next_input
    ctx['complete_str'] = context_input[ctx['char_position']:]
    ctx['is_sorted'] = False

    # smartcase/camelcase: an upper-case letter in the typed text
    # disables ignorecase while the filters run.
    if ((ctx['smartcase'] or ctx['camelcase']) and
            re.search(r'[A-Z]', ctx['complete_str'])):
        ctx['ignorecase'] = 0
    saved_ignorecase = ctx['ignorecase']

    # Run every enabled matcher, sorter and converter in order.
    enabled = [x for x in
               source.matchers + source.sorters + source.converters
               if x in self._filters]
    for filter_name in enabled:
        self._process_filter(self._filters[filter_name], ctx,
                             source.max_candidates)
    ctx['ignorecase'] = saved_ignorecase

    # Give the source a final chance to adjust its candidates.
    if hasattr(source, 'on_post_filter'):
        ctx['candidates'] = source.on_post_filter(ctx)

    mark = source.mark + ' '
    dup = bool(source.filetypes)
    for candidate in ctx['candidates']:
        # Decorate: case-insensitive match and the source's mark in menu.
        candidate['icase'] = 1
        menu = candidate.get('menu', '')
        if mark != ' ' and not menu.startswith(mark):
            candidate['menu'] = mark + menu
        if dup:
            candidate['dup'] = 1

    # Note: cannot use set() for dict — dedupe keeps first occurrence.
    if dup:
        ctx['candidates'] = uniq_list_dict(ctx['candidates'])

    result['candidates'] = ctx['candidates']
    return result if result['candidates'] else None
def _merge_results(self, context, queue_id):
    """Gather results from all sources and merge them for the caller.

    Returns a dict with the queue id, whether any source is still
    async, and the per-source merged result entries.
    """
    self.debug('merged_results: begin')
    results = self._gather_results(context)

    merged_results = []
    for result in [x for x in results
                   if not self._is_skip(x['context'], x['source'])]:
        # BUG FIX: _update_result() takes (result, context_input,
        # next_input); the previous call omitted next_input and raised
        # TypeError before any result could be merged.
        if self._update_result(result, context['input'],
                               context['next_input']):
            rank = get_custom(self._custom, result['source'].name,
                              'rank', result['source'].rank)
            dup = bool(result['source'].filetypes)
            candidates = result['candidates']
            # Note: cannot use set() for dict
            if dup:
                # Remove duplicates
                candidates = uniq_list_dict(candidates)
            merged_results.append({
                'complete_position': result['complete_position'],
                'mark': result['source'].mark,
                'dup': dup,
                'candidates': candidates,
                'source_name': result['source'].name,
                'rank': rank,
            })

    # Async if at least one source has not finished gathering yet.
    is_async = len([x for x in results if x['is_async']]) > 0

    self.debug('merged_results: end')
    return {
        'queue_id': queue_id,
        'is_async': is_async,
        'merged_results': merged_results,
    }
def test_uniq_list_dict():
    """Duplicate dicts (same key/value pairs) collapse to the first one."""
    deduped = util.uniq_list_dict([
        {'abbr': 'word', 'word': 'foobar'},
        {'word': 'bar'},
        {'word': 'foobar', 'abbr': 'word'},
        {'word': 'baz'},
    ])
    assert deduped == [
        {'word': 'foobar', 'abbr': 'word'},
        {'word': 'bar'},
        {'word': 'baz'},
    ]
def _get_candidates(self, result, context_input, next_input):
    """Filter, sort and convert one source's candidates.

    Returns the final candidate list, or None when the source has
    nothing to offer.
    """
    source = result['source']

    # Merge in candidates from a still-running async gather.
    if result['is_async']:
        self._gather_async_results(result, source)
    if not result['candidates']:
        return None

    # Private copy of the source context for this filtering pass.
    ctx = copy.deepcopy(result['context'])
    ctx['input'] = context_input
    ctx['next_input'] = next_input
    ctx['complete_str'] = context_input[ctx['char_position']:]
    ctx['is_sorted'] = False

    # smartcase/camelcase: case-sensitive only while the typed text
    # contains an upper-case letter.
    if ctx['smartcase'] or ctx['camelcase']:
        ctx['ignorecase'] = not re.search(r'[A-Z]', ctx['complete_str'])
    saved_ignorecase = ctx['ignorecase']

    # --- Match ---
    matchers = [self._filters[x] for x in source.matchers
                if x in self._filters]
    use_matcher_key = source.matcher_key != ''
    if use_matcher_key:
        # Temporarily substitute matcher_key for the word key.
        for candidate in ctx['candidates']:
            candidate['__save_word'] = candidate['word']
            candidate['word'] = candidate[source.matcher_key]
    for matcher in matchers:
        self._process_filter(matcher, ctx, source.max_candidates)
    if use_matcher_key:
        # Put the original word key back.
        for candidate in ctx['candidates']:
            candidate['word'] = candidate['__save_word']

    # --- Sort and Convert ---
    sorters = [self._filters[x] for x in source.sorters
               if x in self._filters]
    converters = [self._filters[x] for x in source.converters
                  if x in self._filters]
    for post_filter in sorters + converters:
        self._process_filter(post_filter, ctx, source.max_candidates)

    # A sorter may leave pre-sorted buckets; flatten them.
    if (isinstance(ctx['candidates'], dict) and
            'sorted_candidates' in ctx['candidates']):
        buckets = ctx['candidates']['sorted_candidates']
        ctx['candidates'] = []
        for bucket in buckets:
            ctx['candidates'] += bucket

    ctx['ignorecase'] = saved_ignorecase

    # Let the source post-process the filtered list.
    if hasattr(source, 'on_post_filter'):
        ctx['candidates'] = source.on_post_filter(ctx)

    mark = source.mark + ' '
    for candidate in ctx['candidates']:
        # Decorate: icase and the source's mark in the menu column.
        candidate['icase'] = 1
        menu = candidate.get('menu', '')
        if mark != ' ' and not menu.startswith(mark):
            candidate['menu'] = mark + menu
        if source.dup:
            candidate['dup'] = 1

    # Note: cannot use set() for dict
    if source.dup:
        # Remove duplicates
        ctx['candidates'] = uniq_list_dict(ctx['candidates'])

    return ctx['candidates']
class Child(logger.LoggingMixin):
    """Deoplete child process: loads sources/filters and serves
    msgpack-RPC requests read from stdin."""

    def __init__(self, vim):
        self.name = 'child'

        self._vim = vim
        self._filters = {}
        self._sources = {}
        self._profile_flag = None
        self._profile_start_time = 0
        self._loaded_sources = {}
        self._loaded_filters = {}
        self._source_errors = defaultdict(int)
        self._prev_results = {}
        # msgpack stream codecs for the stdin/stdout protocol.
        self._unpacker = msgpack.Unpacker(
            encoding='utf-8', unicode_errors='surrogateescape')
        self._packer = msgpack.Packer(
            use_bin_type=True, encoding='utf-8',
            unicode_errors='surrogateescape')
        self._ignore_sources = []

    def main_loop(self, stdout):
        """Read msgpack requests from stdin until EOF and answer them."""
        while True:
            chunk = sys.stdin.buffer.raw.read(102400)
            if chunk is None:
                # Nothing available yet; try again.
                continue
            if chunk == b'':
                # EOF
                return

            self._unpacker.feed(chunk)
            self.debug('_read: %d bytes', len(chunk))

            for request in self._unpacker:
                name = request['name']
                args = request['args']
                queue_id = request['queue_id']
                self.debug('main_loop: %s begin', name)

                reply = self.main(name, args, queue_id)
                if reply:
                    self._write(stdout, reply)

                self.debug('main_loop: end')

    def main(self, name, args, queue_id):
        """Dispatch one request; only merge_results produces a reply."""
        if name == 'merge_results':
            return self._merge_results(args[0], queue_id)

        handlers = {
            'enable_logging': lambda: self._enable_logging(),
            'add_source': lambda: self._add_source(args[0]),
            'add_filter': lambda: self._add_filter(args[0]),
            'set_source_attributes':
                lambda: self._set_source_attributes(args[0]),
            'on_event': lambda: self._on_event(args[0]),
        }
        if name in handlers:
            handlers[name]()
        return None

    def _write(self, stdout, expr):
        """Serialize expr with msgpack and flush it to stdout."""
        stdout.buffer.write(self._packer.pack(expr))
        stdout.flush()

    def _enable_logging(self):
        """Turn on debug logging as configured on the Vim side."""
        log_settings = self._vim.vars['deoplete#_logging']
        logger.setup(self._vim, log_settings['level'],
                     log_settings['logfile'])
        self.is_debug_enabled = True

    def _add_source(self, path):
        """Import and register the source module at path."""
        source = None
        try:
            Source = import_plugin(path, 'source', 'Source')
            if not Source:
                return

            source = Source(self._vim)
            name = os.path.splitext(os.path.basename(path))[0]
            source.name = getattr(source, 'name', name)
            source.path = path
            if source.name in self._loaded_sources:
                # Duplicated name
                error_tb(self._vim,
                         'Duplicated source: %s' % source.name)
                error_tb(self._vim, 'path: "%s" "%s"' %
                         (path, self._loaded_sources[source.name]))
                source = None
        except Exception:
            error_tb(self._vim, 'Could not load source: %s' % path)
        finally:
            if source:
                self._loaded_sources[source.name] = path
                self._sources[source.name] = source
                self.debug('Loaded Source: %s (%s)', source.name, path)

    def _add_filter(self, path):
        """Import and register the filter module at path."""
        f = None
        try:
            Filter = import_plugin(path, 'filter', 'Filter')
            if not Filter:
                return

            f = Filter(self._vim)
            name = os.path.splitext(os.path.basename(path))[0]
            f.name = getattr(f, 'name', name)
            f.path = path
            if f.name in self._loaded_filters:
                # Duplicated name
                error_tb(self._vim, 'Duplicated filter: %s' % f.name)
                error_tb(self._vim, 'path: "%s" "%s"' %
                         (path, self._loaded_filters[f.name]))
                f = None
        except Exception:
            # Exception occurred when loading a filter.  Log stack trace.
            error_tb(self._vim, 'Could not load filter: %s' % path)
        finally:
            if f:
                self._loaded_filters[f.name] = path
                self._filters[f.name] = f
                self.debug('Loaded Filter: %s (%s)', f.name, path)

    def _merge_results(self, context, queue_id):
        """Gather results from all sources and merge them into a reply."""
        self.debug('merged_results: begin')
        results = self._gather_results(context)

        merged_results = []
        for result in [x for x in results
                       if not self._is_skip(x['context'], x['source'])]:
            if self._update_result(result, context['input'],
                                   context['next_input']):
                rank = get_custom(context['custom'],
                                  result['source'].name,
                                  'rank', result['source'].rank)
                merged_results.append({
                    'complete_position': result['complete_position'],
                    'candidates': result['candidates'],
                    'rank': rank,
                })

        # Async while at least one source is still gathering.
        is_async = len([x for x in results if x['is_async']]) > 0

        self.debug('merged_results: end')
        return {
            'queue_id': queue_id,
            'is_async': is_async,
            'merged_results': merged_results,
        }

    def _gather_results(self, context):
        """Run every applicable source and collect its raw result."""
        results = []
        for source in [x[1] for x in self._itersource(context)]:
            try:
                if (source.disabled_syntaxes and
                        'syntax_names' not in context):
                    context['syntax_names'] = get_syn_names(self._vim)

                ctx = copy.deepcopy(context)

                # Locate where this source's completion starts.
                charpos = source.get_complete_position(ctx)
                if charpos >= 0 and source.is_bytepos:
                    charpos = bytepos2charpos(
                        ctx['encoding'], ctx['input'], charpos)
                ctx['char_position'] = charpos
                ctx['complete_position'] = charpos2bytepos(
                    ctx['encoding'], ctx['input'], charpos)
                ctx['complete_str'] = ctx['input'][ctx['char_position']:]

                if charpos < 0 or self._is_skip(ctx, source):
                    if source.name in self._prev_results:
                        self._prev_results.pop(source.name)
                    # Skip
                    continue

                # Reuse a cached result when it is still valid.
                if (source.name in self._prev_results and
                        self._use_previous_result(
                            context, self._prev_results[source.name],
                            source.is_volatile)):
                    results.append(self._prev_results[source.name])
                    continue

                ctx['is_async'] = False
                ctx['is_refresh'] = True

                # Clamp display widths to the source's limits, with a
                # floor so columns stay readable when non-zero.
                ctx['max_abbr_width'] = min(source.max_abbr_width,
                                            ctx['max_abbr_width'])
                ctx['max_kind_width'] = min(source.max_kind_width,
                                            ctx['max_kind_width'])
                ctx['max_menu_width'] = min(source.max_menu_width,
                                            ctx['max_menu_width'])
                if ctx['max_abbr_width'] > 0:
                    ctx['max_abbr_width'] = max(20, ctx['max_abbr_width'])
                if ctx['max_kind_width'] > 0:
                    ctx['max_kind_width'] = max(10, ctx['max_kind_width'])
                if ctx['max_menu_width'] > 0:
                    ctx['max_menu_width'] = max(10, ctx['max_menu_width'])

                # Gathering
                self._profile_start(ctx, source.name)
                ctx['candidates'] = source.gather_candidates(ctx)
                self._profile_end(source.name)

                if ctx['candidates'] is None:
                    continue

                ctx['candidates'] = convert2candidates(ctx['candidates'])

                result = {
                    'name': source.name,
                    'source': source,
                    'context': ctx,
                    'is_async': ctx['is_async'],
                    'prev_linenr': ctx['position'][1],
                    'prev_input': ctx['input'],
                    'input': ctx['input'],
                    'complete_position': ctx['complete_position'],
                    'candidates': ctx['candidates'],
                }
                self._prev_results[source.name] = result
                results.append(result)
            except Exception as exc:
                # Disable a source after repeated failures unless it
                # asked to fail silently.
                self._source_errors[source.name] += 1
                if source.is_silent:
                    continue
                if self._source_errors[source.name] > 2:
                    error(self._vim,
                          'Too many errors from "%s". '
                          'This source is disabled until Neovim '
                          'is restarted.' % source.name)
                    self._ignore_sources.append(source.name)
                    continue
                error_tb(self._vim,
                         'Error from %s: %r' % (source.name, exc))
        return results

    def _gather_async_results(self, result, source):
        """Append candidates produced by an async source since last poll."""
        try:
            context = result['context']
            context['is_refresh'] = False
            async_candidates = source.gather_candidates(context)
            result['is_async'] = context['is_async']
            if async_candidates is None:
                return
            context['candidates'] += convert2candidates(async_candidates)
        except Exception as exc:
            # Same escalation policy as _gather_results.
            self._source_errors[source.name] += 1
            if source.is_silent:
                return
            if self._source_errors[source.name] > 2:
                error(self._vim,
                      'Too many errors from "%s". '
                      'This source is disabled until Neovim '
                      'is restarted.' % source.name)
                self._ignore_sources.append(source.name)
            else:
                error_tb(self._vim,
                         'Error from %s: %r' % (source.name, exc))

    def _process_filter(self, f, context, max_candidates):
        """Apply one filter to context's candidates, honoring any
        pre-sorted candidate buckets and the candidate cap."""
        try:
            self._profile_start(context, f.name)
            if (isinstance(context['candidates'], dict) and
                    'sorted_candidates' in context['candidates']):
                # Filter each pre-sorted bucket separately.
                filtered = []
                context['is_sorted'] = True
                for candidates in (
                        context['candidates']['sorted_candidates']):
                    context['candidates'] = candidates
                    filtered += f.filter(context)
            else:
                filtered = f.filter(context)
            if max_candidates > 0:
                filtered = filtered[:max_candidates]
            context['candidates'] = filtered
            self._profile_end(f.name)
        except Exception:
            error_tb(self._vim, 'Errors from: %s' % f)

    def _update_result(self, result, context_input, next_input):
        """Refresh, filter and decorate one source's result in place;
        returns the result or None when nothing remains."""
        source = result['source']

        # Pull in anything a still-running async source has produced.
        if result['is_async']:
            self._gather_async_results(result, source)
        if not result['candidates']:
            return None

        # Work on a private copy of the source context.
        ctx = copy.deepcopy(result['context'])
        ctx['input'] = context_input
        ctx['next_input'] = next_input
        ctx['complete_str'] = context_input[ctx['char_position']:]
        ctx['is_sorted'] = False

        # smartcase/camelcase: upper-case input disables ignorecase
        # while the filters run.
        if ((ctx['smartcase'] or ctx['camelcase']) and
                re.search(r'[A-Z]', ctx['complete_str'])):
            ctx['ignorecase'] = 0
        saved_ignorecase = ctx['ignorecase']

        # Run every enabled matcher, sorter and converter in order.
        for filter_name in [x for x in
                            source.matchers + source.sorters +
                            source.converters
                            if x in self._filters]:
            self._process_filter(self._filters[filter_name], ctx,
                                 source.max_candidates)
        ctx['ignorecase'] = saved_ignorecase

        # Give the source a final chance to adjust its candidates.
        if hasattr(source, 'on_post_filter'):
            ctx['candidates'] = source.on_post_filter(ctx)

        mark = source.mark + ' '
        dup = bool(source.filetypes)
        for candidate in ctx['candidates']:
            # Decorate: icase and the source's mark in the menu column.
            candidate['icase'] = 1
            menu = candidate.get('menu', '')
            if mark != ' ' and not menu.startswith(mark):
                candidate['menu'] = mark + menu
            if dup:
                candidate['dup'] = 1

        # Note: cannot use set() for dict
        if dup:
            # Remove duplicates
            ctx['candidates'] = uniq_list_dict(ctx['candidates'])

        result['candidates'] = ctx['candidates']
        return result if result['candidates'] else None
def _get_candidates(self, result: Result,
                    context_input: str, next_input: str) -> Candidates:
    """Filter, sort, convert and decorate one source's candidates.

    Returns the final candidate list ([] when nothing remains).
    """
    source = result['source']

    # Merge in candidates from a still-running async gather.
    if result['is_async']:
        self._gather_async_results(result, source)
    if not result['candidates']:
        return []

    # Shallow copy is enough here; candidates are deep-copied below
    # before the converters can mutate them.
    ctx = copy.copy(result['context'])
    ctx['input'] = context_input
    ctx['next_input'] = next_input
    ctx['complete_str'] = context_input[ctx['char_position']:]
    ctx['is_sorted'] = False

    # smartcase/camelcase: case-sensitive only while the typed text
    # contains an upper-case letter.
    if ctx['smartcase'] or ctx['camelcase']:
        ctx['ignorecase'] = not re.search(r'[A-Z]', ctx['complete_str'])
    saved_ignorecase = ctx['ignorecase']

    # --- Match ---
    matchers = [self._filters[x] for x in source.matchers
                if x in self._filters]
    use_matcher_key = source.matcher_key != ''
    if use_matcher_key:
        original_candidates = ctx['candidates']
        # Temporarily substitute matcher_key for the word key.
        for candidate in original_candidates:
            candidate['__save_word'] = candidate['word']
            candidate['word'] = candidate[source.matcher_key]
    for matcher in matchers:
        self._process_filter(matcher, ctx, source.max_candidates)
    if use_matcher_key:
        # Restore the word key and drop the stash.
        for candidate in original_candidates:
            candidate['word'] = candidate['__save_word']
            del candidate['__save_word']

    # --- Sort ---
    sorters = [self._filters[x] for x in source.sorters
               if x in self._filters]
    for sorter in sorters:
        self._process_filter(sorter, ctx, source.max_candidates)

    # Note: converter may break candidates
    ctx['candidates'] = copy.deepcopy(ctx['candidates'])

    # --- Convert ---
    converters = [self._filters[x] for x in source.converters
                  if x in self._filters]
    for converter in converters:
        self._process_filter(converter, ctx, source.max_candidates)

    # A sorter may leave pre-sorted buckets; flatten them.
    if (isinstance(ctx['candidates'], dict) and
            'sorted_candidates' in ctx['candidates']):
        buckets = ctx['candidates']['sorted_candidates']
        ctx['candidates'] = []
        for bucket in buckets:
            ctx['candidates'] += bucket

    ctx['ignorecase'] = saved_ignorecase

    # Let the source post-process the filtered list.
    if hasattr(source, 'on_post_filter'):
        ctx['candidates'] = source.on_post_filter(ctx)

    mark = source.mark + ' '
    # refresh_always + auto_complete (outside eskk henkan) forces the
    # popup to refresh via the 'equal' flag.
    refresh_always = self._vim.call(
        'deoplete#custom#_get_option', 'refresh_always')
    auto_complete = self._vim.call(
        'deoplete#custom#_get_option', 'auto_complete')
    eskk_check = self._vim.call(
        'deoplete#util#check_eskk_phase_henkan')
    refresh = bool(refresh_always and auto_complete and not eskk_check)

    for candidate in ctx['candidates']:
        candidate['icase'] = 1
        candidate['equal'] = refresh
        candidate['source'] = source.name
        # Prefix the source's mark in the menu column.
        menu = candidate.get('menu', '')
        if mark != ' ' and not menu.startswith(mark):
            candidate['menu'] = mark + menu
        if source.dup:
            candidate['dup'] = 1

    # Note: cannot use set() for dict
    if source.dup:
        # Remove duplicates
        ctx['candidates'] = uniq_list_dict(ctx['candidates'])

    return list(ctx['candidates'])
def _get_candidates(self, result, context_input, next_input):
    """Run matchers, sorters and converters over one source's result.

    Returns the decorated candidate list, or None when the source has
    no candidates.
    """
    source = result['source']

    # Gather async results
    if result['is_async']:
        self._gather_async_results(result, source)
    if not result['candidates']:
        return None

    # Source context: a private deep copy for this pass.
    ctx = copy.deepcopy(result['context'])
    ctx['input'] = context_input
    ctx['next_input'] = next_input
    ctx['complete_str'] = context_input[ctx['char_position']:]
    ctx['is_sorted'] = False

    # Set ignorecase from smartcase/camelcase rules.
    if ctx['smartcase'] or ctx['camelcase']:
        if re.search(r'[A-Z]', ctx['complete_str']):
            ctx['ignorecase'] = False
        else:
            ctx['ignorecase'] = True
    ignorecase = ctx['ignorecase']

    # Match
    swap_words = source.matcher_key != ''
    if swap_words:
        # Convert word key to matcher_key
        for candidate in ctx['candidates']:
            candidate['__save_word'] = candidate['word']
            candidate['word'] = candidate[source.matcher_key]
    for name in source.matchers:
        if name in self._filters:
            self._process_filter(self._filters[name], ctx,
                                 source.max_candidates)
    if swap_words:
        # Restore word key
        for candidate in ctx['candidates']:
            candidate['word'] = candidate['__save_word']

    # Sort and Convert
    for name in source.sorters + source.converters:
        if name in self._filters:
            self._process_filter(self._filters[name], ctx,
                                 source.max_candidates)

    # Flatten any pre-sorted candidate buckets left by a sorter.
    if (isinstance(ctx['candidates'], dict) and
            'sorted_candidates' in ctx['candidates']):
        sorted_candidates = ctx['candidates']['sorted_candidates']
        ctx['candidates'] = []
        for candidates in sorted_candidates:
            ctx['candidates'] += candidates

    ctx['ignorecase'] = ignorecase

    # On post filter
    if hasattr(source, 'on_post_filter'):
        ctx['candidates'] = source.on_post_filter(ctx)

    mark = source.mark + ' '
    for candidate in ctx['candidates']:
        # Set default menu and icase
        candidate['icase'] = 1
        menu = candidate.get('menu', '')
        if mark != ' ' and not menu.startswith(mark):
            candidate['menu'] = mark + menu
        if source.dup:
            candidate['dup'] = 1

    # Note: cannot use set() for dict
    if source.dup:
        # Remove duplicates
        ctx['candidates'] = uniq_list_dict(ctx['candidates'])

    return ctx['candidates']