def _GetExtraData(self, completion):
    """Build the optional extra-data payload for a completion result.

    Returns { 'location': {...} } describing where the completion's
    definition lives, or None when the completion carries no usable
    location fields.  The +1 on the column converts the completer's
    0-based column to the 1-based one clients expect.
    """
    loc = {}
    path = completion['file_path']
    if path:
        loc['filepath'] = ToUtf8IfNeeded(path)
    line = completion['line']
    if line:
        loc['line_num'] = line
    col = completion['column']
    if col:
        loc['column_num'] = col + 1
    return {'location': loc} if loc else None
def EchoTextVimWidth(text):
    """Echo *text* on Vim's command line, truncated to ~90% of the window
    width so Vim does not pop the 'Press ENTER' prompt.

    Temporarily disables 'ruler' and 'showcmd' (both consume command-line
    space) and restores their previous values afterwards, even if echoing
    fails.
    """
    vim_width = GetIntValue('&columns')
    truncated_text = ToUtf8IfNeeded(text)[:int(vim_width * 0.9)]
    # BUG FIX: str.replace returns a new string; the original discarded the
    # result, so embedded newlines were never flattened into spaces.
    truncated_text = truncated_text.replace('\n', ' ')
    old_ruler = GetIntValue('&ruler')
    old_showcmd = GetIntValue('&showcmd')
    vim.command('set noruler noshowcmd')
    try:
        EchoText(truncated_text, False)
    finally:
        # Restore the user's settings even if EchoText raised.
        vim.command('let &ruler = {0}'.format(old_ruler))
        vim.command('let &showcmd = {0}'.format(old_showcmd))
def ComputeCandidatesInner(self, request_data):
    """Ask the (libclang-backed) completer for completions at the request's
    start column.

    Returns a list of converted completion dicts, or None for an unnamed
    buffer.  Raises RuntimeError while the translation unit is still being
    parsed, when no compile flags can be determined, or when the completer
    yields no results.
    """
    filename = request_data['filepath']
    if not filename:
        return

    if self._completer.UpdatingTranslationUnit(ToUtf8IfNeeded(filename)):
        raise RuntimeError(PARSING_FILE_MESSAGE)

    flags = self._FlagsForRequest(request_data)
    if not flags:
        raise RuntimeError(NO_COMPILE_FLAGS_MESSAGE)

    unsaved_files = self.GetUnsavedFilesVector(request_data)
    line_num = request_data['line_num']
    start_column = request_data['start_column']

    # Hold the per-file lock so we don't query while another thread is
    # compiling the same file.
    with self._files_being_compiled.GetExclusive(filename):
        raw_completions = self._completer.CandidatesForLocationInFile(
            ToUtf8IfNeeded(filename),
            line_num,
            start_column,
            unsaved_files,
            flags)

    if not raw_completions:
        raise RuntimeError(NO_COMPLETIONS_MESSAGE)
    return [ConvertCompletionData(completion)
            for completion in raw_completions]
def ConvertDiagnosticToQfFormat(diagnostic):
    """Translate a ycmd diagnostic dict into a Vim quickfix-list entry.

    See :h getqflist for the meaning of the keys.  As usual, Vim is
    inconsistent about 1- vs 0-based line/column numbers across its APIs;
    getqflist wants them 1-based, hence the +1 adjustments below.
    """
    location = diagnostic['location']
    qf_entry = {
        'bufnr': GetBufferNumberForFilename(location['filepath']),
        'lnum': location['line_num'] + 1,
        'col': location['column_num'] + 1,
        'text': ToUtf8IfNeeded(diagnostic['text']),
        'type': diagnostic['kind'],
        'valid': 1,
    }
    return qf_entry
def _LocationForGoTo(self, goto_function, request_data, reparse=True):
    """Invoke the completer method named by *goto_function* at the cursor
    location and return whatever location object it produces.

    Raises ValueError when the buffer has no file name or when no compile
    flags could be determined for the request.
    """
    filename = request_data['filepath']
    if not filename:
        raise ValueError(INVALID_FILE_MESSAGE)

    flags = self._FlagsForRequest(request_data)
    if not flags:
        raise ValueError(NO_COMPILE_FLAGS_MESSAGE)

    unsaved_files = self.GetUnsavedFilesVector(request_data)
    goto = getattr(self._completer, goto_function)
    return goto(ToUtf8IfNeeded(filename),
                request_data['line_num'],
                request_data['column_num'],
                unsaved_files,
                flags,
                reparse)
def _GetExtraData(self, completion):
    """Build the optional extra-data payload for a (module-based)
    completion result.

    Returns { 'location': {...} } pointing at the completion's defining
    module, or None when no location fields are present.  The +1 converts
    the completer's 0-based column to the 1-based one clients expect.
    """
    location = {}
    module_path = completion['module_path']
    if module_path:
        location['filepath'] = ToUtf8IfNeeded(module_path)
    if completion['line']:
        location['line_num'] = completion['line']
    if completion['column']:
        location['column_num'] = completion['column'] + 1
    if not location:
        return None
    return {'location': location}
def FilterAndSortCandidates(self, candidates, query):
    """Filter and sort *candidates* against the user's *query*.

    Accepts both an omni-completer style result (a dict carrying a 'words'
    key) and a server-style result (a plain list of candidate dicts), and
    picks whichever text property the candidates actually carry to sort on.

    NOTE: the call below resolves to the module-level FilterAndSortCandidates
    function (this method merely shadows its name), so it is not recursion.
    """
    if not candidates:
        return []

    # Unwrap omni-completer style results.
    if 'words' in candidates:
        candidates = candidates['words']

    first = candidates[0]
    if 'word' in first:
        sort_property = 'word'
    elif 'insertion_text' in first:
        sort_property = 'insertion_text'
    else:
        sort_property = ''

    return FilterAndSortCandidates(candidates,
                                   sort_property,
                                   ToUtf8IfNeeded(query))
def OnFileReadyToParse(self, request_data):
    """Reparse the translation unit for the current file and return its
    diagnostics, capped at self._max_diagnostics_to_display.

    Also refreshes self._diagnostic_store with the filtered diagnostics.
    Raises ValueError when the buffer has no file name or no compile flags
    could be determined.
    """
    filename = request_data['filepath']
    if not filename:
        raise ValueError(INVALID_FILE_MESSAGE)

    flags = self._FlagsForRequest(request_data)
    if not flags:
        raise ValueError(NO_COMPILE_FLAGS_MESSAGE)

    # Serialize compilation of the same file across threads.
    with self._files_being_compiled.GetExclusive(filename):
        diagnostics = self._completer.UpdateTranslationUnit(
            ToUtf8IfNeeded(filename),
            self.GetUnsavedFilesVector(request_data),
            flags)

    diagnostics = _FilterDiagnostics(diagnostics)
    self._diagnostic_store = DiagnosticsToDiagStructure(diagnostics)
    capped = diagnostics[:self._max_diagnostics_to_display]
    return [responses.BuildDiagnosticData(diag) for diag in capped]
def _FixIt(self, request_data):
    """Compute the available fix-it edits at the cursor location.

    An empty set of fixits is not an error -- responding nicely to that is
    left to the client -- so this always returns a FixIt response.

    Raises ValueError when the buffer has no file name or no compile flags
    could be determined.
    """
    filename = request_data['filepath']
    if not filename:
        raise ValueError(INVALID_FILE_MESSAGE)

    flags = self._FlagsForRequest(request_data)
    if not flags:
        raise ValueError(NO_COMPILE_FLAGS_MESSAGE)

    files = self.GetUnsavedFilesVector(request_data)
    line = request_data['line_num']
    column = request_data['column_num']
    # FIX: getattr with a string literal was just an obfuscated attribute
    # access; call the method directly.
    fixits = self._completer.GetFixItsForLocationInFile(
        ToUtf8IfNeeded(filename), line, column, files, flags, True)

    return responses.BuildFixItResponse(fixits)
def CompletionStartColumn(line_value, column_num, filetype): """Returns the 1-based index where the completion query should start. So if the user enters: foo.bar^ with the cursor being at the location of the caret (so the character *AFTER* 'r'), then the starting column would be the index of the letter 'b'.""" # NOTE: column_num and other numbers on the wire are byte indices, but we need # to walk codepoints for identifier checks. utf8_line_value = ToUtf8IfNeeded(line_value) unicode_line_value = ToUnicodeIfNeeded(line_value) codepoint_column_num = len( unicode(utf8_line_value[:column_num - 1], 'utf8')) + 1 # -1 and then +1 to account for difference betwen 0-based and 1-based # indices/columns codepoint_start_column = StartOfLongestIdentifierEndingAtIndex( unicode_line_value, codepoint_column_num - 1, filetype) + 1 return len( unicode_line_value[:codepoint_start_column - 1].encode('utf8')) + 1
def _GetSemanticInfo(self, request_data, func, reparse=True):
    """Run the completer query named by *func* at the cursor location and
    wrap its answer in a display-message response.

    Falls back to a 'No semantic information available' message when the
    query returns nothing.  Raises ValueError when the buffer has no file
    name or no compile flags could be determined.
    """
    filename = request_data['filepath']
    if not filename:
        raise ValueError(INVALID_FILE_MESSAGE)

    flags = self._FlagsForRequest(request_data)
    if not flags:
        raise ValueError(NO_COMPILE_FLAGS_MESSAGE)

    unsaved_files = self.GetUnsavedFilesVector(request_data)
    semantic_query = getattr(self._completer, func)
    message = semantic_query(ToUtf8IfNeeded(filename),
                             request_data['line_num'],
                             request_data['column_num'],
                             unsaved_files,
                             flags,
                             reparse)
    if not message:
        message = "No semantic information available"
    return responses.BuildDisplayMessageResponse(message)
def _SanitizeFlags(flags):
    """Drops unsafe flags. Currently these are only -arch flags; they tend
    to crash libclang.

    Returns a ycm_core.StringVector of the surviving flags, utf8-encoded.
    """
    sanitized_flags = []
    saw_arch = False
    # FIX: the original used enumerate() but never used the index.
    for flag in flags:
        if flag == '-arch':
            # Skip the flag itself and remember to skip its argument too.
            saw_arch = True
            continue
        elif flag.startswith('-arch'):
            # Joined form (e.g. '-arch<something>'): no separate argument.
            continue
        elif saw_arch:
            # This flag is the argument of a preceding bare '-arch'.
            saw_arch = False
            continue
        sanitized_flags.append(flag)

    vector = ycm_core.StringVector()
    for flag in sanitized_flags:
        vector.append(ToUtf8IfNeeded(flag))
    return vector
def AddIdentifiersFromTagFiles(self, tag_files):
    """Feed identifiers from the given ctags files into the identifier
    database.

    Files that are missing/unreadable, or unchanged (by mtime) since the
    last time we processed them, are skipped.
    """
    absolute_paths_to_tag_files = ycm_core.StringVector()
    for tag_file in tag_files:
        try:
            current_mtime = os.path.getmtime(tag_file)
        except OSError:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; getmtime raises OSError for a
            # missing or unreadable file.
            continue
        last_mtime = self._tags_file_last_mtime[tag_file]

        # We don't want to repeatedly process the same file over and over; we only
        # process if it's changed since the last time we looked at it
        if current_mtime <= last_mtime:
            continue
        self._tags_file_last_mtime[tag_file] = current_mtime
        absolute_paths_to_tag_files.append(ToUtf8IfNeeded(tag_file))

    if not absolute_paths_to_tag_files:
        return

    self._completer.AddIdentifiersToDatabaseFromTagFiles(
        absolute_paths_to_tag_files)
def CompletionStartColumn(line_value, column_num): """Returns the 1-based index where the completion query should start. So if the user enters: foo.bar^ with the cursor being at the location of the caret (so the character *AFTER* 'r'), then the starting column would be the index of the letter 'b'.""" # NOTE: column_num and other numbers on the wire are byte indices, but we need # to walk codepoints for IsIdentifierChar. start_column = column_num utf8_line_value = ToUtf8IfNeeded(line_value) unicode_line_value = ToUnicodeIfNeeded(line_value) codepoint_column_num = len( unicode(utf8_line_value[:column_num - 1], 'utf8')) + 1 # -2 because start_column is 1-based (so -1) and another -1 because we want to # look at the previous character while (codepoint_column_num > 1 and IsIdentifierChar(unicode_line_value[codepoint_column_num - 2])): start_column -= len(unicode_line_value[codepoint_column_num - 2].encode('utf8')) codepoint_column_num -= 1 return start_column
def OnFileReadyToParse(self, request_data):
    """Reparse the translation unit for the current file and return its
    diagnostics, capped at self._max_diagnostics_to_display.

    Also refreshes self._diagnostic_store with the filtered diagnostics.
    Raises ValueError for an unnamed buffer, a file too short to bother
    parsing, or a request with no compile flags.
    """
    filename = request_data['filepath']
    # FIX: validate the filename *before* indexing file_data with it; the
    # original looked up request_data['file_data'][filename] first, so an
    # unnamed buffer raised KeyError instead of the intended ValueError.
    if not filename:
        raise ValueError(INVALID_FILE_MESSAGE)

    contents = request_data['file_data'][filename]['contents']
    if contents.count('\n') < MIN_LINES_IN_FILE_TO_PARSE:
        raise ValueError(FILE_TOO_SHORT_MESSAGE)

    flags = self._FlagsForRequest(request_data)
    if not flags:
        raise ValueError(NO_COMPILE_FLAGS_MESSAGE)

    diagnostics = self._completer.UpdateTranslationUnit(
        ToUtf8IfNeeded(filename),
        self.GetUnsavedFilesVector(request_data),
        flags)

    diagnostics = _FilterDiagnostics(diagnostics)
    self._diagnostic_store = DiagnosticsToDiagStructure(diagnostics)
    return [responses.BuildDiagnosticData(x)
            for x in diagnostics[:self._max_diagnostics_to_display]]
def OnBufferUnload(self, request_data):
    """Drop the completer's cached data for a buffer the user unloaded."""
    unloaded_file = ToUtf8IfNeeded(request_data['unloaded_buffer'])
    self._completer.DeleteCachesForFile(unloaded_file)
def _CallExtraConfParentForFile(module, filename):
    """Ask the extra-conf *module* for the parent of *filename*.

    Falls back to returning *filename* itself (utf8-encoded) when the
    module's ParentForFile hook is missing or fails.
    """
    filename = ToUtf8IfNeeded(filename)
    try:
        return module.ParentForFile(filename)
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; user-supplied extra-conf code may fail in many
        # ways, so we still deliberately catch all ordinary exceptions.
        return filename
def addOne(ftype, name, description, doc):
    """Append one completion entry, built from the given fields, to
    ycm_completions (presumably defined in the enclosing scope -- TODO
    confirm against the caller)."""
    detail = ftype + ': ' + description
    completion = responses.BuildCompletionData(
        ToUtf8IfNeeded(name),
        ToUtf8IfNeeded(detail),
        ToUtf8IfNeeded(FormatDocStr(doc)))
    ycm_completions.append(completion)
def _ParentForRequest(self, filename):
    """Return the parent file for *filename* as reported by self._flags."""
    utf8_filename = ToUtf8IfNeeded(filename)
    return self._flags.ParentForFile(utf8_filename)