def _CallGlobalExtraConfMethod(function_name):
  global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()
  if not (global_ycm_extra_conf and os.path.exists(global_ycm_extra_conf)):
    LOGGER.debug('No global extra conf, not calling method %s', function_name)
    return

  try:
    module = Load(global_ycm_extra_conf, force=True)
  except Exception:
    LOGGER.exception('Error occurred while loading global extra conf %s',
                     global_ycm_extra_conf)
    return

  if not module or not hasattr(module, function_name):
    LOGGER.debug('Global extra conf not loaded or no function %s',
                 function_name)
    return

  try:
    LOGGER.info('Calling global extra conf method %s on conf file %s',
                function_name, global_ycm_extra_conf)
    getattr(module, function_name)()
  except Exception:
    LOGGER.exception(
      'Error occurred while calling global extra conf method %s '
      'on conf file %s', function_name, global_ycm_extra_conf)
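# Illustrative sketch (not part of ycmd): the pattern above -- load a user's
# Python module, then call an optional hook on it by name, logging rather than
# propagating failures -- can be reproduced with importlib. The module path,
# the 'user_conf' module name and the hook name are hypothetical placeholders.
import importlib.util
import logging

LOG = logging.getLogger(__name__)


def call_optional_hook(module_path, hook_name):
  """Load |module_path| and call |hook_name|() on it, if the hook exists."""
  spec = importlib.util.spec_from_file_location('user_conf', module_path)
  if spec is None or spec.loader is None:
    LOG.debug('Could not build an import spec for %s', module_path)
    return
  module = importlib.util.module_from_spec(spec)
  try:
    spec.loader.exec_module(module)
  except Exception:
    LOG.exception('Failed to load %s', module_path)
    return
  hook = getattr(module, hook_name, None)
  if hook is None:
    LOG.debug('%s defines no %s hook', module_path, hook_name)
    return
  try:
    hook()
  except Exception:
    LOG.exception('Error calling %s from %s', hook_name, module_path)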
def FiletypeCompletionAvailable( self, filetypes ):
  try:
    self.GetFiletypeCompleter( filetypes )
    return True
  except Exception:
    LOGGER.exception( 'Semantic completion not available for %s', filetypes )
    return False
def _CleanUp( self ):
  if not self._server_keep_logfiles:
    if self._server_stderr:
      utils.RemoveIfExists( self._server_stderr )
      self._server_stderr = None

  if self._workspace_path and self._use_clean_workspace:
    try:
      shutil.rmtree( self._workspace_path )
    except OSError:
      LOGGER.exception( 'Failed to clean up workspace dir %s',
                        self._workspace_path )

  self._launcher_path = _PathToLauncherJar()
  self._launcher_config = _LauncherConfiguration()
  self._workspace_path = None
  self._java_project_dir = None
  self._received_ready_message = threading.Event()
  self._server_init_status = 'Not started'
  self._server_started = False
  self._server_handle = None
  self._connection = None
  self._started_message_sent = False

  self.ServerReset()
def GetSemanticTokens():
  LOGGER.info('Received semantic tokens request')

  request_data = RequestWrap(request.json)

  if not _server_state.FiletypeCompletionUsable(request_data['filetypes'],
                                                silent=True):
    return _JsonResponse(BuildSemanticTokensResponse(None))

  errors = None
  semantic_tokens = None

  try:
    filetype_completer = _server_state.GetFiletypeCompleter(
        request_data['filetypes'])
    semantic_tokens = filetype_completer.ComputeSemanticTokens(request_data)
  except Exception as exception:
    LOGGER.exception('Exception from semantic completer during tokens request')
    errors = [BuildExceptionResponse(exception, traceback.format_exc())]

  # No fallback for semantic tokens: the general completer is unlikely to be
  # able to offer anything useful here.
  return _JsonResponse(
      BuildSemanticTokensResponse(semantic_tokens, errors=errors))
def PollModule(module, filepath):
  """ Try to use the passed module in the selection process by calling
  CSharpSolutionFile on it """
  path_to_solutionfile = None
  module_hint = None
  if module:
    try:
      module_hint = module.CSharpSolutionFile(filepath)
      LOGGER.info('extra_conf_store suggests %s as solution file', module_hint)
      if module_hint:
        # Did we receive a full path or one relative to the config's location?
        candidates = [
            module_hint,
            os.path.join(os.path.dirname(getfile(module)), module_hint)
        ]
        # Try the assumptions.
        for path in candidates:
          if os.path.isfile(path):
            # The path seems to point to a solution file.
            path_to_solutionfile = path
            LOGGER.info('Using solution file %s selected by extra_conf_store',
                        path_to_solutionfile)
            break
    except AttributeError:
      # The config script might not provide solution file locations.
      LOGGER.exception('Could not retrieve solution for %s '
                       'from extra_conf_store', filepath)
  return path_to_solutionfile
def _ReaderLoop( self ):
  """ Read responses from TSServer and use them to resolve the
  DeferredResponse instances. """

  while True:
    self._tsserver_is_running.wait()

    try:
      message = self._ReadMessage()
    except ( RuntimeError, ValueError ):
      LOGGER.exception( 'Error while reading message from server' )
      if not self._ServerIsRunning():
        self._tsserver_is_running.clear()
      continue

    # We ignore events for now since we don't have a use for them.
    msgtype = message[ 'type' ]
    if msgtype == 'event':
      eventname = message[ 'event' ]
      LOGGER.info( 'Received %s event from TSServer', eventname )
      continue

    if msgtype != 'response':
      LOGGER.error( 'Unsupported message type %s', msgtype )
      continue

    seq = message[ 'request_seq' ]
    with self._pending_lock:
      if seq in self._pending:
        self._pending[ seq ].resolve( message )
        del self._pending[ seq ]
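# Illustrative sketch (not part of ycmd): the reader-loop idea above -- a
# single thread reads server messages and resolves whichever pending request
# matches the response's sequence number -- modelled with
# concurrent.futures.Future objects. 'read_message' is a hypothetical callable
# that returns parsed messages and None when the server goes away.
import threading
from concurrent.futures import Future


class ResponseDispatcher:
  def __init__(self):
    self._pending = {}
    self._lock = threading.Lock()

  def expect(self, seq):
    """Register interest in the response with sequence number |seq|."""
    future = Future()
    with self._lock:
      self._pending[seq] = future
    return future

  def reader_loop(self, read_message):
    while True:
      message = read_message()
      if message is None:          # hypothetical "server closed" signal
        return
      if message.get('type') != 'response':
        continue                   # events are ignored, as in the loop above
      with self._lock:
        future = self._pending.pop(message['request_seq'], None)
      if future is not None:
        future.set_result(message)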
def _GetDoc(self, request_data):
  try:
    definition = self._GetResponse('/find_definition', request_data)
    docs = [definition['context'], definition['docs']]
    return responses.BuildDetailedInfoResponse('\n---\n'.join(docs))
  except Exception:
    LOGGER.exception('Failed to find definition')
    raise RuntimeError('Can\'t lookup docs.')
def _GoToDefinition(self, request_data):
  try:
    definition = self._GetResponse('/find_definition', request_data)
    return responses.BuildGoToResponse(definition['file_path'],
                                       definition['line'],
                                       definition['column'] + 1)
  except Exception:
    LOGGER.exception('Failed to find definition')
    raise RuntimeError('Can\'t jump to definition.')
def _CurrentLine( self ):
  try:
    return self[ 'lines' ][ self[ 'line_num' ] - 1 ]
  except IndexError:
    LOGGER.exception( 'Client returned invalid line number %s '
                      'for file %s. Assuming empty',
                      self[ 'line_num' ],
                      self[ 'filepath' ] )
    return ''
def ComputeSignaturesInner(self, request_data):
  self._Reload(request_data)
  try:
    items = self._SendRequest('signatureHelp', {
        'file': request_data['filepath'],
        'line': request_data['line_num'],
        'offset': request_data['start_codepoint'],
        # triggerReason - optional and tricky to populate
    })
  except RuntimeError:
    # We get an exception when there are no results, so squash it.
    if LOGGER.isEnabledFor(logging.DEBUG):
      LOGGER.exception('No signatures from tsserver')
    return {}

  def MakeSignature(s):
    def GetTSDocs(docs_list):
      return '\n'.join(item['text'] for item in docs_list)

    label = _DisplayPartsToString(s['prefixDisplayParts'])
    parameters = []
    sep = _DisplayPartsToString(s['separatorDisplayParts'])
    for index, p in enumerate(s['parameters']):
      param = _DisplayPartsToString(p['displayParts'])
      start = len(label)
      end = start + len(param)

      label += param
      if index < len(s['parameters']) - 1:
        label += sep

      parameters.append({
          'documentation': GetTSDocs(p.get('documentation', [])),
          'label': [
              utils.CodepointOffsetToByteOffset(label, start),
              utils.CodepointOffsetToByteOffset(label, end)
          ]
      })

    label += _DisplayPartsToString(s['suffixDisplayParts'])

    return {
        'documentation': GetTSDocs(s.get('documentation', [])),
        'label': label,
        'parameters': parameters
    }

  return {
      'activeSignature': items['selectedItemIndex'],
      'activeParameter': items['argumentIndex'],
      'signatures': [MakeSignature(s) for s in items['items']]
  }
def _GoTo(self, request_data):
  try:
    return self._GoToDefinition(request_data)
  except Exception:
    LOGGER.exception('Failed to jump to definition')

  try:
    return self._GoToDeclaration(request_data)
  except Exception:
    LOGGER.exception('Failed to jump to declaration')
    raise RuntimeError('Can\'t jump to definition or declaration.')
def FiletypeCompletionAvailable(self, filetypes, silent=False):
  """Returns True if there is a ycmd semantic completer defined for any
  filetype in the list |filetypes|. Otherwise, returns False and prints an
  error to the log file, unless silent = True."""
  try:
    self.GetFiletypeCompleter(filetypes)
    return True
  except Exception:
    if not silent:
      LOGGER.exception('Semantic completion not available for %s', filetypes)
    return False
def _WriteRequest( self, request ):
  """Write a request to TSServer stdin."""

  serialized_request = utils.ToBytes( json.dumps( request ) + '\n' )
  with self._write_lock:
    try:
      self._tsserver_handle.stdin.write( serialized_request )
      self._tsserver_handle.stdin.flush()
    # IOError is an alias of OSError in Python 3.
    except ( AttributeError, IOError ):
      LOGGER.exception( SERVER_NOT_RUNNING_MESSAGE )
      raise RuntimeError( SERVER_NOT_RUNNING_MESSAGE )
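# Illustrative sketch (not part of ycmd): sending newline-delimited JSON
# requests to a child process's stdin, serialized under a lock so concurrent
# writers cannot interleave bytes. The 'my-server' command in the usage note
# is a hypothetical placeholder.
import json
import subprocess
import threading


class JsonLineWriter:
  def __init__(self, argv):
    self._handle = subprocess.Popen(argv, stdin=subprocess.PIPE)
    self._write_lock = threading.Lock()

  def write_request(self, request):
    data = (json.dumps(request) + '\n').encode('utf-8')
    with self._write_lock:
      try:
        self._handle.stdin.write(data)
        self._handle.stdin.flush()
      except (AttributeError, OSError):
        # The pipe is gone or was never created: the server is not running.
        raise RuntimeError('server is not running')


# Usage (hypothetical command):
# writer = JsonLineWriter(['my-server', '--stdio'])
# writer.write_request({'seq': 1, 'command': 'status'})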
def _GetGenericLSPCompleter(user_options, filetype):
  custom_lsp = user_options['language_server']
  for server_settings in custom_lsp:
    if filetype in server_settings['filetypes']:
      try:
        return generic_lsp_completer.GenericLSPCompleter(
            user_options, server_settings)
      except Exception:
        LOGGER.exception('Unable to instantiate generic completer for '
                         f'filetype { filetype }')
  # We might just use a built-in completer
  return None
def _StopServer( self ):
  with self._server_state_lock:
    if self._racerd_phandle:
      LOGGER.info( 'Stopping Racerd with PID %s', self._racerd_phandle.pid )
      self._racerd_phandle.terminate()
      try:
        utils.WaitUntilProcessIsTerminated( self._racerd_phandle,
                                            timeout = 5 )
        LOGGER.info( 'Racerd stopped' )
      except RuntimeError:
        LOGGER.exception( 'Error while stopping Racerd' )

    self._CleanUp()
def _StopServerNoLock(self):
  if self._ServerIsRunning():
    LOGGER.info('Stopping Tern server with PID %s', self._server_handle.pid)
    self._server_handle.terminate()
    try:
      utils.WaitUntilProcessIsTerminated(self._server_handle, timeout=5)
      LOGGER.info('Tern server stopped')
    except RuntimeError:
      LOGGER.exception('Error while stopping Tern server')

  self._CleanUp()
def _StopServerNoLock( self ):
  if self._ServerIsRunning():
    LOGGER.info( 'Stopping TSServer with PID %s', self._tsserver_handle.pid )
    try:
      self._SendCommand( 'exit' )
      utils.WaitUntilProcessIsTerminated( self._tsserver_handle,
                                          timeout = 5 )
      LOGGER.info( 'TSServer stopped' )
    except Exception:
      LOGGER.exception( 'Error while stopping TSServer' )

  self._CleanUp()
def _StopServer( self ):
  with self._server_state_mutex:
    if self._ServerIsRunning():
      LOGGER.info( 'Stopping Tern server with PID %s',
                   self._server_handle.pid )
      self._server_handle.terminate()
      try:
        utils.WaitUntilProcessIsTerminated( self._server_handle,
                                            timeout = 5 )
        LOGGER.info( 'Tern server stopped' )
      except RuntimeError:
        LOGGER.exception( 'Error while stopping Tern server' )

    self._CleanUp()
def _StopServer( self ):
  with self._tsserver_lock:
    if self._ServerIsRunning():
      LOGGER.info( 'Stopping TSServer with PID %s',
                   self._tsserver_handle.pid )
      try:
        self._SendCommand( 'exit' )
        utils.WaitUntilProcessIsTerminated( self._tsserver_handle,
                                            timeout = 5 )
        LOGGER.info( 'TSServer stopped' )
      except Exception:
        LOGGER.exception( 'Error while stopping TSServer' )

    self._CleanUp()
def _StopServer( self ): """ Stop the OmniSharp server using a lock. """ with self._server_state_lock: if self._ServerIsRunning(): LOGGER.info( 'Stopping OmniSharp server with PID %s', self._omnisharp_phandle.pid ) try: self._GetResponse( '/stopserver' ) utils.WaitUntilProcessIsTerminated( self._omnisharp_phandle, timeout = 5 ) LOGGER.info( 'OmniSharp server stopped' ) except Exception: LOGGER.exception( 'Error while stopping OmniSharp server' ) self._CleanUp()
def _StopServerNoLock(self): """ Stop the OmniSharp server using a lock. """ if self._ServerIsRunning(): LOGGER.info('Stopping OmniSharp server with PID %s', self._omnisharp_phandle.pid) try: self._TryToStopServer() self._ForceStopServer() utils.WaitUntilProcessIsTerminated(self._omnisharp_phandle, timeout=5) LOGGER.info('OmniSharp server stopped') except Exception: LOGGER.exception('Error while stopping OmniSharp server') self._CleanUp()
def GetFileContents( request_data, filename ):
  """Returns the contents of the absolute path |filename| as a unicode
  string. If the file contents exist in |request_data| (i.e. it is open and
  potentially modified/dirty in the user's editor), then it is returned,
  otherwise the file is read from disk (assuming a UTF-8 encoding) and its
  contents returned."""
  file_data = request_data[ 'file_data' ]
  if filename in file_data:
    return ToUnicode( file_data[ filename ][ 'contents' ] )

  try:
    return ToUnicode( ReadFile( filename ) )
  except IOError:
    LOGGER.exception( 'Error reading file %s', filename )
    return ''
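# Illustrative sketch (not part of ycmd): the buffer-first lookup above,
# reproduced without ycmd's helpers. 'open_buffers' is a hypothetical mapping
# of file paths to the editor's unsaved contents; anything not in it is read
# from disk as UTF-8, and unreadable files fall back to an empty string.
import logging

LOG = logging.getLogger(__name__)


def get_file_contents(open_buffers, filename):
  if filename in open_buffers:
    return open_buffers[filename]
  try:
    with open(filename, encoding='utf-8') as f:
      return f.read()
  except OSError:
    LOG.exception('Error reading file %s', filename)
    return ''


# Usage: the dirty buffer wins over the on-disk copy.
# get_file_contents({'/tmp/a.py': 'unsaved text'}, '/tmp/a.py')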
def ServerIsHealthy(self):
  """ Check if racerd is alive AND ready to serve requests. """
  if not self._ServerIsRunning():
    LOGGER.debug('Racerd not running')
    return False
  try:
    self._GetResponse('/ping', method='GET')
    return True
  # Do NOT make this except clause more generic! If you need to catch more
  # exception types, list them all out. Having `Exception` here caused FORTY
  # HOURS OF DEBUGGING.
  except requests.exceptions.ConnectionError:
    LOGGER.exception('Failed to connect to racerd')
    return False
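# Illustrative sketch (not part of ycmd): the comment above argues for catching
# only the exact exception types a health check can legitimately raise, so that
# programming errors still surface instead of being swallowed. The ping URL
# below is a hypothetical local endpoint.
import logging

import requests

LOG = logging.getLogger(__name__)


def server_is_healthy(ping_url='http://127.0.0.1:8080/ping'):
  try:
    response = requests.get(ping_url, timeout=1)
    return response.status_code == 200
  # Deliberately narrow: only failures to reach the server count as
  # "unhealthy"; anything else (say, a malformed URL raising ValueError)
  # should propagate and get fixed.
  except (requests.exceptions.ConnectionError,
          requests.exceptions.Timeout):
    LOG.exception('Failed to connect to %s', ping_url)
    return False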
def _FilterUnchangedTagFiles( self, tag_files ):
  for tag_file in tag_files:
    try:
      current_mtime = os.path.getmtime( tag_file )
    except Exception:
      LOGGER.exception( 'Error while getting %s last modification time',
                        tag_file )
      continue
    last_mtime = self._tags_file_last_mtime[ tag_file ]

    # We don't want to repeatedly process the same file over and over; we
    # only process it if it's changed since the last time we looked at it.
    if current_mtime <= last_mtime:
      continue

    self._tags_file_last_mtime[ tag_file ] = current_mtime
    yield tag_file
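# Illustrative sketch (not part of ycmd): the same mtime-based change
# detection as a standalone generator. A defaultdict(int) makes every file
# look "changed" the first time it is seen, since any real mtime exceeds 0.
import logging
import os
from collections import defaultdict

LOG = logging.getLogger(__name__)


class ChangedFileFilter:
  def __init__(self):
    self._last_mtime = defaultdict(int)

  def changed_files(self, paths):
    for path in paths:
      try:
        current_mtime = os.path.getmtime(path)
      except OSError:
        LOG.exception('Error while getting %s last modification time', path)
        continue
      if current_mtime <= self._last_mtime[path]:
        continue
      self._last_mtime[path] = current_mtime
      yield path


# Usage: a second call yields nothing unless the file was touched in between.
# f = ChangedFileFilter()
# list(f.changed_files(['tags']))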
def _StopServer(self): """Stop the Gocode server.""" with self._gocode_lock: if self._ServerIsRunning(): LOGGER.info('Stopping Gocode server with PID %s', self._gocode_handle.pid) try: self._ExecuteCommand([ self._gocode_binary_path, '-sock', 'tcp', '-addr', self._gocode_host, 'close' ]) utils.WaitUntilProcessIsTerminated(self._gocode_handle, timeout=5) LOGGER.info('Gocode server stopped') except Exception: LOGGER.exception('Error while stopping Gocode server') self._CleanUp()
def _Reset(self):
  if self._workspace_path and self._use_clean_workspace:
    try:
      shutil.rmtree(self._workspace_path)
    except OSError:
      LOGGER.exception('Failed to clean up workspace dir %s',
                       self._workspace_path)

  self._launcher_path = _PathToLauncherJar()
  self._launcher_config = None
  self._workspace_path = None
  self._java_project_dir = None
  self._received_ready_message = threading.Event()
  self._server_init_status = 'Not started'
  self._started_message_sent = False

  super(JavaCompleter, self)._Reset()
def Shutdown(self):
  with self._server_state_mutex:
    LOGGER.info('Shutting down %s...', self.GetServerName())

    # Tell the connection to expect the server to disconnect.
    if self._connection:
      self._connection.Stop()

    if not self.ServerIsHealthy():
      LOGGER.info('%s is not running', self.GetServerName())
      self._Reset()
      return

    LOGGER.info('Stopping %s with PID %s',
                self.GetServerName(),
                self._server_handle.pid)

    try:
      self.ShutdownServer()

      # By this point, the server should have shut down and terminated. To
      # ensure that isn't blocked, we close all of our connections and wait
      # for the process to exit.
      #
      # If, after a small delay, the server has not shut down we do NOT kill
      # it; we expect that it will shut itself down eventually. This is
      # predominantly due to strange process behaviour on Windows.
      if self._connection:
        self._connection.Close()

      utils.WaitUntilProcessIsTerminated(self._server_handle, timeout=15)

      LOGGER.info('%s stopped', self.GetServerName())
    except Exception:
      LOGGER.exception('Error while stopping %s', self.GetServerName())
      # We leave the process running. Hopefully it will eventually die of its
      # own accord.

    # Tidy up our internal state, even if the completer server didn't close
    # down cleanly.
    self._Reset()
def _CollectExtensionBundles(extension_path):
  extension_bundles = []

  for extension_dir in extension_path:
    if not os.path.isdir(extension_dir):
      LOGGER.info('extension directory does not exist: {0}'.format(
          extension_dir))
      continue

    for path in os.listdir(extension_dir):
      path = os.path.join(extension_dir, path)
      manifest_file = os.path.join(path, 'package.json')

      if not os.path.isdir(path) or not os.path.isfile(manifest_file):
        LOGGER.debug('{0} is not an extension directory'.format(path))
        continue

      manifest_json = utils.ReadFile(manifest_file)
      try:
        manifest = json.loads(manifest_json)
      except ValueError:
        LOGGER.exception('Could not load bundle {0}'.format(manifest_file))
        continue

      if ('contributes' not in manifest or
          'javaExtensions' not in manifest['contributes'] or
          not isinstance(manifest['contributes']['javaExtensions'], list)):
        LOGGER.info('Bundle {0} is not a java extension'.format(manifest_file))
        continue

      LOGGER.info('Found bundle: {0}'.format(manifest_file))

      extension_bundles.extend(
          [os.path.join(path, p)
           for p in manifest['contributes']['javaExtensions']])

  return extension_bundles
def _GoToDefinition(self, request_data):
  filename = request_data['filepath']
  LOGGER.info('Godef GoTo request %s', filename)

  contents = utils.ToBytes(
      request_data['file_data'][filename]['contents'])
  offset = _ComputeOffset(contents,
                          request_data['line_num'],
                          request_data['column_num'])

  try:
    stdout = self._ExecuteCommand(
        [self._godef_binary_path,
         '-i',
         '-f={}'.format(filename),
         '-json',
         '-o={}'.format(offset)],
        contents=contents)
  # We catch this exception type and not a more specific one because we
  # raise it in _ExecuteCommand when the command fails.
  except RuntimeError:
    LOGGER.exception('Failed to jump to definition')
    raise RuntimeError('Can\'t find a definition.')

  return self._ConstructGoToFromResponse(stdout)
def DebugInfo():
  LOGGER.info('Received debug info request')

  request_data = RequestWrap(request.json)

  has_clang_support = ycm_core.HasClangSupport()
  clang_version = ycm_core.ClangVersion() if has_clang_support else None

  filepath = request_data['filepath']
  try:
    extra_conf_path = extra_conf_store.ModuleFileForSourceFile(filepath)
    is_loaded = bool(extra_conf_path)
  except UnknownExtraConf as error:
    extra_conf_path = error.extra_conf_file
    is_loaded = False

  response = {
      'python': {
          'executable': sys.executable,
          'version': platform.python_version()
      },
      'clang': {
          'has_support': has_clang_support,
          'version': clang_version
      },
      'extra_conf': {
          'path': extra_conf_path,
          'is_loaded': is_loaded
      },
      'completer': None
  }

  try:
    response['completer'] = _GetCompleterForRequestData(
        request_data).DebugInfo(request_data)
  except Exception:
    LOGGER.exception('Error retrieving completer debug info')

  return _JsonResponse(response)
def GetCompletions():
  LOGGER.info( 'Received completion request' )

  request_data = RequestWrap( request.json )

  do_filetype_completion = _server_state.ShouldUseFiletypeCompleter(
    request_data )
  LOGGER.debug( 'Using filetype completion: %s', do_filetype_completion )

  errors = None
  completions = None

  if do_filetype_completion:
    try:
      completions = ( _server_state
                      .GetFiletypeCompleter( request_data[ 'filetypes' ] )
                      .ComputeCandidates( request_data ) )
    except Exception as exception:
      if request_data[ 'force_semantic' ]:
        # The user explicitly asked for semantic completion, so just pass the
        # error back.
        raise

      # Store the error to be returned with results from the identifier
      # completer.
      LOGGER.exception( 'Exception from semantic completer (using general)' )
      stack = traceback.format_exc()
      errors = [ BuildExceptionResponse( exception, stack ) ]

  if not completions and not request_data[ 'force_semantic' ]:
    completions = _server_state.GetGeneralCompleter().ComputeCandidates(
      request_data )

  return _JsonResponse(
    BuildCompletionResponse( completions if completions else [],
                             request_data[ 'start_column' ],
                             errors = errors ) )