def _CallGlobalExtraConfMethod( function_name ):
  global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()
  if not ( global_ycm_extra_conf and
           os.path.exists( global_ycm_extra_conf ) ):
    LOGGER.debug( 'No global extra conf, not calling method %s',
                  function_name )
    return

  try:
    module = Load( global_ycm_extra_conf, force = True )
  except Exception:
    LOGGER.exception( 'Error occurred while loading global extra conf %s',
                      global_ycm_extra_conf )
    return

  if not module or not hasattr( module, function_name ):
    LOGGER.debug( 'Global extra conf not loaded or no function %s',
                  function_name )
    return

  try:
    LOGGER.info( 'Calling global extra conf method %s on conf file %s',
                 function_name, global_ycm_extra_conf )
    getattr( module, function_name )()
  except Exception:
    LOGGER.exception( 'Error occurred while calling global extra conf method '
                      '%s on conf file %s',
                      function_name, global_ycm_extra_conf )


def _CleanUp( self ):
  if not self._server_keep_logfiles:
    if self._server_stderr:
      utils.RemoveIfExists( self._server_stderr )
      self._server_stderr = None

  if self._workspace_path and self._use_clean_workspace:
    try:
      shutil.rmtree( self._workspace_path )
    except OSError:
      LOGGER.exception( 'Failed to clean up workspace dir %s',
                        self._workspace_path )

  self._launcher_path = _PathToLauncherJar()
  self._launcher_config = _LauncherConfiguration()
  self._workspace_path = None
  self._java_project_dir = None
  self._received_ready_message = threading.Event()
  self._server_init_status = 'Not started'
  self._server_started = False

  self._server_handle = None
  self._connection = None
  self._started_message_sent = False

  self.ServerReset()


def FiletypeCompletionAvailable( self, filetypes ):
  try:
    self.GetFiletypeCompleter( filetypes )
    return True
  except Exception:
    LOGGER.exception( 'Semantic completion not available for %s', filetypes )
    return False


def GetReady():
  LOGGER.info( 'Received ready request' )
  if request.query.subserver:
    filetype = request.query.subserver
    completer = _server_state.GetFiletypeCompleter( [ filetype ] )
    return _JsonResponse( completer.ServerIsReady() )
  return _JsonResponse( True )


def _StartServer( self ):
  with self._tsserver_lock:
    if self._ServerIsRunning():
      return

    self._logfile = utils.CreateLogfile( LOGFILE_FORMAT )
    tsserver_log = '-file {path} -level {level}'.format( path = self._logfile,
                                                         level = _LogLevel() )
    # TSServer gets the configuration for the log file through the
    # environment variable 'TSS_LOG'. This seems to be undocumented but
    # looking at the source code it seems like this is the way:
    # https://github.com/Microsoft/TypeScript/blob/8a93b489454fdcbdf544edef05f73a913449be1d/src/server/server.ts#L136
    environ = os.environ.copy()
    utils.SetEnviron( environ, 'TSS_LOG', tsserver_log )

    LOGGER.info( 'TSServer log file: %s', self._logfile )

    # We need to redirect the error stream to the output one on Windows.
    self._tsserver_handle = utils.SafePopen( self._tsserver_executable,
                                             stdin = subprocess.PIPE,
                                             stdout = subprocess.PIPE,
                                             stderr = subprocess.STDOUT,
                                             env = environ )

    self._tsserver_is_running.set()

    utils.StartThread( self._SetServerVersion )


def FindRacerdBinary( user_options ):
  """
  Find the path to the racerd binary.

  This function prefers the 'racerd_binary_path' value provided in
  user_options if available. It then falls back to ycmd's own racerd build.
  If that is not found either, it attempts to use a racerd binary found on
  the current PATH.
  """
  racerd_user_binary = user_options.get( 'racerd_binary_path' )
  if racerd_user_binary:
    # The user has explicitly specified a path.
    if os.path.isfile( racerd_user_binary ):
      return racerd_user_binary
    LOGGER.warning( 'User-provided racerd_binary_path does not exist' )

  if os.path.isfile( RACERD_BINARY_RELEASE ):
    return RACERD_BINARY_RELEASE

  # We want to support using the debug binary for the sake of debugging; also,
  # building the release version on Travis takes too long.
  if os.path.isfile( RACERD_BINARY_DEBUG ):
    LOGGER.warning( 'Using racerd DEBUG binary; performance will suffer!' )
    return RACERD_BINARY_DEBUG

  return utils.PathToFirstExistingExecutable( [ 'racerd' ] )


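# Illustrative sketch (not part of ycmd): the resolution order described in
# the docstring above, for a hypothetical user_options dict. The path shown
# is made up.
#
#   user_options = { 'racerd_binary_path': '/opt/racerd/bin/racerd' }
#   FindRacerdBinary( user_options )
#   # -> '/opt/racerd/bin/racerd' if that file exists; otherwise falls back
#   #    to RACERD_BINARY_RELEASE, then RACERD_BINARY_DEBUG, then the first
#   #    'racerd' executable on the PATH (None if nothing is found).

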
def _ChooseOmnisharpPort( self ):
  if not self._omnisharp_port:
    if self._desired_omnisharp_port:
      self._omnisharp_port = int( self._desired_omnisharp_port )
    else:
      self._omnisharp_port = utils.GetUnusedLocalhostPort()
  LOGGER.info( 'using port %s', self._omnisharp_port )


def RunCompleterCommand():
  LOGGER.info( 'Received command request' )
  request_data = RequestWrap( request.json )
  completer = _GetCompleterForRequestData( request_data )

  return _JsonResponse( completer.OnUserCommand(
      request_data[ 'command_arguments' ],
      request_data ) )


def Keepalive( check_interval_seconds ):
  while True:
    time.sleep( check_interval_seconds )

    LOGGER.debug( 'Keeping subservers alive' )
    loaded_completers = _server_state.GetLoadedFiletypeCompleters()
    for completer in loaded_completers:
      completer.ServerIsHealthy()


def ShouldEnableTypeScriptCompleter():
  tsserver = FindTSServer()
  if not tsserver:
    LOGGER.error( 'Not using TypeScript completer: TSServer not installed '
                  'in %s', TSSERVER_DIR )
    return False
  LOGGER.info( 'Using TypeScript completer with %s', tsserver )
  return True


def _RestartServer( self ):
  LOGGER.debug( 'Restarting racerd' )

  with self._server_state_lock:
    if self._ServerIsRunning():
      self._StopServer()
    self._StartServer()

  LOGGER.debug( 'Racerd restarted' )


def _CurrentLine( self ):
  try:
    return self[ 'lines' ][ self[ 'line_num' ] - 1 ]
  except IndexError:
    LOGGER.exception( 'Client returned invalid line number %s '
                      'for file %s. Assuming empty',
                      self[ 'line_num' ],
                      self[ 'filepath' ] )
    return ''


def _GoToDefinition( self, request_data ):
  try:
    definition = self._GetResponse( '/find_definition', request_data )
    return responses.BuildGoToResponse( definition[ 'file_path' ],
                                        definition[ 'line' ],
                                        definition[ 'column' ] + 1 )
  except Exception:
    LOGGER.exception( 'Failed to find definition' )
    raise RuntimeError( 'Can\'t jump to definition.' )


def _GetDoc( self, request_data ):
  try:
    definition = self._GetResponse( '/find_definition', request_data )

    docs = [ definition[ 'context' ], definition[ 'docs' ] ]
    return responses.BuildDetailedInfoResponse( '\n---\n'.join( docs ) )
  except Exception:
    LOGGER.exception( 'Failed to find definition' )
    raise RuntimeError( 'Can\'t lookup docs.' )


def _GoTo( self, request_data ):
  try:
    return self._GoToDefinition( request_data )
  except Exception:
    LOGGER.exception( 'Failed to jump to definition' )

  try:
    return self._GoToDeclaration( request_data )
  except Exception:
    LOGGER.exception( 'Failed to jump to declaration' )
    raise RuntimeError( 'Can\'t jump to definition or declaration.' )


def FilterAndSortCandidates():
  LOGGER.info( 'Received filter & sort request' )
  # Not using RequestWrap because there is no need; the requests coming in
  # are not like the usual requests we handle.
  request_data = request.json

  return _JsonResponse( FilterAndSortCandidatesWrap(
      request_data[ 'candidates' ],
      request_data[ 'sort_property' ],
      request_data[ 'query' ],
      _server_state.user_options[ 'max_num_candidates' ] ) )


def HandleNotificationInPollThread( self, notification ):
  if notification[ 'method' ] == 'language/status':
    message_type = notification[ 'params' ][ 'type' ]

    if message_type == 'Started':
      LOGGER.info( 'jdt.ls initialized successfully' )
      self._server_init_status = notification[ 'params' ][ 'message' ]
      self._received_ready_message.set()
    elif not self._received_ready_message.is_set():
      self._server_init_status = notification[ 'params' ][ 'message' ]

  super( JavaCompleter, self ).HandleNotificationInPollThread( notification )


def _WriteRequest( self, request ):
  """Write a request to TSServer stdin."""
  serialized_request = utils.ToBytes( json.dumps( request ) + '\n' )
  with self._write_lock:
    try:
      self._tsserver_handle.stdin.write( serialized_request )
      self._tsserver_handle.stdin.flush()
    # IOError is an alias of OSError in Python 3.
    except ( AttributeError, IOError ):
      LOGGER.exception( SERVER_NOT_RUNNING_MESSAGE )
      raise RuntimeError( SERVER_NOT_RUNNING_MESSAGE )


def _WatchdogMain( self ):
  while True:
    time.sleep( self._check_interval_seconds )

    # We make sure we don't terminate if we skipped a wakeup time. If we
    # skipped a check, that means the machine probably went to sleep and the
    # client might still actually be up. In such cases, we give it one more
    # wait interval to contact us before we die.
    if ( self._TimeSinceLastRequest() > self._idle_suicide_seconds and
         self._TimeSinceLastWakeup() < 2 * self._check_interval_seconds ):
      LOGGER.info( 'Shutting down server due to inactivity' )
      ServerShutdown()

    self._UpdateLastWakeupTime()


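# Illustrative sketch (not part of ycmd): worked example of the shutdown
# condition above, using made-up values. With _check_interval_seconds = 600
# and _idle_suicide_seconds = 1800, the server shuts down only if no request
# has arrived for more than 1800s AND the previous wakeup happened less than
# 1200s ago, i.e. the machine did not just resume from sleep and skip checks.

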
def _AddIdentifier( self, identifier, request_data ):
  filetype = request_data[ 'first_filetype' ]
  filepath = request_data[ 'filepath' ]

  if not filetype or not filepath or not identifier:
    return

  vector = ycm_core.StringVector()
  vector.append( ToCppStringCompatible( identifier ) )
  LOGGER.info( 'Adding ONE buffer identifier for file: %s', filepath )
  self._completer.AddIdentifiersToDatabase( vector,
                                            ToCppStringCompatible( filetype ),
                                            ToCppStringCompatible( filepath ) )


def _GetSettings( self, module, client_data ):
  # We don't warn the user if no extra conf file is found.
  if module:
    if hasattr( module, 'Settings' ):
      settings = module.Settings( language = 'python',
                                  client_data = client_data )
      if settings is not None:
        return settings
    LOGGER.debug( 'No Settings function defined in %s', module.__file__ )
  return {
    # NOTE: this option is only kept for backward compatibility. Setting the
    # Python interpreter path through the extra conf file is preferred.
    'interpreter_path': self.user_options[ 'python_binary_path' ]
  }


def _SolutionTestCheckHeuristics( candidates, tokens, i ):
  """ Test if one of the candidate files stands out """
  path = os.path.join( *tokens[ : i + 1 ] )
  selection = None

  # If there is just one file here, use that.
  if len( candidates ) == 1:
    selection = os.path.join( path, candidates[ 0 ] )
    LOGGER.info( 'Selected solution file %s as it is the first one found',
                 selection )

  # There is more than one file, try some hints to decide.
  # 1. Is there a solution named just like the subdirectory with the source?
  if ( not selection and i < len( tokens ) - 1 and
       u'{0}.sln'.format( tokens[ i + 1 ] ) in candidates ):
    selection = os.path.join( path, u'{0}.sln'.format( tokens[ i + 1 ] ) )
    LOGGER.info( 'Selected solution file %s as it matches source subfolder',
                 selection )

  # 2. Is there a solution named just like the directory containing the
  #    solution?
  if not selection and u'{0}.sln'.format( tokens[ i ] ) in candidates:
    selection = os.path.join( path, u'{0}.sln'.format( tokens[ i ] ) )
    LOGGER.info( 'Selected solution file %s as it matches containing folder',
                 selection )

  if not selection:
    LOGGER.error( 'Could not decide between multiple solution files:\n%s',
                  candidates )

  return selection


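# Illustrative sketch (not part of ycmd): assuming hypothetical tokens for a
# source file under /code/solutions/app/, with i pointing at the 'solutions'
# path component, the heuristics above would pick, in order of preference:
#   1. the only candidate, if exactly one .sln file exists there;
#   2. /code/solutions/app.sln        (named like the source subfolder);
#   3. /code/solutions/solutions.sln  (named like the containing folder);
# and log an error (returning None) if several candidates remain unmatched.

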
def GetFileContents( request_data, filename ):
  """Returns the contents of the absolute path |filename| as a unicode
  string. If the file contents exist in |request_data| (i.e. it is open and
  potentially modified/dirty in the user's editor), then it is returned,
  otherwise the file is read from disk (assuming a UTF-8 encoding) and its
  contents returned."""
  file_data = request_data[ 'file_data' ]
  if filename in file_data:
    return ToUnicode( file_data[ filename ][ 'contents' ] )

  try:
    return ToUnicode( ReadFile( filename ) )
  except IOError:
    LOGGER.exception( 'Error reading file %s', filename )
    return ''


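# Illustrative sketch (not part of ycmd): how GetFileContents prefers the
# in-editor buffer over the on-disk file. The request_data dict below is a
# hypothetical, minimal example of the structure the function expects.
#
#   request_data = {
#     'file_data': {
#       '/home/user/project/foo.py': { 'contents': 'dirty buffer text' }
#     }
#   }
#   GetFileContents( request_data, '/home/user/project/foo.py' )
#   # -> 'dirty buffer text' (buffer contents, even if the disk file differs)
#   GetFileContents( request_data, '/home/user/project/bar.py' )
#   # -> contents read from disk, or '' if the file cannot be read

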
def ShouldEnableClangdCompleter( user_options ):
  third_party_clangd = Get3rdPartyClangd()
  # User disabled clangd explicitly.
  if user_options[ 'use_clangd' ].lower() == 'never':
    return False
  # The user hasn't downloaded clangd and use_clangd is in auto mode.
  if not third_party_clangd and user_options[ 'use_clangd' ].lower() == 'auto':
    return False

  clangd_command = GetClangdCommand( user_options, third_party_clangd )
  if not clangd_command:
    LOGGER.warning( 'Not using clangd: unable to find clangd binary' )
    return False

  LOGGER.info( 'Using clangd from %s', clangd_command )
  return True


def ShouldEnableClangdCompleter( user_options ):
  """Checks whether clangd should be enabled or not.

  - Returns True iff an up-to-date binary exists either in
    `clangd_binary_path` or in the third-party folder, and `use_clangd` is
    not set to `0`.
  """
  # User disabled clangd explicitly.
  if not user_options[ 'use_clangd' ]:
    return False

  clangd_command = GetClangdCommand( user_options )
  if not clangd_command:
    return False
  LOGGER.info( 'Computed Clangd command: %s', clangd_command )
  return True


def _GetSysPath( self, request_data, environment ):
  settings = { 'sys_path': [] }
  settings.update( self._SettingsForRequest( request_data ) )
  settings[ 'interpreter_path' ] = environment.executable
  settings[ 'sys_path' ].extend( environment.get_sys_path() )

  filepath = request_data[ 'filepath' ]
  module = extra_conf_store.ModuleForSourceFile( filepath )
  # We don't warn the user if no extra conf file is found.
  if module:
    if hasattr( module, 'PythonSysPath' ):
      return module.PythonSysPath( **settings )
    LOGGER.debug( 'No PythonSysPath function defined in %s', module.__file__ )
  return settings[ 'sys_path' ]


def ServerIsHealthy( self ):
  """ Check if racerd is alive AND ready to serve requests. """
  if not self._ServerIsRunning():
    LOGGER.debug( 'Racerd not running' )
    return False
  try:
    self._GetResponse( '/ping', method = 'GET' )
    return True
  # Do NOT make this except clause more generic! If you need to catch more
  # exception types, list them all out. Having `Exception` here caused FORTY
  # HOURS OF DEBUGGING.
  except requests.exceptions.ConnectionError:
    LOGGER.exception( 'Failed to connect to racerd' )
    return False


def _PathToLauncherJar():
  # The file name changes between versions of eclipse, so we use a glob as
  # recommended by the language server developers. There should only be one.
  launcher_jars = glob.glob(
    os.path.abspath(
      os.path.join( LANGUAGE_SERVER_HOME,
                    'plugins',
                    'org.eclipse.equinox.launcher_*.jar' ) ) )

  LOGGER.debug( 'Found launchers: %s', launcher_jars )

  if not launcher_jars:
    return None

  return launcher_jars[ 0 ]


def wrapper( *args, **kwargs ):
  if not HostHeaderCorrect( request ):
    LOGGER.info( 'Dropping request with bad Host header' )
    abort( requests.codes.unauthorized,
           'Unauthorized, received bad Host header.' )
    return

  body = ToBytes( request.body.read() )
  if not RequestAuthenticated( request.method, request.path, body,
                               self._hmac_secret ):
    LOGGER.info( 'Dropping request with bad HMAC' )
    abort( requests.codes.unauthorized, 'Unauthorized, received bad HMAC.' )
    return
  body = callback( *args, **kwargs )
  SetHmacHeader( body, self._hmac_secret )
  return body


def _FilterUnchangedTagFiles( self, tag_files ):
  for tag_file in tag_files:
    try:
      current_mtime = os.path.getmtime( tag_file )
    except Exception:
      LOGGER.exception( 'Error while getting %s last modification time',
                        tag_file )
      continue
    last_mtime = self._tags_file_last_mtime[ tag_file ]

    # We don't want to repeatedly process the same file over and over; we
    # only process it if it's changed since the last time we looked at it.
    if current_mtime <= last_mtime:
      continue

    self._tags_file_last_mtime[ tag_file ] = current_mtime
    yield tag_file


def ResolveFixit():
  LOGGER.info( 'Received resolve_fixit request' )
  request_data = RequestWrap( request.json )
  completer = _GetCompleterForRequestData( request_data )

  return _JsonResponse( completer.ResolveFixit( request_data ) )


def _HasBinary( binary ):
  binary_path = FindBinary( binary, user_options )
  if not binary_path:
    # Log the requested binary name; binary_path is empty at this point.
    LOGGER.error( '%s binary not found', binary )
  return binary_path


def _LogLevel():
  return 'verbose' if LOGGER.isEnabledFor( logging.DEBUG ) else 'normal'


def StartServer( self,
                 request_data,
                 project_directory = None,
                 wipe_workspace = False,
                 wipe_config = False ):
  with self._server_state_mutex:
    LOGGER.info( 'Starting jdt.ls Language Server...' )

    if project_directory:
      self._java_project_dir = project_directory
    else:
      self._java_project_dir = _FindProjectDir(
        os.path.dirname( request_data[ 'filepath' ] ) )

    self._workspace_path = _WorkspaceDirForProject(
      self._workspace_root_path,
      self._java_project_dir,
      self._use_clean_workspace )

    if not self._use_clean_workspace and wipe_workspace:
      if os.path.isdir( self._workspace_path ):
        LOGGER.info( 'Wiping out workspace %s', self._workspace_path )
        shutil.rmtree( self._workspace_path )

    self._launcher_config = _LauncherConfiguration( self._workspace_root_path,
                                                    wipe_config )

    command = [
      PATH_TO_JAVA,
      '-Dfile.encoding=UTF-8',
      '-Declipse.application=org.eclipse.jdt.ls.core.id1',
      '-Dosgi.bundles.defaultStartLevel=4',
      '-Declipse.product=org.eclipse.jdt.ls.core.product',
      '-Dlog.level=ALL',
      '-jar', self._launcher_path,
      '-configuration', self._launcher_config,
      '-data', self._workspace_path,
    ]

    LOGGER.debug( 'Starting java-server with the following command: %s',
                  command )

    self._server_stderr = utils.CreateLogfile( 'jdt.ls_stderr_' )
    with utils.OpenForStdHandle( self._server_stderr ) as stderr:
      self._server_handle = utils.SafePopen( command,
                                             stdin = PIPE,
                                             stdout = PIPE,
                                             stderr = stderr )

    self._connection = (
      language_server_completer.StandardIOLanguageServerConnection(
        self._server_handle.stdin,
        self._server_handle.stdout,
        self.GetDefaultNotificationHandler() ) )

    self._connection.Start()

    try:
      self._connection.AwaitServerConnection()
    except language_server_completer.LanguageServerConnectionTimeout:
      LOGGER.error( 'jdt.ls failed to start, or did not connect '
                    'successfully' )
      self.Shutdown()
      return False

    LOGGER.info( 'jdt.ls Language Server started' )

    return True


def SignatureHelp_MultipleSignatures_test( app ):
  filepath = PathToTestFile( 'testy', 'ContinuousTest.cs' )
  contents = ReadFile( filepath )

  request = BuildRequest( line_num = 18,
                          column_num = 15,
                          filetypes = [ 'cs' ],
                          filepath = filepath,
                          contents = contents )

  with WrapOmniSharpServer( app, filepath ):
    response = app.post_json( '/signature_help', request ).json
    LOGGER.debug( 'response = %s', response )
    assert_that( response, has_entries( {
      'errors': empty(),
      'signature_help': has_entries( {
        'activeSignature': 0,
        'activeParameter': 0,
        'signatures': contains_exactly(
          SignatureMatcher( 'void ContinuousTest.Overloaded(int i, int a)',
                            [ ParameterMatcher( 31, 36 ),
                              ParameterMatcher( 38, 43 ) ] ),
          SignatureMatcher( 'void ContinuousTest.Overloaded(string s)',
                            [ ParameterMatcher( 31, 39 ) ] ),
        )
      } )
    } ) )

  request[ 'column_num' ] = 20
  with WrapOmniSharpServer( app, filepath ):
    response = app.post_json( '/signature_help', request ).json
    LOGGER.debug( 'response = %s', response )
    assert_that( response, has_entries( {
      'errors': empty(),
      'signature_help': has_entries( {
        'activeSignature': 0,
        'activeParameter': 1,
        'signatures': contains_exactly(
          SignatureMatcher( 'void ContinuousTest.Overloaded(int i, int a)',
                            [ ParameterMatcher( 31, 36 ),
                              ParameterMatcher( 38, 43 ) ] ),
          SignatureMatcher( 'void ContinuousTest.Overloaded(string s)',
                            [ ParameterMatcher( 31, 39 ) ] ),
        )
      } )
    } ) )


def Shutdown():
  LOGGER.info( 'Received shutdown request' )
  ServerShutdown()

  return _JsonResponse( True )


def IgnoreExtraConfFile():
  LOGGER.info( 'Received extra conf ignore request' )
  request_data = RequestWrap( request.json, validate = False )
  extra_conf_store.Disable( request_data[ 'filepath' ] )
  return _JsonResponse( True )


def Shutdown( self ):
  LOGGER.debug( 'Shutting down Tern server' )
  self._StopServer()


def LoadExtraConfFile():
  LOGGER.info( 'Received extra conf load request' )
  request_data = RequestWrap( request.json, validate = False )
  extra_conf_store.Load( request_data[ 'filepath' ], force = True )
  return _JsonResponse( True )


def GetDetailedDiagnostic():
  LOGGER.info( 'Received detailed diagnostic request' )
  request_data = RequestWrap( request.json )
  completer = _GetCompleterForRequestData( request_data )
  return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )


def DefinedSubcommands():
  LOGGER.info( 'Received defined subcommands request' )
  completer = _GetCompleterForRequestData( RequestWrap( request.json ) )
  return _JsonResponse( completer.DefinedSubcommands() )


def FiletypeCompletionAvailable():
  LOGGER.info( 'Received filetype completion available request' )
  return _JsonResponse( _server_state.FiletypeCompletionAvailable(
      RequestWrap( request.json )[ 'filetypes' ] ) )


def _StartServer( self, request_data ):
  with self._server_state_mutex:
    if self._server_started:
      return

    self._server_started = True

    LOGGER.info( 'Starting Tern server...' )

    self._SetServerProjectFileAndWorkingDirectory( request_data )

    self._server_port = utils.GetUnusedLocalhostPort()

    command = [ PATH_TO_NODE,
                PATH_TO_TERN_BINARY,
                '--port',
                str( self._server_port ),
                '--host',
                SERVER_HOST,
                '--persistent',
                '--no-port-file' ]

    if LOGGER.isEnabledFor( logging.DEBUG ):
      command.append( '--verbose' )

    LOGGER.debug( 'Starting tern with the following command: %s', command )

    self._server_stdout = utils.CreateLogfile(
        LOGFILE_FORMAT.format( port = self._server_port, std = 'stdout' ) )
    self._server_stderr = utils.CreateLogfile(
        LOGFILE_FORMAT.format( port = self._server_port, std = 'stderr' ) )

    # We need to open a pipe to stdin or the Tern server is killed.
    # See https://github.com/ternjs/tern/issues/740#issuecomment-203979749
    # For unknown reasons, this is only needed on Windows and for Python
    # 3.4+ on other platforms.
    with utils.OpenForStdHandle( self._server_stdout ) as stdout:
      with utils.OpenForStdHandle( self._server_stderr ) as stderr:
        self._server_handle = utils.SafePopen( command,
                                               stdin = PIPE,
                                               stdout = stdout,
                                               stderr = stderr,
                                               cwd = self._server_working_dir )

    if self._ServerIsRunning():
      LOGGER.info( 'Tern Server started with pid %d listening on port %d',
                   self._server_handle.pid, self._server_port )
      LOGGER.info( 'Tern Server log files are %s and %s',
                   self._server_stdout, self._server_stderr )

      self._do_tern_project_check = True
    else:
      LOGGER.warning( 'Tern server did not start successfully' )