示例#1
0
    def __init__(self, addr):
        """Set up the server listening on addr.

        The job pool is created first so that it can be cleanly stopped if
        binding the base server fails; worker threads are only started once
        the base server is up.
        """
        # Pool of worker processes that will execute run_command jobs.
        self._pool = AdaptiveProcessPool(run_command)
        # Used to keep track of ResponseHelpers so that the right one can be
        # used when a job result is received from the pool.
        self._last_id = 0
        self._responders = {}
        # Used to keep track of whether the server is receiving requests.
        self._taking_work = Event()

        # Cumulative counters and timing samples; _read_stats() below may
        # merge in values persisted by a previous server instance.
        self.stats = defaultdict(int)
        self.get_time_stats = defaultdict(list)
        self.put_time_stats = defaultdict(list)

        self._read_stats()

        try:
            base_server.CommandServer.__init__(self, addr, CommandHandler)
        except:
            # We need to stop the job pool to avoid the process living forever
            # waiting for jobs.
            self._pool.stop()
            raise

        # Background threads: one relays pool results to clients, the other
        # stops the server when it has been idle for too long.
        Thread(target=self.dispatch_results).start()
        Thread(target=self._watchdog).start()
示例#2
0
    def __init__(self, addr):
        """Set up the server listening on addr.

        The job pool is created first so that it can be cleanly stopped if
        binding the base server fails; worker threads are only started once
        the base server is up.
        """
        # Pool of worker processes that will execute run_command jobs.
        self._pool = AdaptiveProcessPool(run_command)
        # Used to keep track of ResponseHelpers so that the right one can be
        # used when a job result is received from the pool.
        self._last_id = 0
        self._responders = {}
        # Used to keep track of whether the server is receiving requests.
        self._taking_work = Event()

        # Cumulative counters and timing samples; _read_stats() below may
        # merge in values persisted by a previous server instance.
        self.stats = defaultdict(int)
        self.get_time_stats = defaultdict(list)
        self.put_time_stats = defaultdict(list)

        self._read_stats()

        try:
            base_server.CommandServer.__init__(self, addr, CommandHandler)
        except:
            # We need to stop the job pool to avoid the process living forever
            # waiting for jobs.
            self._pool.stop()
            raise

        # Background threads: one relays pool results to clients, the other
        # stops the server when it has been idle for too long.
        Thread(target=self.dispatch_results).start()
        Thread(target=self._watchdog).start()
示例#3
0
class CommandServer(base_server.CommandServer):
    '''
    Main sccache server.

    Validates incoming compile requests, dispatches cachable compilations
    to a process pool, relays results back to clients, and accumulates
    cache statistics that are persisted across server restarts.
    '''
    def __init__(self, addr):
        '''Set up the server listening on addr.

        The job pool is created first so that it can be cleanly stopped
        when binding the base server fails; worker threads are started
        only once the base server is up.
        '''
        self._pool = AdaptiveProcessPool(run_command)
        # Used to keep track of ResponseHelpers so that the right one can be
        # used when a job result is received from the pool.
        self._last_id = 0
        self._responders = {}
        # Used to keep track of whether the server is receiving requests.
        self._taking_work = Event()

        # Cumulative counters and timing samples; _read_stats() may merge
        # in values persisted by a previous server instance.
        self.stats = defaultdict(int)
        self.get_time_stats = defaultdict(list)
        self.put_time_stats = defaultdict(list)

        self._read_stats()

        try:
            base_server.CommandServer.__init__(self, addr, CommandHandler)
        except BaseException:
            # We need to stop the job pool to avoid the process living forever
            # waiting for jobs. Explicitly BaseException (not bare except) so
            # KeyboardInterrupt/SystemExit also trigger the cleanup; the
            # exception is always re-raised.
            self._pool.stop()
            raise

        Thread(target=self.dispatch_results).start()
        Thread(target=self._watchdog).start()

    def dispatch_job(self, request, responder):
        '''Validate a client request and queue it on the job pool.

        request is a dict with a 'cmd' command line (a list) and an
        optional 'cwd'; responder is the ResponseHelper used to answer the
        client once the job result is available. Raises ArgumentError,
        TypeError or RuntimeError on malformed requests; responds with
        status -2 for command lines that can't be cached.
        '''
        # Mark the server as not idle.
        self._taking_work.set()

        # Sanity checks on the requested command line
        cmd = request.get('cmd')
        if not cmd:
            raise ArgumentError('Command is either missing or empty')
        if not isinstance(cmd, list):
            raise TypeError('Expected list, got %s' % type(cmd))

        executable, args = cmd[0], cmd[1:]
        cwd = request.get('cwd')

        # Get a Compiler instance corresponding to that executable.
        # The cwd is necessary because sometimes the path to the executable is
        # relative.
        compiler = Compiler.from_path(executable, cwd)
        if not compiler:
            raise RuntimeError('%s is not a known compiler' % executable)

        # Parse the command line arguments in the main thread, this is fast
        # enough not to be a problem in practice, and avoids dispatching
        # compilations that can't be cached.
        parsed_args = None
        try:
            parsed_args = compiler.parse_arguments(args)
        except CannotCacheError:
            self.stats['non-cachable'] += 1
        except NotACompilationError:
            self.stats['non-compile'] += 1

        if not parsed_args:
            # Return status code -2 when the compiler result can't be cached
            # or when the compiler is invoked for something else than a
            # compilation.
            responder.respond(-2)
            return

        # Prepare job for run_command and send it to the job pool.
        self._last_id += 1
        job = {
            'id': self._last_id,
            'compiler': compiler,
            'args': args,
            'parsed_args': parsed_args,
            'cwd': cwd,
        }
        self._responders[self._last_id] = responder
        self._pool.add_job(job)

    def dispatch_results(self):
        '''Relay job results from the pool to the matching responders.

        Runs in a dedicated thread for the lifetime of the pool's result
        iterator.
        '''
        for result in self._pool.results():
            if not isinstance(result, dict):
                continue
            # Redispatch the results from the pool to the corresponding client.
            # Named job_id to avoid shadowing the id() builtin.
            job_id = result.get('id')
            stats = result.get('stats')
            if stats:
                for key in ('dns', 'connect', 'response', 'size'):
                    value = stats.get(key, 0)
                    # Results carrying a job id are cache gets; the others
                    # are cache puts.
                    if job_id:
                        self.get_time_stats[key].append(value)
                    else:
                        self.put_time_stats[key].append(value)
            # pop (rather than get) so that completed jobs don't accumulate
            # in _responders for the lifetime of the server.
            responder = self._responders.pop(job_id, None)
            status = result.get('status')
            if status:
                self.stats[status] += 1
            if not responder:
                continue
            responder.respond(
                result.get('retcode', -1),
                result.get('stdout', ''),
                result.get('stderr', ''),
            )

    def _watchdog(self):
        '''Stop the server after 600 seconds without incoming work.

        Runs in a dedicated thread; exits when the server is stopping.
        '''
        while not self.stopping:
            self._taking_work.clear()
            self._taking_work.wait(timeout=600)
            # If the server hasn't received a connection in the past 600
            # seconds, stop it.
            if not self._taking_work.is_set():
                self.stop(dump_stats=True)
                break

    def stop(self, dump_stats=False):
        '''Stop the base server, the watchdog and the job pool.

        When dump_stats is True, persist accumulated statistics to disk.
        '''
        if not self.stopping:
            base_server.CommandServer.stop(self)
            # If the watchdog is waiting for the taking_work event timeout,
            # trigger one now to unblock it and make it quit.
            self._taking_work.set()
            self._pool.stop()
            if dump_stats:
                self._dump_stats()

    def _stats_file_path(self):
        '''Return the path of the file used to persist statistics.'''
        # Use the local cache storage directory if one is given.
        # Named stats_dir to avoid shadowing the dir() builtin.
        stats_dir = os.environ.get('SCCACHE_DIR')
        if stats_dir:
            try:
                ensure_dir(stats_dir)
            except Exception:
                # Fall back to the default location when the directory
                # can't be created.
                stats_dir = None
        if not stats_dir:
            stats_dir = os.path.dirname(FILE)

        return os.path.join(stats_dir, 'sccache.stats')

    def _dump_stats(self):
        '''Persist accumulated statistics, one JSON document per line.'''
        if (not self.stats and not self.get_time_stats
                and not self.put_time_stats):
            return

        try:
            stats_file = open(self._stats_file_path(), 'w')
        except EnvironmentError:
            # Just don't dump anything if we couldn't open the stats file.
            return

        # The context manager guarantees the handle is closed even if
        # serialization or the write fails.
        with stats_file:
            stats_file.write('%s\n%s\n%s\n' % (
                json.dumps(self.stats),
                json.dumps(self.get_time_stats),
                json.dumps(self.put_time_stats),
            ))

    def _read_stats(self):
        '''Reload statistics persisted by a previous server instance.

        Best effort: any error reading or parsing the stats file is
        ignored; the file is removed after a successful read so stats
        aren't counted twice.
        '''
        # Lines in the stats file cycle through these three mappings, in
        # the same order _dump_stats writes them.
        stats = (
            self.stats,
            self.get_time_stats,
            self.put_time_stats,
        )
        try:
            stats_file = self._stats_file_path()
            with open(stats_file) as f:
                for n, line in enumerate(f):
                    stats[n % len(stats)].update(json.loads(line))
            os.remove(stats_file)
        except Exception:
            pass
示例#4
0
class CommandServer(base_server.CommandServer):
    '''
    Main sccache server.

    Validates incoming compile requests, dispatches cachable compilations
    to a process pool, relays results back to clients, and accumulates
    cache statistics that are persisted across server restarts.
    '''
    def __init__(self, addr):
        '''Set up the server listening on addr.

        The job pool is created first so that it can be cleanly stopped
        when binding the base server fails; worker threads are started
        only once the base server is up.
        '''
        self._pool = AdaptiveProcessPool(run_command)
        # Used to keep track of ResponseHelpers so that the right one can be
        # used when a job result is received from the pool.
        self._last_id = 0
        self._responders = {}
        # Used to keep track of whether the server is receiving requests.
        self._taking_work = Event()

        # Cumulative counters and timing samples; _read_stats() may merge
        # in values persisted by a previous server instance.
        self.stats = defaultdict(int)
        self.get_time_stats = defaultdict(list)
        self.put_time_stats = defaultdict(list)

        self._read_stats()

        try:
            base_server.CommandServer.__init__(self, addr, CommandHandler)
        except BaseException:
            # We need to stop the job pool to avoid the process living forever
            # waiting for jobs. Explicitly BaseException (not bare except) so
            # KeyboardInterrupt/SystemExit also trigger the cleanup; the
            # exception is always re-raised.
            self._pool.stop()
            raise

        Thread(target=self.dispatch_results).start()
        Thread(target=self._watchdog).start()

    def dispatch_job(self, request, responder):
        '''Validate a client request and queue it on the job pool.

        request is a dict with a 'cmd' command line (a list) and an
        optional 'cwd'; responder is the ResponseHelper used to answer the
        client once the job result is available. Raises ArgumentError,
        TypeError or RuntimeError on malformed requests; responds with
        status -2 for command lines that can't be cached.
        '''
        # Mark the server as not idle.
        self._taking_work.set()

        # Sanity checks on the requested command line
        cmd = request.get('cmd')
        if not cmd:
            raise ArgumentError('Command is either missing or empty')
        if not isinstance(cmd, list):
            raise TypeError('Expected list, got %s' % type(cmd))

        executable, args = cmd[0], cmd[1:]
        cwd = request.get('cwd')

        # Get a Compiler instance corresponding to that executable.
        # The cwd is necessary because sometimes the path to the executable is
        # relative.
        compiler = Compiler.from_path(executable, cwd)
        if not compiler:
            raise RuntimeError('%s is not a known compiler' % executable)

        # Parse the command line arguments in the main thread, this is fast
        # enough not to be a problem in practice, and avoids dispatching
        # compilations that can't be cached.
        parsed_args = None
        try:
            parsed_args = compiler.parse_arguments(args)
        except CannotCacheError:
            LOG.exception('could not cache')
            self.stats['non-cachable'] += 1
        except NotACompilationError:
            LOG.info('not a compilation')
            self.stats['non-compile'] += 1

        if not parsed_args:
            # Return status code -2 when the compiler result can't be cached
            # or when the compiler is invoked for something else than a
            # compilation.
            responder.respond(-2)
            return

        # Prepare job for run_command and send it to the job pool.
        self._last_id += 1
        job = {
            'id': self._last_id,
            'compiler': compiler,
            'args': args,
            'parsed_args': parsed_args,
            'cwd': cwd,
        }
        self._responders[self._last_id] = responder
        self._pool.add_job(job)

    def dispatch_results(self):
        '''Relay job results from the pool to the matching responders.

        Runs in a dedicated thread for the lifetime of the pool's result
        iterator.
        '''
        for result in self._pool.results():
            if not isinstance(result, dict):
                continue
            # Redispatch the results from the pool to the corresponding client.
            # Named job_id to avoid shadowing the id() builtin.
            job_id = result.get('id')
            stats = result.get('stats')
            if stats:
                for key in ('dns', 'connect', 'response', 'size'):
                    value = stats.get(key, 0)
                    # Results carrying a job id are cache gets; the others
                    # are cache puts.
                    if job_id:
                        self.get_time_stats[key].append(value)
                    else:
                        self.put_time_stats[key].append(value)
            # pop (rather than get) so that completed jobs don't accumulate
            # in _responders for the lifetime of the server.
            responder = self._responders.pop(job_id, None)
            status = result.get('status')
            if status:
                self.stats[status] += 1
            if not responder:
                continue
            responder.respond(
                result.get('retcode', -1),
                result.get('stdout', ''),
                result.get('stderr', ''),
            )

    def _watchdog(self):
        '''Stop the server after 600 seconds without incoming work.

        Runs in a dedicated thread; exits when the server is stopping.
        '''
        while not self.stopping:
            self._taking_work.clear()
            self._taking_work.wait(timeout=600)
            # If the server hasn't received a connection in the past 600
            # seconds, stop it.
            if not self._taking_work.is_set():
                self.stop(dump_stats=True)
                break

    def stop(self, dump_stats=False):
        '''Stop the base server, the watchdog and the job pool.

        When dump_stats is True, persist accumulated statistics to disk.
        '''
        if not self.stopping:
            base_server.CommandServer.stop(self)
            # If the watchdog is waiting for the taking_work event timeout,
            # trigger one now to unblock it and make it quit.
            self._taking_work.set()
            self._pool.stop()
            if dump_stats:
                self._dump_stats()

    def _stats_file_path(self):
        '''Return the path of the file used to persist statistics.'''
        # Use the local cache storage directory if one is given.
        # Named stats_dir to avoid shadowing the dir() builtin.
        stats_dir = os.environ.get('SCCACHE_DIR')
        if stats_dir:
            try:
                ensure_dir(stats_dir)
            except Exception:
                # Fall back to the default location when the directory
                # can't be created.
                stats_dir = None
        if not stats_dir:
            stats_dir = os.path.dirname(FILE)

        return os.path.join(stats_dir, 'sccache.stats')

    def _dump_stats(self):
        '''Persist accumulated statistics, one JSON document per line.'''
        if (not self.stats and not self.get_time_stats
                and not self.put_time_stats):
            return

        try:
            stats_file = open(self._stats_file_path(), 'w')
        except EnvironmentError:
            # Just don't dump anything if we couldn't open the stats file.
            return

        # The context manager guarantees the handle is closed even if
        # serialization or the write fails.
        with stats_file:
            stats_file.write('%s\n%s\n%s\n' % (
                json.dumps(self.stats),
                json.dumps(self.get_time_stats),
                json.dumps(self.put_time_stats),
            ))

    def _read_stats(self):
        '''Reload statistics persisted by a previous server instance.

        Best effort: any error reading or parsing the stats file is
        ignored; the file is removed after a successful read so stats
        aren't counted twice.
        '''
        # Lines in the stats file cycle through these three mappings, in
        # the same order _dump_stats writes them.
        stats = (
            self.stats,
            self.get_time_stats,
            self.put_time_stats,
        )
        try:
            stats_file = self._stats_file_path()
            with open(stats_file) as f:
                for n, line in enumerate(f):
                    stats[n % len(stats)].update(json.loads(line))
            os.remove(stats_file)
        except Exception:
            pass