def end(self):
    """This pants run is over, so stop tracking it.

    Note: If end() has been called once, subsequent calls are no-ops.
    """
    if self._background_worker_pool:
        if self._aborted:
            # On abort, kill background work immediately rather than draining it.
            self.log(Report.INFO, "Aborting background workers.")
            self._background_worker_pool.abort()
        else:
            # Normal completion: let in-flight background work drain.
            self.log(Report.INFO, "Waiting for background workers to finish.")
            self._background_worker_pool.shutdown()
        self.end_workunit(self._background_root_workunit)
    # Tear down the shared subprocess pool; pass along whether we were aborted.
    SubprocPool.shutdown(self._aborted)
    # Run a dummy work unit to write out one last timestamp.
    with self.new_workunit("complete"):
        pass
    self.end_workunit(self._main_root_workunit)
    # Overall outcome is the worse of the main and background outcomes
    # (min() presumably maps lower values to worse outcomes — TODO confirm
    # against WorkUnit's outcome constants).
    outcome = self._main_root_workunit.outcome()
    if self._background_root_workunit:
        outcome = min(outcome, self._background_root_workunit.outcome())
    outcome_str = WorkUnit.outcome_string(outcome)
    log_level = RunTracker._log_levels[outcome]
    self.log(log_level, outcome_str)
    # Only record the outcome once; an earlier failure may already have set it.
    if self.run_info.get_info('outcome') is None:
        # If the goal is clean-all then the run info dir no longer exists, so ignore that error.
        self.run_info.add_info('outcome', outcome_str, ignore_errors=True)
    self.report.close()
    self.store_stats()
def shutdown_worker_pool(self):
    """Shut down the shared SubprocPool.

    N.B. This exists only for internal use and to afford for fork()-safe operation in pantsd.

    :API: public
    """
    # Propagate whether this run was aborted so the pool can tear down accordingly.
    was_aborted = self._aborted
    SubprocPool.shutdown(was_aborted)
def end(self):
    """This pants run is over, so stop tracking it.

    Note: If end() has been called once, subsequent calls are no-ops.
    """
    if self._background_worker_pool:
        if self._aborted:
            # On abort, kill background work immediately rather than draining it.
            self.log(Report.INFO, "Aborting background workers.")
            self._background_worker_pool.abort()
        else:
            # Normal completion: let in-flight background work drain.
            self.log(Report.INFO, "Waiting for background workers to finish.")
            self._background_worker_pool.shutdown()
        self.report.end_workunit(self._background_root_workunit)
        self._background_root_workunit.end()
    if self._foreground_worker_pool:
        if self._aborted:
            self.log(Report.INFO, "Aborting foreground workers.")
            self._foreground_worker_pool.abort()
        else:
            self.log(Report.INFO, "Waiting for foreground workers to finish.")
            self._foreground_worker_pool.shutdown()
    # Tear down the shared subprocess pool; pass along whether we were aborted.
    SubprocPool.shutdown(self._aborted)
    self.report.end_workunit(self._main_root_workunit)
    self._main_root_workunit.end()
    # Overall outcome is the worse of the main and background outcomes
    # (min() presumably maps lower values to worse outcomes — TODO confirm
    # against WorkUnit's outcome constants).
    outcome = self._main_root_workunit.outcome()
    if self._background_root_workunit:
        outcome = min(outcome, self._background_root_workunit.outcome())
    outcome_str = WorkUnit.outcome_string(outcome)
    # Map the outcome onto a report log level (worst outcomes -> ERROR, best -> INFO).
    log_level = WorkUnit.choose_for_outcome(outcome, Report.ERROR, Report.ERROR,
                                            Report.WARN, Report.INFO, Report.INFO)
    self.log(log_level, outcome_str)
    # Only record the outcome once; an earlier failure may already have set it.
    if self.run_info.get_info('outcome') is None:
        try:
            self.run_info.add_info('outcome', outcome_str)
        except IOError:
            pass  # If the goal is clean-all then the run info dir no longer exists...
    self.report.close()
    self.upload_stats()
def subproc_map(self, f, items):
    """Map function `f` over `items` in subprocesses and return the result.

    :param f: A multiproc-friendly (importable) work function.
    :param items: An iterable of pickleable arguments to f.
    :returns: The list of results, in the same order as `items`.
    :raises KeyboardInterrupt: Re-raised after forcibly shutting down the pool.
    """
    try:
        # Pool.map (and async_map().get() w/o timeout) can miss SIGINT.
        # See: http://stackoverflow.com/a/1408476, http://bugs.python.org/issue8844
        # Instead, we map_async(...), wait *with a timeout* until ready, then .get()
        # NB: in 2.x, wait() with timeout wakes up often to check, burning CPU. Oh well.
        res = SubprocPool.foreground().map_async(f, items)
        while not res.ready():
            res.wait(60)  # Repeatedly wait for up to a minute.
            if not res.ready():
                # NOTE(review): elsewhere in this file `self.log` is called as a
                # method (self.log(level, msg)); here it is used as a logger
                # object — confirm which one this class actually provides.
                self.log.debug('subproc_map result still not ready...')
        return res.get()
    except KeyboardInterrupt:
        # A pending map can hang on Ctrl-C; tear the pool down hard, then
        # re-raise so callers still see the interrupt.
        SubprocPool.shutdown(True)
        raise
def subproc_map(self, f, items):
    """Map function `f` over `items` in subprocesses and return the result.

    :param f: A multiproc-friendly (importable) work function.
    :param items: An iterable of pickleable arguments to f.
    """
    try:
        # Pool.map (and async_map().get() without a timeout) can miss SIGINT.
        # See: http://stackoverflow.com/a/1408476, http://bugs.python.org/issue8844
        # So instead we map_async(...) and poll with a timeout until the result
        # is ready, only then calling .get().
        # NB: in 2.x, wait() with a timeout wakes up often to check, burning CPU. Oh well.
        result = SubprocPool.foreground().map_async(f, items)
        while not result.ready():
            result.wait(60)  # Wake up at least once a minute.
            if result.ready():
                break
            self.log.debug('subproc_map result still not ready...')
        return result.get()
    except KeyboardInterrupt:
        # Tear the pool down hard and let the interrupt propagate to the caller.
        SubprocPool.shutdown(True)
        raise