def parallel(self, *tasklist):
    """Run tasks in parallel in forked subprocesses.

    Each task is a (callable, arg1, arg2, ...) tuple/list. Every task runs
    in its own forked child with a per-task log file ("<base>.<i>"); once
    all children exit, the per-task logs are appended back into the main
    log and removed.

    @param tasklist: one or more (callable, *args) tuples or lists.
    @raise error.JobError: if any of the parallel tasks raised an
            exception (the original code collected these exceptions and
            silently dropped them).
    """
    pids = []
    old_log_filename = self._logger.global_filename
    try:
        for i, task in enumerate(tasklist):
            assert isinstance(task, (tuple, list))
            # point the logger at a per-subtask file for the child
            self._logger.global_filename = old_log_filename + (".%d" % i)

            def task_func(task=task):
                # 'task' bound as a default arg to guard against
                # late-binding surprises if fork_start ever defers the call.
                # Stub out _record_indent with a process-local property so
                # the child cannot clobber the parent's indent state.
                base_record_indent = self._record_indent
                proc_local = self._job_state.property_factory(
                    '_state', '_record_indent.%d' % os.getpid(),
                    base_record_indent, namespace='client')
                self.__class__._record_indent = proc_local
                task[0](*task[1:])

            pids.append(parallel.fork_start(self.resultdir, task_func))
    finally:
        # always restore the parent's log filename (the original left it
        # pointing at the last ".%d" file)
        self._logger.global_filename = old_log_filename

    old_log_path = os.path.join(self.resultdir, old_log_filename)
    old_log = open(old_log_path, "a")
    exceptions = []
    try:
        for i, pid in enumerate(pids):
            # wait for the task to finish
            try:
                parallel.fork_waitfor(self.resultdir, pid)
            except Exception as e:
                exceptions.append(e)
            # copy the logs from the subtask into the main log
            new_log_path = old_log_path + (".%d" % i)
            if os.path.exists(new_log_path):
                new_log = open(new_log_path)
                try:
                    old_log.write(new_log.read())
                finally:
                    new_log.close()  # was leaked on a write failure
                old_log.flush()
                os.remove(new_log_path)
    finally:
        old_log.close()  # was never closed in the original

    # surface subtask failures instead of silently discarding them
    if exceptions:
        raise error.JobError(
            "%d task(s) failed in job.parallel" % len(exceptions))
def parallel(self, *tasklist):
    """Run tasks in parallel in forked subprocesses.

    Each task is a (callable, arg1, arg2, ...) tuple/list. Every task runs
    in its own forked child with a per-task log file ("<base>.<i>"); once
    all children exit, the per-task logs are appended back into the main
    log and removed.

    @param tasklist: one or more (callable, *args) tuples or lists.
    @raise error.JobError: if any of the parallel tasks raised an
            exception (the original code collected these exceptions and
            silently dropped them).
    """
    pids = []
    old_log_filename = self.log_filename
    try:
        for i, task in enumerate(tasklist):
            assert isinstance(task, (tuple, list))
            # give each subtask its own log file
            self.log_filename = old_log_filename + (".%d" % i)
            # bind 'task' as a default arg to guard against late binding
            # if fork_start ever defers calling the function
            task_func = lambda task=task: task[0](*task[1:])
            pids.append(parallel.fork_start(self.resultdir, task_func))
    finally:
        # always restore the parent's log filename (the original left it
        # pointing at the last ".%d" file)
        self.log_filename = old_log_filename

    old_log_path = os.path.join(self.resultdir, old_log_filename)
    old_log = open(old_log_path, "a")
    exceptions = []
    try:
        for i, pid in enumerate(pids):
            # wait for the task to finish
            try:
                parallel.fork_waitfor(self.resultdir, pid)
            except Exception as e:
                exceptions.append(e)
            # copy the logs from the subtask into the main log
            new_log_path = old_log_path + (".%d" % i)
            if os.path.exists(new_log_path):
                new_log = open(new_log_path)
                try:
                    old_log.write(new_log.read())
                finally:
                    new_log.close()  # was leaked on a write failure
                old_log.flush()
                os.remove(new_log_path)
    finally:
        old_log.close()  # was never closed in the original

    # surface subtask failures instead of silently discarding them
    if exceptions:
        raise error.JobError(
            "%d task(s) failed in job.parallel" % len(exceptions))
def _forkwait(self, pid, timeout=None):
    """Block until the forked child process *pid* has exited.

    @param pid (int) process id to wait for
    @param timeout (int) seconds to wait before timing out the process;
            a falsy value (None/0) means wait indefinitely
    """
    if not timeout:
        # no (or zero) timeout: plain blocking wait
        logging.debug('Waiting for pid %d', pid)
        parallel.fork_waitfor(self.resultdir, pid)
    else:
        logging.debug('Waiting for pid %d for %d seconds', pid, timeout)
        parallel.fork_waitfor_timed(self.resultdir, pid, timeout)
    logging.info('pid %d completed', pid)
def _runtest(self, url, tag, args, dargs):
    """Execute a single test in a forked subprocess and wait for it.

    Framework-typed exceptions propagate unchanged; anything else is
    re-raised as an UnhandledTestError carrying the full stack trace.
    """
    def run_in_child():
        return test.runtest(self, url, tag, args, dargs)

    try:
        child_pid = parallel.fork_start(self.resultdir, run_in_child)
        parallel.fork_waitfor(self.resultdir, child_pid)
    except error.TestBaseException:
        # These are already classified with an error type (exit_status)
        raise
    except error.JobError:
        raise  # Caught further up and turned into an ABORT.
    except Exception as e:
        # Converts all other exceptions thrown by the test regardless
        # of phase into a TestError(TestBaseException) subclass that
        # reports them with their full stack trace.
        raise error.UnhandledTestError(e)
def _runtest(self, url, tag, timeout, args, dargs):
    """Execute a single test in a forked subprocess and wait for it.

    When *timeout* is truthy the wait is bounded to that many seconds;
    otherwise the wait blocks indefinitely. Framework-typed exceptions
    propagate unchanged; anything else is re-raised as an
    UnhandledTestError carrying the full stack trace.
    """
    def run_in_child():
        return test.runtest(self, url, tag, args, dargs)

    try:
        child_pid = parallel.fork_start(self.resultdir, run_in_child)
        if not timeout:
            parallel.fork_waitfor(self.resultdir, child_pid)
        else:
            logging.debug('Waiting for pid %d for %d seconds',
                          child_pid, timeout)
            parallel.fork_waitfor_timed(self.resultdir, child_pid, timeout)
    except error.TestBaseException:
        # These are already classified with an error type (exit_status)
        raise
    except error.JobError:
        raise  # Caught further up and turned into an ABORT.
    except Exception as e:
        # Converts all other exceptions thrown by the test regardless
        # of phase into a TestError(TestBaseException) subclass that
        # reports them with their full stack trace.
        raise error.UnhandledTestError(e)