def join(self):
    """Block until every submitted task has been processed.

    Drains one result tuple per finished task from ``result_queue``,
    dispatches it to the registered result callback, and re-raises any
    worker exception as :exc:`SphinxParallelError`.
    """
    while self._nprocessed < self._nthreads:
        finished_id, task_arg, exc, payload = self.result_queue.get()
        del self._threads[finished_id]
        if exc:
            raise SphinxParallelError(*payload)
        callback = self._result_funcs.pop(finished_id)
        callback(task_arg, payload)
        self._nprocessed += 1
    # there shouldn't be any threads left...
    for leftover in self._threads.values():
        leftover.join()
def _join_one(self): for tid, pipe in iteritems(self._precvs): if pipe.poll(): exc, result = pipe.recv() if exc: raise SphinxParallelError(*result) self._result_funcs.pop(tid)(self._args.pop(tid), result) self._procs[tid].join() self._pworking -= 1 break else: time.sleep(0.02) while self._precvsWaiting and self._pworking < self.nproc: newtid, newprecv = self._precvsWaiting.popitem() self._precvs[newtid] = newprecv self._procs[newtid].start() self._pworking += 1
def _join_one(self): # type: () -> None for tid, pipe in self._precvs.items(): if pipe.poll(): exc, logs, result = pipe.recv() if exc: raise SphinxParallelError(*result) for log in logs: logger.handle(log) self._result_funcs.pop(tid)(self._args.pop(tid), result) self._procs[tid].join() self._precvs.pop(tid) self._pworking -= 1 break else: time.sleep(0.02) while self._precvsWaiting and self._pworking < self.nproc: newtid, newprecv = self._precvsWaiting.popitem() self._precvs[newtid] = newprecv self._procs[newtid].start() self._pworking += 1
def add_task(self, task_func, arg=None, result_func=None):
    """Schedule *task_func* to run in a new daemon thread.

    :param task_func: callable executed by the worker thread
    :param arg: optional argument forwarded to the task and callback
    :param result_func: optional ``(arg, result)`` callback invoked
        when the task's result is collected; defaults to a no-op
    :raises SphinxParallelError: if an already-finished task failed
    """
    tid = self._taskid
    self._taskid += 1
    # Throttle: blocks until a worker slot is free.
    self._semaphore.acquire()
    thread = threading.Thread(target=self._process_thread,
                              args=(tid, task_func, arg))
    # 'daemon' attribute instead of setDaemon(), which is deprecated
    # since Python 3.10.
    thread.daemon = True
    thread.start()
    self._nthreads += 1
    self._threads[tid] = thread
    self._result_funcs[tid] = result_func or (lambda *x: None)
    # Opportunistically drain one already-available result so callers
    # that only add tasks still make processing progress.
    try:
        tid, arg, exc, result = self.result_queue.get(False)
    except queue.Empty:
        pass
    else:
        del self._threads[tid]
        if exc:
            raise SphinxParallelError(*result)
        self._result_funcs.pop(tid)(arg, result)
        self._nprocessed += 1
def _join_one(self) -> bool: joined_any = False for tid, pipe in self._precvs.items(): if pipe.poll(): exc, logs, result = pipe.recv() if exc: raise SphinxParallelError(*result) for log in logs: logger.handle(log) self._result_funcs.pop(tid)(self._args.pop(tid), result) self._procs[tid].join() self._precvs.pop(tid) self._pworking -= 1 joined_any = True break while self._precvsWaiting and self._pworking < self.nproc: newtid, newprecv = self._precvsWaiting.popitem() self._precvs[newtid] = newprecv self._procs[newtid].start() self._pworking += 1 return joined_any