async def _run_analysis(self, analysis):
    """Run a single analysis request on an available engine slot.

    ``analysis`` is a ``(request, stream)`` pair. If a request with the
    same ``instanceId``/``analysisId`` is already queued, that slot is
    reused (the new request replaces the old entry); otherwise the first
    free slot is claimed.

    :raise asyncio.QueueFull: if every slot is occupied.
    """
    request, stream = analysis

    # Prefer the slot already holding a request for this same analysis.
    for index, value in enumerate(self._requests):
        if value is not None:
            ex_request, ex_stream = value
            if (request.instanceId == ex_request.instanceId
                    and request.analysisId == ex_request.analysisId):
                break
    else:
        # No matching request in flight: claim the first empty slot.
        for index, value in enumerate(self._requests):
            if value is None:
                break
        else:
            raise QueueFull()

    try:
        log.debug('%s %s on %s', 'running', req_str(request), index)
        self._requests[index] = (request, stream)
        await self._restart_task.join()  # wait if engines are restarting
        await self._engines[index].run(request, stream)
        log.debug('%s %s', 'completed', req_str(request))
    except CancelledError:
        log.debug('%s %s', 'cancelled', req_str(request))
    except Exception as e:
        # Last-resort boundary: an engine failure must not propagate out
        # of the runner task; log it and carry on.
        log.exception(e)
    finally:
        # BUGFIX: always release the slot. Previously it was cleared only
        # on success, so a cancelled or failed run() leaked the slot
        # permanently, eventually exhausting the queue with QueueFull.
        self._requests[index] = None
async def add(self, tasks: Tuple[TTask, ...]) -> None:
    """
    add() will insert as many tasks as can be inserted until the queue fills up.
    Then it will pause until the queue is no longer full, and continue adding tasks.
    It will finally return when all tasks have been inserted.

    :param tasks: a tuple of tasks to enqueue; must not contain any task
        that is already pending in the queue
    :raise ValidationError: if ``tasks`` is not a tuple, or any task is a duplicate
    :raise ~asyncio.QueueFull: only as an internal-consistency failure (see below)
    """
    if not isinstance(tasks, tuple):
        raise ValidationError(
            f"must pass a tuple of tasks to add(), but got {tasks!r}")
    already_pending = self._tasks.intersection(tasks)
    if already_pending:
        raise ValidationError(
            f"Duplicate tasks detected: {already_pending!r} are already present in the queue"
        )

    # make sure to insert the highest-priority items first, in case queue fills up
    remaining = tuple(
        sorted((self._order_fn(task), task) for task in tasks))

    while remaining:
        num_tasks = len(self._tasks)

        if self._maxsize <= 0:
            # no cap at all, immediately insert all tasks
            open_slots = len(remaining)
        elif num_tasks < self._maxsize:
            # there is room to add at least one more task
            open_slots = self._maxsize - num_tasks
        else:
            # wait until there is room in the queue
            # NOTE: _full_lock is presumably released by the consumer when
            # tasks complete — confirm against the rest of the class.
            await self._full_lock.acquire()

            # the current number of tasks has changed, can't reuse num_tasks
            num_tasks = len(self._tasks)
            open_slots = self._maxsize - num_tasks

        # Take as many of the (already priority-sorted) tasks as fit now;
        # the rest wait for the next trip around the loop.
        queueing, remaining = remaining[:open_slots], remaining[
            open_slots:]

        for task in queueing:
            # There will always be room in _open_queue until _maxsize is reached
            try:
                self._open_queue.put_nowait(task)
            except QueueFull as exc:
                # Should be unreachable given the slot accounting above;
                # re-raise with full diagnostic state if it ever happens.
                task_idx = queueing.index(task)
                qsize = self._open_queue.qsize()
                raise QueueFull(
                    f'TaskQueue unsuccessful in adding task {task[1]!r} because qsize={qsize}, '
                    f'num_tasks={num_tasks}, maxsize={self._maxsize}, open_slots={open_slots}, '
                    f'num queueing={len(queueing)}, len(_tasks)={len(self._tasks)}, task_idx='
                    f'{task_idx}, queuing={queueing}, original msg: {exc}',
                )

        # Record the raw tasks (without their priority rank) as pending.
        unranked_queued = tuple(task for _rank, task in queueing)
        self._tasks.update(unranked_queued)

        # If we were blocked on a full queue and room has appeared, let the
        # next waiting producer proceed.
        if self._full_lock.locked() and len(self._tasks) < self._maxsize:
            self._full_lock.release()
def put_nowait(self, item: T) -> None:
    """Put an *item* into the queue without blocking.

    If no free slot is immediately available, raise
    :exc:`asyncio.QueueFull`.
    """
    if not self.full():
        # Store the item, mark unfinished work as pending again,
        # and wake one waiting getter (if any).
        self._put(item)
        self._finished.clear()
        self._wakeup_next(self._getters)
    else:
        raise QueueFull()
def get_nowait(self, max_results: int = None) -> Tuple[int, Tuple[TTask, ...]]:
    """
    Get pending tasks. If no tasks are pending, raise an exception.

    :param max_results: return up to this many pending tasks. If None, return all pending tasks.
    :return: (batch_id, tasks to attempt)
    :raise ~asyncio.QueueFull: if no tasks are available
    """
    # Nothing queued at all: fail fast rather than return an empty batch.
    if self._open_queue.empty():
        raise QueueFull("No tasks are available to get")

    pending_tasks = self._get_nowait(max_results)

    # Record the batch under a fresh id so uncompleted tasks can be
    # inferred later from _in_progress.
    batch_id = next(self._id_generator)
    self._in_progress[batch_id] = pending_tasks

    return (batch_id, pending_tasks)