async def get_workers(request: Request, rearq: ReArq = Depends(get_rearq)):
    redis = rearq.redis
    workers_info = await redis.hgetall(constants.WORKER_KEY)
    workers = []
    for worker_name, value in workers_info.items():
        # Aggregate job counts per status for this worker.
        job_stat = (
            await JobResult.filter(worker=worker_name)
            .annotate(count=Count("job_id"))
            .group_by("job__status")
            .values("count", status="job__status")
        )
        item = {
            "name": worker_name,
            "job_stat": {job["status"]: job["count"] for job in job_stat},
        }
        item.update(json.loads(value))
        time = ms_to_datetime(item["ms"])
        item["time"] = time
        # Offline once the last heartbeat is older than the heartbeat interval
        # plus a 10-second grace period; total_seconds() avoids the day wrap-around
        # of timedelta.seconds.
        item["is_offline"] = (
            timezone.now() - time
        ).total_seconds() > constants.WORKER_HEARTBEAT_SECONDS + 10
        workers.append(item)
    return templates.TemplateResponse(
        "worker.html",
        {"request": request, "page_title": "worker", "workers": workers},
    )
async def get_tasks(request: Request, rearq: ReArq = Depends(get_rearq)):
    task_map = rearq.task_map
    tasks = []
    cron_tasks = []
    for task_name, task in task_map.items():
        item = {
            "name": task_name,
            "queue": task.queue,
        }
        # Last finish time of the most recent run, if the task has ever run.
        job_result = await JobResult.filter(job__task=task_name).order_by("-id").first()
        item["last_time"] = job_result.finish_time if job_result else None
        if isinstance(task, CronTask):
            item["cron"] = task.cron
            task.set_next()
            item["next_time"] = ms_to_datetime(task.next_run)
            cron_tasks.append(item)
        else:
            tasks.append(item)
    return templates.TemplateResponse(
        "task.html",
        {"request": request, "page_title": "task", "tasks": tasks, "cron_tasks": cron_tasks},
    )
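# --- Usage sketch, not part of the original module ---
# Rough illustration of how entries end up in rearq.task_map: plain tasks are
# listed under "tasks" above, CronTask registrations under "cron_tasks". The
# `rearq` instance is assumed to be an already-configured ReArq object from the
# application; the task names and the cron expression are made up for the example.

@rearq.task()
async def add(a: int, b: int) -> int:
    return a + b


@rearq.task(cron="*/5 * * * *")  # registered as a CronTask
async def check_heartbeats():
    ...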
async def delay(
    self,
    args: Optional[Tuple[Any, ...]] = None,
    kwargs: Optional[Dict[str, Any]] = None,
    job_id: Optional[str] = None,
    countdown: Union[float, datetime.timedelta] = 0,
    eta: Optional[datetime.datetime] = None,
    expire: Optional[Union[float, datetime.datetime]] = None,
    job_retry: int = 0,
    job_retry_after: int = 60,
) -> Job:
    if not job_id:
        job_id = uuid4().hex
    # Resolve the time at which the job should become runnable.
    if countdown:
        defer_ts = to_ms_timestamp(countdown)
    elif eta:
        defer_ts = to_ms_timestamp(eta)
    else:
        defer_ts = timestamp_ms_now()
    expire_time = None
    expires = expire or self.expire
    if expires:
        expire_time = ms_to_datetime(to_ms_timestamp(expires))
    job = await Job.get_or_none(job_id=job_id)
    if job:
        logger.warning(f"Job {job_id} exists")
        return job
    job = Job(
        task=self.function.__name__,
        args=args,
        kwargs=kwargs,
        job_retry=job_retry or self.job_retry,
        queue=self.queue,
        job_id=job_id,
        expire_time=expire_time,
        enqueue_time=timezone.now(),
        job_retry_after=job_retry_after,
    )
    if not eta and not countdown:
        # Immediate job: enqueue on the task's Redis stream.
        job.status = JobStatus.queued
        await job.save()
        await self.rearq.redis.xadd(self.queue, {"job_id": job_id})
    else:
        # Deferred job: park it in the delay queue (sorted set scored by run time).
        job.status = JobStatus.deferred
        await job.save()
        await self.rearq.redis.zadd(DELAY_QUEUE, defer_ts, f"{self.queue}:{job_id}")
    return job
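# --- Usage sketch, not part of the original module ---
# Shows the three enqueue paths handled by delay() above, using the illustrative
# `add` task from the previous sketch. The argument style (args tuple / kwargs
# dict) follows the signature above; everything else is an assumption.
import datetime


async def enqueue_examples():
    # Immediate: status becomes "queued" and the job id is XADDed to the stream.
    await add.delay(args=(1, 2))
    # Countdown: status becomes "deferred", scheduled ~30s ahead in DELAY_QUEUE.
    await add.delay(args=(1, 2), countdown=30)
    # ETA: deferred until an absolute datetime.
    await add.delay(args=(1, 2), eta=datetime.datetime.now() + datetime.timedelta(hours=1))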
async def _pre_run(self):
    async with await self._lock_manager.lock(constants.WORKER_KEY_LOCK):
        workers_info = await self._redis.hgetall(constants.WORKER_KEY)
        for worker_name, value in workers_info.items():
            value = json.loads(value)
            time = ms_to_datetime(value["ms"])
            is_offline = (
                timezone.now() - time
            ).total_seconds() > constants.WORKER_HEARTBEAT_SECONDS + 10
            if value.get("is_timer") and not is_offline:
                msg = (
                    f"There is a timer worker `{worker_name}` already, "
                    "you can only start one timer worker"
                )
                logger.error(msg)
                raise UsageError(msg)
        else:
            # for/else: reached only when the loop finished without raising,
            # i.e. no other live timer worker was found.
            await self._push_heartbeat()
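# --- Illustration, not from the original module ---
# The same "is the heartbeat stale?" rule appears in get_workers() and
# _pre_run() above; it is factored out here only to make the threshold explicit.
from datetime import datetime, timedelta


def heartbeat_is_stale(last_beat: datetime, now: datetime,
                       heartbeat_seconds: int, grace: int = 10) -> bool:
    # A worker counts as offline once its last heartbeat is older than the
    # heartbeat interval plus a small grace period.
    return (now - last_beat) > timedelta(seconds=heartbeat_seconds + grace)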