async def _push_heartbeat(self, is_offline: bool = False):
    if is_offline:
        await self._redis.hdel(constants.WORKER_KEY, self.worker_name)
    else:
        value = {
            "queue": self.queue,
            "is_timer": isinstance(self, TimerWorker),
            "ms": timestamp_ms_now(),
        }
        await self._redis.hset(constants.WORKER_KEY, self.worker_name, value=json.dumps(value))
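# A minimal read-side sketch (hypothetical helper, not part of this module):
# heartbeats live in a single Redis hash, one field per worker, JSON-encoded
# exactly as written by _push_heartbeat above.
async def _list_worker_heartbeats(redis) -> Dict[str, Dict[str, Any]]:
    raw = await redis.hgetall(constants.WORKER_KEY)
    return {name: json.loads(info) for name, info in raw.items()}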
async def delay(
    self,
    args: Optional[Tuple[Any, ...]] = None,
    kwargs: Optional[Dict[str, Any]] = None,
    job_id: Optional[str] = None,
    countdown: Union[float, datetime.timedelta] = 0,
    eta: Optional[datetime.datetime] = None,
    expire: Optional[Union[float, datetime.datetime]] = None,
    job_retry: int = 0,
    job_retry_after: int = 60,
) -> Job:
    if not job_id:
        job_id = uuid4().hex
    # Resolve when the job becomes due: countdown and eta defer it,
    # otherwise it is due immediately.
    if countdown:
        defer_ts = to_ms_timestamp(countdown)
    elif eta:
        defer_ts = to_ms_timestamp(eta)
    else:
        defer_ts = timestamp_ms_now()
    expire_time = None
    expires = expire or self.expire
    if expires:
        expire_time = ms_to_datetime(to_ms_timestamp(expires))
    job = await Job.get_or_none(job_id=job_id)
    if job:
        logger.warning(f"Job {job_id} exists")
        return job
    job = Job(
        task=self.function.__name__,
        args=args,
        kwargs=kwargs,
        job_retry=job_retry or self.job_retry,
        queue=self.queue,
        job_id=job_id,
        expire_time=expire_time,
        enqueue_time=timezone.now(),
        job_retry_after=job_retry_after,
    )
    if not eta and not countdown:
        # Immediate job: enqueue straight onto the stream.
        job.status = JobStatus.queued
        await job.save()
        await self.rearq.redis.xadd(self.queue, {"job_id": job_id})
    else:
        # Deferred job: park it in the delay zset, scored by its due time.
        job.status = JobStatus.deferred
        await job.save()
        await self.rearq.redis.zadd(DELAY_QUEUE, defer_ts, f"{self.queue}:{job_id}")
    return job
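# A minimal usage sketch (assumption: `my_task` is a function registered as a
# task elsewhere; the name is hypothetical). Immediate jobs go straight onto
# the stream as JobStatus.queued; `countdown`/`eta` park them in the delay
# zset as JobStatus.deferred until the timer worker moves them over.
async def _delay_example():
    job = await my_task.delay(args=(1, 2))  # due immediately
    deferred = await my_task.delay(kwargs={"x": 1}, countdown=30, job_retry=3)
    return job, deferred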
async def _poll_iteration(self):
    """
    Move due delayed jobs onto their queues.
    :return:
    """
    redis = self._redis
    now = timestamp_ms_now()
    p = redis.pipeline()
    for queue in self.rearq.delay_queues:
        p.zrangebyscore(queue, offset=0, count=self.queue_read_limit, max=now)
    jobs_id_list = await p.execute()
    p = redis.pipeline()
    execute = False
    for jobs_id_info in jobs_id_list:
        for job_id_info in jobs_id_info:
            execute = True
            # Members are "<queue>:<job_id>"; split on the last colon so
            # queue names that themselves contain ":" round-trip correctly.
            separate = job_id_info.rindex(":")
            queue, job_id = job_id_info[:separate], job_id_info[separate + 1:]
            p.xadd(queue, {"job_id": job_id})
            delay_queue = self.rearq.get_delay_queue(job_id_info)
            p.zrem(delay_queue, job_id_info)
    if execute:
        await p.execute()
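# A minimal sketch of the member encoding handled above: delay-zset members
# are "<queue>:<job_id>", and rindex(":") splits on the last colon so a queue
# name containing ":" still parses. The values here are made up.
member = "rearq:queue:7c9fe01b2d4e4f52a3b1c6d8e9f0a1b2"
sep = member.rindex(":")
queue_name, parsed_job_id = member[:sep], member[sep + 1:]
assert queue_name == "rearq:queue" and parsed_job_id == "7c9fe01b2d4e4f52a3b1c6d8e9f0a1b2"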
async def run_cron(self):
    """
    Run cron tasks that are due.
    :return:
    """
    redis = self._redis
    cron_tasks = CronTask.get_cron_tasks()
    p = redis.pipeline()
    execute = False
    jobs = []
    for function, task in cron_tasks.items():
        if timestamp_ms_now() >= task.next_run:
            execute = True
            job_id = uuid4().hex
            if task.function == check_pending_msgs:
                # The built-in pending-message check runs in-process
                # rather than being enqueued as a job.
                asyncio.ensure_future(
                    check_pending_msgs(task, task.queue, self.group_name, self.job_timeout)
                )
            else:
                logger.info(f"{task.function.__name__}()")
                jobs.append(
                    Job(
                        task=function,
                        job_retry=self.job_retry,
                        queue=task.queue,
                        job_id=job_id,
                        enqueue_time=timezone.now(),
                        job_retry_after=self.job_retry_after,
                        status=JobStatus.queued,
                    )
                )
                p.xadd(task.queue, {"job_id": job_id})
                self.jobs_complete += 1
            task.set_next()
    if jobs:
        await Job.bulk_create(jobs)
    if execute:
        await p.execute()
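# A minimal registration sketch (assumption: the `cron=` decorator form shown
# in rearq's README; the task name here is hypothetical). run_cron above picks
# a task up once timestamp_ms_now() reaches task.next_run, then set_next()
# schedules the following run from the cron expression.
@rearq.task(cron="*/5 * * * *")
async def report_every_five_minutes():
    return "tick"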
async def run_job(self, queue: str, msg_id: str, job: Job):
    # Drop jobs whose expire_time has already passed, without running them.
    if job.expire_time and job.expire_time < timezone.now():
        logger.warning(f"job {job.job_id} is expired, ignore")
        job.status = JobStatus.expired
        await job.save(update_fields=["status"])
        return
    job_id = job.job_id
    job_result = JobResult(
        msg_id=msg_id, job=job, worker=self.worker_name, start_time=timezone.now()
    )
    task = self._task_map.get(job.task)
    if not task:
        logger.warning(f"job {job_id}, task {job.task} not found")
        job_result.result = "task not found"
        await job_result.save()
        return job_result
    ref = f"{job_id}:{job.task}"
    start_ms = timestamp_ms_now()
    logger.info(
        "%6.2fs → %s(%s)%s"
        % (
            (start_ms - to_ms_timestamp(job.enqueue_time)) / 1000,
            ref,
            args_to_string(job.args, job.kwargs),
            f" try={job.job_retries}" if job.job_retries > 1 else "",
        )
    )
    try:
        async with async_timeout.timeout(self.job_timeout):
            if task.bind:
                result = await task.function(task, *(job.args or []), **(job.kwargs or {}))
            else:
                result = await task.function(*(job.args or []), **(job.kwargs or {}))
        job_result.success = True
        job_result.finish_time = timezone.now()
        job.status = JobStatus.success
        logger.info("%6.2fs ← %s ● %s" % ((timestamp_ms_now() - start_ms) / 1000, ref, result))
        self.jobs_complete += 1
    except Exception as e:
        job_result.finish_time = timezone.now()
        self.jobs_failed += 1
        result = (
            f"Run task error on attempt {job.job_retries}, exc: {e}, "
            f"retry after {self.job_retry_after} seconds"
        )
        logger.error("%6.2fs ← %s ● %s" % ((timestamp_ms_now() - start_ms) / 1000, ref, result))
        if job.job_retries >= job.job_retry:
            t = (timestamp_ms_now() - to_ms_timestamp(job.enqueue_time)) / 1000
            logger.error("%6.2fs ! %s max retries %d exceeded" % (t, ref, job.job_retry))
            job.status = JobStatus.failed
        else:
            # Retries remain: defer the job and park it in the delay zset,
            # due job_retry_after seconds from now.
            job.status = JobStatus.deferred
            job.job_retries = F("job_retries") + 1
            await self.rearq.zadd(to_ms_timestamp(self.job_retry_after), f"{queue}:{job_id}")
    finally:
        await self._xack(queue, msg_id)
        await job.save(update_fields=["status", "job_retries"])
        job_result.result = result
        await job_result.save()
    return job_result