class TaskExecutor:
    """Runs a task's jobs on an APScheduler ``BlockingScheduler``.

    Each job is re-scheduled one second after it finishes (or errors), and a
    2-second heartbeat job flushes buffered invoke logs to the database and
    tears everything down once the task is switched off.
    """

    def __init__(self, db, task_instance, task_param):
        """Configure logging and build the scheduler.

        :param db: persistence handle; must expose ``save_task_logs(dict)``.
        :param task_instance: running task; ``status == 'off'`` means stop.
        :param task_param: task parameters; ``get_invoke_args()`` must return
            a mapping containing an ``'invoke_count'`` entry.
        """
        self.task_instance = task_instance
        self.task_param = task_param
        self.db = db
        # Buffered invoke logs, flushed to the server by the heartbeat job.
        self.invoke_log_map = {}
        # job_id -> Job, captured on EVENT_JOB_ADDED so the listener can
        # re-add the same callable/args after each run.
        self.jobs = {}
        logging.config.fileConfig("../logger.ini")
        self.logger = logging.getLogger("taskExecutor")
        invoke_count = int(self.task_param.get_invoke_args()['invoke_count'])
        executors = {
            'default': {
                'type': 'threadpool',
                # +1 keeps a worker free for the heartbeat job.
                'max_workers': invoke_count + 1
            }
        }
        self.scheduler = BlockingScheduler(executors=executors)

    def execute(self):
        """Attach listeners, start the heartbeat, and run the scheduler.

        Blocks until the scheduler stops; any unexpected error is logged
        (with traceback) instead of being printed to stdout.
        """
        self.scheduler.add_listener(
            self._job_listener,
            events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR |
            events.EVENT_JOB_ADDED | events.EVENT_JOB_MISSED)
        # Periodically push invoke_log_map up to the server.
        self.scheduler.add_job(self._invoke_break_heart, "interval", seconds=2)
        try:
            self.scheduler.start()
        except Exception:
            # Fix: was print(e) — route through the configured logger so the
            # failure (and its traceback) lands in the task log file.
            self.logger.exception("scheduler terminated unexpectedly")
        self.scheduler.shutdown(wait=True)

    def _job_listener(self, ev):
        """Handle job events; after a job runs, schedule its next invocation.

        Errors are logged but still trigger the next invocation, so a single
        failure does not stop the task. Does nothing once the task is off.

        :param ev: APScheduler job event (`code`, `job_id`, and on error
            `exception`/`traceback`).
        """
        if self.task_instance.status == 'off':
            return
        if ev.code == events.EVENT_JOB_ADDED:
            self.jobs[ev.job_id] = self.scheduler.get_job(ev.job_id)
        elif ev.code in (events.EVENT_JOB_EXECUTED, events.EVENT_JOB_ERROR):
            if ev.code == events.EVENT_JOB_ERROR:
                self.logger.error(ev.exception)
                self.logger.error(ev.traceback)
            job = self.jobs[ev.job_id]
            # Re-queue the same callable/args to run one second from now.
            self.scheduler.add_job(
                job.func,
                next_run_time=(datetime.datetime.now() +
                               datetime.timedelta(seconds=1)),
                id=ev.job_id,
                args=job.args)

    def _invoke_break_heart(self):
        """Heartbeat: stop all jobs when the task is off, then flush logs."""
        if self.task_instance.status == 'off':
            for job in self.scheduler.get_jobs():
                try:
                    job.pause()
                    job.remove()
                except Exception as e:
                    # Best-effort teardown: a job may already be gone.
                    self.logger.error(e)
        self.db.save_task_logs(self.invoke_log_map)
class EventScheduler:
    """Schedules test events on a ``BlockingScheduler``.

    Background events run as repeating interval jobs; foreground events run
    as one-shot date jobs and, when ``loop_count > 1``, are re-queued by the
    EVENT_JOB_EXECUTED listener until their loop count is exhausted.
    """

    def __init__(self, reporter: ResultReporter):
        """Create the scheduler, its log file, and the job-executed listener.

        :param reporter: result reporter used to create per-event STEP groups.
        """
        self.reporter = reporter
        self.scheduler = BlockingScheduler()
        # Live event objects; each holds a reference to its scheduler job.
        self.events = list()
        log_path = static_setting.settings["CaseRunner"].log_path
        log_file = os.path.join(log_path, "event_scheduler_log.log")
        self.log = logger.register("EventScheduler", filename=log_file, for_test=True)
        self.scheduler.add_listener(self._event_listen, EVENT_JOB_EXECUTED)

    def add_event(self, event, package, args, is_background, need_lock,
                  start_time, interval=5, loop_count=1, description=""):
        """Instantiate the event class ``package.event`` and schedule it.

        :param event: class name of the event, looked up in ``package``.
        :param package: importable module path containing the event class.
        :param args: arguments stored on the event instance.
        :param is_background: True -> repeating interval job; False -> date job.
        :param need_lock: whether the event requires the execution lock.
        :param start_time: first run time for the job.
        :param interval: seconds between runs (background or looped events).
        :param loop_count: number of runs for foreground events.
        :param description: human-readable description passed to the event.
        """
        m = importlib.import_module(package)
        event_cls = getattr(m, event)
        new_event = event_cls(description, log=self.log)
        new_event.need_lock = need_lock
        new_event.back_ground = is_background
        new_event.arguments = args
        new_event.interval = interval
        new_event.loop_count = loop_count
        # Create a STEP node in the report for this event's operations.
        new_event.reporter = self.reporter.add_event_group(f"Event: {event}")
        if is_background:
            new_event.job = self.scheduler.add_job(new_event.run,
                                                   "interval",
                                                   seconds=interval,
                                                   start_date=start_time,
                                                   id=f"{event}{uuid.uuid4()}")
        else:
            new_event.job = self.scheduler.add_job(new_event.run,
                                                   "date",
                                                   run_date=start_time,
                                                   id=f"{event}{uuid.uuid4()}")
        self.events.append(new_event)

    def remove_event(self, event_id):
        """Remove the job with ``event_id`` and drop its tracked event.

        Fix: the original only called ``remove_job`` inside the event-match
        loop, so a job with no corresponding entry in ``self.events`` was
        found but never actually removed from the scheduler.
        """
        job = self.scheduler.get_job(event_id)
        if job:
            self.scheduler.remove_job(event_id)
            for event in self.events:
                if event.job == job:
                    self.events.remove(event)
                    break

    def start(self):
        """Start the (blocking) scheduler."""
        self.scheduler.start()

    def _event_listen(self, ev):
        """EVENT_JOB_EXECUTED callback: re-queue looped foreground events.

        Background events repeat on their own interval trigger, and events
        down to their final loop are left to expire; otherwise the next
        one-shot run is scheduled ``interval`` seconds after this run.

        :param ev: APScheduler ``JobExecutionEvent`` (``job_id``,
            ``scheduled_run_time``).
        """
        for event in self.events:
            if event.job.id != ev.job_id:
                continue
            if event.back_ground:
                return
            if event.loop_count == 1:
                return
            delta = datetime.timedelta(seconds=event.interval)
            next_date = ev.scheduled_run_time + delta
            event.job = self.scheduler.add_job(
                event.run, "date", run_date=next_date,
                id=f"{event.name}{uuid.uuid4()}")
            event.loop_count -= 1
            return
class JobLauncher(object):
    """Convenience facade over an APScheduler scheduler.

    Wraps scheduler construction (blocking or background), job CRUD, and
    lifecycle control, persisting jobs in a SQLite SQLAlchemy job store.
    """

    def __init__(self, background=False, deamon=True, **kwargs):
        """Build and configure the scheduler.

        :param background: True -> ``BackgroundScheduler``; False (default)
            -> ``BlockingScheduler``.
        :param deamon: (sic — misspelling kept for caller compatibility)
            daemon flag for the background scheduler's worker thread.
        """
        # Fix: keyword was misspelled 'atefmt', which raises ValueError on
        # modern Python ('Unrecognised argument(s): atefmt').
        logging.basicConfig(format="[%(asctime)s] %(message)s",
                            datefmt="%Y-%m-%d %H:%M:%S")
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)
        if background:
            # Fix: the scheduler option is spelled 'daemon'; the original
            # 'deamon=deamon' was an unknown option and silently ignored.
            self.sched = BackgroundScheduler(daemon=deamon)
        else:
            self.sched = BlockingScheduler()
        # TODO: Read from configuration file.
        self.sched.configure(
            jobstores={
                "default": SQLAlchemyJobStore(url='sqlite:///app/database/example.db')
            },
            executors={
                'default': ThreadPoolExecutor(20),
                'processpool': ProcessPoolExecutor(5)
            },
            job_defaults={
                'coalesce': False,
                'max_instances': 3
            },
            timezone=get_localzone()  # e.g. Asia/Seoul
        )
        self.retried = 0
        self.logger = logging.getLogger('apscheduler')
        super(JobLauncher, self).__init__()

    def start(self):
        """Start the scheduler unless it is already running.

        Known startup errors are printed with their traceback; the job list
        is logged both before and after the attempt.
        """
        try:
            if self.sched.state != STATE_RUNNING:
                self.printJobs(jobstore='default')
                self.sched.start()  # fix: return value was bound but unused
        except ConflictingIdError:
            traceback.print_exc()
        except KeyboardInterrupt:
            traceback.print_exc()
        self.logger.info('Finished')
        self.logger.info(self.getJobs())
        self.printJobs()

    def stop(self, wait=False):
        """Shut the scheduler down if running; ``wait`` blocks on jobs."""
        if self.sched.state == STATE_RUNNING:
            self.sched.shutdown(wait=wait)

    def resume(self):
        """Resume job processing (only meaningful while running)."""
        if self.sched.state == STATE_RUNNING:
            self.sched.resume()

    def pause(self):
        """Pause job processing (only meaningful while running)."""
        if self.sched.state == STATE_RUNNING:
            self.sched.pause()

    def addListener(self, listener, types):
        """Register an APScheduler event listener for the given event mask."""
        self.sched.add_listener(listener, types)

    def addJob(self, job, **kwargs):
        """Build and schedule ``job`` (via its ``build()``); return the Job."""
        execute, trigger, options = job.build(**kwargs)
        added_job = self.sched.add_job(execute, trigger, **options)
        self.printJobs()
        return added_job

    def getJob(self, job_id):
        """Return the job with ``job_id``, or None."""
        return self.sched.get_job(job_id)

    def getJobs(self, jobstore=None):
        """Return all jobs, optionally restricted to one job store."""
        return self.sched.get_jobs(jobstore=jobstore)

    def removeJob(self, job_id, jobstore=None):
        """Remove the job with ``job_id`` from the (optional) job store."""
        return self.sched.remove_job(job_id, jobstore=jobstore)

    def removeAllJob(self, jobstore=None):
        """Remove every job from the (optional) job store."""
        return self.sched.remove_all_jobs(jobstore=jobstore)

    def printJobs(self, jobstore=None, out=None):
        """Print the job list to ``out`` (defaults to stdout).

        Fix: the original hard-coded ``out=None`` in the delegated call,
        silently discarding the caller's ``out`` argument.
        """
        return self.sched.print_jobs(jobstore=jobstore, out=out)

    def getJobState(self, job_id=None, jobstore=None):
        """Snapshot job state.

        :param job_id: if given, report only that job (empty list if absent);
            otherwise report every job in ``jobstore``.
        :return: list of ``{job.id: {"next_run_time": ..., "state": ...}}``
            dicts, where ``state`` is the job's ``pending`` flag.
        """
        if job_id is not None:
            job = self.sched.get_job(job_id, jobstore=jobstore)
            jobs = [job] if job is not None else []
        else:
            jobs = self.sched.get_jobs(jobstore=jobstore)
        return [
            {job.id: {"next_run_time": job.next_run_time,
                      "state": job.pending}}
            for job in jobs
        ]