class Scheduler(object):

    def __init__(self):
        self.scheduler = BlockingScheduler()
        self.logger = Logger("scheduler").getlog()
        self.aps_log = Logger("apscheduler").getlog()
        self.dboption = DBOption()
        self.config = ConfigUtil()

    def get_timetrigger_job(self, current):
        dstring = DateUtil.format_year_second(current)
        day = DateUtil.get_time_day(current)
        hour = DateUtil.get_time_hour(current)
        minute = DateUtil.get_time_minute(current)
        week_day = DateUtil.get_week_day(current)
        self.logger.info("fetching jobs scheduled for start_hour:" + str(hour) + " start_minute:" + str(minute))
        time_jobs = self.dboption.get_time_trigger_job(hour, minute)
        if time_jobs is None or len(time_jobs) == 0:
            self.logger.info(dstring + " no time-triggered jobs need to run")
            return
        try:
            for job in time_jobs:
                job_name = job["job_name"]
                trigger_type = job["trigger_type"]
                should_run = False
                if trigger_type == "day":  # runs every day
                    should_run = True
                elif trigger_type == "month":  # runs monthly on start_day
                    start_day = job["start_day"]
                    if int(start_day) == day:
                        should_run = True
                elif trigger_type == "week":  # runs weekly on start_day (day of week)
                    start_day = job["start_day"]
                    if int(start_day) == week_day:
                        should_run = True
                if should_run:
                    record = self.dboption.update_trigger_job_pending(current, job_name)
                    if record == 1:
                        self.logger.info("updated time-triggered job:" + job_name + " status to Pending")
                    else:
                        self.logger.error("failed to update time-triggered job:" + job_name + " status to Pending")
                else:
                    self.logger.info("time-triggered job:" + job_name + " is not due to run now, trigger_type:" + str(trigger_type))
        except Exception as e:
            self.logger.error(e)
            self.logger.error("error while processing time-triggered jobs")
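# Illustrative sketch (an assumption, not part of the original source): one way the
# Scheduler above could be started so that get_timetrigger_job fires once per minute
# through APScheduler. The helper name _example_start_scheduler and the 1-minute
# interval are hypothetical; the project's real entry point may differ.
def _example_start_scheduler():
    from datetime import datetime
    s = Scheduler()
    # pass the current time to get_timetrigger_job on every tick
    s.scheduler.add_job(lambda: s.get_timetrigger_job(datetime.now()), "interval", minutes=1)
    s.scheduler.start()  # BlockingScheduler.start() blocks the calling thread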
class Executor(object):

    def __init__(self):
        self.logger = Logger("executor").getlog()
        self.aps_log = Logger("apscheduler").getlog()
        self.config = ConfigUtil()
        self.dboption = DBOption()
        self.process_running = {}
        self.scheduler = BlockingScheduler()
        self.monitor = Monitor()

    def run_queue_job_pending(self):
        '''Run jobs in t_etl_job_queue that are in Pending status.'''
        self.logger.info("\n")
        self.logger.info("... interval run run_queue_job_pending ....")
        try:
            self.check_process_state()  # check the status of child processes started earlier
            logpath = self.config.get("job.log.path")
            if logpath is None or len(logpath.strip()) == 0:
                raise Exception("can't find slave job.log.path")
            if not os.path.exists(logpath):
                os.makedirs(logpath)
            today = DateUtil.get_today()
            today_log_dir = logpath + "/" + today
            if not os.path.exists(today_log_dir):
                os.makedirs(today_log_dir)

            queue_job = self.dboption.get_queue_job_pending()
            if queue_job is not None:
                job_name = queue_job["job_name"]
                etl_job = self.dboption.get_job_info(job_name)
                job_status = etl_job["job_status"]
                job_retry_count = etl_job["retry_count"]
                run_number = queue_job["run_number"]
                if not self.check_should_run(job_name, job_status, job_retry_count, run_number):
                    return

                logfile = today_log_dir + "/" + job_name + "_" + today + ".log." + str(run_number)
                bufsize = 0
                logfile_handler = open(logfile, 'w', bufsize)  # unbuffered so the child's output is written immediately
                python_bin = CommonUtil.python_bin(self.config)
                run_path = project_path + "/bin/" + "runcommand.py"
                child = subprocess.Popen(python_bin + [run_path, "-job", job_name],
                                         stdout=logfile_handler.fileno(),
                                         stderr=subprocess.STDOUT,
                                         shell=False)
                pid = child.pid
                if pid > 0:
                    self.logger.info("spawned child process:" + str(pid) + " to run job:" + str(job_name))
                    code = self.dboption.update_job_running(job_name)
                    if code != 1:
                        try:
                            self.logger.info("failed to update job:" + job_name + " status to Running, stopping the spawned process")
                            self.terminate_process(child, logfile_handler)
                        except Exception as e:
                            self.logger.error(e)
                            self.logger.error("error terminating child process")
                        logfile_handler.flush()
                        logfile_handler.close()
                    else:
                        self.logger.info("updated job:" + job_name + " status to Running")
                        code = self.dboption.update_job_queue_done(job_name)  # FixMe: transaction issue
                        self.logger.info("updated queue job:" + str(job_name) + " status to Done, rows affected:" + str(code))
                        if code != 1:
                            self.logger.error("failed to update queue job:" + job_name + " status to Done")
                            self.terminate_process(child, logfile_handler)
                            self.logger.info("reset job_name:" + job_name + " status to Pending to wait for the next run")
                            self.dboption.update_job_pending_from_running(job_name)
                        else:
                            self.process_running[child] = {"logfile_handler": logfile_handler,
                                                           "job_name": job_name,
                                                           "pid": pid}
                else:
                    self.logger.error("failed to start child process, pid:" + str(pid))
                    logfile_handler.flush()
                    logfile_handler.close()
            else: