class scheduler:
    def __init__(self):
        self.scheduler_job = BackgroundScheduler()
        self.scheduler_job.add_job(stock_api_update, 'interval', seconds=THREAD_INTERVAL)
        self.scheduler_job.add_job(change_status_rule, 'interval',
                                   minutes=CHANGE_STATUS_RULE_THREAD_INT, max_instances=1)
        self.scheduler_job.add_job(change_threshold_rule, 'interval',
                                   minutes=CHANGE_THRESHOLD_RULE_THREAD_INT, max_instances=1)
        self.scheduler_job.add_job(price_threshold_rule, 'interval',
                                   minutes=PRICE_THRESHOLD_RULE_THREAD_INT, max_instances=1)
        self.scheduler_job.add_job(recommendation_analyst_rule, 'interval',
                                   minutes=RECOMMENDATION_ANALYST_RULE_THREAD_INT, max_instances=1)

    def start(self):
        self.scheduler_job.start()

    def stop_all_jobs(self):
        # Removes all jobs from this store.
        self.scheduler_job.remove_all_jobs()
        # Frees any resources still bound to this job store.
        self.scheduler_job.shutdown()

    def get_running_jobs(self):
        return self.scheduler_job.get_jobs()
class Scheduler(object):
    # Static singleton instance.
    __singleton = None

    def __init__(self):
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.jobIds = []

    @staticmethod
    def getInstance():
        if Scheduler.__singleton is None:
            Scheduler.__singleton = Scheduler()
        return Scheduler.__singleton

    def getScheduler(self):
        return self.scheduler

    def addJob(self, seconds, fun):
        self.jobIds.append(id(fun))
        self.scheduler.add_job(fun, 'interval', seconds=seconds)

    def removeJob(self, fun):
        # Note: despite the name, this removes every job, not just `fun`.
        self.scheduler.remove_all_jobs()
class Timer:
    def __init__(self):
        self.scheduler = BackgroundScheduler()

    def submit_job(self, func, delay_seconds):
        """Schedule func to run once after delay_seconds.

        :param func: callable to execute
        :param delay_seconds: delay before execution, in seconds
        :return: the id of the created job
        :rtype: str
        """
        return self.scheduler.add_job(
            func=func,
            trigger=DateTrigger(datetime.datetime.now() +
                                datetime.timedelta(seconds=delay_seconds))).id

    def submit_periodical_job(self, func, period_seconds, id):
        return self.scheduler.add_job(func, 'interval', seconds=period_seconds, id=id).id

    def remove_job(self, job_id):
        self.scheduler.remove_job(job_id)

    def start(self):
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.remove_all_jobs()
        self.scheduler.shutdown()
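A minimal usage sketch for the Timer wrapper above; the printing callables are hypothetical and only illustrate the lifecycle.

# Usage sketch for Timer; the lambdas are stand-in jobs.
timer = Timer()
timer.start()
once_id = timer.submit_job(lambda: print("ping"), delay_seconds=5)
tick_id = timer.submit_periodical_job(lambda: print("tick"), period_seconds=2, id="tick")
# ... later ...
timer.remove_job(tick_id)
timer.shutdown()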
def test_run(config=None):
    scheduler = BackgroundScheduler()
    scheduler.add_job(test_job, kwargs={'job_id': '1'}, trigger='interval', seconds=5, id='1')
    print('[INFO] job 1 added')
    scheduler.add_job(test_job, kwargs={'job_id': '2'}, trigger='interval', seconds=8, id='2')
    print('[INFO] job 2 added')
    scheduler.add_job(test_job, kwargs={'job_id': '3'}, trigger='interval', seconds=11, id='3')
    print('[INFO] job 3 added')
    scheduler.start()
    print('[INFO] scheduler start')
    try:
        while True:
            print('[INFO] scheduler is sleeping ...')
            time.sleep(3600)
    except (KeyboardInterrupt, SystemExit):
        print('[INFO] remove all jobs ...')
        scheduler.remove_all_jobs()
        print('[INFO] scheduler shutdown ...')
        scheduler.shutdown()
        print('[INFO] Exit.')
class SerialReader:
    scheduler = None
    job = None

    def __init__(self, serial, callback, interval=0.1):
        self.active = True
        self.interval = interval
        self.serial = serial
        self.data = None
        self.callback = callback

    def start(self):
        self.scheduler = BackgroundScheduler()
        self.job = self.scheduler.add_job(self.test, 'interval', seconds=self.interval)
        self.scheduler.start()

    def pause(self):
        self.scheduler.pause()

    def resume(self):
        self.scheduler.resume()

    def test(self):
        n_in = self.serial.in_waiting
        if n_in > 0:
            # Read every buffered byte; a bare read() would return a single byte.
            self.data = self.serial.read(n_in)
        if self.data is not None and len(self.data) > 0:
            self.callback(self.data)

    def stop(self):
        self.scheduler.remove_all_jobs()
        self.scheduler.shutdown()
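A sketch of wiring SerialReader to pyserial; the port name, baud rate, and callback are assumptions for illustration.

# Sketch: connect SerialReader to a pyserial port (port name is an assumption).
import serial

port = serial.Serial('/dev/ttyUSB0', baudrate=9600, timeout=0)
reader = SerialReader(port, callback=lambda data: print('received:', data))
reader.start()
# ... later ...
reader.stop()
port.close()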
def test3(): """定时执行任务,暂停,恢复""" start_time = time.time() scheduler = BackgroundScheduler() scheduler.add_job( my_job, 'interval', args=('123', ), seconds=1, id='my_job_id') # 每隔1秒执行一次my_job函数,args为函数my_job的输入参数;id:可省略; scheduler.start() # 程序运行到这里,任务没有运行完也会往后执行,既执行后面的任务,又执行这个任务。 print('运行到了这里1') while (scheduler.state): if time.time() - start_time > 5: print('暂停作业') scheduler.pause() # 暂停作业: break print('恢复作业') if time.time() - start_time > 5: scheduler.resume() # time.sleep(4) print('当前任务列表:{}'.format( scheduler.get_jobs())) # 获得调度作业的列表,可以使用 get_jobs() 来完成,它会返回所有的job实例 scheduler.get_job('my_job_id') # 获取id为my_job_id的作业实例 scheduler.print_jobs() # 输出所有格式化的作业列表。 print('移除作业') # scheduler.remove_job('my_job_id') # 移除id为my_job_id的作业 scheduler.remove_all_jobs() # 移除所有的作业
def start_scheduler(): """ Creates the scheduler and adds all basic tasks. Returns: """ global scheduler scheduler = BackgroundScheduler(executors=app.config['SCHEDULER_EXECUTORS'], job_defaults=app.config['SCHEDULER_DEFAULTS']) scheduler.remove_all_jobs() scheduler.add_job(meassure, 'cron', minute='0,15,30,45', id='meassure') # Light devices. for light_device in LightDevice.get_active(): start_light_tasks(light_device) # Water devices. for active_water_device in WaterDevice.get_turned_on(): if active_water_device.switch_off_time <= datetime.now(): # switch now if event in past stop_water(active_water_device.id) else: scheduler.add_job(stop_water, 'date', run_date=active_water_device.switch_off_time, args=[active_water_device.id], misfire_grace_time=10000000, id='water_off_' + active_water_device.name) # Subscribers. scheduler.add_job(update_subscribers, 'cron', hour='15', id='update_subscribers') # Webcam scheduler.add_job(webcam, 'cron', minute='1,31', id='webcam') webcam() # Time Lapse # scheduler.add_job(time_lapse, 'cron', # hour='12,18', id='timelapse') # time_lapse() print("Scheduler started") scheduler.start()
def start_scheduler():
    scheduler = BackgroundScheduler()
    scheduler.remove_all_jobs()
    logger.info("Starting the scheduler at: %s" % (
        dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')))
    scheduler.add_job(fetch_email_task, 'interval', max_instances=10, minutes=10)
    # scheduler.add_job(fetch_email_task, 'date', run_date='2017-02-13 12:21:00')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
class AutoReserve():
    def __init__(self):
        self.scheduler = BackgroundScheduler(timezone="Asia/Seoul")
        self.jobid = 1
        self.jobdb = {}

    def addGolfJob(self, ccName, BackFunc, user_id, user_pw, schedule,
                   target_date, target_time, day=21):
        print('autoReserve in Jayuro', schedule.hour(), schedule.minute(),
              schedule.second(), target_date, target_time, day)
        print(self.jobid)
        # Find the next free job id (jobs are stored under string ids).
        while self.scheduler.get_job(str(self.jobid)):
            print(self.jobid, self.scheduler.get_job(str(self.jobid)))
            self.jobid += 1
        result = self.scheduler.add_job(
            BackFunc, 'cron',
            args=[user_id, user_pw, target_date, target_time, day],
            week='1-53', day_of_week='0-6',
            hour=schedule.hour(), minute=schedule.minute(), second=schedule.second(),
            id=str(self.jobid))
        print("result:", result)
        self.jobdb[self.jobid] = ccName
        if self.scheduler.state == 1:  # apscheduler.schedulers.base.STATE_RUNNING
            print('Scheduler is running')
        elif self.scheduler.state == 2:
            print('Scheduler is paused')
        elif self.scheduler.state == 0:
            print('Scheduler is stopped')
            # Only start when stopped; starting a running scheduler raises an error.
            self.scheduler.start()
        return self.jobid

    def autoStop(self):
        print('autoStop')
        for jj in self.scheduler.get_jobs():
            print(jj)
        self.scheduler.remove_all_jobs()
        self.jobdb.clear()
        self.jobid = 1
        print(self.scheduler.get_jobs())

    def autoInfo(self):
        print('Auto information')
        print(self.scheduler.get_jobs())
        for jj in self.scheduler.get_jobs():
            print(jj)
        for key in self.jobdb:
            print(key, self.jobdb[key])

    def printlog(self, target_date, target_time):
        now = datetime.datetime.now()
        print(str(now) + str(target_date + "," + target_time))
        # print("Running main process............... : " + str(datetime.datetime.now(timezone('Asia/Seoul'))))

    def getJobInfo(self):
        return self.jobdb
def add_asp_scheduler(dt, id):
    scheduler = BackgroundScheduler()
    scheduler.add_job(date_job, trigger='date', run_date=dt, id=id)
    print "scheduler add job."
    try:
        scheduler.start()
    except Exception as err:
        print "clear job."
        scheduler.remove_all_jobs()
def main(): """Main function""" # Connect to RedisDB redis_db = redis.StrictRedis(host='redis', port=6379, db=0) # Initialize scheduler scheduler = BackgroundScheduler() scheduler.start() # Essential data initialization try: usernames_list = get_usernames(redis_db, "usernames") except ConnectionError: print("Error connecting to db to get usernames list") exit() headers_list = [ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 " + "(KHTML, like Gecko)Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134", "Mozilla/5.0 (Windows NT 6.1; Win64; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (Windows NT 10.0; Win64; rv:60.0) Gecko/20100101 Firefox/60.0" ] proxies_list = ["127.0.0.1:10000"] # HTTPS proxies for i in usernames_list: scheduler.add_job(crawl_username_job, 'interval', args=[i, headers_list, proxies_list, redis_db], seconds=3, timezone="Europe/Paris", max_instances=20000) time.sleep(2 / len(usernames_list)) # Maintain main thread alive and actualize usernames try: while True: time.sleep(10) # Remove all jobs before adding them again (support for new usernames added) scheduler.remove_all_jobs() usernames_list = get_usernames(redis_db, "usernames") for i in usernames_list: scheduler.add_job( crawl_username_job, 'interval', args=[i, headers_list, proxies_list, redis_db], seconds=3, timezone="Europe/Paris", max_instances=20000) time.sleep(2 / len(usernames_list)) except (KeyboardInterrupt, SystemExit): # We shutdown the scheduler print('shutdown, please wait for correct exit') scheduler.shutdown()
def start():
    x = Apps.Applications.all_apps
    sched = BackgroundScheduler(daemon=True)
    # Clear once up front; calling remove_all_jobs() inside the loop (as the
    # original did) would discard the jobs added for the previous apps.
    sched.remove_all_jobs()
    for y in x:
        DB.setWorkingDB(y.app_name)
        sched.add_job(DB.deleteDocs, 'interval', seconds=int(y.max_time), id=y.app_name)
        print(y.app_name, y.max_time)
    sched.start()
def set_auto_backup():  # takes no arguments
    # Read the interval in days from the database.
    days = SystemConfig.objects.first().days_to_auto_backup
    job = BackgroundScheduler()
    # Clear any existing jobs.
    if job.get_jobs():
        job.remove_all_jobs()
    # If the interval is zero or negative, don't create a new job; just return.
    if days <= 0:
        return
    # Create the job.
    job.add_job(auto_backup, 'interval', days=days, id='auto_backup')
    # Start it.
    job.start()
class Timer:
    def __init__(self):
        self.timer = BackgroundScheduler(daemon=True)
        self.timer.start()
        self.start = False

    def setTimer(self, time):
        print('Starting time: ' + time.strftime("%m/%d/%Y, %H:%M:%S"))
        if self.start is False:
            self.start = True
        else:
            self.timer.remove_all_jobs()
        self.timer.add_job(control.switch, 'date', run_date=time,
                           misfire_grace_time=7200, args=[True])
def create_scheduler():
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(("127.0.0.1", 47200))
    except socket.error:
        print("!!!scheduler already started, DO NOTHING")
    else:
        # start_scheduler = get_conf_value(section='control_pram', option='if_scheduler')
        scheduler = BackgroundScheduler(daemonic=True)
        scheduler.start()
        # Clear historical jobs.
        scheduler.remove_all_jobs()
        try:
            current_time = dt.datetime.now()
            # Monitoring task: forward detector ODS data to Kafka.
            scheduler.add_job(ods_video_det_data_pd, 'interval', minutes=1,
                              start_date=current_time,
                              id='ods_video_det_data_pd', replace_existing=True)
            # Monitoring task: AMap open-API road index.
            # scheduler.add_job(road_ranking_data, 'interval', minutes=2, start_date=current_time,
            #                   id='road_ranking_data_interval', replace_existing=True)
            # scheduler.add_job(road_ranking_data, 'date', run_date=current_time,
            #                   id='road_ranking_data_date', replace_existing=True)
            # Monitoring task: device status update (one-shot variant).
            # scheduler.add_job(device_status_update, 'date', run_date=current_time,
            #                   id='device_status_update_date', replace_existing=True)
            # Monitoring task: device status update.
            scheduler.add_job(device_status_update, 'interval', minutes=10,
                              start_date=current_time,
                              id='device_status_update_interval', replace_existing=True)
            # register_events(scheduler)
        except Exception as e:
            print(e)
            traceback.print_exc()
            # On error, stop the scheduler.
            scheduler.shutdown()
        else:
            # logger.info('[*traffic_alarm] start scheduler task!job list [%s]' % scheduler.get_jobs())
            print("=======================scheduler start==========================")
def send_delayed_message(recipient_id, message_text, time_in_seconds):
    scheduler = BackgroundScheduler()
    scheduler.remove_all_jobs()
    print(scheduler.get_jobs())
    current_date = datetime.datetime.now()
    print(time_in_seconds)
    # timedelta handles second/minute rollover, which the original
    # field-by-field arithmetic did not.
    send_time = current_date + datetime.timedelta(seconds=time_in_seconds)
    # Pass the callable and its arguments; calling send_message(...) here
    # would send the message immediately instead of scheduling it.
    scheduler.add_job(send_message, trigger='date', run_date=send_time,
                      args=[recipient_id, message_text])
    scheduler.start()
class Repeat:
    def __init__(self, toDo, repeatTime):
        self.isQuit = False
        self.scheduler = BackgroundScheduler()
        self.job = None
        self.i = 1
        self.addJob(toDo, repeatTime)

    def addJob(self, toDo, repeatTime=1800):
        print("Job added every " + str(repeatTime))
        self.job = self.scheduler.add_job(toDo, 'interval', seconds=repeatTime, max_instances=3)

    def startJobs(self):
        print("Started job")
        self.scheduler.start()

    def quitJob(self):
        self.scheduler.remove_all_jobs()
        self.scheduler.shutdown(wait=False)

    def sayHi(self):
        print("Hi")
        self.i += 1
        if self.i == 3:
            self.pauseJob()
            self.resumeJob()

    def resumeJob(self):
        self.scheduler.resume()

    def pauseJob(self):
        self.scheduler.pause()
        print("paused")

# ------------------ usage: ----------------------
# s = Repeat(toDo, repeatTime)
# try:
#     s.startJobs()
# except (KeyboardInterrupt, SystemExit):
#     s.quitJob()
class TimeScheduler(object):
    def __init__(self):
        self.tasks = {}
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

    def addJob(self, name, id, task, second, hour, day_of_week):
        """task is a callable."""
        oldTask = self.tasks.get(id)
        if oldTask is None:
            # Index by job id so the lookup above can find it again
            # (the original stored name -> id but looked up by id).
            self.tasks[id] = name
            self.scheduler.add_job(task, 'cron', second=second, hour=hour,
                                   day_of_week=day_of_week, id=id)

    def clean(self):
        self.scheduler.remove_all_jobs()
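A short usage sketch for TimeScheduler above; report() is a stand-in callable. Note that this wrapper never sets the cron minute field, so with APScheduler's defaults the job fires at second 0 of every minute within the given hour.

# Usage sketch for TimeScheduler; report() is a stand-in job.
def report():
    print("status report")

ts = TimeScheduler()
# Fires at second 0 of every minute of hour 8 on Mondays
# (minute is left unspecified by this wrapper, so it defaults to '*').
ts.addJob(name="monday-report", id="report-1", task=report,
          second=0, hour=8, day_of_week="mon")
# ... later, drop everything:
ts.clean()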
def main():
    try:
        scheduler = BackgroundScheduler()
        print("Starting scheduler")
        scheduler.start()

        # Connect to the DynamoDB table 'schedules'
        dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
        table = dynamodb.Table('schedules')

        while True:
            # Scan the table for schedule rows
            response = table.scan()
            items = response['Items']
            for item in items:
                # print(item)
                device_id = int(item["deviceid"])
                setdatetime = item["setdatetime"]
                name = item["name"]
                recurrent_check = int(item["recurrent"])
                custom_freq = item["customfreq"]
                repeat_freq = item["repeatfreq"]

                # Check the recurrence of the scheduled task
                if recurrent_check == 0:
                    # If not recurrent, schedule the job once
                    schedule_once(scheduler, device_id, name, setdatetime)
                elif recurrent_check == 1:
                    # If it is recurrent, check for custom or standard frequency
                    if repeat_freq == "custom":
                        # print("Custom Frequency")
                        schedule_custom(scheduler, name, setdatetime, custom_freq)
                    else:
                        # repeat_freq is a standard day/week/month/year
                        # print("Standard Frequency")
                        schedule_standard(scheduler, name, setdatetime, repeat_freq)

            # Print scheduled jobs after each scheduling pass over incomingtasks
            sleep(2)
            scheduler.print_jobs()
            # To keep the update real-time, constantly remove and re-add jobs as a replacing mechanism
            scheduler.remove_all_jobs()
    except:
        print(sys.exc_info()[0])
        print(sys.exc_info()[1])
class TT(object):
    def __init__(self):
        # self._sched = BlockingScheduler()
        # self._sched = AsyncIOScheduler()  # overwritten below; kept for reference
        self._sched = BackgroundScheduler()

    def hello(self):
        print('hello')

    def start(self):
        # self._sched.add_job(self.hello, 'cron', hour=17, minute=46)
        self._sched.add_job(self.hello, 'interval', seconds=1)
        self._sched.start()
        print('hello world.')
        sleep(10)
        self._sched.remove_all_jobs()
        print('remove jobs')
        sleep(10)
        self._sched.add_job(self.hello, 'interval', seconds=1)
        print('add new jobs')
        sleep(10)
def handle(self, *args, **options):
    self._log_info("Configuring jobs")
    scheduler = BackgroundScheduler()
    scheduler.add_job(bot_run, "interval", seconds=int(options.get("repeat") or "10"), jitter=10)
    port = options.get("port") or "8000"
    try:
        self._log_info("Running scheduler")
        scheduler.start()
        if settings.DEBUG == "True":
            self._log_info("Starting dev server")
            call_command("runserver")
            return
        self._log_info("Starting prod server")
        call_command("runserver", f"0.0.0.0:{port}")
    except KeyboardInterrupt:
        self._log_info("Stopping background job")
        scheduler.remove_all_jobs()
        return
def webhook():
    # Endpoint for processing incoming messaging events
    data = request.get_json()
    # You may not want to log every incoming message in production, but it's good for testing
    log(data)
    if data["object"] == "page":
        for entry in data["entry"]:
            for messaging_event in entry["messaging"]:
                if messaging_event.get("message"):  # someone sent us a message
                    # the facebook ID of the person sending you the message
                    sender_id = messaging_event["sender"]["id"]
                    # the recipient's ID, which should be your page's facebook ID
                    recipient_id = messaging_event["recipient"]["id"]
                    # the message's text
                    message_text = messaging_event["message"]["text"]
                    scheduler = BackgroundScheduler()
                    scheduler.remove_all_jobs()
                    message_string = str(message_text)
                    send_message(sender_id, message_string)
                    break
                if messaging_event.get("delivery"):  # delivery confirmation
                    pass
                if messaging_event.get("optin"):  # optin confirmation
                    pass
                if messaging_event.get("postback"):
                    # user clicked/tapped "postback" button in earlier message
                    pass
    return "ok", 200
def _init_scheduler():
    redis_pool = RedisPipeline()
    job_stores: Dict = {
        "redis": RedisJobStore(
            db=1,
            jobs_key="blogs_crawler.jobs",
            run_times_key="blogs_crawler.run_times",
            connection_pool=redis_pool,
        )
    }
    executors = {
        "default": {"type": "threadpool", "max_workers": THREADS_NUM},
        "processpool": ProcessPoolExecutor(max_workers=PROCESS_NUM),
    }
    job_defaults = {"coalesce": False, "max_instances": 5, "misfire_grace_time": 60}
    background_scheduler = BackgroundScheduler(
        jobstores=job_stores, executors=executors, job_defaults=job_defaults
    )
    # Set the scheduler's logger
    background_scheduler._logger = logger

    # Set up a job listener
    def init_scheduler_listener(event):
        if event.exception:
            logger.error("A scheduled job raised an exception!")

    background_scheduler.add_listener(
        init_scheduler_listener, EVENT_JOB_ERROR | EVENT_JOB_EXECUTED
    )
    # Clear existing jobs
    background_scheduler.remove_all_jobs()
    # Start the scheduler
    background_scheduler.start()
    return background_scheduler
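A sketch of registering a job against the scheduler returned above, targeting the "redis" jobstore it configures; crawl_all_blogs and the 30-minute cadence are assumptions.

# Sketch: register a recurring job on the scheduler from _init_scheduler();
# crawl_all_blogs is a hypothetical job function.
scheduler = _init_scheduler()
scheduler.add_job(crawl_all_blogs, "interval", minutes=30,
                  id="crawl_all_blogs", jobstore="redis",
                  replace_existing=True)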
def create_scheduler():
    # manage = SchedulerManage()
    scheduler = BackgroundScheduler(daemonic=True)
    try:
        scheduler.add_jobstore(DjangoJobStore(), "default")
        # Clear historical jobs
        scheduler.remove_all_jobs()
        date = dt.datetime.now()
        scheduler.add_listener(mylistener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        # Alarm-intensity calculation job
        scheduler.add_job(cal_kde_value, "date", run_date=date, id='alarm_proj',
                          args=[], replace_existing=True)
        # scheduler.add_job(his_model_update, "date", run_date=date, id='his_model_up', args=[], replace_existing=True)
        # Start the operation-record parsing job
        scheduler.add_job(seperate_operate_record.main, "date", run_date=date,
                          id='operate_parsing', args=[], replace_existing=True)
        # Start the data cleanup job
        scheduler.add_job(clear_database, 'cron', hour='16', minute='04',
                          id='clear_database', replace_existing=True)
        # scheduler.add_job(operate_resolve, "date", run_date=date, id='alarm_proj', args=[], replace_existing=True)
        # Operation-record parsing job (recurring)
        scheduler.add_job(seperate_operate_record.main, "interval", minutes=3,
                          id='operate_proj', args=[])
        # Start the operation-record matching job
        scheduler.add_job(so_run, "interval", minutes=1, id='operate_match',
                          args=[], replace_existing=True)
        # Start interface data-ingestion volume monitoring
        scheduler.add_job(monitor, "interval", minutes=15, id='interface_monitor',
                          args=[15, ], replace_existing=True)
        # Start the interface data-repair job
        scheduler.add_job(interface_check, 'cron', hour='2', minute='05',
                          id='interface_check', replace_existing=True)
        # Start all scheduled tasks
        scheduler.start()
        print(scheduler.get_jobs())
        print(scheduler.state)
    except Exception as e:
        print(e)
        # On error, stop the scheduler
        scheduler.shutdown()
    logger.info('start scheduler task')
    print("=======================scheduler started==========================")
    logger.info('start task register, check on admin platform!')
    register_events(scheduler)
def main(): """MAIN """ log.info('######## Container Started ########') log.info("Looking for vacancies {} on {} ".format(str(DESIRED_TIMES), str(DESIRED_DAYS))) # setup container = Container() telegram_bot = Bot(container) ms = Microservice(telegram_bot, container) # run once after startup ms.run() # set cron schedule for any further runs, every minute, Mo-Fr, 9-5 scheduler = BackgroundScheduler() scheduler.add_job(ms.run, 'cron', day_of_week='0-4', hour='9-17', minute='*') scheduler.start() log.info("Next job scheduled: {}".format( scheduler.get_jobs()[0].next_run_time)) # keep container running forever or until telegram sets termination while container.run: time.sleep(1) scheduler.remove_all_jobs() log.warning('######## Container Shutdown ########') sys.exit(0)
def schedule_rescuetime_task():
    if os.environ.get('BEEHIVE_SCHEDULER'):
        print "schedule_rescuetime_task:", "Scheduler running!"
        return
    else:
        os.environ['BEEHIVE_SCHEDULER'] = 'true'
        print "schedule_rescuetime_task:", "Start scheduler!"
        print "###############################################################################################"
        try:
            # Sanity check to avoid database data duplication
            date_yesterday = date.today() - timedelta(days=2)
            count_rows = RescuetimeData.query.filter_by(created_date=date_yesterday).count()
        except:
            print "schedule_rescuetime_task:", "rescuetime_data table not found!"
            print "schedule_rescuetime_task:", "FAILED - could not schedule task!"
            return

        store_rescuetime_data()

        # Schedule every 6 hours to ensure fault tolerance
        interval = 60 * 60 * 6
        print "###############################################################################################"
        print "                            RescueTime Job Scheduled"
        print "###############################################################################################"
        scheduler = BackgroundScheduler()
        scheduler.start()
        scheduler.remove_all_jobs()
        scheduler.add_job(func=store_rescuetime_data,
                          trigger=IntervalTrigger(seconds=interval),
                          id='rescuetime_job',
                          name='store_rescuetime_data',
                          replace_existing=True)
        # Shut down the scheduler when exiting the app
        atexit.register(lambda: scheduler.shutdown())
        atexit.register(lambda: scheduler_setenv())
class AutoCyclingDisplayController(CyclableDisplayController):
    """
    Display controller that auto cycles through images.
    """
    def __init__(
            self,
            driver: DisplayDriver,
            image_store: ImageStore,
            identifier: Optional[str] = None,
            image_transformers: Sequence[ImageTransformer] = (),
            cycle_image_after_seconds: float = DEFAULT_SECONDS_BETWEEN_CYCLE):
        """
        Constructor.
        :param driver: see `CyclableDisplayController.__init__`
        :param image_store: see `CyclableDisplayController.__init__`
        :param identifier: see `CyclableDisplayController.__init__`
        :param image_transformers: see `CyclableDisplayController.__init__`
        :param cycle_image_after_seconds: the number of seconds before cycling on to the next image
        """
        super().__init__(driver, image_store, identifier, image_transformers)
        self.cycle_image_after_seconds = cycle_image_after_seconds
        self._scheduler = BackgroundScheduler()

    def start(self):
        if self._scheduler.state != STATE_RUNNING:
            self._scheduler.start()
            self._scheduler.add_job(self.display_next_image, "interval",
                                    seconds=self.cycle_image_after_seconds)

    def stop(self):
        self._scheduler.remove_all_jobs()
        try:
            self._scheduler.pause()
        except SchedulerNotRunningError:
            pass
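A usage sketch for the controller above; ExampleDriver and ExampleImageStore stand in for concrete DisplayDriver / ImageStore implementations from the surrounding project.

# Usage sketch; ExampleDriver and ExampleImageStore are hypothetical
# implementations of the project's DisplayDriver / ImageStore interfaces.
controller = AutoCyclingDisplayController(
    ExampleDriver(), ExampleImageStore(), cycle_image_after_seconds=10.0)
controller.start()  # begins cycling images on an interval job
# ... later ...
controller.stop()   # clears the job and pauses the scheduler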
def test3(): """定时执行任务,暂停,恢复""" start_time = time.time() scheduler = BackgroundScheduler() scheduler.add_job(my_job, 'interval', args=('123',),seconds=1, id='my_job_id') # 每隔1秒执行一次my_job函数,args为函数my_job的输入参数;id:可省略; scheduler.start() # 程序运行到这里,任务没有运行完也会往后执行,既执行后面的任务,又执行这个任务。 print('运行到了这里1') while (scheduler.state): if time.time() - start_time >5: print('暂停作业') scheduler.pause() # 暂停作业: break print('恢复作业') if time.time() - start_time >5: scheduler.resume() # time.sleep(4) print('当前任务列表:{}'.format(scheduler.get_jobs())) # 获得调度作业的列表,可以使用 get_jobs() 来完成,它会返回所有的job实例 scheduler.get_job('my_job_id') # 获取id为my_job_id的作业实例 scheduler.print_jobs() # 输出所有格式化的作业列表。 print('移除作业') # scheduler.remove_job('my_job_id') # 移除id为my_job_id的作业 scheduler.remove_all_jobs() # 移除所有的作业
class Scheduler(ABC):
    def __init__(self, signalController, logger: Logger):
        self.logger = logger
        self.signalController = signalController
        self.ntcipBackupTime_Sec = signalController.ntcipBackupTime_sec

        # Scheduler parameters
        self.backgroundScheduler = BackgroundScheduler()
        self.backgroundScheduler.start()

        # Ensure that the scheduler shuts down when the app exits
        atexit.register(lambda: self.stopBackgroundScheduler())

    def stopBackgroundScheduler(self):
        """
        stopBackgroundScheduler first clears all jobs from the backgroundScheduler,
        and then shuts it down. This function is intended to run at exit.
        """
        # Clear all jobs from the BackgroundScheduler
        self.backgroundScheduler.remove_all_jobs()
        # Shut down the background scheduler
        self.backgroundScheduler.shutdown(wait=False)
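A minimal sketch of a concrete subclass of the abstract Scheduler above; the sendBackupCommand job and the interval derived from ntcipBackupTime_Sec are assumptions for illustration.

# Minimal sketch of a concrete subclass; sendBackupCommand and the
# half-timeout interval are assumptions, not part of the original class.
class BackupScheduler(Scheduler):
    def __init__(self, signalController, logger: Logger):
        super().__init__(signalController, logger)
        # Refresh the NTCIP backup timer well before it expires.
        self.backgroundScheduler.add_job(
            self.sendBackupCommand, 'interval',
            seconds=self.ntcipBackupTime_Sec / 2)

    def sendBackupCommand(self):
        self.logger.info("Refreshing NTCIP backup timer")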
class CronManager:
    def __init__(self, use_mongo_db=True):
        self.scheduler = BackgroundScheduler(timezone=shanghai_tz)
        self.scheduler.configure()
        if use_mongo_db:
            self.job_store = MongoDBJobStore(database='apscheduler', collection='cronTab', client=db)
            self.scheduler.add_jobstore(self.job_store)
            self.is_replace_existing = True
        else:
            self.is_replace_existing = False

    def add_cron(self, cron_instance):
        if not isinstance(cron_instance, Cron):
            raise TypeError('please add correct cron!')
        if cron_instance.trigger_type == 'interval':
            seconds = cron_instance.trigger_args.get('seconds')
            if not isinstance(seconds, int) and not common.can_convert_to_int(seconds):
                raise TypeError('Please supply a valid interval!')
            seconds = int(seconds)
            if seconds <= 0:
                raise TypeError('Please supply an interval greater than 0!')
            job = self.scheduler.add_job(
                func=cron_instance.cron_mission,
                trigger=cron_instance.trigger_type,
                seconds=seconds,
                replace_existing=self.is_replace_existing,
                coalesce=True,
                id=cron_instance.get_id(),
                max_instances=5,
                jitter=0)  # oddly, no args are needed when adding the job; the bound method carries its own state
        elif cron_instance.trigger_type == 'date':
            run_date = cron_instance.trigger_args.get('run_date')
            # TODO validate the type of run_date
            job = self.scheduler.add_job(
                func=cron_instance.cron_mission,
                trigger=cron_instance.trigger_type,
                run_date=run_date,
                replace_existing=self.is_replace_existing,
                coalesce=True,
                id=cron_instance.get_id())  # oddly, no args are needed when adding the job; the bound method carries its own state
        elif cron_instance.trigger_type == 'cron':
            raise TypeError("trigger_type 'cron' is not supported yet")
        return cron_instance.get_id()

    def start(self, paused=False):
        self.scheduler.start(paused=paused)

    def pause_cron(self, cron_id=None, pause_all=False):
        if pause_all:
            self.scheduler.pause()
        elif cron_id:
            self.scheduler.pause_job(job_id=cron_id)

    def resume_cron(self, cron_id=None, resume_all=False):
        if resume_all:
            self.scheduler.resume()
        elif cron_id:
            self.scheduler.resume_job(job_id=cron_id)

    def del_cron(self, cron_id=None, del_all=False):
        if del_all:
            self.scheduler.remove_all_jobs()
        elif cron_id:
            self.scheduler.remove_job(job_id=cron_id)

    def update_cron(self, cron_id, cron_info):
        if not isinstance(cron_id, str):
            raise TypeError('cron_id must be str')
        if not isinstance(cron_info, dict):
            raise TypeError('cron_info must be dict')
        trigger_type = cron_info.get('triggerType')
        interval = cron_info.get('interval')
        run_date = cron_info.get('runDate')
        test_case_suite_id_list = cron_info.get('testCaseSuiteIdList')
        is_execute_forbiddened_case = cron_info.get('isExecuteForbiddenedCase')
        test_case_id_list = cron_info.get('testCaseIdList')
        test_domain = cron_info.get('testDomain')
        global_vars_id = cron_info.get('globalVarsId')
        alarm_mail_list = cron_info.get('alarmMailList')
        is_ding_ding_notify = cron_info.get('isDingDingNotify')
        ding_ding_access_token = cron_info.get('dingdingAccessToken')
        ding_ding_notify_strategy = cron_info.get('dingdingNotifyStrategy')
        is_enterprise_wechat_notify = cron_info.get('isEnterpriseWechatNotify')
        enterprise_wechat_access_token = cron_info.get('enterpriseWechatAccessToken')
        enterprise_wechat_notify_strategy = cron_info.get('enterpriseWechatNotifyStrategy')
        cron_name = cron_info.get('name')
        try:
            if trigger_type == 'interval' and int(interval) > 0:
                self.scheduler.modify_job(job_id=cron_id, trigger=IntervalTrigger(seconds=interval))
            elif trigger_type == 'date':
                # TODO validate the type of run_date
                self.scheduler.modify_job(job_id=cron_id, trigger=DateTrigger(run_date=run_date))
            else:
                raise TypeError('Failed to update the trigger of the scheduled task!')
            if run_date:
                cron = Cron(
                    test_case_suite_id_list=test_case_suite_id_list,
                    is_execute_forbiddened_case=is_execute_forbiddened_case,
                    test_domain=test_domain,
                    global_vars_id=global_vars_id,
                    alarm_mail_list=alarm_mail_list,
                    is_ding_ding_notify=is_ding_ding_notify,
                    ding_ding_access_token=ding_ding_access_token,
                    ding_ding_notify_strategy=ding_ding_notify_strategy,
                    is_enterprise_wechat_notify=is_enterprise_wechat_notify,
                    enterprise_wechat_access_token=enterprise_wechat_access_token,
                    enterprise_wechat_notify_strategy=enterprise_wechat_notify_strategy,
                    trigger_type=trigger_type,  # has no real effect when updating; only changes the displayed field
                    test_case_id_list=test_case_id_list,
                    run_date=run_date,  # has no real effect when updating; only changes the displayed field
                    cron_name=cron_name)
            else:
                cron = Cron(
                    test_case_suite_id_list=test_case_suite_id_list,
                    is_execute_forbiddened_case=is_execute_forbiddened_case,
                    test_domain=test_domain,
                    global_vars_id=global_vars_id,
                    alarm_mail_list=alarm_mail_list,
                    is_ding_ding_notify=is_ding_ding_notify,
                    ding_ding_access_token=ding_ding_access_token,
                    ding_ding_notify_strategy=ding_ding_notify_strategy,
                    is_enterprise_wechat_notify=is_enterprise_wechat_notify,
                    enterprise_wechat_access_token=enterprise_wechat_access_token,
                    enterprise_wechat_notify_strategy=enterprise_wechat_notify_strategy,
                    trigger_type=trigger_type,  # has no real effect when updating; only changes the displayed field
                    test_case_id_list=test_case_id_list,
                    seconds=interval,  # has no real effect when updating; only changes the displayed field
                    cron_name=cron_name)
            # Oddly, when modifying a job you must update args rather than func
            self.scheduler.modify_job(job_id=cron_id, coalesce=True, args=[cron])
        except BaseException as e:
            raise TypeError('Failed to update the scheduled task: %s' % e)

    def shutdown(self, force_shutdown=False):
        if force_shutdown:
            self.scheduler.shutdown(wait=False)
        else:
            self.scheduler.shutdown(wait=True)

    def get_crons(self):
        return self.scheduler.get_jobs()
class TestSchedulerListener(unittest.TestCase):
    def setUp(self):
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_jobstore(MemoryJobStore(), alias='in_memory')
        self.scheduler.add_executor(ThreadPoolExecutor(1), alias='secondary_executor')
        self.scheduler.start()

    def tearDown(self):
        self.scheduler.shutdown()

    def test_watcher_injection(self):
        watcher = SchedulerWatcher(self.scheduler)
        self.assertEqual(watcher.scheduler, self.scheduler, 'Watcher should keep a reference to the scheduler')
        self.assertEqual(1, len(self.scheduler._listeners), 'Watcher should inject itself as a scheduler listener')
        self.assertEqual(
            self.scheduler._listeners[0][1], EVENT_ALL, 'Watcher should register itself to watch all events'
        )

    def test_scheduler_inspection(self):
        self.scheduler.add_job(lambda: 0, jobstore='in_memory', trigger='interval', minutes=60, id='test_job')
        watcher = SchedulerWatcher(self.scheduler)
        self.assertEqual('running', watcher.scheduler_info['state'], 'Watcher should inspect scheduler status')
        self.assertEqual(
            str(self.scheduler.timezone), watcher.scheduler_info['timezone'],
            'Watcher should inspect scheduler timezone'
        )
        self.assertEqual(
            'BackgroundScheduler', watcher.scheduler_info['class'], 'Watcher should inspect scheduler class'
        )
        self.assertEqual(2, len(watcher.jobstores), 'Watcher should inspect all scheduler jobstores')
        self.assertIn('in_memory', watcher.jobstores, 'Watcher should have inspected the in_memory jobstore')
        self.assertEqual(2, len(watcher.executors), 'Watcher should inspect all scheduler executors')
        self.assertIn('secondary_executor', watcher.executors, 'Watcher should have inspected the secondary_executor')
        self.assertEqual(1, len(watcher.jobs), 'Watcher should inspect all jobs in scheduler on init')
        self.assertIn('test_job', watcher.jobs, 'Watcher should index jobs by id')

    def test_job_properties_on_add(self):
        watcher = SchedulerWatcher(self.scheduler)
        self.scheduler.add_job(
            lambda x, y: x + y, id='added_job', name='Added job', jobstore='in_memory',
            trigger='interval', minutes=60, args=(1,), kwargs={'y': 2}
        )
        self.assertIn('added_job', watcher.jobs)
        job_properties = watcher.jobs['added_job']['properties']
        self.assertEqual('added_job', job_properties['id'], 'Job properties should have the job id')
        self.assertEqual('Added job', job_properties['name'], 'Job properties should have the job name')
        self.assertIn('trigger', job_properties, 'Job properties should have a representation of the trigger')
        self.assertEqual('in_memory', job_properties['jobstore'], 'Job properties should have the jobstore name')
        self.assertEqual('default', job_properties['executor'], 'Job properties should have the executor name')
        self.assertIn('lambda', job_properties['func'], 'Job properties should have the function string repr')
        self.assertIn('func_ref', job_properties, 'Job properties should have the function reference')
        self.assertEqual('(1,)', job_properties['args'], 'Job properties should have the job arguments')
        self.assertEqual("{'y': 2}", job_properties['kwargs'], 'Job properties should have the job keyword arguments')
        self.assertIn('pending', job_properties, 'Job properties should have the job pending status')
        self.assertFalse(job_properties['pending'], 'Job status should not be pending')
        self.assertIn('coalesce', job_properties, 'Job properties should have the job coalesce configuration')
        self.assertIn('next_run_time', job_properties, 'Job properties should have the next run time calculated')
        self.assertIn('misfire_grace_time', job_properties, 'Job properties should have the misfire grace time')
        self.assertIn('max_instances', job_properties, 'Job properties should have the max instances configuration')

    def test_job_inspection_matches_job_added_event(self):
        # We're going to add two jobs that should have the exact same properties, except for the id, in two different
        # stages of the usage: before the watcher is created and after we start watching for events.
        def job_function(x, y):
            return x + y

        next_run_time = datetime.now() + timedelta(hours=1)

        # Job that is added before the user calls us.
        self.scheduler.add_job(
            job_function, id='job_1', name='Added job', jobstore='in_memory', trigger='interval',
            minutes=60, args=(1,), kwargs={'y': 2}, next_run_time=next_run_time
        )

        watcher = SchedulerWatcher(self.scheduler)

        # Job that gets added after we start watching.
        self.scheduler.add_job(
            job_function, id='job_2', name='Added job', jobstore='in_memory', trigger='interval',
            minutes=60, args=(1,), kwargs={'y': 2}, next_run_time=next_run_time
        )

        self.assertEqual(2, len(watcher.jobs))
        job_1 = watcher.jobs['job_1']
        job_2 = watcher.jobs['job_2']

        for property_name in job_1['properties'].keys():
            # All properties, except the id, should match.
            if property_name == 'id':
                continue
            self.assertEqual(job_1['properties'][property_name], job_2['properties'][property_name])

    def test_all_events_have_a_processing_method(self):
        for event_name in list(SchedulerWatcher.apscheduler_events.values()):
            self.assertIn(event_name, dir(SchedulerWatcher))

    def test_job_execution_monitoring(self):
        watcher = SchedulerWatcher(self.scheduler)
        self.scheduler.add_job(
            lambda: time.sleep(0.02), id='waiting_job', name='Waiting job', jobstore='in_memory',
            trigger='interval', seconds=0.2, next_run_time=datetime.now()
        )
        job_events = watcher.jobs['waiting_job']['events']
        self.assertEqual(1, len(job_events))
        self.assertEqual('job_added', job_events[0]['event_name'])
        time.sleep(0.05)
        self.assertEqual(3, len(job_events), 'Job execution needs to be tracked in job events')
        self.assertEqual(
            'job_submitted', job_events[1]['event_name'], 'Job submission needs to be tracked in job events'
        )
        self.assertEqual('job_executed', job_events[2]['event_name'], 'Job execution needs to be tracked in job events')
        time.sleep(0.2)
        self.assertEqual(5, len(job_events), 'Subsequent executions get tracked')

    def test_job_failure_monitoring(self):
        watcher = SchedulerWatcher(self.scheduler)

        def fail():
            time.sleep(0.02)
            return 0 / 0

        self.scheduler.add_job(
            fail, id='failing_job', name='Failing job', jobstore='in_memory',
            trigger='interval', next_run_time=datetime.now(), minutes=60
        )
        failing_job_events = watcher.jobs['failing_job']['events']
        time.sleep(0.05)
        self.assertEqual(3, len(failing_job_events))
        self.assertEqual('job_error', failing_job_events[2]['event_name'])

    def test_scheduler_summary(self):
        watcher = SchedulerWatcher(self.scheduler)
        summary = watcher.scheduler_summary()
        self.assertEqual(sorted(['scheduler', 'jobs', 'executors', 'jobstores']), sorted(summary.keys()))
        self.assertEqual('running', summary['scheduler']['state'], 'scheduler_summary should have the scheduler status')
        self.assertEqual(2, len(summary['executors']), 'scheduler_summary should have the two added executors')
        self.assertEqual(2, len(summary['jobstores']), 'scheduler_summary should have the two jobstores')
        self.assertEqual(0, len(summary['jobs']), 'scheduler_summary should have no jobs')
        self.scheduler.add_job(lambda: 0, id='job_1')
        summary = watcher.scheduler_summary()
        self.assertIn('job_1', summary['jobs'], 'scheduler_summary should have the added jobs in it')
        self.scheduler.remove_job('job_1')
        summary = watcher.scheduler_summary()
        self.assertIn('job_1', summary['jobs'], 'scheduler_summary should have all jobs in it, even if job was removed')

    def test_removed_jobs_are_only_flagged_as_removed(self):
        self.scheduler.add_job(lambda: 0, id='a_job')
        watcher = SchedulerWatcher(self.scheduler)
        self.assertIn('a_job', watcher.jobs)
        self.assertIsNone(watcher.jobs['a_job']['removed_time'])
        self.scheduler.remove_job('a_job')
        self.assertIn('a_job', watcher.jobs, 'removed jobs should be still tracked in the scheduler watcher')
        self.assertIsNotNone(watcher.jobs['a_job']['removed_time'], 'removed_time should be set')

    def test_modified_job_properties_are_tracked(self):
        self.scheduler.add_job(
            lambda x, y: x + y, id='a_job', name='A job', jobstore='in_memory',
            trigger='interval', minutes=60, args=(1,), kwargs={'y': 2}
        )
        watcher = SchedulerWatcher(self.scheduler)
        self.assertEqual(watcher.jobs['a_job']['modified_time'], watcher.jobs['a_job']['added_time'])
        next_run_time = watcher.jobs['a_job']['properties']['next_run_time'][0]
        self.scheduler.modify_job('a_job', name='A modified job', next_run_time=datetime.now() + timedelta(days=1))
        self.assertGreater(watcher.jobs['a_job']['modified_time'], watcher.jobs['a_job']['added_time'])
        self.assertEqual('A modified job', watcher.jobs['a_job']['properties']['name'])
        self.assertGreater(watcher.jobs['a_job']['properties']['next_run_time'][0], next_run_time)

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_jobstore_event')
    def test_removing_a_jobstore_removes_all_jobs(self, mock_notify_jobstore_event):
        watcher = SchedulerWatcher(self.scheduler)
        self.scheduler.add_job(lambda: 0, id='job_1', jobstore='in_memory', trigger='interval', minutes=60)
        self.scheduler.add_job(lambda: 0, id='job_2', jobstore='in_memory', trigger='interval', minutes=60)
        self.assertEqual(2, len(watcher.jobs))
        self.assertIsNone(watcher.jobs['job_1']['removed_time'], 'job_1 removed time should be None')
        self.assertEqual('in_memory', watcher.jobs['job_1']['properties']['jobstore'])
        self.scheduler.remove_jobstore('in_memory')
        mock_notify_jobstore_event.assert_called()
        self.assertEqual(2, len(watcher.jobs), 'The amount of jobs after removing a jobstore should not change')
        self.assertIsNotNone(watcher.jobs['job_1']['removed_time'], 'job_1 removed time should be set')
        self.assertIsNotNone(watcher.jobs['job_2']['removed_time'], 'job_2 removed time should be set')

    @patch('apschedulerui.watcher.SchedulerWatcher._repr_job')
    @patch('apschedulerui.watcher.SchedulerWatcher.notify_job_event')
    @patch('apschedulerui.watcher.SchedulerWatcher.notify_jobstore_event')
    def test_adding_a_jobstore_adds_all_jobs_in_it(self, mock_notify_jobstore_event, mock_notify_job_event, _):
        watcher = SchedulerWatcher(self.scheduler)
        jobstore = MemoryJobStore()
        jobstore.add_job(Job(scheduler=self.scheduler, id='job_1', next_run_time=datetime.now() + timedelta(days=1)))
        jobstore.add_job(Job(scheduler=self.scheduler, id='job_2', next_run_time=datetime.now() + timedelta(days=2)))
        self.assertEqual(0, len(watcher.jobs))
        self.scheduler.add_jobstore(jobstore, alias='in_memory_2')
        self.assertIn('in_memory_2', watcher.jobstores, 'Watcher should have the new jobstore tracked')
        self.assertEqual(2, len(watcher.jobs), 'Watcher should add all jobs in the newly added jobstore')
        self.assertTrue(all([job_id in watcher.jobs for job_id in ['job_1', 'job_2']]))
        self.assertEqual(2, mock_notify_job_event.call_count)
        mock_notify_jobstore_event.assert_called_once()

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_job_event')
    def test_removing_all_jobs_flags_all_as_removed(self, mock_notify_job_event):
        watcher = SchedulerWatcher(self.scheduler)
        self.scheduler.add_job(lambda: 0, id='job_1', jobstore='default', trigger='interval', minutes=60)
        self.scheduler.add_job(lambda: 0, id='job_2', jobstore='in_memory', trigger='interval', minutes=60)
        self.assertEqual(2, len(watcher.jobs))
        self.assertEqual(2, mock_notify_job_event.call_count)
        mock_notify_job_event.reset_mock()
        self.scheduler.remove_all_jobs()
        self.assertEqual(2, len(watcher.jobs), 'job count should not change after removing all jobs')
        self.assertEqual(2, mock_notify_job_event.call_count)

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_executor_event')
    def test_adding_and_removing_executors(self, mock_notify_executor_event):
        watcher = SchedulerWatcher(self.scheduler)
        self.scheduler.add_executor(ThreadPoolExecutor(), alias='new_executor')
        self.assertIn('new_executor', watcher.executors)
        mock_notify_executor_event.assert_called()
        mock_notify_executor_event.reset_mock()
        self.scheduler.remove_executor('new_executor')
        self.assertNotIn('new_executor', watcher.executors)
        mock_notify_executor_event.assert_called()

    def test_job_event_history_is_limited(self):
        watcher = SchedulerWatcher(self.scheduler, max_events_per_job=4)
        self.scheduler.add_job(lambda: 0, trigger='interval', seconds=0.01, id='recurrent_job')
        time.sleep(0.1)
        # recurrent_job should have been executed ~10 times now, generating ~20 events (submission + execution).
        self.assertEqual(
            watcher.max_events_per_job, len(watcher.jobs['recurrent_job']['events']),
            'job event history should be limited'
        )
class TempChecker:
    def __init__(self, server: ServerInterface):
        self.task = BackgroundScheduler()
        self.cmd_server = server
        self.warn_start = False
        self.task_start = False
        self.count = 0
        self.show_msg = False
        self.now_time = 0

    def warning_temp(self):
        self.warn_start = True
        self.task.remove_job("loop")
        self.add_task(1)

    def add_task(self, num):
        if num == 0:
            use_trigger = norm_trigger
            loop_id = "loop"
        else:
            use_trigger = warn_trigger
            loop_id = "warn_loop"
        self.task.add_job(self.cal_temp, trigger=use_trigger, id=loop_id, replace_existing=True)

    def run_warn(self):
        self.cal_temp(3)

    def warning_temp_stop(self):
        self.task.remove_job("warn_loop")
        self.warn_start = False
        self.add_task(0)

    def open_schedule(self):
        self.count = 0
        self.task = BackgroundScheduler()
        self.add_task(0)
        self.start_on()

    def start_on(self):
        self.task_start = True
        self.task.start()

    def avg_temp(self, num, temp, src: CommandSource = None):
        packet = 0
        cnt = 0
        avg = 0
        # Throttle chat output to at most once a minute.
        if int(datetime.now().strftime("%Y%m%d%H%M%S")) - self.now_time > 60:
            self.show_msg = True
            self.now_time = int(datetime.now().strftime("%Y%m%d%H%M%S"))
        else:
            self.show_msg = False
        if "k10temp" in temp:
            for i in range(0, len(temp["k10temp"])):
                if num == 1:
                    if self.show_msg:
                        print_msg(temp["k10temp"][i][0] + " : " + temp_color(temp["k10temp"][i][1]), num, src=src)
                    else:
                        src.reply(temp["k10temp"][i][0] + " : " + temp_color(temp["k10temp"][i][1]))
                avg = avg + temp["k10temp"][i][1]
                cnt = cnt + 1
            return avg / cnt
        for i in range(0, len(temp["coretemp"])):
            if temp["coretemp"][i][0].startswith("Package id"):
                if packet == 0:
                    packet = 1
                else:
                    break
            if num == 1:
                if self.show_msg:  # was a bare `show_msg`, which would raise NameError
                    print_msg(temp["coretemp"][i][0] + " : " + temp_color(temp["coretemp"][i][1]), num, src=src)
                else:
                    src.reply(temp["coretemp"][i][0] + " : " + temp_color(temp["coretemp"][i][1]))
            avg = avg + temp["coretemp"][i][1]
            cnt = cnt + 1
        return avg / cnt

    def cal_temp(self, num=0, src: CommandSource = None):
        temp = psutil.sensors_temperatures()
        try:
            avg_temp = self.avg_temp(num, temp, src)
        except KeyError:
            if src is not None:
                src.reply(error + tr("error_temperature"))
            return
        temp_msg = tr("average_temp") + temp_color(round(avg_temp, 2))
        if "k10temp" in temp:
            tempe = temp["k10temp"][0][1]
        else:
            tempe = temp["coretemp"][0][1]
        high_temp_msg = tr("highest_temp") + temp_color(tempe)
        if num == 0:
            if self.count >= show_freq:
                out_log(temp_msg)
                out_log(high_temp_msg)
                self.count = 0
            else:
                self.count += 1
        elif num != 3:
            if self.show_msg:  # was a bare `show_msg`, which would raise NameError
                print_msg(temp_msg, 1, src=src)
                print_msg(high_temp_msg, 1, src=src)
            else:
                src.reply(temp_msg)
                src.reply(high_temp_msg)
        if num == 0 or num == 3:
            if tempe > warning_degree:
                if not self.warn_start:
                    self.warning_temp()
                print_msg(
                    temp_msg + "\n" + high_temp_msg +
                    rtext_cmd(" §e[❗]§r", f"{tr('click_msg')} {tr('info')}", "!!temp show") +
                    rtext_cmd(f"§c [{tr('restart_raw')}]§r", f"{tr('click_msg')} {tr('restart')}", "!!temp restart") +
                    rtext_cmd(f"§6===={tr('click_msg')} {tr('restart')}====§r", f"{tr('click_msg')} {tr('restart')}", "!!temp restart") +
                    rtext_cmd(f"§e===={tr('click_msg')} {tr('info')}====§r", f"{tr('click_msg')} {tr('information')}", "!!temp show"),
                    0, src=src, server=self.cmd_server
                )
            else:
                if self.warn_start:
                    print_msg(temp_msg, 0, src=src, server=self.cmd_server)
                    print_msg(high_temp_msg, 0, src=src, server=self.cmd_server)
                    self.warning_temp_stop()

    def stop(self):
        self.task_start = False
        self.task.remove_all_jobs()
        self.task.shutdown()
        out_log(SYSTEM_RETURN + tr("cycle_stop"))
def add_schedule_backup_job():
    # if __name__ == '__main__':
    os_user = config.OS_USER
    os_password = config.OS_APPS_PASSWD
    scheduler = BackgroundScheduler()  # the default jobstore is in-memory
    url = "sqlite:////home/apps/dbajob.sqlite"
    scheduler.add_jobstore("sqlalchemy", url=url, alias="sqlite_js")
    scheduler.print_jobs()
    print "a"
    scheduler.remove_all_jobs(jobstore="sqlite_js")
    scheduler.print_jobs()
    print "remove"
    # v_current_jobs = scheduler.get_jobs()
    # print v_current_jobs
    # if v_current_jobs:  # if the job already exists, remove it first
    #     scheduler.remove_job('backup')

    # Connect to the configuration database and fetch the backup interval settings.
    db = Connection("/tmp/mysql3306.sock", config.DB_NAME, config.DB_USER,
                    config.DB_PASSWD, time_zone="+8:00")
    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time
                from mysql_ins_bak_setup a,tag b where a.instance_id=b.id """
    print v_sql
    bak_server_list = db.query(v_sql)
    if bak_server_list:  # there are servers to configure
        i = 0
        # Mark schedules that never started as manually ended (backup_result_type=4).
        v_manual_end_sql = "update mysql_ins_bak_log set backup_result_type=4 where backup_result_type=0"
        db.execute(v_manual_end_sql)
        for bak_server in bak_server_list:
            instance_id = bak_server["instance_id"]
            from_host = bak_server["ip"]
            # print from_host
            mysql_port = bak_server["port"]
            backup_interval_type = bak_server["backup_interval_type"]
            backup_start_time = bak_server["backup_start_time"]
            str_start_date = time.strftime("%Y-%m-%d") + " " + backup_start_time
            print str_start_date
            v_job_id = "backup_%s_%s" % (from_host, str(mysql_port))
            if backup_interval_type == 1:  # every day
                # scheduler.add_interval_job(backup, days=1, start_date=str_start_date,
                #     args=[from_host, mysql_port, os_user, os_password], jobstore='file')
                scheduler.add_job(backup, "interval", id=v_job_id, days=1,
                                  start_date=str_start_date,
                                  args=[from_host, mysql_port, os_user, os_password],
                                  replace_existing=True, jobstore="sqlite_js")
            elif backup_interval_type == 2:  # every week: weeks=1
                scheduler.add_job(backup, "interval", id=v_job_id, weeks=1,
                                  start_date=str_start_date,
                                  args=[from_host, mysql_port, os_user, os_password],
                                  replace_existing=True, jobstore="sqlite_js")
            elif backup_interval_type == 3:  # every hour: hours=1
                scheduler.add_job(backup, "interval", id=v_job_id, hours=1,
                                  start_date=str_start_date,
                                  args=[from_host, mysql_port, os_user, os_password],
                                  replace_existing=True, jobstore="sqlite_js")
            else:
                pass
            # Record the schedule state in the database.
            # 0: the scheduled task has started but the actual backup has not yet begun.
            v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type)
                                  values(%d,0)""" % (instance_id)
            db.execute(v_sche_start_sql)
            i = i + 1
    scheduler.print_jobs()
    print "b"
    scheduler.start()
    scheduler.print_jobs()
    print "c"
    db.close()
class StupidAlg(IncentiveAlgorithm):
    def __init__(self, request):
        self.s = sched.scheduler(time.time, time.sleep)
        self.owner = request.user
        self.incentvesId = []
        self.usersId = []
        self.sched = BackgroundScheduler()

    def getAllIncentiveRagted(self, request):
        """
        Return all the incentive IDs, ordered from the top to the lowest.
        :param request: GET
        :return: list of incentive IDs
        """
        return self.incentvesId

    def getIncentiveForUser(self, request, userID):
        """
        Give the best incentive for a specific user.
        :param request: GET
        :param userID: a user ID from the data set
        :return: incentive ID
        """
        return self.incentvesId[0]

    def getTheBestIncentive(self, request):
        """
        The best incentive for the whole data set: what was the best of all.
        :param request: GET
        :return: incentive ID
        """
        return self.incentvesId[0]

    def start(self, request, *args, **kwargs):
        """
        Start the algorithm with the given incentives and data set.
        :param request: POST
        :return: Success if everything is working and Error if not
        """
        IdList = getIncentiveID(self, request.owner)
        self.incentvesId = sorted(IdList)

    def init(self, request):
        # self.sched.add_job(self.start(request), 'interval', minutes=2)
        self.sched.add_job(lambda: self.start(request), 'interval',
                           id="start", name="start", minutes=60)
        self.start(request)
        # Start the scheduler in place; the original reassigned self.sched to
        # the return value of start(), which is None.
        self.sched.start()

    def clear(self, request, *args, **kwargs):
        """
        Clear all the information about this data set.
        :return: Success
        """
        if request.user == self.owner:
            self.sched.remove_all_jobs()
            self.usersId = []
            self.incentvesId = []
class isardScheduler():
    def __init__(self):
        ''' JOB SCHEDULER '''
        self.rStore = RethinkDBJobStore()
        self.scheduler = BackgroundScheduler(timezone=pytz.timezone('UTC'))
        self.scheduler.add_jobstore('rethinkdb', self.rStore, database='isard',
                                    table='scheduler_jobs',
                                    host=app.config['RETHINKDB_HOST'],
                                    port=app.config['RETHINKDB_PORT'],
                                    auth_key=app.config['RETHINKDB_AUTH'])
        self.scheduler.remove_all_jobs()
        # ~ scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()])
        # ~ app.sched.shutdown(wait=False)
        self.turnOn()

    def add_scheduler(self, kind, action, hour, minute):
        id = kind + '_' + action + '_' + str(hour) + str(minute)
        function = getattr(isardScheduler, action)
        if kind == 'cron':
            self.scheduler.add_job(function, kind, hour=int(hour), minute=int(minute),
                                   jobstore=self.rStore, replace_existing=True, id=id)
        if kind == 'interval':
            self.scheduler.add_job(function, kind, hours=int(hour), minutes=int(minute),
                                   jobstore=self.rStore, replace_existing=True, id=id)
        if kind == 'date':
            alarm_time = datetime.now() + timedelta(hours=int(hour), minutes=int(minute))
            self.scheduler.add_job(function, kind, run_date=alarm_time,
                                   jobstore=self.rStore, replace_existing=True, id=id)
        with app.app_context():
            r.table('scheduler_jobs').get(id).update(
                {'kind': kind, 'action': action, 'name': action.replace('_', ' '),
                 'hour': hour, 'minute': minute}).run(db.conn)
        return True

    '''
    Scheduler actions.
    Note: these are looked up with getattr(isardScheduler, action) and scheduled
    as plain functions, which is why they take no self argument.
    '''
    def stop_domains():
        with app.app_context():
            r.table('domains').get_all('Started', index='status').update(
                {'status': 'Stopping'}).run(db.conn)

    def stop_domains_without_viewer():
        with app.app_context():
            r.table('domains').get_all('Started', index='status').filter(
                {'viewer': {'client_since': False}}).update({'status': 'Stopping'}).run(db.conn)

    def delete_old_stats(reduce_interval=300, delete_interval=86400):  # 24h
        with app.app_context():
            # domains_status
            r.table('domains_status_history').filter(
                r.row['when'] < int(time.time()) - delete_interval).delete().run(db.conn)
            reduced = []
            cursor = r.table('domains_status').filter(
                r.row['when'] < int(time.time()) - reduce_interval).order_by('when').run(db.conn)
            r.table('domains_status').filter(
                r.row['when'] < int(time.time()) - reduce_interval).delete().run(db.conn)
            i = 0
            for c in cursor:
                if i % 50 == 0:
                    reduced.append(c)
                i += 1
            r.table('domains_status_history').insert(reduced).run(db.conn)

            # Hypervisors_status
            r.table('hypervisors_status_history').filter(
                r.row['when'] < int(time.time()) - delete_interval).delete().run(db.conn)
            reduced = []
            cursor = r.table('hypervisors_status').filter(
                r.row['when'] < int(time.time()) - reduce_interval).order_by('when').run(db.conn)
            r.table('hypervisors_status').filter(
                r.row['when'] < int(time.time()) - reduce_interval).delete().run(db.conn)
            i = 0
            for c in cursor:
                if i % 50 == 0:
                    reduced.append(c)
                i += 1
            r.table('hypervisors_status_history').insert(reduced).run(db.conn)

            # Hypervisors_events (does not grow at the same speed)
            r.table('hypervisors_events').filter(
                r.row['when'] < int(time.time()) - delete_interval).delete().run(db.conn)

    def turnOff(self):
        self.scheduler.shutdown()

    def turnOn(self):
        self.scheduler.start()

    def removeJobs(self):
        self.scheduler.remove_all_jobs()

    ''' BULK ACTIONS '''
    def bulk_action(self, table, tbl_filter, tbl_update):
        with app.app_context():
            # Log and apply the passed-in filter/update (the original referenced
            # the builtins `filter` and `update` here by mistake).
            log.info('BULK ACTION: Table {}, Filter {}, Update {}'.format(table, tbl_filter, tbl_update))
            r.table(table).filter(tbl_filter).update(tbl_update).run(db.conn)
def myjob(c):
    # Tail of the collector job; the snippet starts mid-function, so the
    # enclosing def and the `data` initialization are reconstructed from the
    # cronstart() call below. `host` comes from the elided surrounding context.
    data = {}
    data['analyzer'] = c['analyzer']
    data['host'] = host
    data['date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    data['param'] = c['param']
    r.publish('talos:q:cmd', json.dumps(data))

def cronstart(configPath):
    try:
        json_config = open(configPath, 'r')
        jsons = json.loads(json_config.read())
    except Exception, e:
        print 'cmd collector config Error!!!'
        print e
        return
    scheduler.remove_all_jobs()
    for c in jsons:
        cron = c['time'].split(' ')
        job = scheduler.add_job(myjob, args=[c], trigger='cron',
                                year=cron[5], month=cron[4], day=cron[3],
                                hour=cron[2], minute=cron[1], second=cron[0])
    print 'add job finished.'

def init(msgQueue, talosPath, redis):
    global r
    r = redis
    cronstart(talosPath)
    scheduler.start()
    while True:
        time.sleep(1)
        msg = msgQueue.get()
        if msg == 'update':
            # Reload jobs from the config; the snippet ends here, so this body
            # is reconstructed from context.
            cronstart(talosPath)
class StationControl(object):
    # GPIO pins (BCM numbering). OSPi uses 4 pins for the shift register.
    clock_pin = 4
    out_pin = 17
    data_pin = 27
    latch_pin = 22

    def __init__(self, data_handler):
        self.data_handler = data_handler
        self.station_status = {station_id: False for station_id in data_handler.settings["active_stations"]}
        self.active_stations = data_handler.settings["active_stations"]
        self.num_stations = len(self.station_status)
        self.utc_timezone_offset = self.data_handler.settings["utc_timezone_offset"]
        self.timezone_name = self.data_handler.settings["timezone_name"]
        print("Operating in {} timezone ({})".format(self.timezone_name, self.utc_timezone_offset))
        self.bg_scheduler = BackgroundScheduler(timezone=utc)
        self.set_schedule(self.data_handler.get_schedule())
        if GPIO:
            atexit.register(self.cleanup)
            # GPIO.setwarnings(False)
            self.setup_gpio()

    def set_schedule(self, settings_json):
        self.bg_scheduler.remove_all_jobs()
        stations = settings_json["schedule"]
        for station in stations:
            station_id = int(station[-1])
            if station_id not in self.active_stations:
                continue
            for day in stations[station]:
                for start_time in stations[station][day]["start_times"]:
                    time_str = day + " " + start_time["time"] + " " + self.utc_timezone_offset
                    # Convert 12-hour time to a time-zone-aware datetime (24-hour UTC) for internal use
                    utc_time = timestr_to_utc(time_str)
                    fixed_duration = int(start_time["duration"])
                    print("Station {} will start at {} UTC for {} minutes".format(
                        station_id, utc_time, fixed_duration))
                    args = {
                        "datetime": str(utc_time).replace(" ", "T"),
                        "station": station_id,
                        "fixed_duration": fixed_duration,
                        "manual": 0,
                    }
                    self.bg_scheduler.add_job(self.water, "interval", days=7,
                                              start_date=utc_time, args=[args])
        # Start the scheduler if it's not already running
        if not self.bg_scheduler.state:
            self.bg_scheduler.start()

    def pause_schedule(self):
        print("Pausing schedule...")
        jobs_paused = 0
        for job in self.bg_scheduler.get_jobs():
            job.pause()
            print("Paused job {}".format(job))
            jobs_paused += 1
        return jobs_paused

    def resume_schedule(self):
        print("Resuming schedule...")
        for job in self.bg_scheduler.get_jobs():
            job.resume()
            print("Resumed job {}".format(job))
        print("Resumed schedule")

    def manual_watering(self, watering_request):
        # Pause the normal schedule
        jobs_paused = self.pause_schedule()
        start, last_duration_seconds = Arrow.utcnow(), 5
        start_buffer_seconds = 5
        # For every station, set a schedule for the duration specified; stations run serially
        for station, duration in watering_request.items():
            station_id = int(station)
            job_start = start.replace(seconds=last_duration_seconds)
            dt = job_start.format("YYYY-MM-DDTHH:mm:ssZZ").replace("-00:00", "+00:00")
            args = {"datetime": dt, "station": station_id, "fixed_duration": duration, "manual": 1}
            self.bg_scheduler.add_job(self.water, "date", run_date=job_start.datetime, args=[args])
            last_duration_seconds = duration * 60
        # Reschedule the original schedule after all stations have watered
        job_start = start.replace(seconds=last_duration_seconds + start_buffer_seconds)
        self.bg_scheduler.add_job(self.resume_schedule, "date", run_date=job_start.datetime)
        # Check that the schedule contains: paused jobs, manual watering jobs, and the extra job to resume paused jobs
        if len(self.bg_scheduler.get_jobs()) == (jobs_paused + len(watering_request) + 1):
            return True
        return False

    def set_station(self, station, signal):
        """
        Sets station [0,..., 7] to True or False (On | Off) in memory.
        Use set_shift_register_values() to activate GPIO
        """
        self.station_status[station] = signal

    def set_shift_register_values(self):
        """
        Activates GPIO based on self.station_status values
        """
        if not GPIO:
            print("Error: set_shift_register_values() doesn't have GPIO module")
            return
        GPIO.output(StationControl.clock_pin, False)
        GPIO.output(StationControl.latch_pin, False)
        for station in range(0, self.num_stations):
            GPIO.output(StationControl.clock_pin, False)
            GPIO.output(StationControl.data_pin, self.station_status[self.num_stations - 1 - station])
            GPIO.output(StationControl.clock_pin, True)
        GPIO.output(StationControl.latch_pin, True)

    def toggle_shift_register_output(self, value):
        if value:
            GPIO.output(StationControl.out_pin, False)
        else:
            GPIO.output(StationControl.out_pin, True)

    def setup_gpio(self):
        if not GPIO:
            print("Error: setup_gpio() doesn't have GPIO module")
            return
        # Set up GPIO pins to interface with the shift register
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(StationControl.clock_pin, GPIO.OUT)
        GPIO.setup(StationControl.out_pin, GPIO.OUT)
        self.toggle_shift_register_output(False)
        GPIO.setup(StationControl.data_pin, GPIO.OUT)
        GPIO.setup(StationControl.latch_pin, GPIO.OUT)
        self.set_shift_register_values()
        self.toggle_shift_register_output(True)
        self.reset_stations()
        print("GPIO setup successfully")

    def reset_stations(self):
        # Rebuild the status dict (the original assigned a list here, which
        # would break the dict-style lookups used elsewhere).
        self.station_status = {station_id: False for station_id in self.active_stations}

    def cleanup(self):
        self.reset_stations()
        GPIO.cleanup()

    def optimize_duration(self, fixed_duration):
        optimized, forecasted_temp, base_temp = fixed_duration, 70, 75
        # Call the data handler and get history for the last 7 days, incl. today:
        # if it rained today, don't water; return 0.
        # Else return int(forecasted_temp * (fixed_duration / avg_temp)).
        return optimized, forecasted_temp, base_temp

    def water(self, args):
        """
        The args parameter contains a dict with args that are necessary for watering (station, duration).
        Other args are for optimizing the duration (fixed_duration); the return values from
        optimize_duration will also be inserted into the dict. args will be passed to data_handler
        for insertion; all keys map directly to columns in the historical table.
        :param args: dictionary with keys 'datetime', 'station', 'fixed_duration', 'manual'
        """
        station = args["station"]
        fixed_duration = args["fixed_duration"]
        optimized_duration, forecasted_temp, base_temp = self.optimize_duration(fixed_duration)
        print("Station {} watering for {} min at {}".format(
            station, optimized_duration, datetime.now().strftime("%c")))
        # Activate the solenoid
        self.set_station(station, True)
        self.set_shift_register_values()
        # Water and wait
        seconds = int(optimized_duration) * 60
        while seconds > 0:
            print("Drip.... second {}".format(seconds))
            sleep(1)
            seconds -= 1
        # Deactivate the solenoid
        self.set_station(station, False)
        self.set_shift_register_values()
        print("Station {} finished watering".format(station))
        # Add a few more k, v pairs before passing to data_handler for building a SQL insert statement
        args["forecasted_temp"] = forecasted_temp
        args["base_temp"] = base_temp
        args["optimized_duration"] = optimized_duration
        # Send args to the data handler for insertion into the db
        self.data_handler.insert_historical_record(args)