def test_get_next_fire_time(self, run_date, alter_tz, previous, now, expected, timezone, freeze_time):
    """A DateTrigger fires exactly once, at its (localized) run date."""
    trigger = DateTrigger(run_date, alter_tz or timezone)
    prev_localized = timezone.localize(previous) if previous else None
    now_localized = timezone.localize(now)
    expected_localized = timezone.localize(expected) if expected else None
    assert trigger.get_next_fire_time(prev_localized, now_localized) == expected_localized
def test_dst_change(self, is_dst):
    """Verify DateTrigger works inside the ambiguous "fall-back" DST window.

    Datetimes are deliberately compared as strings: native datetime
    comparison would normalize both sides to UTC and could mask a wrong
    local representation.
    """
    tz = pytz.timezone("US/Eastern")
    ambiguous = tz.localize(datetime(2013, 11, 3, 1, 5), is_dst=is_dst)
    fire_date = tz.normalize(ambiguous + timedelta(minutes=55))
    trigger = DateTrigger(run_date=fire_date, timezone=tz)
    assert str(trigger.get_next_fire_time(None, fire_date)) == str(fire_date)
''' if isinstance(event, JobSubmissionEvent): print("任务{}触发执行".format(event.job_id)) if isinstance(event, JobExecutionEvent) and event.exception: print("任务{}抛出异常:{}".format(event.job_id, event.exception)) jobstores = { 'default': SQLAlchemyJobStore(url='sqlite:///db.sqlite3') #定时器任务持久化地址 } scheduler = BlockingScheduler(jobstores=jobstores) scheduler.add_job(func=aps_test, trigger=CronTrigger(hour=20, minute=48), replace_existing=True, id="sb") scheduler.add_job(func=aps_test, replace_existing=True, trigger=DateTrigger(run_date=datetime.datetime.now() + datetime.timedelta(seconds=12)), id="sbsb") scheduler.add_job(func=aps_test, # 任务函数 trigger=IntervalTrigger(seconds=5), # 执行计划 replace_existing=True, id="sbsbsb" # 唯一的 ) scheduler.add_listener(my_listener) scheduler.start()
print("run test2") @print_start_time def test3(): print("run test3") @print_start_time def test4(): print("run test4") @print_start_time def test5(): print("run test5") scheduler.add_job(test1, trigger=DateTrigger(datetime.datetime.now() + datetime.timedelta(seconds=5)), args=("test_job_id",)) scheduler.add_job(test2, trigger=CronTrigger.from_crontab("* * * * *")) scheduler.add_job(test3, trigger="cron", minute="*/2", hour="*", day="*", month="*", day_of_week="*") scheduler.add_job(test4, trigger="interval", seconds=10) scheduler.add_job(test5, trigger="interval", minutes=1) scheduler.start() while 1: time.sleep(1) # scheduler.print_jobs()
def test_str(self, timezone):
    """str() of a DateTrigger renders the localized run date."""
    assert str(DateTrigger(datetime(2009, 7, 6), timezone)) == "date[2009-07-06 00:00:00 CEST]"
def DScheduler(cls, action, start_date=None, execute_date=None, end_date=None,
               execute_interval=3, tz=None, **kwargs):
    """Real-time task loop driven by wall-clock time.

    The tasks mounted on `action` (start/execute/end hooks) are driven by
    time of day, unlike the `run` method which is driven by K-line (candle)
    updates.  The timezone feature is not yet fully effective.

    :param action: object exposing is_trade_day()/start()/execute()/end()
    :param start_date: like '09:30:00'
    :param execute_date: like '09:30:00-11:30:00' or
        '09:30:00-11:30:00 13:00:00-15:00:00'
    :param end_date: like '15:00:00'
    :param execute_interval: interval of the continuous task, in seconds
    :param tz: timezone name (must be a pytz timezone string)
    :return:
    """
    fmt = '%Y-%m-%d %H:%M:%S'
    # Parse the HH:MM:SS strings against a dummy date so only the time
    # components (hour/minute/second) are used later.
    if start_date is not None:
        try:
            sdt = dt.datetime.strptime('2000-01-01 ' + start_date, fmt)
        except Exception:
            raise TypeError(
                'this start_date param like a "09:30:00" string')
    if execute_date is not None:
        try:
            xdt = []
            dts = execute_date.split(' ')
            for et in dts:
                t = et.split('-')
                s = dt.datetime.strptime('2000-01-01 ' + t[0], fmt)
                e = dt.datetime.strptime('2000-01-01 ' + t[1], fmt)
                # if s > e:  # a start greater than end means the window runs
                #            # from today's end into the next day's start
                #     raise TypeError('execute start datetime must less than end')
                xdt.append([s, e])
                del s, e, t
            del dts
        except Exception:
            raise TypeError(
                'this start_date param like a "09:30:00-11:30:00" or'
                ' "09:30:00-11:30:00 13:00:00-15:00:00"')
    if end_date is not None:
        try:
            edt = dt.datetime.strptime('2000-01-01 ' + end_date, fmt)
        except Exception:
            raise TypeError(
                'this start_date param like a "15:30:00" string')
    if tz is not None:
        if tz not in pytz.all_timezones:
            raise ValueError(
                'Only timezones from the pytz library are supported')
        else:
            tz = pytz.timezone(tz)
    from apscheduler.triggers.date import DateTrigger
    from apscheduler.triggers.interval import IntervalTrigger
    from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
    while 1:
        # scheduler = BlockingScheduler(daemonic=False)
        # crt = CalfDateTime.now(tz, offset)
        # Current time as a naive datetime (tz applied then stripped).
        crt = dt.datetime.now() if tz is None else dt.datetime.now(
            tz=tz).replace(tzinfo=None)
        tdy = dt.datetime(crt.year, crt.month, crt.day)  # midnight today
        # Non-trading day: sleep until next midnight and check again.
        if not action.is_trade_day(tdy):
            print(fontcolor.F_RED + '-' * 80)
            print('Note:Non-transaction date;Datetime:' + str(crt))
            print('-' * 80 + fontcolor.END)
            delta = (tdy + dt.timedelta(days=1) - crt).seconds
            delta = 1 if delta < 1 else delta
            time.sleep(delta)  # sleep to next day
            continue
        # Trading day: mount today's start/execute/end jobs on a fresh
        # background scheduler, run them, then sleep until tomorrow.
        else:
            try:
                from pytz import FixedOffset, utc
                nsds = list()  # next-start datetimes (tomorrow's job times)
                executors = {
                    'default': ThreadPoolExecutor(4),
                    'processpool': ProcessPoolExecutor(4)
                }
                job_defaults = {'coalesce': True, 'max_instances': 1}
                scheduler = BackgroundScheduler(executors=executors,
                                                job_defaults=job_defaults,
                                                daemonic=False,
                                                timezone=tz)
                if start_date is not None:
                    # Today's concrete start datetime.
                    d = tdy + dt.timedelta(hours=sdt.hour,
                                           minutes=sdt.minute,
                                           seconds=sdt.second)
                    nsds.append(d + dt.timedelta(days=1))

                    def action_start(args):
                        # One-shot wrapper around action.start with logging.
                        print(fontcolor.F_GREEN + '-' * 80)
                        print('Calf-Note:start task running on ',
                              dt.datetime.now(tz=tz))
                        print('-' * 80 + fontcolor.END)
                        try:
                            action.start(args=args)
                        except Exception as ep:
                            ExceptionInfo(ep)

                    scheduler.add_job(func=action_start,
                                      trigger=DateTrigger(d),
                                      id='action_start',
                                      args=[kwargs])
                if execute_date is not None:

                    def action_execute(args):
                        # Repeating wrapper around action.execute with logging.
                        print(fontcolor.F_GREEN + '-' * 80)
                        print('Calf-Note:execute task running on ',
                              dt.datetime.now(tz=tz))
                        print('-' * 80 + fontcolor.END)
                        try:
                            action.execute(args=args)
                        except Exception as ep:
                            ExceptionInfo(ep)

                    for x in xdt:
                        sd = tdy + dt.timedelta(hours=x[0].hour,
                                                minutes=x[0].minute,
                                                seconds=x[0].second)
                        ed = tdy + dt.timedelta(hours=x[1].hour,
                                                minutes=x[1].minute,
                                                seconds=x[1].second)
                        if sd > ed:
                            # A window like '21:30:00-04:00:00' means the task
                            # runs from 21:30 today until 04:00 the next day.
                            ed = ed + dt.timedelta(days=1)
                        else:
                            pass
                        scheduler.add_job(func=action_execute,
                                          trigger=IntervalTrigger(
                                              seconds=execute_interval,
                                              start_date=sd,
                                              end_date=ed),
                                          args=[kwargs])
                        nsds.append(sd + dt.timedelta(days=1))
                if end_date is not None:

                    def action_end(args):
                        # One-shot wrapper around action.end with logging.
                        print(fontcolor.F_GREEN + '-' * 80)
                        print('Calf-Note:end task running on ',
                              dt.datetime.now(tz=tz))
                        print('-' * 80 + fontcolor.END)
                        try:
                            action.end(args=args)
                        except Exception as ep:
                            ExceptionInfo(ep)

                    d = tdy + dt.timedelta(hours=edt.hour,
                                           minutes=edt.minute,
                                           seconds=edt.second)
                    nsds.append(d + dt.timedelta(days=1))
                    scheduler.add_job(func=action_end,
                                      trigger=DateTrigger(d),
                                      id='action_end',
                                      timezone=tz,
                                      args=[kwargs])
                print(fontcolor.F_GREEN + '-' * 80)
                print('Note:enter Calf real task and mount these tasks:')
                scheduler.print_jobs()
                print('Datetime:' + str(crt))
                print('-' * 80 + fontcolor.END)
                scheduler.start()
                # Work out how long to sleep before the next daily restart.
                if len(nsds) == 0:
                    break
                # ed = CalfDateTime.now(tz, offset)
                nd = dt.datetime.now() if tz is None else dt.datetime.now(
                    tz=tz).replace(tzinfo=None)
                delta = (min(nsds) - nd)
                delta = delta.seconds + delta.days * 86400
                print(fontcolor.F_YELLOW + '-' * 80)
                print(
                    'Note:Calf will sleep {0} seconds and restart on {1}:'.
                    format(delta, min(nsds)))
                print('Datetime:', str(crt))
                print('-' * 80 + fontcolor.END)
                delta = 1 if delta < 1 else delta
                time.sleep(delta)
                scheduler.shutdown(wait=False)
                del scheduler
            except Exception as e:
                ExceptionInfo(e)
                pass
kwargs={ "target": "C1234", "task": "play music" }, trigger=CronTrigger(day_of_week="mon", hour="9", minute="0", week="*"), ), Job( id="yyy", scheduler=test_scheduler, func=dummy_func, args=(), kwargs={ "target": "S1234", "task": "do groceries" }, trigger=DateTrigger(run_date=datetime(2020, 5, 4, 18, 0)), ), Job( id="zzz", scheduler=test_scheduler, func=dummy_func, args=(), kwargs={ "target": "C6789", "task": "play guitar" }, trigger=CronTrigger(day_of_week="tue", hour="9", minute="0", week="*/2"), ),
async def _(bot: Bot, event: Event, state: dict) -> None:
    """Setu command handler.

    Rate-limits a user after 5 requests (blocked for 1 hour, removal
    scheduled via a one-shot DateTrigger), then serves an image either
    from a local SQLite database (setu_type == 1) or from the Lolicon API.
    A 1-in-5 roll (res == 5) redirects the result to the superusers instead.
    """
    global SP_temp_list
    user = event.user_id
    group = event.group_id
    res = randint(1, 5)  # 1-4: send to user; 5: send to superusers instead
    check_sepi()
    # Fifth request from this user: block them and schedule un-blocking
    # one hour from now.
    if countX(SP_temp_list, user) == 5:
        add_sepi(user)  # type: ignore
        SP_temp_list = list(set(SP_temp_list))
        delta = timedelta(hours=1)
        trigger = DateTrigger(run_date=datetime.now() + delta)
        scheduler.add_job(func=del_sepi,
                          trigger=trigger,
                          args=(user, ),
                          misfire_grace_time=60)
        return
    if setu_type == 1:
        # Local mode: pick one random row from the nearR18 SQLite table.
        DATA_PATH = Path(
            '.') / 'ATRI' / 'data' / 'data_Sqlite' / 'setu' / 'nearR18.db'
        if not DATA_PATH.is_file():
            await setu.finish("数据库...她是空的!!!")
        con = sqlite3.connect(DATA_PATH)
        cur = con.cursor()
        msg = cur.execute('SELECT * FROM nearR18 ORDER BY RANDOM() limit 1;')
        for i in msg:
            pid = i[0]
            title = i[1]
            img = i[7]
        msg0 = "setu info:\n"
        msg0 += f"Title: {title}\n"
        msg0 += f"Pid: {pid}\n"
        msg0 += f"[CQ:image,file=file:///{compress_image(await aio_download_pics(img))}]"
        if 1 <= res < 5:
            SP_temp_list.append(user)
            await setu.finish(msg0)
        elif res == 5:
            await bot.send(event, "我找到涩图了!但我发给主人了\nο(=•ω<=)ρ⌒☆")
            for sup in config['bot']['superusers']:
                await bot.send_private_msg(
                    user_id=sup,
                    message=
                    f"主人,从群{group}来的涩图!热乎着!\nTitle: {title}\nPid: {pid}\n[CQ:image,file=file:///{compress_image(await aio_download_pics(img))}]"
                )
    else:
        # Remote mode: fetch one SFW image from the Lolicon API.
        params = {"apikey": key_LoliconAPI, "r18": "0", "num": "1"}
        data = {}
        try:
            data = json.loads(
                request_get('https://api.lolicon.app/setu/', params))
        except Exception:
            await setu.finish(errorRepo("请求数据失败,也可能为接口调用次数达上限"))
        msg0 = "setu info:\n"
        msg0 += f'Title: {data["data"][0]["title"]}\n'
        msg0 += f'Pid: {data["data"][0]["pid"]}\n'
        msg0 += f'[CQ:image,file=file:///{compress_image(await aio_download_pics(data["data"][0]["url"]))}]'
        if 1 <= res < 5:
            SP_temp_list.append(user)
            await setu.finish(msg0)
        elif res == 5:
            await bot.send(event, "我找到涩图了!但我发给主人了\nο(=•ω<=)ρ⌒☆")
            for sup in config['bot']['superusers']:
                await bot.send_private_msg(
                    user_id=sup,
                    message=
                    f'主人,从群{group}来的涩图!热乎着!\nTitle: {data["data"][0]["title"]}\nPid: {data["data"][0]["pid"]}\n[CQ:image,file=file:///{compress_image(await aio_download_pics(data["data"][0]["url"]))}]'
                )
async def from_rss_async(self, source) -> str:
    """Poll one configured RSS source and build an update digest.

    :param source: key into self.rss identifying the feed (also the
        scheduler job id for that feed)
    :return: formatted news text, or None when the source is unreachable,
        unparsable, empty, or has no new entries since last_id
    """
    rss_source = self.rss[source]
    print(
        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) +
        "检查RSS源:{}".format(rss_source["name"]))
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(
                    rss_source["source"],
                    headers=rss_source.get("headers")) as response:
                code = response.status
                if code != 200:
                    print("rss源错误:{},返回值:{}".format(
                        rss_source["name"], code))
                    return None
                res = await response.text()
    except aiohttp.client_exceptions.ClientConnectionError:
        print("rss源连接错误:" + rss_source["name"])
        return None
    except Exception as e:
        print("未知错误{} {}".format(type(e).__name__, e))
        return None
    feed = feedparser.parse(res)
    if feed["bozo"]:
        # feedparser sets "bozo" when the XML was malformed.
        print("rss源解析错误:" + rss_source["name"])
        return None
    if self.news_interval_auto:
        updated = feed.feed.updated
        if updated is not None:
            # The feed's own last refresh time.
            lastBuildDate = parsedate_tz(updated)
            # Convert to a naive UTC+8 datetime: drop the feed's own offset
            # and add 28800 s (8 hours).
            nt = datetime.datetime.fromtimestamp(
                time.mktime(lastBuildDate[:-1]) - lastBuildDate[-1] + 28800)
            nt += datetime.timedelta(minutes=25)
            after10min = datetime.datetime.now() + datetime.timedelta(
                minutes=10)
            if nt > after10min:
                # Push the next poll to 25 minutes after the feed refresh.
                self.scheduler.reschedule_job(
                    job_id=source,
                    jobstore='default',
                    trigger=DateTrigger(nt),
                )
    if len(feed["entries"]) == 0:
        print("rss无效:" + rss_source["name"])
        return None
    # Remember the newest entry id; on first run we only initialize.
    last_id = rss_source.get("last_id")
    rss_source["last_id"] = feed["entries"][0]["id"]
    if last_id is None:
        print("rss初始化:" + rss_source["name"])
        return None
    # Collect entries newer than the previously seen one.
    news_list = list()
    for item in feed["entries"]:
        if item["id"] == last_id:
            break
        news_list.append(rss_source["pattern"].format_map(item))
    if news_list:
        return (rss_source["name"] + "更新:\n=======\n" +
                "\n-------\n".join(news_list))
    else:
        return None
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logging.basicConfig() if not os.path.exists("settings/config.json"): print("|! Couldn't find config.json!") print("|! rename the config.json.example to config.json and edit it as required.") print("|! After that, run the script again.") exit(1) utils = Utils() schedule_interval = utils.readConfig()['interval'] use_schedule = utils.readConfig()['use_schedule'] scheduler = BlockingScheduler() scheduler.add_job(Cachet, trigger=DateTrigger(run_date=datetime.datetime.now()), id='initial') scheduler.add_job(Cachet, 'interval', seconds=schedule_interval, id='constant') print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) if use_schedule: try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass else: Cachet()
async def _(bot: Bot, event: Event, state: dict):
    """Image-search command handler.

    Enforces a per-user rate limit (5 requests trigger a 10-minute block,
    cleared by a one-shot DateTrigger job), resolves the image either
    locally or via a linker, and maintains a per-group usage leaderboard
    stored in King_of_LSP.json.
    """
    global lsp_stack
    user = str(event.user_id)
    group = str(event.group_id)
    if not is_enabled(_func_name, group):
        await setu_get.finish("该功能不可用")
    if check_list(user):
        await setu_get.finish("冲的太多了,休息一下吧")
    # Fifth request in a row: block the user and schedule un-blocking
    # ten minutes from now.
    if counter(lsp_stack, user) == 5:
        en_lsp(user)
        lsp_stack = list(set(lsp_stack))
        delta = timedelta(minutes=10)
        trigger = DateTrigger(run_date=datetime.now() + delta)
        scheduler.add_job(func=de_lsp,
                          trigger=trigger,
                          args=(user, ),
                          misfire_grace_time=60)
    # Per-group R18 switch read from on_list.json (default off).
    r18_switch = 0
    ON_FILE = Path("./src/plugins/pic_search/on_list.json")
    if ON_FILE.is_file():
        with open(ON_FILE, 'r') as file:
            data = ujson.load(file)
        if group in data:
            r18_switch = 1 if data[group] == "on" else 0
    args = str(event.message).strip().split()
    if len(args) > 1:
        state["keyword"] = args[1]
    await bot.send(event, "别急,涩图在搜索了")
    # "local" keyword loads from disk; anything else goes to the linker.
    if state.get("keyword") == "local":
        setu = await setu_loader(user)
    else:
        key = state.get("keyword")
        setu = await setu_linker(user, key, mode=r18_switch)
    if not setu:
        msg = [{
            "type": "at",
            "data": {
                "qq": user
            }
        }, {
            "type": "text",
            "data": {
                "text": "连接超时,涩图找丢了"
            }
        }]
        await setu_get.finish(msg)
    lsp_stack.append(user)
    sender_info = dict(event.sender)
    # Prefer the group card name over the nickname when present.
    if sender_info["card"]:
        user_name = sender_info["card"]
    else:
        user_name = sender_info["nickname"]
    # Update the per-group leaderboard file.
    KSP = Path("./src/plugins/pic_search/King_of_LSP.json")
    if not KSP.is_file():
        sp_data = {}
    else:
        with open(KSP, 'r') as f:
            sp_data = ujson.load(f)
    if group not in sp_data:
        sp_data[group] = {}
    if user not in sp_data[group]:
        sp_data[group][user] = {"name": user_name, "times": 0}
    if sp_data[group][user]["name"] != user_name:
        sp_data[group][user]["name"] = user_name
    sp_data[group][user]["times"] += 1
    with open(KSP, 'w') as file:
        ujson.dump(sp_data, file)
    await setu_get.finish(setu)
mktime(entry.published_parsed)) pub_localized = utc.localize(pub_datetime) pub_eastern = pub_localized.astimezone(est) if not FeedItem.objects.filter( title=entry.title.encode('ascii', 'ignore')): FeedItem.objects.create( feed=feed, title=entry.title.encode('ascii', 'ignore'), url=entry.link, summary=strip_tags(entry.summary.encode('ascii', 'ignore')), pub_date=pub_eastern) @sched.scheduled_job('interval', days=1) def remove_old_feed_items(): for item in FeedItem.objects.all(): if item.pub_date < utc.localize(datetime.datetime.now() - datetime.timedelta( days=1)).astimezone(est): item.delete() # Run jobs immediately on deploy sched.add_job(func=update_newsfeed, trigger=DateTrigger(run_date=datetime.datetime.now())) sched.add_job(func=remove_old_feed_items, trigger=DateTrigger(run_date=datetime.datetime.now())) sched.start()
timezone=get_localzone(), # "Asia/Seoul" daemon=True) # JOB Task 생성 # - 입력받은 텍스트를 출력 def execute(text): print(text) # JOB Schedule 등록 # - task : 입력받은 텍스트를 출력 # - trigger : date # - jobstore : default scheduler.add_job(execute, DateTrigger(), args=["[DateTrigger] Hello, Apscheduler!!!"]) # JOB Schedule 등록 # - task : 5초마다 반복하며, 입력받은 텍스트를 출력 # - trigger : interval # - jobstore : default scheduler.add_job(execute, IntervalTrigger(seconds=5), args=["[IntervalTrigger] Hello, Apscheduler!!!"]) # JOB Schedule 등록 # - task : 3초마다 반복하며, 입력받은 텍스트를 출력 # - trigger : cron # - jobstore : default scheduler.add_job(execute,
def configure(self, aasxconfig):
    """Configures the triggers and jobs out of the given configuration.

    :param lxml.etree.ElementTree aasxconfig: XML DOM tree of the configuration
    :raises Exception: for unknown trigger types or disallowed job parameters
    """
    # add each trigger of the configuration to the scheduler
    triggerList = aasxconfig.GetTriggersList()
    for trigger_xe in triggerList:
        trigger_type = trigger_xe.tag
        trigger_id = trigger_xe.attrib["ID"]
        if trigger_type == "DateTrigger":
            kwargs = {}
            kwargs["run_date"] = trigger_xe.attrib["RunDateTime"]
            if "TimeZone" in trigger_xe.attrib:
                kwargs["timezone"] = trigger_xe.attrib["TimeZone"]
            self.triggers[trigger_id] = DateTrigger(**kwargs)
        elif trigger_type == "IntervalTrigger":
            kwargs = {}
            if "Weeks" in trigger_xe.attrib:
                kwargs["weeks"] = int(trigger_xe.attrib["Weeks"])
            if "Days" in trigger_xe.attrib:
                kwargs["days"] = int(trigger_xe.attrib["Days"])
            if "Hours" in trigger_xe.attrib:
                kwargs["hours"] = int(trigger_xe.attrib["Hours"])
            if "Minutes" in trigger_xe.attrib:
                kwargs["minutes"] = int(trigger_xe.attrib["Minutes"])
            if "Seconds" in trigger_xe.attrib:
                kwargs["seconds"] = int(trigger_xe.attrib["Seconds"])
            if "StartDateTime" in trigger_xe.attrib:
                kwargs["start_date"] = trigger_xe.attrib["StartDateTime"]
            if "EndDateTime" in trigger_xe.attrib:
                kwargs["end_date"] = trigger_xe.attrib["EndDateTime"]
            if "TimeZone" in trigger_xe.attrib:
                kwargs["timezone"] = trigger_xe.attrib["TimeZone"]
            self.triggers[trigger_id] = IntervalTrigger(**kwargs)
        elif trigger_type == "CronTrigger":
            kwargs = {}
            if "Year" in trigger_xe.attrib:
                kwargs["year"] = trigger_xe.attrib["Year"]
            if "Month" in trigger_xe.attrib:
                kwargs["month"] = trigger_xe.attrib["Month"]
            if "Day" in trigger_xe.attrib:
                kwargs["day"] = trigger_xe.attrib["Day"]
            if "Week" in trigger_xe.attrib:
                kwargs["week"] = trigger_xe.attrib["Week"]
            if "WeekDay" in trigger_xe.attrib:
                kwargs["day_of_week"] = trigger_xe.attrib["WeekDay"]
            if "Hour" in trigger_xe.attrib:
                kwargs["hour"] = trigger_xe.attrib["Hour"]
            if "Minute" in trigger_xe.attrib:
                kwargs["minute"] = trigger_xe.attrib["Minute"]
            if "Second" in trigger_xe.attrib:
                kwargs["second"] = trigger_xe.attrib["Second"]
            if "StartDateTime" in trigger_xe.attrib:
                kwargs["start_date"] = trigger_xe.attrib["StartDateTime"]
            if "EndDateTime" in trigger_xe.attrib:
                kwargs["end_date"] = trigger_xe.attrib["EndDateTime"]
            if "TimeZone" in trigger_xe.attrib:
                kwargs["timezone"] = trigger_xe.attrib["TimeZone"]
            self.triggers[trigger_id] = CronTrigger(**kwargs)
        else:
            raise Exception(
                "This PYAAS implementation can not handle the trigger type '{0}'"
                .format(trigger_type))
        job_xes = trigger_xe.xpath("./Job")
        for job_xe in job_xes:
            job_id = job_xe.attrib["ID"]
            module_name = job_xe.attrib["Function"]
            params = [self.saas]
            param_xes = job_xe.xpath("./*")
            for param_xe in param_xes:
                if param_xe.tag == "ChannelRef":
                    params.append(param_xe.attrib["RefID"])
                elif param_xe.tag == "Constant":
                    t = param_xe.attrib["Type"]
                    v = param_xe.attrib["Value"]
                    # BUG FIX: the original used three independent `if`
                    # statements with the `else` bound only to the float
                    # check, so a valid "str" or "int" constant would be
                    # appended and then still raise.  An elif-chain makes
                    # exactly one branch run per constant.
                    if t == "str":
                        params.append(str(v))
                    elif t == "int":
                        params.append(int(v))
                    elif t == "float":
                        params.append(float(v))
                    else:
                        raise Exception("ERROR: Disallowed Job parameter")
            # BUG FIX: check our own cache instead of sys.modules — the
            # original read self.f_modules[module_name] unconditionally but
            # only wrote it when the module was absent from sys.modules,
            # a latent KeyError when the module was imported elsewhere.
            if module_name not in self.f_modules:
                # NOTE(review): "modules" + module_name yields e.g.
                # "modulesfoo" unless module_name starts with a dot —
                # confirm the expected format of the Function attribute.
                self.f_modules[module_name] = import_module("modules" +
                                                            module_name)
            f = self.f_modules[module_name].function
            trigger = self.triggers[trigger_id]
            self.scheduler.add_job(f,
                                   trigger=trigger,
                                   args=params,
                                   id=job_id,
                                   replace_existing=True)
def trigger(self) -> DateTrigger:
    """Trigger for ReserveTimeoutJobs."""
    fire_at = current_timestamp() + timedelta(seconds=30)
    return DateTrigger(run_date=fire_at)
def trigger(self) -> DateTrigger:
    """Trigger for ReserveAbortJobs."""
    # A null run date makes the trigger fire immediately.
    return DateTrigger(run_date=None)
def run_songlyrics_script():
    """Queue an immediate one-shot run of the songlyrics job."""
    scheduler.add_job(songlyrics.run, DateTrigger())
def get_or_create_trigger(self) -> BaseTrigger:
    """Return a fresh immediate-fire date trigger."""
    immediate_trigger = DateTrigger()
    return immediate_trigger
def reschedule_stored_job(job_id, time):
    """Re-arm the stored job `job_id` to fire once at `time`."""
    scheduler.reschedule_job(job_id, trigger=DateTrigger(run_date=time))
class JobManager: """ Central overseer that manages the measurement jobs. """ _instance: Optional['JobManager'] = None @classmethod def initialize( cls, datadir: Path ) -> 'JobManager': if cls._instance is None: cls._instance = JobManager( datadir ) return cls._instance @classmethod def get_instance( cls ) -> 'JobManager': if cls._instance is None: raise RuntimeError( "Attempted to obtain manager before initializing it." ) return cls._instance @classmethod def run_job( cls, job: Job ) -> None: """ Executes the specified job once. :param job: The job to execute. """ _LOGGER.debug( "Running job '%s'.", job.id ) timestamp = datetime.now( pytz.utc ) try: output = speedtest.run_test( server_id = job.server_id, server_name = job.server_name ) result = { 'success': True, 'time': timestamp.isoformat(), 'result': output } except speedtest.TestError as e: _LOGGER.exception( "Test could not be completed." ) result = { 'success': False, 'timestamp': timestamp.isoformat(), 'error': str( e ), 'stdout': e.stdout, 'stderr': e.stderr, } with open( cls.get_instance().output_file( job ), 'a' ) as f: f.write( json.dumps( result ) ) f.write( ',\n' ) # Line break to make it slightly more readable _LOGGER.debug( "Finished running job '%s'.", job.id ) def __init__( self, datadir: Path ): """ Initializes a new manager that uses the specified directory to store data. :param datadir: The path of the directory where data should be stored. """ _LOGGER.debug( "Initializing manager." ) try: _LOGGER.info( "Using %s", speedtest.get_version() ) # Also implicitly check installed except speedtest.TestError: _LOGGER.exception( "Obtaining Speedtest CLI version caused an error." ) _LOGGER.critical( "The Speedtest CLI could not accessed. Is it installed in this system?" 
) sys.exit( 1 ) database_path = datadir / 'jobs.db' self.storage = datadir / 'results' self.storage.mkdir( mode = 0o770, exist_ok = True ) self.engine = create_engine( f'sqlite:///{database_path}' ) Base.metadata.create_all( self.engine ) self.Session = orm.sessionmaker( bind = self.engine ) jobstores = { 'default': SQLAlchemyJobStore( engine = self.engine ) } executors = { 'default': ThreadPoolExecutor( 1 ) } job_defaults = { 'coalesce': True, 'max_instances': 1, 'misfire_grace_time': 5 * 60, # Can be up to 5 minutes late } self.scheduler = BackgroundScheduler( jobstores = jobstores, executors = executors, job_defaults = job_defaults, timezone = pytz.utc ) self.scheduler.add_listener( self.job_stopped, mask = events.EVENT_JOB_REMOVED ) _LOGGER.debug( "Manager initialized." ) def start( self ): """ Starts processing jobs. """ _LOGGER.info( "Manager starting." ) self.scheduler.start() _LOGGER.debug( "Manager started." ) def shutdown( self, wait: bool = True ): """ Shuts down the manager, stopping job processing. :param wait: If True, waits for all currently executing jobs to finish before returning. """ _LOGGER.info( "Manager stopping." ) self.scheduler.shutdown( wait = wait ) _LOGGER.debug( "Manager stopped." ) def job_stopped( self, event: events.JobEvent ) -> None: id: str = event.job_id with self.transaction() as session: job: JobMetadata = session.query( JobMetadata ).filter_by( id = id ).first() job.running = False def output_file( self, job: Job ) -> Path: """ Determines the path to the output file of the job identified by the given ID. :param job: The job to get the path for. :return: The path of the output file for the given job. """ return self.storage / f'{job.id}.result' # Not really proper JSON def load_results( self, job: Job ) -> Sequence[JSONData]: """ Loads the results obtained so far for the given job. :param job: The job to load results for. :return: The results of the given job, as a list of JSON objects. 
""" output_file = self.output_file( job ) if not output_file.exists(): return [] with open( output_file, 'r' ) as f: results = f.read() results = '[' + results[:-2] + ']' # Remove trailing comma and line break and add brackets return json.loads( results ) @contextmanager def transaction( self ) -> orm.Session: """ Provide a transactional scope around a series of operations. """ session: orm.Session = self.Session() try: yield session session.commit() except: session.rollback() raise finally: session.close() def new_job( self, job: Job ) -> None: """ Registers the given job. :param job: The job to register. :raises IDExistsError: if the ID of the given job is already in use. """ _LOGGER.info( "Registering job '%s'.", job.id ) _LOGGER.debug( "Job '%s' (%s) has target %d|'%s', starts at %s and ends at %s with interval %s.", job.id, job.title, job.server_id, job.server_name, job.start, job.end, job.interval ) with self.transaction() as session: try: if session.query( JobMetadata ).filter_by( id = job.id ).count() > 0: raise IDExistsError( "There is already metadata for the given ID." ) new_job = JobMetadata( job ) session.add( new_job ) if job.interval: _LOGGER.debug( "Creating an interval-triggered job." ) if job.end is not None and job.end < ( now := datetime.now( pytz.utc ) ): raise PastEndError( now, job ) trigger = IntervalTrigger( seconds = int( job.interval.total_seconds() ), start_date = job.start if job.start is not None else datetime.now( pytz.utc ), end_date = job.end ) else: _LOGGER.debug( "Creating a date-triggered job." ) trigger = DateTrigger( run_date = job.start )
def add_test_to_publish(test_id: int, date_time: datetime):
    """Schedule a one-shot publication of the given test at `date_time` (Tashkent time)."""
    one_shot = DateTrigger(run_date=date_time, timezone='Asia/Tashkent')
    _scheduler.add_job(publishing.publish_test,
                       one_shot, [test_id],
                       id='test_' + str(test_id))
def add_date_job(self, mission, widget=None):
    """Schedule a one-shot job that shows `mission` on `widget` at its time.

    Falls back to the instance's default widget when none is given.
    """
    target = widget if widget else self.__widget
    self.add_job(func=lambda: target.send_show_signal(mission['id']),
                 trigger=DateTrigger(run_date=mission['time']),
                 id=mission['id'])
def add_job(self, job, name=None, max_instances=1, coalesce=True, args=None,
            kwargs=None):
    """Submit `job` to the underlying APScheduler for immediate one-shot execution."""
    run_now = DateTrigger(run_date=datetime.now())
    self.aps_scheduler.add_job(job,
                               run_now,
                               name=name,
                               id=name,
                               max_instances=max_instances,
                               coalesce=coalesce,
                               args=args,
                               kwargs=kwargs)
async def _(session: CommandSession):
    """NTR-keyword watcher.

    Skips blocked groups/users and a quiet-hours window, scolds the sender
    when the message matches the pattern, and after five hits mutes the
    sender for ten minutes (removal scheduled via a one-shot DateTrigger).
    The bot master is exempt from muting.
    """
    global noobList1
    user = session.event.user_id
    group = session.event.group_id
    # Best-effort loads: a missing/corrupt file just means an empty list.
    try:
        with open(
                Path('.') / 'ATRI' / 'plugins' / 'noobList' / 'noobGroup.json',
                'r') as f:
            data = json.load(f)
    except Exception:  # narrowed from bare except: don't swallow SystemExit
        data = {}
    try:
        with open(
                Path('.') / 'ATRI' / 'plugins' / 'noobList' / 'noobList.json',
                'r') as f:
            data1 = json.load(f)
    except Exception:
        data1 = {}
    if str(group) in data.keys():
        pass  # whole group is exempt
    else:
        if str(user) in data1.keys():
            pass  # user already muted
        else:
            if 0 <= now_time() < 5.5:
                pass  # quiet hours
            else:
                msg = str(session.event.message)
                bL = {}
                pattern = r"[nNηиɴИ][tT][rR]|[牛🐂]头人"
                if re.findall(pattern, msg):
                    await session.send('你妈的,牛头人,' + response.request_api(KC_URL))
                    noobList1.append(user)
                    print(noobList1)
                    print(countX(noobList1, user))
                    if countX(noobList1, user) == 5:
                        if user == master:
                            await session.send(
                                '是主人的话...那算了...呜呜\n即使到达了ATRI的最低忍耐限度......')
                            noobList1 = list(set(noobList1))
                        else:
                            await session.send(
                                f'[CQ:at,qq={user}]哼!接下来10分钟别想让我理你!')
                            bL[f"{user}"] = f"{user}"
                            file = Path(
                                '.'
                            ) / 'ATRI' / 'plugins' / 'noobList' / 'noobList.json'
                            # Use a context manager so the file is closed
                            # even if the dump raises.
                            with open(file, 'w') as out:
                                out.write(json.dumps(bL))
                            noobList1 = list(set(noobList1))
                            print(noobList1)
                            delta = timedelta(minutes=10)
                            trigger = DateTrigger(run_date=datetime.now() +
                                                  delta)
                            scheduler.add_job(  # type: ignore
                                func=rmQQfromNoobLIST,
                                trigger=trigger,
                                # BUG FIX: was args=(user) — parentheses
                                # without a comma pass a bare int, not a
                                # 1-tuple, so APScheduler could not unpack
                                # the job arguments.
                                args=(user, ),
                                misfire_grace_time=60,
                            )
def test_pickle(self, timezone):
    """Test that the trigger is pickleable."""
    original = DateTrigger(date(2016, 4, 3), timezone=timezone)
    restored = pickle.loads(pickle.dumps(original, 2))
    assert restored.run_date == original.run_date
def schedule(self, task, start_time: dt.datetime = None):
    """Queue `task`: at `start_time` when given, otherwise immediately."""
    when = DateTrigger(start_time) if start_time is not None else None
    self._scheduler.add_job(task, trigger=when)
def update_cron(self, cron_id, cron_info):
    """Update an existing scheduled cron job's trigger and payload.

    :param str cron_id: id of the APScheduler job to modify
    :param dict cron_info: new settings — trigger type, interval or run
        date, notification options, and test-case selection
    :raises TypeError: on bad argument types or when the update fails
    """
    if not isinstance(cron_id, str):
        raise TypeError('cron_id must be str')
    if not isinstance(cron_info, dict):
        raise TypeError('cron_info must be dict')
    trigger_type = cron_info.get('triggerType')
    interval = cron_info.get('interval')
    run_date = cron_info.get('runDate')
    test_case_suite_id_list = cron_info.get('testCaseSuiteIdList')
    is_execute_forbiddened_case = cron_info.get('isExecuteForbiddenedCase')
    test_case_id_list = cron_info.get('testCaseIdList')
    test_domain = cron_info.get('testDomain')
    alarm_mail_list = cron_info.get('alarmMailList')
    is_ding_ding_notify = cron_info.get('isDingDingNotify')
    ding_ding_access_token = cron_info.get('dingdingAccessToken')
    ding_ding_notify_strategy = cron_info.get('dingdingNotifyStrategy')
    is_enterprise_wechat_notify = cron_info.get('isEnterpriseWechatNotify')
    enterprise_wechat_access_token = cron_info.get(
        'enterpriseWechatAccessToken')
    enterprise_wechat_notify_strategy = cron_info.get(
        'enterpriseWechatNotifyStrategy')
    cron_name = cron_info.get('name')
    try:
        # Swap the job's trigger first: repeating interval or one-shot date.
        if trigger_type == 'interval' and int(interval) > 0:
            self.scheduler.modify_job(
                job_id=cron_id,
                trigger=IntervalTrigger(seconds=interval))
        elif trigger_type == 'date':
            # TODO validate run_date's type
            self.scheduler.modify_job(
                job_id=cron_id,
                trigger=DateTrigger(run_date=run_date))
        else:
            raise TypeError('更新定时任务触发器失败!')
        # Rebuild the Cron payload; run_date vs. seconds variant depends on
        # whether a run date was supplied.
        if run_date:
            cron = Cron(
                test_case_suite_id_list=test_case_suite_id_list,
                is_execute_forbiddened_case=is_execute_forbiddened_case,
                test_domain=test_domain,
                alarm_mail_list=alarm_mail_list,
                is_ding_ding_notify=is_ding_ding_notify,
                ding_ding_access_token=ding_ding_access_token,
                ding_ding_notify_strategy=ding_ding_notify_strategy,
                is_enterprise_wechat_notify=is_enterprise_wechat_notify,
                enterprise_wechat_access_token=
                enterprise_wechat_access_token,
                enterprise_wechat_notify_strategy=
                enterprise_wechat_notify_strategy,
                # When updating, this parameter has no real effect;
                # it only changes the displayed field.
                trigger_type=trigger_type,
                test_case_id_list=test_case_id_list,
                run_date=run_date,
                cron_name=cron_name)  # display-only when updating
        else:
            cron = Cron(
                test_case_suite_id_list=test_case_suite_id_list,
                is_execute_forbiddened_case=is_execute_forbiddened_case,
                test_domain=test_domain,
                alarm_mail_list=alarm_mail_list,
                is_ding_ding_notify=is_ding_ding_notify,
                ding_ding_access_token=ding_ding_access_token,
                ding_ding_notify_strategy=ding_ding_notify_strategy,
                is_enterprise_wechat_notify=is_enterprise_wechat_notify,
                enterprise_wechat_access_token=
                enterprise_wechat_access_token,
                enterprise_wechat_notify_strategy=
                enterprise_wechat_notify_strategy,
                trigger_type=trigger_type,  # display-only when updating
                test_case_id_list=test_case_id_list,
                seconds=interval,  # display-only when updating
                cron_name=cron_name)
        # Quirk (per original author): when modifying the job you must
        # change args — changing func does not work.
        self.scheduler.modify_job(job_id=cron_id, coalesce=True, args=[cron])
    except BaseException as e:
        raise TypeError('更新定时任务失败: %s' % e)
def add_date(self, run_date=None, timezone=None):
    """decorator, add a date type task"""
    # Build the trigger eagerly (as the original did): with run_date=None a
    # DateTrigger resolves its fire time at construction.
    date_trigger = DateTrigger(run_date=run_date, timezone=timezone)

    def register(func):
        return self._scheduler.add_job(func, date_trigger)

    return register
def test_repr(self, timezone):
    """repr() of a DateTrigger embeds the localized run date."""
    expected = "<DateTrigger (run_date='2009-07-06 00:00:00 CEST')>"
    assert repr(DateTrigger(datetime(2009, 7, 6), timezone)) == expected
def reschedule_lottery(job_id, time):
    """Re-arm the lottery job `job_id` to fire once at `time`."""
    scheduler.reschedule_job(job_id, trigger=DateTrigger(run_date=time))
def run_spammer_script():
    """Queue an immediate one-shot spammer run with fixed arguments."""
    immediate = DateTrigger()
    scheduler.add_job(lambda: spammer.run('Dor', 'zombie', '1'), immediate)
def trigger(self) -> DateTrigger:
    """Return APScheduler trigger information for scheduling ReserveJob's."""
    # A null run date makes the trigger fire immediately.
    immediate = DateTrigger(run_date=None)
    return immediate