def format(self, record):
    """Format a log record, then expand $-style color placeholders.

    Replaces $RESET/$BRIGHT/$COLOR/$BGCOLOR with the colorama sequences for
    the record's level, plus one $NAME/$BGNAME pair per entry in CCOLORS.
    Always appends a reset so colors do not bleed into the next line.
    """
    levelname = record.levelname
    # COLORS maps a level name to a (foreground, background) pair.
    color = self.COLORS[levelname][0]
    bg_color = self.COLORS[levelname][1]
    message = logging.Formatter.format(self, record)
    message = message.replace("$RESET", self.Style.RESET_ALL) \
        .replace("$BRIGHT", self.Style.BRIGHT) \
        .replace("$COLOR", color) \
        .replace("$BGCOLOR", bg_color)
    # CCOLORS: name -> ANSI color offset; 30+v = foreground, 40+v = background.
    for k, v in self.CCOLORS.items():
        message = message.replace("$" + k, self.COLOR_SEQ % (v + 30)) \
            .replace("$BG" + k, self.COLOR_SEQ % (v + 40))
    return message + self.Style.RESET_ALL


# Bot instance plus a UTC scheduler bound to the bot's event loop.
client = discordbot.DiscordBot()
client.scheduler = AsyncIOScheduler(event_loop=client.loop, timezone=pytz.utc)

# Start
if __name__ == '__main__':
    # Ensure the log directory exists before fileConfig opens handlers.
    if not pathlib.Path("./c_log/").exists():
        os.mkdir("./c_log/")
    init()
    # Expose the formatter on the logging module so logging.conf can name it.
    logging.ColorFormatter = ColorFormatter
    logging.config.fileConfig('logging.conf')
    logger = logging.getLogger(__name__)
    client.scheduler.start()
    client.load_cogs()
    client.run()
from aiogram import Bot, Dispatcher, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from apscheduler.schedulers.asyncio import AsyncIOScheduler

from data import config
from keyboards.default import keyboards
from utils.db_api.postgresql import db
from utils.news_parser import ParseNews

# Core aiogram objects: the bot itself, in-memory FSM storage, dispatcher.
bot = Bot(token=config.TOKEN, parse_mode=types.ParseMode.HTML)
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)

# Short aliases re-exported for handler modules.
kb = keyboards
pn = ParseNews

# Shared asyncio scheduler; presumably started elsewhere at bot startup.
aiosched = AsyncIOScheduler()

__all__ = ['bot', 'storage', 'dp', 'kb', 'pn', 'aiosched', 'db']
"`12:00 Am, Group Is Closing Till 6 Am. Night Mode Started !` \n**Powered By @DaisyXbot**", ) await tbot( functions.messages.EditChatDefaultBannedRightsRequest( peer=int(warner.chat_id), banned_rights=hehes)) if CLEAN_GROUPS: async for user in tbot.iter_participants(int(warner.chat_id)): if user.deleted: await tbot.edit_permissions(int(warner.chat_id), user.id, view_messages=False) except Exception as e: print(f"Unable To Close Group {warner} - {e}") scheduler = AsyncIOScheduler(timezone="Asia/Kolkata") scheduler.add_job(job_close, trigger="cron", hour=23, minute=55) scheduler.start() async def job_open(): ws_chats = get_all_chat_id() if len(ws_chats) == 0: return for warner in ws_chats: try: await tbot.send_message( int(warner.chat_id), "`06:00 Am, Group Is Opening.`\n**Powered By @DaisyXBot**", ) await tbot(
def __init__(self):
    """Create this object with its own asyncio-based APScheduler."""
    sched = AsyncIOScheduler()
    self.scheduler = sched
return whiteeyeping.rmping(url) await event.edit( f"**URL :** `{url}` **Sucessfully Removed From DataBase**") async def ping_servers(): hmm_p = 0 url_s = whiteeyeping.get_all_url() header_s = {"User-Agent": "Server Pinged By @WhiteEyeDevs"} if len(url_s) == 0: return for i in url_s: try: ws = requests.get(url=i.url, headers=header_s).status_code logger.info(f"Pinged {i.url} // Status Code Recived : {ws}") except: hmm_p += 1 success_l = len(url_s) - hmm_p logger.info(f"Sucessfully Pinged {success_l} Urls Out Of {len(url_s)}") scheduler = AsyncIOScheduler( executors={ "threadpool": ThreadPoolExecutor(max_workers=9), "processpool": ProcessPoolExecutor(max_workers=3), }) scheduler.add_job(ping_servers, "interval", minutes=60, executor="threadpool") scheduler.start()
async def init_scheduler():
    """Build an AsyncIO scheduler, start it, and hand it back."""
    sched = AsyncIOScheduler()
    sched.start()
    return sched
def create_scheduler(event_loop=None) -> AsyncIOScheduler:
    """Return a UTC scheduler bound to *event_loop*.

    The scheduler is augmented with a bound ``run_later`` convenience
    method before being returned.
    """
    sched = AsyncIOScheduler(timezone=pytz.utc, event_loop=event_loop)
    sched.run_later = MethodType(run_later, sched)
    return sched
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger

from app.db import db_engine, db_session
from app.models import Route, Schedule

log = logging.getLogger(__name__)

# Persist jobs in the application database so they survive restarts.
jobstores = {'default': SQLAlchemyJobStore(engine=db_engine)}
# Coalesce missed runs and allow jobs to fire up to 30s late.
jobdefaults = {'misfire_grace_time': 30, 'coalesce': True}
executors = {'default': AsyncIOExecutor()}

# Module-wide scheduler instance, pinned to Moscow time.
Scheduler = AsyncIOScheduler(
    jobstores=jobstores,
    executors=executors,
    timezone='Europe/Moscow',
    job_defaults=jobdefaults,
)


def get_active_schedules():
    """Returns all schedules with is_active property."""
    schedules = Schedule.get_active()
    log.info('Found %s active schedules.', len(schedules))
    return schedules


async def send_single_route(route_id: int):
    # NOTE(review): function continues beyond this chunk; only the deferred
    # import (breaking an app.main circular import) is visible here.
    # REFACTOR ME PLS!!!
    from app.main import bot  # pylint: disable=import-outside-toplevel
async def main(): """The first function that is run when starting the bot""" # Read user-configured options from a config file. # A different config file path can be specified as the first command line argument if len(sys.argv) > 1: config_path = sys.argv[1] else: config_path = "config.yaml" # Read the parsed config file and create a Config object config = Config(config_path) # Configure the database store = Storage(config.database) # Configuration options for the AsyncClient client_config = AsyncClientConfig( max_limit_exceeded=0, max_timeouts=0, store_sync_tokens=True, encryption_enabled=True, ) # Initialize the matrix client client = AsyncClient( config.homeserver_url, config.user_id, device_id=config.device_id, store_path=config.store_path, config=client_config, ) if config.user_token: client.access_token = config.user_token client.user_id = config.user_id # Set up event callbacks callbacks = Callbacks(client, store, config) client.add_event_callback(callbacks.message, (RoomMessageText, )) client.add_event_callback(callbacks.invite, (InviteMemberEvent, )) client.add_event_callback(callbacks.decryption_failure, (MegolmEvent, )) client.add_event_callback(callbacks.unknown, (UnknownEvent, )) # Set up a scheduler scheduler = AsyncIOScheduler() # Set up MISPAlert misp_alert = MISPAlert(client, config, store) # Add a job that checks for new taged events every minute trigger = IntervalTrigger( seconds=60, start_date=datetime.now() + timedelta(seconds=2), ) # Add the query job scheduler.add_job(misp_alert.alerter, trigger=trigger) # Keep trying to reconnect on failure (with some time in-between) while True: try: if config.user_token: # Use token to log in client.load_store() # Sync encryption keys with the server if client.should_upload_keys: await client.keys_upload() else: # Try to login with the configured username/password try: login_response = await client.login( password=config.user_password, device_name=config.device_name, ) # Check if login failed if 
type(login_response) == LoginError: logger.error("Failed to login: %s", login_response.message) return False except LocalProtocolError as e: # There's an edge case here where the user hasn't installed the correct C # dependencies. In that case, a LocalProtocolError is raised on login. logger.fatal( "Failed to login. Have you installed the correct dependencies? " "https://github.com/poljar/matrix-nio#installation " "Error: %s", e, ) return False # Login succeeded! logger.info(f"Logged in as {config.user_id}") # Allow jobs to fire try: scheduler.start() except SchedulerAlreadyRunningError: pass await client.sync_forever(timeout=30000, full_state=True) except (ClientConnectionError, ServerDisconnectedError): logger.warning( "Unable to connect to homeserver, retrying in 15s...") # Sleep so we don't bombard the server with login requests sleep(15) finally: # Make sure to close the client connection on disconnect await client.close()
def asyncio_scheduler(event_loop):
    """Yield a paused scheduler on *event_loop*; shut it down on teardown."""
    sched = AsyncIOScheduler(event_loop=event_loop)
    sched.start(paused=True)
    yield sched
    sched.shutdown(wait=False)
def main():
    """Entry point: load config, wire the CQHTTP bot to yobot, and serve."""
    print("""============================== _ _ | | | | _ _ ___ | |__ ___ | |_ | | | |/ _ \| '_ \ / _ \| __| | |_| | (_) | |_) | (_) | |_ \__, |\___/|_.__/ \___/ \__| __/ | |___/ ==============================""")
    print("正在初始化...")
    # Read the access token from the local config, warning loudly when the
    # API would otherwise be exposed without authentication.
    if os.path.exists("yobot_config.json"):
        with open("yobot_config.json", "r") as f:
            config = json.load(f)
        token = config.get("access_token", None)
        if token is None:
            print("*************************************************")
            print("警告:没有设置access_token,这会直接暴露机器人接口")
            print("这意味着允许机器人执行任何人的请求,造成安全隐患")
            print("请在yobot_config.json文件中修改access_token项")
            print("并使其与httpapi中的access_token保持一致")
            print("*************************************************")
            # input("(按回车继续)")
    else:
        token = None
    cqbot = CQHttp(access_token=token, enable_http_post=False)
    bot = yobot.Yobot(
        data_path=".",
        quart_app=cqbot.server_app,
        bot_api=cqbot._api,
    )
    host = bot.glo_setting.get("host", "0.0.0.0")
    port = bot.glo_setting.get("port", 9222)

    @cqbot.on_message
    async def handle_msg(context):
        # Only group/private messages are processed; everything else ignored.
        if context["message_type"] == "group" or context[
                "message_type"] == "private":
            reply = await bot.proc_async(context)
        else:
            reply = None
        if reply != "" and reply is not None:
            return {'reply': reply, 'at_sender': False}
        else:
            return None

    async def send_it(func):
        # A job may be sync or async; either way it yields kwargs dicts
        # that are fanned out concurrently to cqbot.send_msg.
        if asyncio.iscoroutinefunction(func):
            to_sends = await func()
        else:
            to_sends = func()
        if to_sends is None:
            return
        tasks = [cqbot.send_msg(**kwargs) for kwargs in to_sends]
        await asyncio.gather(*tasks)

    # Register each (trigger, job) pair reported by the bot on a scheduler.
    jobs = bot.active_jobs()
    if jobs:
        sche = AsyncIOScheduler()
        for trigger, job in jobs:
            sche.add_job(func=send_it,
                         args=(job, ),
                         trigger=trigger,
                         coalesce=True,
                         max_instances=1,
                         misfire_grace_time=60)
        sche.start()
    print("初始化完成,启动服务...")
    cqbot.run(host=host, port=port, debug=False, use_reloader=False)
def __init__(self, harmony):
    """Store *harmony*, start a fresh scheduler, and queue up the jobs."""
    sched = AsyncIOScheduler()
    sched.start()
    self.harmony = harmony
    self.schedule = sched
    self.populateScheduler()
def start(self, interval):
    """Begin polling: run exec_listener every *interval* seconds."""
    sched = AsyncIOScheduler()
    sched.add_job(self.exec_listener, 'interval', seconds=interval)
    self.scheduler = sched
    sched.start()
def __init__(self):
    """Initialise bot state and its scheduler, then set up the Bot base."""
    self.guild = None
    self.ready = False
    self.PREFIX = PREFIX
    self.scheduler = AsyncIOScheduler()
    super().__init__(command_prefix=PREFIX, owner_ids=OWNERIDS)
async def test_load_bars_sync_jobs(self):
    """Check load_bars_sync_jobs registers the expected cron job names.

    Installs a full set of bar-sync frame configs (1m..1M), lets
    syncjobs.load_bars_sync_jobs populate a fresh scheduler, and compares
    the resulting job names against the known trading-session crons.
    """
    # Save and restore the global config so other tests are unaffected.
    origin = cfg.omega.sync.bars
    try:
        cfg.omega.sync.bars = [
            {
                "frame": "1m",
                "start": "2020-01-02",
                "stop": "2020-01-02",
                "delay": 3,
                "cat": [],
                "include": "000001.XSHE",
                "exclude": "000001.XSHG",
            },
            {
                "frame": "5m",
                # NOTE(review): "2020-01-2" is not zero-padded — confirm the
                # date parser accepts this form.
                "start": "2020-01-2",
                "stop": "2020-01-03",
                "delay": 3,
                "cat": [],
                "include": "000001.XSHE",
                "exclude": "000001.XSHG",
            },
            {
                "frame": "15m",
                "start": "2020-01-02",
                "stop": "2020-01-03",
                "delay": 3,
                "cat": [],
                "include": "000001.XSHE",
                "exclude": "000001.XSHG",
            },
            {
                "frame": "30m",
                "start": "2020-01-02",
                "stop": "2020-01-03",
                "delay": 3,
                "cat": [],
                "include": "000001.XSHE",
                "exclude": "000001.XSHG",
            },
            {
                "frame": "60m",
                "start": "2020-01-02",
                "stop": "2020-01-03",
                "delay": 3,
                "cat": [],
                # Two space-separated codes: exercises multi-code includes.
                "include": "000001.XSHE 000004.XSHE",
                "exclude": "000001.XSHG",
            },
            {
                "frame": "1d",
                "start": "2020-01-02",
                "stop": "2020-01-03",
                "delay": 3,
                "cat": [],
                "include": "000001.XSHE",
                "exclude": "000001.XSHG",
            },
            {
                "frame": "1W",
                "start": "2020-01-02",
                "stop": "2020-01-03",
                "delay": 3,
                "cat": [],
                "include": "000001.XSHE",
                "exclude": "000001.XSHG",
            },
            {
                "frame": "1M",
                "start": "2020-01-02",
                "stop": "2020-01-03",
                "delay": 3,
                "cat": [],
                "include": "000001.XSHE",
                "exclude": "000001.XSHG",
            },
        ]
        scheduler = AsyncIOScheduler(timezone=cfg.tz)
        syncjobs.load_bars_sync_jobs(scheduler)
        actual = set([job.name for job in scheduler.get_jobs()])
        # Expected job names cover the A-share trading sessions; note the
        # "1W" frame yields no entry in this expectation set.
        expected = set([
            "1m:9:31-59",
            "1m:10:*",
            "1m:11:0-30",
            "1m:13-14:*",
            "1m:15:00",
            "5m:9:35-55/5",
            "5m:10:*/5",
            "5m:11:0-30/5",
            "5m:13-14:*/5",
            "5m:15:00",
            "15m:9:45",
            "15m:10:*/5",
            "15m:11:15,30",
            "15m:13-14:*/15",
            "15m:15:00",
            "30m:10-11:*/30",
            "30m:13:30",
            "30m:14-15:*/30",
            "60m:10:30",
            "60m:11:30",
            "60m:14-15:00",
            "1d:15:00",
            "1M:15:00",
        ])
        self.assertSetEqual(expected, actual)
    finally:
        cfg.omega.sync.bars = origin
from eLink.PowerMeter import *
import random
from datetime import datetime
import os

from apscheduler.schedulers.asyncio import AsyncIOScheduler

# Python 2 fallback: trollius provides the asyncio API there.
try:
    import asyncio
except ImportError:
    import trollius as asyncio

default_elinkvkm = "10.42.0.101"
new_ramdom = random.choice(list(VncMode))
action = "None"

# Test rig: scheduler plus the power meter on serial port COM13.
testScheduler = AsyncIOScheduler()
powermeter = PowerMeterController(name="test Power", port="COM13")
testScheduler.start()
count = 0


def getPowerMeterChannel():
    """Request a data sample from both INA channels for the current action."""
    powermeter.sendGetDataChannel(InaChannel.INA1_ID, action)
    powermeter.sendGetDataChannel(InaChannel.INA2_ID, action)


def remove_and_upload_file(elinkObj):
    # Locate the DLL entry on the device and mark the start of a file
    # transfer measurement. NOTE(review): this function may continue beyond
    # the visible chunk; `entry` is unused in the part shown here.
    filemnger = FileManager(elinkObj)
    entry = filemnger.find_entry("opencv_world341.dll")
    powermeter.getPowerInfo(vnc_switch_mode.__name__, Action.FILE_TRANSFER,
                            ActionState.ACTION_START)
def __init__(self, bot):
    """Store *bot* and create a scheduler tolerating 15-minute misfires."""
    defaults = {"misfire_grace_time": 900}
    self.bot = bot
    self.scheduler = AsyncIOScheduler(job_defaults=defaults)
基础的任务调度演示
"""
import time
from typing import Union
from datetime import datetime

from fastapi import FastAPI, Query, Body
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.triggers.cron import CronTrigger

# Scheduler persisting its jobs in a local SQLite job store.
Schedule = AsyncIOScheduler(
    jobstores={
        'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
    }
)
Schedule.start()

app = FastAPI()


# Simple response envelope helpers.
def resp_ok(*, code=0, msg="ok", data: Union[list, dict, str] = None) -> dict:
    """Build a success payload: {code, msg, data}."""
    return {"code": code, "msg": msg, "data": data}


def resp_fail(*, code=1, msg="fail", data: Union[list, dict, str] = None):
    """Build a failure payload: {code, msg, data}."""
    return {"code": code, "msg": msg, "data": data}
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.asyncio import AsyncIOScheduler

import scrape
import settings

# Persist jobs in the configured database so schedules survive restarts.
jobstores = {"default": SQLAlchemyJobStore(url=settings.JOBSTORES_URI)}
# Single worker thread: scrape runs must not overlap.
executors = {"default": ThreadPoolExecutor(1)}
job_defaults = {
    "coalesce": True,
    "max_instances": 1,
    "misfire_grace_time": 3600
}

scheduler = AsyncIOScheduler(jobstores=jobstores,
                             executors=executors,
                             job_defaults=job_defaults)
# scrape.job supplies the add_job kwargs (func, trigger, etc.).
scheduler.add_job(**scrape.job)
from apscheduler.executors.asyncio import AsyncIOExecutor
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from pytz import utc

from loop import event_loop
from settings import DATABASE_CONNECTION_URL

# Jobs are persisted to the application's SQL database.
jobstores = {
    'default': SQLAlchemyJobStore(url=DATABASE_CONNECTION_URL),
}
executors = {
    'default': AsyncIOExecutor(),
}
# No coalescing; allow up to three concurrent instances per job.
job_defaults = {
    'coalesce': False,
    'max_instances': 3,
}

# Module-wide UTC scheduler bound to the shared event loop.
scheduler = AsyncIOScheduler(
    jobstores=jobstores,
    executors=executors,
    job_defaults=job_defaults,
    timezone=utc,
    event_loop=event_loop,
)
def start(self):
    """Create this object's scheduler and start it immediately."""
    sched = AsyncIOScheduler()
    self.scheduler = sched
    sched.start()
intents.typing = False intents.dm_typing = False intents.guild_typing = False member_cache_flags: discord.MemberCacheFlags = discord.MemberCacheFlags().none( ) member_cache_flags.joined = True # === Intents Section === bot: commands.Bot = commands.Bot(command_prefix='>', description=BOT_DESCRIPTION, case_insensitive=True, help_command=MyHelp(), intents=intents) scheduler: AsyncIOScheduler = AsyncIOScheduler(timezone="Asia/Tokyo") @bot.listen() async def on_ready() -> None: print('Ready!') print(bot.user.name) print(bot.user.id) print('------') await bot.change_presence(status=discord.Status.online, activity=discord.Game(">help")) if scheduler.state == 0: scheduler.remove_all_jobs() scheduler.add_job(prompt_tv,
def __init__(self, loop):
    """Remember *loop* and start a scheduler with a 100-thread pool."""
    executor_conf = {
        'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
        'max_workers': '100',
    }
    self.loop = loop
    self.scheduler = AsyncIOScheduler(
        {'apscheduler.executors.default': executor_conf})
    self.scheduler.start()
from app import config

# Template for deterministic per-channel/per-user posting job ids.
BASE_JOB_ID = "posting_{channel_id}_{user_id}"

# Jobs live in Redis so they survive process restarts.
jobstores = {
    "default": RedisJobStore(host=config.REDIS_HOST,
                             password=config.REDIS_PASSWORD)
}
executors = {"default": AsyncIOExecutor()}
job_defaults = {
    "coalesce": False,
    "max_instances": 3,
    "misfire_grace_time": 330
}

# Module-wide UTC scheduler, managed via setup()/shutdown() below.
apscheduler = AsyncIOScheduler(jobstores=jobstores,
                               executors=executors,
                               job_defaults=job_defaults,
                               timezone=utc)


def setup():
    """Start the module-level scheduler."""
    apscheduler.start()


def shutdown():
    """Stop the module-level scheduler."""
    apscheduler.shutdown()
await event.edit("`Processing..`") url = event.text.split(" ", maxsplit=1)[1] if not jserver.is_ping_indb(str(url)): await event.edit("**Server Not Found In Db !**") return jserver.rmping(url) await event.edit(f"**URL :** `{url}` **Sucessfully Removed From Db**") async def ping_servers(): hmm_p = 0 url_s = jserver.get_all_url() header_s = {"User-Agent": 'Server Pinged By @FridayOT'} if len(url_s) == 0: return for i in url_s: try: ws = requests.get(url=i.url, headers=header_s).status_code logger.info(f"Pinged {i.url} // Status Code Recived : {ws}") except: hmm_p += 1 success_l = len(url_s) - hmm_p logger.info(f"Sucessfully Pinged {success_l} Urls Out Of {len(url_s)}") scheduler = AsyncIOScheduler(executors={ 'default': AsyncIOExecutor(), }) scheduler.add_job(ping_servers, 'interval', minutes=Config.PING_SERVER_EVERY_MINUTE_VALUE) scheduler.start()
import discord, AsyncDataBase, reaction_code, message_code, randomstatus, os, decouple #imports from apscheduler.schedulers.asyncio import AsyncIOScheduler from pytz import utc from string import digits from discord.ext import commands client = commands.Bot(command_prefix="&", self_bot=False, intents=discord.Intents.all()) #initializing client activity = discord.Activity(name=randomstatus.randomstatus(), type=discord.ActivityType.watching) scheduler = AsyncIOScheduler(timezone=utc) async def offline_mode_on(): activity = discord.Activity(name="Mod Mail Is Offline", type=discord.ActivityType.watching) await AsyncDataBase.update("Offline", 1, BOOL=True) await client.change_presence(activity=activity) async def offline_mode_off(): activity = discord.Activity(name="Mod Mail Is Online", type=discord.ActivityType.watching) await AsyncDataBase.update("Offline", 1, BOOL=False) await client.change_presence(activity=activity) # this is used to print out in terminal when the bot is ready @client.event async def on_ready():
"""
author: thomaszdxsn
"""
import asyncio
import logging

import arrow
import pytz
from apscheduler.schedulers.asyncio import AsyncIOScheduler

from scripts.mongo2s3 import main

logging.basicConfig(level=logging.INFO)


async def task():
    """Run the mongo-to-s3 dump for the window [one week ago, yesterday]."""
    start, end = arrow.utcnow().shift(weeks=-1), arrow.utcnow().shift(days=-1)
    await main(start, end)


# Run daily at 00:15 UTC, then block the event loop forever.
scheduler = AsyncIOScheduler(timezone=pytz.UTC)
scheduler.add_job(task, trigger='cron', hour=0, minute=15)
print('starting scheduler')
scheduler.start()
asyncio.get_event_loop().run_forever()
def __init__(self):
    """Create the internal scheduler and pin its timezone to UTC."""
    self._scheduler = AsyncIOScheduler()
    self._scheduler.configure(timezone=utc)  # use UTC as the scheduler's timezone
import asyncio

# uvloop is an optional speedup; fall back to the stock asyncio loop.
try:
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
    pass

from apscheduler.schedulers.asyncio import AsyncIOScheduler

import modian.Schedule as modianSchedule
from modian.configs.ModianConfig import config

if __name__ == "__main__":
    sche = AsyncIOScheduler()
    # Poll intervals in seconds; missing or zero falls back to defaults.
    if "dailyInterval" in config and config['dailyInterval'] != 0:
        dailyInterval = config['dailyInterval']
    else:
        dailyInterval = 25
    if "pkInterval" in config and config['pkInterval'] != 0:
        pkInterval = config['pkInterval']
    else:
        pkInterval = 30
    # NOTE(review): the script continues past this chunk; pkInterval is
    # presumably consumed by a pk-schedule job added below.
    if "daily" in config:
        sche.add_job(modianSchedule.dailySchedule,
                     'interval',
                     seconds=dailyInterval,
                     max_instances=3)
def __init__(self, logger, jobs, notebooks, loop):
    """Store collaborators and create a scheduler bound to *loop*.

    Parameters
    ----------
    logger : a logging-like object used by this component
    jobs : the job definitions to be scheduled
    notebooks : the notebooks the jobs operate on
    loop : the asyncio event loop the scheduler must run on
    """
    self.__logger = logger
    self.__notebooks = notebooks
    self.__jobs = jobs
    # BUG FIX: the original passed {"event_loop": loop} as the positional
    # gconfig dict. BaseScheduler.configure() only keeps gconfig keys that
    # start with the 'apscheduler.' prefix, so the bare "event_loop" key was
    # silently dropped and the scheduler ignored the supplied loop. Passing
    # it as a keyword option delivers it to AsyncIOScheduler as intended.
    self.__scheduler = AsyncIOScheduler(event_loop=loop)
    self.state = self.__scheduler.state