def start_scheduler(settings):
    assert settings['scheduler.store'] in ('redis', 'sqlalchemy', 'rethinkdb'), \
        'Unknown job store, must be one of redis or sqlalchemy'
    if settings['scheduler.store'] == 'redis':
        jobstores = {
            'default': RedisJobStore(host=settings['scheduler.host'],
                                     db=settings['scheduler.db'])
        }
    else:
        jobstores = {
            'default': SQLAlchemyJobStore(url=settings['scheduler.url'])
        }
    executors = {
        'default': {
            'type': settings['scheduler.executors.type'],
            'max_workers': settings['scheduler.executors.max_workers']
        },
        'processpool': ProcessPoolExecutor(
            max_workers=settings['scheduler.executors.processpool.max_workers']
        )
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': settings['scheduler.job_defaults.max_instances']
    }
    scheduler.configure(jobstores=jobstores, executors=executors,
                        job_defaults=job_defaults, timezone=timezone('UTC'))
    if settings['scheduler.autostart'] == 'true':
        scheduler.start()
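# Illustrative only: a hedged sketch of the settings mapping start_scheduler()
# above expects. The keys mirror those read in the function; every value here
# is a hypothetical placeholder, not taken from the original project.
example_settings = {
    'scheduler.store': 'sqlalchemy',
    'scheduler.url': 'sqlite:///jobs.sqlite',
    'scheduler.host': 'localhost',                      # only read for the redis store
    'scheduler.db': 0,                                  # only read for the redis store
    'scheduler.executors.type': 'threadpool',
    'scheduler.executors.max_workers': 20,
    'scheduler.executors.processpool.max_workers': 5,
    'scheduler.job_defaults.max_instances': 3,
    'scheduler.autostart': 'true',
}
# start_scheduler(example_settings)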
class Config(object):
    """Configuration for APScheduler."""

    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url='postgresql:///yattk_jobstore')
    }
    SCHEDULER_API_ENABLED = False
class DevelopmentConfig(BasicConfig):
    DEBUG = True
    SCHEDULER_API_ENABLED = True
    SCHEDULER_API_PREFIX = '/snapmgr_api'

    # cinder client configuration
    AUTH_URL = environs['auth_url'].strip() if environs.get(
        'auth_url') else "http://172.31.11.204:5000/v2.0"
    AUTH_VERSION = environs['auth_version'].strip() if environs.get(
        'auth_version') else "3"
    USERNAME = environs['username'].strip() if environs.get(
        'username') else "admin"
    PASSWORD = environs['password'].strip() if environs.get(
        'password') else "admin"
    TENANT_NAME = environs['tenant_name'].strip() if environs.get(
        'tenant_name') else "admin"
    REGION_NAME = environs['region_name'].strip() if environs.get(
        'region_name') else "RegionOne"

    # Persistence settings for scheduled jobs
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=_mysql_config(host=mysql_host,
                                                        name=mysql_name,
                                                        user=mysql_user,
                                                        password=mysql_password,
                                                        port=mysql_port))
    }
class ProductionConfig(Config):
    # 123456 is the password, api_test is the database name
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:[email protected]:3306/api_test'
    # SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:[email protected]:3306/api_test'
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    SQLALCHEMY_POOL_TIMEOUT = 20
def __init__(self):
    threads = 10
    self.is_mule = False
    # If the scheduler is already started, use uwsgi IPC to send jobs to the mule process
    self.lock_f = open('/tmp/scheduler.lock', 'w')
    try:
        fcntl.lockf(self.lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError:
        self.use_mule = True
    else:
        self.use_mule = False
    caller = self.get_caller(caller=inspect.currentframe())
    if caller == 'api':
        sqlalchemy_url = cnaas_nms.db.session.get_sqlalchemy_conn_str()
        self._scheduler = BackgroundScheduler(
            executors={'default': ThreadPoolExecutor(threads)},
            jobstores={'default': SQLAlchemyJobStore(url=sqlalchemy_url)},
            job_defaults={},
            timezone=utc)
        logger.info(
            "Scheduler started with persistent jobstore, {} threads".format(threads))
    elif caller == 'mule':
        sqlalchemy_url = cnaas_nms.db.session.get_sqlalchemy_conn_str()
        self._scheduler = BackgroundScheduler(
            executors={'default': ThreadPoolExecutor(threads)},
            jobstores={'default': SQLAlchemyJobStore(url=sqlalchemy_url)},
            job_defaults={},
            timezone=utc)
        logger.info(
            "Scheduler started with persistent jobstore, {} threads".format(threads))
        self.is_mule = True
    elif self.use_mule:
        logger.info("Using uwsgi to send jobs to the mule process")
        self._scheduler = None
    else:
        self._scheduler = BackgroundScheduler(
            executors={'default': ThreadPoolExecutor(threads)},
            jobstores={'default': MemoryJobStore()},
            job_defaults={},
            timezone=utc)
        logger.info(
            "Scheduler started with in-memory jobstore, {} threads".format(threads))
def start_sched():
    sched.configure(
        jobstores={
            'default': SQLAlchemyJobStore(url=settings.SCHEDULER_DB_URL)
        })
    log.info("Starting scheduler")
    sched.start()
    return sched
class DevelopmentConfig(Config):
    """Dev config."""

    DEBUG = True
    SCHEDULER_API_ENABLED = True
    SCHEDULER_JOBSTORES = {
        "default": SQLAlchemyJobStore(url="sqlite:///website/" + db_name)
    }
class ProductionConfig(BaseConfig):
    """Production configuration"""

    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
    TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
    TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=os.environ.get('DATABASE_URL'))
    }
class DevConfig(Config):
    DEBUG = True
    _sqlite_db_path = os.path.join(ROOT_PATH, "flask_demo.db")
    SQLALCHEMY_DATABASE_URI = "sqlite:///{}".format(_sqlite_db_path)

    # Where flask-apscheduler stores jobs, used to persist scheduled tasks
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
class DevConfig(Config):
    """ Set Flask configuration vars for development. """

    DEBUG = True

    # SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL')
    SQLALCHEMY_DATABASE_URI = db_url

    # Apscheduler Config
    SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url=db_url)}
class Config(object):
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=DATABASE_URI)
    }
    SCHEDULER_API_ENABLED = True
    SQLALCHEMY_DATABASE_URI = DATABASE_URI
    SECRET_KEY = 'g6DGM5y2bVhb0mxdCRELI5m7fnzzoJ2y'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SEND_FILE_MAX_AGE_DEFAULT = 1296000
class DevelopmentConfig(BaseConfig):
    """Development configuration"""

    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
    TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
    TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=os.environ.get('DATABASE_URL'))
    }
class Config:
    """Prod config."""

    DEBUG = False
    TESTING = False
    SCHEDULER_API_ENABLED = True
    SCHEDULER_JOBSTORES = {
        "default": SQLAlchemyJobStore(url="sqlite:///website/" + db_name)
    }
async def load_schedule_or_create_blank():
    # Persist jobs in a local SQLite file
    job_stores = {
        'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
    }
    schedule = AsyncIOScheduler(jobstores=job_stores)
    schedule.start()
    app.state.schedule = schedule
    logger.info("Created Schedule Object")
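# A possible way to run the coroutine above when the service boots; the FastAPI
# "app" object is assumed to exist elsewhere in the original project.
app.add_event_handler("startup", load_schedule_or_create_blank)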
class ConfigTask(object):
    """Scheduled task configuration"""

    jobstores = {'default': SQLAlchemyJobStore(url="sqlite:///" + os.path.join(basedir, "data.sqlite"))}
    executors = {'default': ThreadPoolExecutor(10),
                 'processpool': ProcessPoolExecutor(3)}

    def __init__(self):
        self.scheduler = BackgroundScheduler(jobstores=self.jobstores,
                                             executors=self.executors)
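# Illustrative usage of the ConfigTask class above; sample_job and the 5-minute
# interval are hypothetical, not part of the original snippet.
def sample_job():
    print("sample job ran")

task_config = ConfigTask()
task_config.scheduler.add_job(sample_job, 'interval', minutes=5,
                              id='sample_job', replace_existing=True)
task_config.scheduler.start()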
def initialize_job_schedules(_app, force=False):
    global app
    app = _app
    global sched
    if app.config['JOB_SCHEDULING_ENABLED']:
        db_jobstore = SQLAlchemyJobStore(url=app.config['SQLALCHEMY_DATABASE_URI'],
                                         tablename='apscheduler_jobs')
        sched = BackgroundScheduler(jobstores={'default': db_jobstore})
        sched.start()
        schedule_all_jobs(force)
class Config:
    """App configuration."""

    JOBS = [{"id": "job1", "func": show_users, "trigger": "interval", "seconds": 2}]
    SCHEDULER_JOBSTORES = {
        "default": SQLAlchemyJobStore(url="sqlite:///flask_context.db")
    }
    SCHEDULER_API_ENABLED = True
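# A minimal sketch of wiring the Config class above into Flask-APScheduler;
# the Flask app setup is assumed, not taken from the original snippet.
from flask import Flask
from flask_apscheduler import APScheduler

app = Flask(__name__)
app.config.from_object(Config())

scheduler = APScheduler()
scheduler.init_app(app)  # picks up JOBS, SCHEDULER_JOBSTORES and SCHEDULER_API_ENABLED
scheduler.start()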
def init_scheduler(self) -> None:
    """
    Initialize APScheduler.
    :return:
    """
    job_stores = {
        'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
    }
    self._schedule = AsyncIOScheduler(jobstores=job_stores)
    self._schedule.start()
def __init__(self, bot):
    Gallery.bot = self
    self.bot = bot
    self.db = None
    # Gallery.config = Config()
    self.gal_guild_id = 0
    self.gal_enable = False
    self.gal_channel_ids = []
    self.gal_channels = []
    self.gal_text_expirein = None
    self.gal_user_wl = []
    self.gal_allow_links = False
    self.gal_link_wl = []
    self.jobstore = SQLAlchemyJobStore(url='sqlite:///gallery.sqlite')
    jobstores = {"default": self.jobstore}
    self.scheduler = AsyncIOScheduler(jobstores=jobstores)
    self.scheduler.add_listener(self.job_missed, events.EVENT_JOB_MISSED)
class TestingConfig(BaseConfig):
    """Testing configuration"""

    TESTING = True
    PRESERVE_CONTEXT_ON_EXCEPTION = False
    TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_TEST_ACCOUNT_SID')
    TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_TEST_AUTH_TOKEN')
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_TEST_URL')
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=os.environ.get('DATABASE_URL'))
    }
class ProductionConfig(Config):
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://tianxing_product_admin:!@#2021tianxing*()@rm-2ze175qyb41pw468190150.mysql.rds.aliyuncs.com:3306/autotest?charset=utf8mb4'
    # SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:[email protected]:3306/api_test'  # 123456 is the password, api_test is the database name
    # SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI,
    #                                                      engine_options={'pool_pre_ping': True})}
    SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI,
                                                         engine_options={'pool_pre_ping': True})}
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_POOL_SIZE = 1000
    SQLALCHEMY_POOL_RECYCLE = 1800
class Config:
    # Debug mode for the project
    DEBUG = True

    # Session configuration
    SECRET_KEY = os.urandom(24)
    PERMANENT_SESSION_LIFETIME = timedelta(hours=1)

    # Database
    HOST = '192.168.10.19'
    PORT = 3306
    USER = "******"
    PASSWORD = '******'
    DBNAME = 'egc_sf'
    CHARSET = 'utf8mb4'

    # Log level
    LOG_INFO = DEBUG

    SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{USER}:{PASSWORD}@{HOST}/{DBNAME}"

    # Scheduler API switch
    SCHEDULER_API_ENABLED = True

    # Job store persistence
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
    SCHEDULER_EXECUTORS = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        }
    }

    # RabbitMQ connection settings
    RABBITMQ_HOST = '192.168.10.112'
    RABBITMQ_USERNAME = '******'
    RABBITMQ_PASSWORD = '******'
    RABBITMQ_VHOST = '/sf_egc_dev'

    POOL = PooledDB(creator=pymysql, maxconnections=10, mincached=2, maxcached=5,
                    maxshared=3, blocking=True, maxusage=None, setsession=[],
                    ping=0, host=HOST, port=PORT, user=USER, password=PASSWORD,
                    database=DBNAME, charset=CHARSET)
def _set_up_scheduler(self):
    # Creates a scheduler with a local database and logger
    # returns scheduler
    jobstores = {
        'default': SQLAlchemyJobStore(url='sqlite:///' + self.storage_location + '/jobs.db')
    }
    scheduler = BackgroundScheduler(jobstores=jobstores)
    scheduler.name = 'apscheduler'
    self._setup_logger(scheduler.name)
    # scheduler.add_job(memory_info, 'interval', seconds=60, name='memory', id='memory', replace_existing=True)
    return scheduler
def config_scheduler(scheduler_settings):
    # validate input
    if not isinstance(scheduler_settings, dict):
        raise TypeError('Scheduler settings must be a dictionary.')

    # construct default configuration
    scheduler_configs = {}

    # add job store to scheduler
    job_store_on = False
    job_store_settings = []
    job_store_login_names = []
    job_store_login_keys = ['user', 'pass', 'host', 'port']
    for key in job_store_login_keys:
        key_name = 'scheduler_job_store_%s' % key
        job_store_login_names.append(key_name)
        if scheduler_settings[key_name]:
            job_store_settings.append(scheduler_settings[key_name])
            job_store_on = True
    if job_store_on:
        if len(job_store_settings) != len(job_store_login_keys):
            raise IndexError(
                'Initialization of the scheduler job store requires values for all %s login fields.'
                % job_store_login_names)
        from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
        job_store_url = 'postgresql://%s:%s@%s:%s' % (
            job_store_settings[0],
            job_store_settings[1],
            job_store_settings[2],
            job_store_settings[3])
        postgresql_store = SQLAlchemyJobStore(url=job_store_url)
        jobstore_settings = {'default': postgresql_store}
        scheduler_configs['jobstores'] = jobstore_settings

    # adjust job default settings
    scheduler_job_defaults = {}
    if scheduler_settings['scheduler_job_defaults_coalesce']:
        scheduler_job_defaults['coalesce'] = True
    if scheduler_settings['scheduler_job_defaults_max_instances']:
        scheduler_job_defaults['max_instances'] = scheduler_settings[
            'scheduler_job_defaults_max_instances']
    if scheduler_job_defaults:
        scheduler_configs['job_defaults'] = scheduler_job_defaults

    # adjust executor settings
    # scheduler_executors = {}
    # if scheduler_settings['scheduler_executors_type']:
    #     scheduler_executors['type'] = scheduler_settings['scheduler_executors_type']
    # if scheduler_settings['scheduler_executors_max_workers']:
    #     scheduler_executors['max_workers'] = scheduler_settings['scheduler_executors_max_workers']
    # if scheduler_executors:
    #     scheduler_configs['SCHEDULER_EXECUTORS'] = scheduler_executors

    return scheduler_configs
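# Hypothetical input for config_scheduler() above; every key the function reads
# is present, and all values are placeholders rather than real credentials.
example_scheduler_settings = {
    'scheduler_job_store_user': 'scheduler',
    'scheduler_job_store_pass': 'secret',
    'scheduler_job_store_host': 'localhost',
    'scheduler_job_store_port': '5432',
    'scheduler_job_defaults_coalesce': True,
    'scheduler_job_defaults_max_instances': 3,
}
# scheduler_configs = config_scheduler(example_scheduler_settings)
# scheduler = BackgroundScheduler(**scheduler_configs)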
class TestConfig(Config):
    """ Set Flask configuration vars for testing. """

    # Globally turn off authentication (when unit testing)
    LOGIN_DISABLED = True

    DEBUG = True
    TESTING = True
    SQLALCHEMY_DATABASE_URI = db_url

    # Apscheduler Config
    SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url=db_url)}
class Config(object):
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
    SCHEDULER_API_ENABLED = True
def __init__(self):
    if self.is_api_caller(caller=inspect.currentframe()):
        sqlalchemy_url = cnaas_nms.db.session.get_sqlalchemy_conn_str()
        jobstores = {'default': SQLAlchemyJobStore(url=sqlalchemy_url)}
    else:
        jobstores = {'default': MemoryJobStore()}
    self._scheduler = BackgroundScheduler(
        executors={'default': ThreadPoolExecutor(10)},
        jobstores=jobstores,
        job_defaults={},
        timezone=utc)
def init_scheduler():
    # APScheduler configuration; 'db_location' is a placeholder for the real database URL
    jobstores = {
        'default': SQLAlchemyJobStore(url='db_location')
    }
    executors = {
        'default': ThreadPoolExecutor(20)
    }
    scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors)
    scheduler.start()
    return scheduler
def init_scheduler():
    # Flask-APScheduler reads SCHEDULER_* config keys; the executor must be an instance
    app.config.update({
        "SCHEDULER_JOBSTORES": {
            "default": SQLAlchemyJobStore(config.DB_URL),
        },
        "SCHEDULER_EXECUTORS": {
            "default": AsyncIOExecutor(),
        },
    })
    scheduler = APScheduler(AsyncIOScheduler())
    scheduler.init_app(app)
    scheduler.start()
def config_Scheduler(self, timezone):
    jobstores = {
        'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
    }
    executors = {'processpool': ProcessPoolExecutor(max_workers=5)}
    job_defaults = {'coalesce': False, 'max_instances': 3}
    scheduler = BackgroundScheduler()
    scheduler.configure(jobstores=jobstores, executors=executors,
                        job_defaults=job_defaults, timezone=timezone)
    scheduler.add_job(job_function, 'interval', seconds=1)