def __init__(self):
    self.jobstores = {
        'default': RedisJobStore(),
        'redis': RedisJobStore()
    }
    self.executors = {
        'default': ProcessPoolExecutor(
            max_workers=get_config()["scheduler"].get("process_pool_max_workers", 20))
    }
    self.job_defaults = {
        'coalesce': True,
        'max_instances': 1
    }
    self.scheduler = BackgroundScheduler(jobstores=self.jobstores,
                                         executors=self.executors,
                                         job_defaults=self.job_defaults)
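A minimal usage sketch for a wrapper like this one. The class name SchedulerService and the heartbeat job are illustrative, and get_config() is assumed to return the project's configuration mapping:

from datetime import datetime

def heartbeat():
    # With a ProcessPoolExecutor and a Redis job store, the job callable must be
    # importable at module level and picklable.
    print("heartbeat at", datetime.now())

service = SchedulerService()   # hypothetical name for the class defined above
service.scheduler.start()
service.scheduler.add_job(heartbeat, 'interval', seconds=30,
                          id='heartbeat', replace_existing=True)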
Example #2
def init_scheduler(app):
    redis_url = app.config.get("REDIS_URL")

    if not redis_url:
        raise Exception("Undefined REDIS_URL app config var")

    redis_kwargs = redis_kwargs_from_url(redis_url)

    scheduler_kwargs = {
        "jobstores": {
            "default": RedisJobStore(**redis_kwargs)
        },
        "job_defaults": {
            "coalesce": True,
            "max_instances": 1
        },
        "executors": {
            "default": {
                "type": "threadpool"
            }
        }
    }

    _logger.debug(
        "APScheduler configuration:\n%s",
        pprint.pformat(scheduler_kwargs))

    apscheduler = BackgroundScheduler(**scheduler_kwargs)
    app.apscheduler = apscheduler
    app.apscheduler.start()

    add_scheduled_harvest(app, force=False)

    _logger.info("Running scheduler: %s", app.apscheduler)
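redis_kwargs_from_url is not shown in this example; a plausible sketch, assuming it simply splits a redis:// URL into the keyword arguments RedisJobStore forwards to the Redis client:

from urllib.parse import urlparse

def redis_kwargs_from_url(redis_url):
    # Hypothetical helper: turn "redis://:pass@host:6379/0" into connection kwargs.
    parsed = urlparse(redis_url)
    return {
        "host": parsed.hostname or "localhost",
        "port": parsed.port or 6379,
        "db": int((parsed.path or "/0").lstrip("/") or 0),
        "password": parsed.password,
    }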
Example #3
def start_scheduler(settings):
    assert settings['scheduler.store'] in ('redis', 'sqlalchemy'), \
        'Unknown job store, must be one of redis or sqlalchemy'

    if settings['scheduler.store'] == 'redis':
        jobstores = {
            'default': RedisJobStore(db=settings['scheduler.db'])
        }
    else:
        jobstores = {
            'default': SQLAlchemyJobStore(url=settings['scheduler.url'])
        }
        
    executors = {
        'default': {
            'type': settings['scheduler.executors.type'],
            'max_workers': settings['scheduler.executors.max_workers']
        },
        'processpool': ProcessPoolExecutor(
            max_workers=settings['scheduler.executors.processpool.max_workers']
        )
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': settings['scheduler.job_defaults.max_instances']
    }
    scheduler.configure(
        jobstores=jobstores,
        executors=executors,
        job_defaults=job_defaults,
        timezone=timezone('UTC')
    )
    if settings['scheduler.autostart'] == 'true':
        scheduler.start()
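The flat dotted keys suggest a Pyramid/PasteDeploy-style settings mapping, and `scheduler` is assumed to be a module-level APScheduler instance; illustrative values this function would accept:

settings = {
    'scheduler.store': 'redis',
    'scheduler.db': 2,                                   # used by the redis store
    'scheduler.url': 'sqlite:///jobs.sqlite',            # used by the sqlalchemy store
    'scheduler.executors.type': 'threadpool',
    'scheduler.executors.max_workers': 20,
    'scheduler.executors.processpool.max_workers': 5,
    'scheduler.job_defaults.max_instances': 3,
    'scheduler.autostart': 'true',
}
start_scheduler(settings)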
Example #4
class Config:
    SECRET_KEY = "Mobile Device Borrowing by Tavis D"
    # database
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    DB_HOST = os.environ.get('DB_HOST') or '127.0.0.1'
    DB_PORT = os.environ.get('DB_PORT') or '3306'
    DB_USER = os.environ.get('DB_USER') or 'root'
    DB_PASSWORD = os.environ.get('DB_PASSWORD') or '123456'
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/mobile_device_borrowing?charset=utf8mb4".format(
        DB_USER=DB_USER,
        DB_PASSWORD=DB_PASSWORD,
        DB_HOST=DB_HOST,
        DB_PORT=DB_PORT)
    # redis
    REDIS_HOST = os.environ.get('REDIS_HOST') or '127.0.0.1'
    REDIS_URL = "redis://{host}:6379/0".format(host=REDIS_HOST)
    # apscheduler
    SCHEDULER_JOBSTORES = {'default': RedisJobStore(host=REDIS_HOST)}
    # jwt
    JWT_SECRET_KEY = 'Mobile Device Borrowing by Tavis D'
    JWT_ACCESS_TOKEN_EXPIRES = 900  # 900 seconds = 15 minutes * 60
    JWT_BLACKLIST_ENABLED = True
    JWT_BLACKLIST_TOKEN_CHECKS = ['access']
    # marshmallow
    JSON_SORT_KEYS = False

    @staticmethod
    def init_app(app):
        pass
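A config class like this is typically consumed by Flask-APScheduler, which reads SCHEDULER_JOBSTORES from the app config; a minimal wiring sketch, assuming the extension is installed:

from flask import Flask
from flask_apscheduler import APScheduler

app = Flask(__name__)
app.config.from_object(Config)

scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()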
Example #5
def __init__(self, jobs_key, run_times_key, host, port):
    self._scheduler = BackgroundScheduler()
    self.redis_js = RedisJobStore(jobs_key=jobs_key,
                                  run_times_key=run_times_key,
                                  host=host,
                                  port=port)
    self._scheduler.add_jobstore(self.redis_js)
Example #6
def run():
    # Daemon can be disabled in development to avoid importing too much production data.
    if settings.KOCHERGA_IMPORTER_DISABLED:
        # Auto-detect settings change?
        while True:
            # Logging for clarity about what's going on.
            logger.info('Importer daemon disabled.')
            time.sleep(60)

    scheduler = BlockingScheduler(
        executors={"default": ThreadPoolExecutor(2)},
        jobstores={"default": RedisJobStore(**get_redis_connect_args())},
    )

    importers = all_importers()
    importers_gauge.set(len(importers))

    for i, importer in enumerate(importers):
        success_counter.labels(importer=importer.name).inc(0)
        failure_counter.labels(importer=importer.name).inc(0)

        scheduler.add_job(
            id=importer.name,
            name=importer.name,
            replace_existing=True,
            coalesce=True,
            func=importer.__class__.import_new,
            args=[importer],
            trigger="interval",
            **importer.interval(),
            jitter=300,
            start_date=datetime.now() + timedelta(seconds=i * 5),
        )

    scheduler.start()
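The gauge and counters are referenced but not defined in this snippet; a sketch of matching prometheus_client definitions (the metric names are assumptions):

from prometheus_client import Counter, Gauge

importers_gauge = Gauge('importers_total', 'Number of registered importers')
success_counter = Counter('importer_success_total', 'Successful import runs', ['importer'])
failure_counter = Counter('importer_failure_total', 'Failed import runs', ['importer'])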
Example #7
    def __init__(self):
        # conf = YAMLConfig.parse_app_config(
        #     "{}/{}".format(os.path.expanduser('~'), ".demo_text"), 'demo_text.yaml')
        conf = YAMLConfig.parse_app_config(os.getcwd(), 'demo_text.yaml')
        conf['LOG_LEVEL'] = getattr(logging, conf['LOG_LEVEL'].upper())

        # Database configuration
        if 'SQLALCHEMY_DATABASE_URI' not in conf:
            conf['SQLALCHEMY_DATABASE_URI'] = (
                'mysql+pymysql://{USER}:{PASSWORD}@{HOST}:{PORT}/{DATABASE}?charset=utf8mb4'
                .format(**conf['DATABASE']['DEFAULT']))
        if 'MASTER' in conf['DATABASE'] and 'MASTER_DATABASE_URI' not in conf:
            conf['MASTER_DATABASE_URI'] = (
                'mysql+pymysql://{USER}:{PASSWORD}@{HOST}:{PORT}/{DATABASE}?charset=utf8mb4'
                .format(**conf['DATABASE']['MASTER']))

        conf['SQLALCHEMY_POOL_SIZE'] = 40
        conf['SQLALCHEMY_POOL_TIMEOUT'] = 30
        conf['SQLALCHEMY_POOL_RECYCLE'] = 3600
        conf['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

        conf['REDIS_KEY_ACTUAL_DELAY'] = "actual_delay"
        conf['REDIS_KEY_PROPERTY'] = "property"
        conf['REDIS_KEY_EVENT_SERVICE'] = "event_service"

        conf['SCHEDULER_JOBSTORES'] = {
            'redis': RedisJobStore(db=conf['REDIS_DB'], host=conf['REDIS_HOST'], port=conf['REDIS_PORT']),
            'mysql': SQLAlchemyJobStore(url="mysql://{USER}:{PASSWORD}@{HOST}:{PORT}/{DATABASE}?charset=utf8".format(
                **conf['DATABASE']['DEFAULT']))
        }

        self.__dict__ = conf
Example #8
    def add_second_jobstore(self, db=14, *, jobs_key="apschedulers.second_jobs", run_times_key="apschedulers.second_run_times"):
        self._jobstores_alias = "async"
        logging.debug(f"AsyncIOScheduler add second jobstores: <{self._jobstores_alias}>")
        config = _redis.copy()
        config.update({"db": db, "jobs_key": jobs_key, "run_times_key": run_times_key})

        jobstores = RedisJobStore(**config)
        self.add_jobstore(jobstores, self._jobstores_alias)
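The module-level _redis mapping holds the base connection settings that this method copies and overrides; an illustrative definition:

_redis = {
    "host": "localhost",   # illustrative values; the real module defines these elsewhere
    "port": 6379,
    "password": None,
}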
Example #9
def get_scheduler(host, port, password=None):
    """ Returns an APScheduler instance with Redis store """
    scheduler = BackgroundScheduler()
    scheduler.configure(
        jobstores={
            'default': RedisJobStore(host=host, port=port, password=password)
        })
    return scheduler
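A usage sketch: the caller is responsible for registering jobs and starting the returned scheduler (nightly_cleanup is illustrative):

def nightly_cleanup():
    print("cleaning up")

sched = get_scheduler('localhost', 6379)
sched.add_job(nightly_cleanup, 'cron', hour=3, id='cleanup', replace_existing=True)
sched.start()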
Example #10
class Config(object):
    BTCPAY_HOST = os.environ.get('BTCPAY_HOST')
    CALLBACK_URL = os.environ.get('CALLBACK_URL')
    REDIS_URL = os.environ.get('REDIS_URL') or 'redis://'
    REDIS_HOST = os.environ.get('REDIS_HOST') or 'localhost'
    SECRET_KEY = '00000000000'
    SCHEDULER_JOBSTORES = {'default': RedisJobStore(host=REDIS_HOST)}
    REFRESH_MINS = int(os.environ.get('REFRESH_MINS') or 50)
Example #11
def make_default_scheduler():
    if config.scheduler_job_store == 'redis':
        jobstore = RedisJobStore(host=config.redis_host, port=config.redis_port)
    elif config.scheduler_job_store == 'memory':
        jobstore = MemoryJobStore()
    else:
        raise ValueError("unknown or unsupported job store type '{}'".format(config.scheduler_job_store))
    return BackgroundScheduler(executors={'default': ThreadPoolExecutor(config.worker_pool_size)},
                               jobstores={'default': jobstore},
                               job_defaults={'misfire_grace_time': config.update_frequency})
Example #12
File: schedule.py Project: Freeaes/CTF
class Config(object):
    SCHEDULER_JOBSTORES = {
        'default': RedisJobStore(host="cache", port=6379, password="", db=15)
    }
    SCHEDULER_EXECUTORS = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        }
    }
    SCHEDULER_API_ENABLED = True
Example #13
def get_async_scheduler(redis):
    cfg = redis.connection_pool.connection_kwargs
    jobstores = {
        'default': RedisJobStore(host=cfg.get("host"),
                                 port=cfg.get("port"),
                                 db=cfg.get("db"),
                                 password=cfg.get("password"))
    }
    job_defaults = {"misfire_grace_time": 3600}
    return AsyncIOScheduler(jobstores=jobstores, job_defaults=job_defaults)
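A usage sketch that reuses a redis-py client's settings (connection_kwargs is a real attribute of redis-py's ConnectionPool); AsyncIOScheduler must be started while an event loop is running:

import asyncio
import redis

async def main():
    r = redis.Redis(host='localhost', port=6379, db=0)
    sched = get_async_scheduler(r)
    sched.start()
    await asyncio.sleep(3600)   # keep the loop alive so jobs can fire

asyncio.run(main())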
Example #14
class APSchedulerConfig(object):

    SCHEDULER_TIMEZONE = 'Asia/Shanghai'
    # Job store location
    SCHEDULER_JOBSTORES = {
        'default': RedisJobStore(host="localhost", port=6379, db=0, password='')
    }
    # Thread pool configuration
    SCHEDULER_EXECUTORS = {
        'default': {'type': 'threadpool', 'max_workers': 20}
    }
Example #15
def init_sheduler(cls):
    from Config import Environment
    if 'JOBS' not in cls._app.config:
        cls._app.config['JOBS'] = []
    cls._app.config['SCHEDULER_API_ENABLED'] = True
    cls._app.config['SCHEDULER_JOBSTORES'] = {
        'default': RedisJobStore(port=Environment.Services['redis']['PORT'],
                                 host=Environment.Services['redis']['HOST'],
                                 db=10)
    }
    return
Example #16
async def init_scheduler():
    _bot = nb.get_bot()
    jobstores = {
        "default": RedisJobStore(host="redis", port=6379,
                                 password=Config.REDIS_PASSWORD)
    }  # job store
    if nbscheduler and nbscheduler.running:
        nbscheduler.shutdown(wait=False)

    if scheduler and not scheduler.running:
        scheduler.configure(_bot.config.APSCHEDULER_CONFIG,
                            jobstores=jobstores)
        scheduler.start()
Example #17
class DevelopmentConfig(BaseConfig):
    # Domain and port used by this project
    PROJECT_PORT = 5000
    PROJECT_DOMAIN = f"http://*****:*****"
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://*****:*****@127.0.0.1:13306/common_web_service?charset=utf8mb4'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ENCODING = "utf8mb4"

    # Celery configuration
    broker_url = 'redis://localhost:6379'
    result_backend = 'redis://localhost:6379'
    imports = ('proStruct.services.flask_linux_crontab',)

    # Redis configuration
    REDIS_HOST = '127.0.0.1'
    REDIS_PORT = 6379
    REDIS_PASSWORD = None
    REDIS_DB = None

    # Test account
    TEST_APP_ID = 'dc601e113be8a2e622f9f9a3f363eb93'
    TEST_ACCOUNT = '15845623256'
    TEST_PASSWORD = '******'  # aaasss123

    # Logging configuration: reconfigure for production
    LOG_FILE_PATH = os.path.join(project_root_path,
                                 modifyPath('logs/web/web_common.log'))
    LOG_LEVEL = logging.INFO
    LOG_FILE_SIZE = 10 * 1024 * 1024  # 10 MB
    LOG_FILE_COUNT = 10

    # APScheduler job scheduling configuration
    JOB_STORES = {
        # A job store named "redis", backed by Redis
        "redis": RedisJobStore(host=REDIS_HOST, port=REDIS_PORT),
        # A job store named "default", backed by a database (SQLite)
        # "default": SQLAlchemyJobStore(url="sqlite:///flask_linux_crontab.sqlite")
        "backend_db": SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
    JOB_EXECUTORS = {
        # A thread pool executor named "default" with a single worker thread
        "default": ThreadPoolExecutor(1),
        # TODO: with too many threads, the same job can end up executed more than once
        # A process pool executor named "processpool" with a single worker process
        "processpool": ProcessPoolExecutor(1),
    }
    # Coalescing disabled; allow up to 3 concurrent instances per job
    JOB_DEFAULT = {'coalesce': False, 'max_instances': 3}
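These attribute names (JOB_STORES, JOB_EXECUTORS, JOB_DEFAULT) are this project's own convention rather than Flask-APScheduler's; a sketch of how they would be handed to a scheduler:

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler(jobstores=DevelopmentConfig.JOB_STORES,
                                executors=DevelopmentConfig.JOB_EXECUTORS,
                                job_defaults=DevelopmentConfig.JOB_DEFAULT)
scheduler.start()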
Example #18
def get_scheduler():
    urlparse.uses_netloc.append('redis')
    redis_url_parsed = urlparse.urlparse(redis_url)
    jobstores = {
        'default':
        RedisJobStore(host=redis_url_parsed.hostname,
                      port=redis_url_parsed.port,
                      db=0,
                      password=redis_url_parsed.password)
    }
    sched = BackgroundScheduler(jobstores=jobstores)
    if not sched.running:
        sched.start()
    return sched
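redis_url is a module-level value in the original; an illustrative assignment showing the URL shape this parser expects (password before the '@', db hard-coded to 0 above):

redis_url = 'redis://:s3cret@localhost:6379'   # hypothetical example value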
Example #19
class Config(object):
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    MYSQL_HOST = os.environ.get('MYSQL_HOST') or 'localhost'
    MYSQL_PORT = os.environ.get('MYSQL_PORT') or 3306
    MYSQL_USER = os.environ.get('MYSQL_USER') or 'user'
    MYSQL_PW = os.environ.get('MYSQL_PW') or 'pw'
    MYSQL_DB = os.environ.get('MYSQL_DB') or 'expenseapp'
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8mb4'.format(MYSQL_USER, MYSQL_PW, MYSQL_HOST, MYSQL_PORT, MYSQL_DB)
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_POOL_RECYCLE = 480
    MAIL_SERVER = os.environ.get('MAIL_SERVER') or 'localhost'
    MAIL_PORT = int(os.environ.get('MAIL_PORT') or 1025)
    MAIL_USE_TLS = (os.environ.get('MAIL_USE_TLS')
                    is not None) and (os.environ.get('MAIL_USE_SSL') is None)
    MAIL_USE_SSL = (os.environ.get('MAIL_USE_SSL')
                    is not None) and (os.environ.get('MAIL_USE_TLS') is None)
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    ADMIN_NOREPLY_SENDER = os.environ.get(
        'ADMIN_NOREPLY_SENDER') or 'no-reply@expenseapp'
    ADMIN_USERNAME = os.environ.get('ADMIN_USERNAME') or 'admin'
    ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD') or 'pw'
    ADMIN_EMAIL = os.environ.get('ADMIN_EMAIL') or 'admin@expenseapp'
    IMAGE_DEFAULT_FORMAT = os.environ.get('IMAGE_DEFAULT_FORMAT') or 'JPEG'
    IMAGE_ROOT_PATH = os.environ.get('IMAGE_ROOT_PATH') or './app'
    IMAGE_TMP_PATH = os.environ.get('IMAGE_TMP_PATH') or 'static/tmp/'
    IMAGE_IMG_PATH = os.environ.get('IMAGE_IMG_PATH') or 'static/img/'
    IMAGE_TIMG_PATH = os.environ.get('IMAGE_TIMG_PATH') or 'static/timg/'
    UPLOADS_DEFAULT_DEST = os.path.join(IMAGE_ROOT_PATH, IMAGE_TMP_PATH)
    UPLOADED_IMAGES_DEST = os.path.join(IMAGE_ROOT_PATH, IMAGE_TMP_PATH)
    THUMBNAIL_SIZES = [32, 64, 128, 256, 512, 1024, 2048]
    ITEMS_PER_PAGE = 10
    MESSAGES_PER_PAGE = 10
    LANGUAGES = ['en', 'de']
    REDIS_HOST = os.environ.get('REDIS_HOST') or 'localhost'
    REDIS_PORT = int(os.environ.get('REDIS_PORT') or 6379)
    REDIS_DB = int(os.environ.get('REDIS_DB') or 0)
    REDIS_URL = os.environ.get('REDIS_URL') or 'redis://{}:{}/{}'.format(
        REDIS_HOST, REDIS_PORT, REDIS_DB)
    SCHEDULER_API_ENABLED = True
    SCHEDULER_JOBSTORES = {
        'default': RedisJobStore(db=REDIS_DB,
                                 jobs_key='housekeeping_jobs',
                                 run_times_key='housekeeping_jobs_running',
                                 host=REDIS_HOST,
                                 port=REDIS_PORT)
    }
Example #20
def get_redis_jobstores():
    # Despite its name, this builds and returns a configured TornadoScheduler.
    jobstores = {
        'default': RedisJobStore(jobs_key='xpm_cron.jobs',
                                 run_times_key='xpm_cron.run_times',
                                 host='192.168.2.227',
                                 port=6379,
                                 password='******',
                                 db=11)
    }
    executors = {
        'default': ThreadPoolExecutor(100),
        'processpool': ProcessPoolExecutor(5)
    }
    scheduler = TornadoScheduler(jobstores=jobstores, executors=executors)
    return scheduler
Example #21
    def __init__(self,
                 job_store: Optional[Any] = None,
                 executor: Optional[Any] = None,
                 job_defaults: Optional[Any] = None,
                 time_zone: Optional[Any] = None,
                 scheduler_type: Optional[str] = 'default',
                 **kwargs) -> Any:
        if not executor:
            executor = {
                'default': ThreadPoolExecutor(20),
                'processpool': ProcessPoolExecutor(5)
            }
        if not job_store:
            REDIS = {
                'host': config_map.get(env).REDIS_HOST,
                'port': config_map.get(env).REDIS_PORT,
                'db': config_map.get(env).REDIS_DB,
                'password': CommonHelper.base64_decode(config_map.get(env).REDIS_PASSWORD)
            }
            default_redis_jobstore = RedisJobStore(**REDIS)
            job_store = {'redis': default_redis_jobstore}
        global job_stores
        job_stores = 'redis'

        if not job_defaults:
            job_defaults = {'coalesce': False, 'max_instances': 3}
        if not time_zone:
            time_zone = utc
        init_scheduler_options = {
            "job_defaults": job_defaults,
            "jobstores": job_store,
            "executors": executor,
            "timezone": time_zone
        }
        global scheduler
        if scheduler_type == 'default':
            scheduler = BackgroundScheduler(**init_scheduler_options)
        elif scheduler_type == 'async':
            scheduler = AsyncIOScheduler(**init_scheduler_options)
        elif scheduler_type == 'block':
            scheduler = BlockingScheduler(**init_scheduler_options)
        scheduler.add_listener(self.job_execute_listener, EVENT_JOB_EXECUTED)
        scheduler.start()
Example #22
def init_scheduler():
    global scheduler
    jobstores = {
        'default': RedisJobStore(db=settings.TN_SCHEDULER['REDIS_DB'])
    }
    executors = {
        'default': ThreadPoolExecutor(settings.TN_SCHEDULER['THREAD_POOL_SIZE']),
        'processpool': ProcessPoolExecutor(5)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': settings.TN_SCHEDULER['MAX_JOB_INSTANCE']
    }
    scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults,
                                    timezone=pytz.timezone(settings.TIME_ZONE))
    scheduler.start()
    print('Scheduler is running...')
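TN_SCHEDULER is read from Django-style settings; an illustrative shape, with keys taken from the usage above and values as assumptions:

# settings.py
TIME_ZONE = 'UTC'
TN_SCHEDULER = {
    'REDIS_DB': 3,
    'THREAD_POOL_SIZE': 10,
    'MAX_JOB_INSTANCE': 1,
}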
Example #23
class SchedulerConfig:
    REDIS_HOST = environ["REDIS_HOST"]
    REDIS_PORT = 6333

    SCHEDULER_JOBSTORES = {
        "default": RedisJobStore(host=REDIS_HOST, port=REDIS_PORT)
    }
    SCHEDULER_EXECUTORS = {
        "default": ThreadPoolExecutor(20),
        "processpool": ProcessPoolExecutor(5),
    }
    SCHEDULER_JOB_DEFAULTS = {
        "coalesce": False,
        "max_instances": 5,
        "misfire_grace_time": 10,
    }

    SCHEDULER_API_ENABLED = True
Example #24
def _get_scheduler_obj(redis):
    job_defaults = {"misfire_grace_time": 3600}

    if not isinstance(redis, _NoneModule):
        cfg = redis.connection_pool.connection_kwargs
        jobstores = {
            'default': RedisJobStore(host=cfg.get("host", "localhost"),
                                     port=cfg.get("port", 6379),
                                     db=cfg.get("db", 0),
                                     password=cfg.get("password"))
        }
    else:
        jobstores = {"default": MemoryJobStore()}

    scheduler = AsyncIOScheduler(jobstores=jobstores,
                                 job_defaults=job_defaults)

    return scheduler
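_NoneModule appears to be the project's placeholder for a missing redis dependency; a minimal illustrative stand-in that exercises the MemoryJobStore fallback:

class _NoneModule:
    """Hypothetical placeholder used when the redis package is unavailable."""

scheduler = _get_scheduler_obj(_NoneModule())   # no real client: falls back to MemoryJobStore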
Example #25
    def __init__(self):
        '''
        Initialization:
            pool          persistent connection pool; Redis backs the persistence here
            job_stores    job store registry, using the Redis job store
            executors     executors; threads are used by default
            job_defaults  default job parameters
        '''

        self.pool = redis.ConnectionPool(host='127.0.0.1', port=6379)

        self.job_stores = dict(redis=RedisJobStore(connection_pool=self.pool))

        self.executors = dict(default=ThreadPoolExecutor(200),
                              processpool=ProcessPoolExecutor(10))

        self.job_defaults = dict(coalesce=True,
                                 max_instances=1,
                                 misfire_grace_time=60)
Example #26
def init_scheduler(self):
    """ instantiate the scheduler and make it available in the class """
    self.jobstores = {
        "default": RedisJobStore(jobs_key=config.redis_schedule_store,
                                 run_times_key=config.redis_schedule_store_stats,
                                 **self.connect_args)
    }
    self.executors = {
        "default": ThreadPoolExecutor(config.apscheduler_num_threads),
        "processpool": ProcessPoolExecutor(config.apscheduler_num_processes)
    }
    job_defaults = {"coalesce": False}
    self.scheduler = scheduler = BackgroundScheduler(
        jobstores=self.jobstores,
        executors=self.executors,
        job_defaults=job_defaults)
    scheduler.start()
Example #27
def get_scheduler(store_path=None, log_file=None):
    if store_path is None:
        store_path = r'jobstore.sqlite'   # currently unused: the job store below is Redis
    if log_file is None:
        log_file = r'logger.log'
    scheduler = TornadoScheduler({'apscheduler.timezone': 'Asia/Shanghai'})
    jobstores = {
        'default': RedisJobStore(host='10.134.103.241', port=6379)
    }
    executors = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 1
    }
    scheduler.configure(jobstores=jobstores, executors=executors,
                        job_defaults=job_defaults)
    # Customize the logger
    scheduler._logger = modify_logger(scheduler._logger, log_file=log_file)
    return scheduler
Example #28
def get_scheduler(store_path=None, log_file=None):
    if store_path is None:
        store_path = r'jobstore.sqlite'   # currently unused: the job store below is Redis
    if log_file is None:
        log_file = r'logger.log'
    scheduler = TornadoScheduler({'apscheduler.timezone': 'Asia/Shanghai'})
    jobstores = {'default': RedisJobStore(host='10.134.103.241', port=6379)}
    executors = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }
    job_defaults = {'coalesce': False, 'max_instances': 1}
    scheduler.configure(jobstores=jobstores, executors=executors,
                        job_defaults=job_defaults)
    # # Event listeners
    # scheduler.add_listener(
    #     lambda event: event_listener(event, scheduler),
    #     EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_ADDED | EVENT_JOB_SUBMITTED | EVENT_JOB_REMOVED
    # )
    # Customize the logger
    scheduler._logger = modify_logger(scheduler._logger, log_file=log_file)
    return scheduler
示例#29
0
def start_scheduler(settings):
    jobstores = {'default': RedisJobStore(db=settings['scheduler.db'])}
    executors = {
        'default': {
            'type': settings['scheduler.executors.type'],
            'max_workers': settings['scheduler.executors.max_workers']
        },
        'processpool': ProcessPoolExecutor(
            max_workers=settings['scheduler.executors.processpool.max_workers'])
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': settings['scheduler.job_defaults.max_instances']
    }
    scheduler.configure(jobstores=jobstores,
                        executors=executors,
                        job_defaults=job_defaults,
                        timezone=timezone('UTC'))
    if settings['scheduler.autostart'] == 'true':
        scheduler.start()
Example #30
    def _init_scheduler():
        redis_pool = RedisPipeline()

        job_stores: Dict = {
            "redis": RedisJobStore(
                db=1,
                jobs_key="blogs_crawler.jobs",
                run_times_key="blogs_crawler.run_times",
                connection_pool=redis_pool,
            )
        }
        executors = {
            "default": {"type": "threadpool", "max_workers": THREADS_NUM},
            "processpool": ProcessPoolExecutor(max_workers=PROCESS_NUM),
        }
        job_defaults = {"coalesce": False, "max_instances": 5, "misfire_grace_time": 60}
        background_scheduler = BackgroundScheduler(
            jobstores=job_stores, executors=executors, job_defaults=job_defaults
        )

        # Attach our logger to the scheduler
        background_scheduler._logger = logger

        # Register a job event listener
        def init_scheduler_listener(event):
            if event.exception:
                logger.error("Scheduled job raised an exception!")

        background_scheduler.add_listener(
            init_scheduler_listener, EVENT_JOB_ERROR | EVENT_JOB_EXECUTED
        )

        # Remove any stale jobs
        background_scheduler.remove_all_jobs()

        # Start the scheduler
        background_scheduler.start()
        return background_scheduler
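A usage sketch for the returned scheduler; since jobs persist in Redis, the callable must be importable at module level (crawl_all_blogs is hypothetical), and the "redis" jobstore alias must be named explicitly because no "default" store is configured:

def crawl_all_blogs():
    print("crawling...")

sched = _init_scheduler()
sched.add_job(crawl_all_blogs, "interval", hours=1,
              id="crawl_all_blogs", jobstore="redis", replace_existing=True)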