Example No. 1
class EmailScheduler:
    jobstores = {
        'default':
        SQLAlchemyJobStore(
            f'postgresql://192.168.5.172/billtrak?user=dj&password={os.getenv("dbpw")}'
        )
    }
    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        },
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    job_defaults = {'coalesce': False, 'max_instances': 3}
    scheduler = BackgroundScheduler()
    scheduler.configure(jobstores=jobstores,
                        executors=executors,
                        job_defaults=job_defaults,
                        timezone=utc)

    def getcron(self):
        return cron.CronTrigger()

    def getscheduler(self):
        return self.scheduler
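
A minimal usage sketch for the class above, assuming its module-level imports (BackgroundScheduler, SQLAlchemyJobStore, cron, os, utc) are in place and the Postgres job store is reachable; the job function send_digest is a hypothetical placeholder:

def send_digest():
    # placeholder job body, not part of the original example
    print("sending digest email")

email_scheduler = EmailScheduler()
scheduler = email_scheduler.getscheduler()
# getcron() returns a default CronTrigger(); pass real cron fields as needed
scheduler.add_job(send_digest, email_scheduler.getcron(), id='daily_digest')
scheduler.start()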
Example No. 2
def start_scheduler(settings):
    assert settings['scheduler.store'] in ('redis', 'sqlalchemy'),\
        'Unknown job store, must be one of redis or sqlalchemy'

    if settings['scheduler.store'] == 'redis':
        jobstores = {
            'default': RedisJobStore(db=settings['scheduler.db'])
        }
    else:
        jobstores = {
            'default': SQLAlchemyJobStore(url=settings['scheduler.url'])
        }
        
    executors = {
        'default': {
            'type': settings['scheduler.executors.type'],
            'max_workers': settings['scheduler.executors.max_workers']
        },
        'processpool': ProcessPoolExecutor(
            max_workers=settings['scheduler.executors.processpool.max_workers']
        )
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': settings['scheduler.job_defaults.max_instances']
    }
    scheduler.configure(
        jobstores=jobstores,
        executors=executors,
        job_defaults=job_defaults,
        timezone=timezone('UTC')
    )
    if settings['scheduler.autostart'] == 'true':
        scheduler.start()
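
A hedged example of the settings mapping this function expects, inferred from the keys it reads (note that it also relies on a module-level scheduler object defined elsewhere); the concrete values below are illustrative assumptions:

settings = {
    'scheduler.store': 'sqlalchemy',
    'scheduler.url': 'sqlite:///jobs.sqlite',
    'scheduler.db': 0,  # only consulted when scheduler.store is 'redis'
    'scheduler.executors.type': 'threadpool',
    'scheduler.executors.max_workers': 20,
    'scheduler.executors.processpool.max_workers': 5,
    'scheduler.job_defaults.max_instances': 3,
    'scheduler.autostart': 'true',
}
start_scheduler(settings)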
Example No. 3
    def run(self):
        sql_session = self.sql_session
        self.period = self.get_monitoring_period()

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }
        job_defaults = { 'coalesce': False, 'max_instances': 3 }
        self.scheduler = BackgroundScheduler()
        self.scheduler.configure(executors=executors, job_defaults=job_defaults)

        clusters = sql_session.query(GnCluster).filter(GnCluster.status=='Running').all()
        for cluster in clusters:
            # Pass the URL via args= rather than a lambda: a closure binds the loop
            # variable lazily, so every job would end up using the last cluster's URL.
            monitor_url = 'http://%s/monitor' % str(cluster.ip)
            if cluster.type == 'docker':
                self.scheduler.add_job(self.docker_monitor, trigger='interval',
                                       seconds=int(self.period), args=[monitor_url])
            elif cluster.type == 'kvm':
                self.scheduler.add_job(self.kvm_monitor, trigger='interval',
                                       seconds=int(self.period), args=[monitor_url])
            elif cluster.type == 'hyperv':
                self.scheduler.add_job(self.hyperv_monitor, trigger='interval',
                                       seconds=int(self.period), args=[monitor_url])
            else:
                print('type = %s is not scheduled' % cluster.type)
        self.scheduler.start()
        sql_session.commit()
Example No. 4
class Config:
    '''
    app settings
    '''
    DEBUG           = parser.config['app']['debug']
    TESTING         = parser.config['app']['testing']
    BCRYPT_LEVEL    = parser.config['app']['bcrypt_level']
    APP_NAME        = parser.config['app']['app_name']
    SECRET_KEY      = parser.config['app']['secret_key']
    WTF_CSRF_ENABLED    = parser.config['app']['wtf_csrf_enabled']

    # APScheduler
    JOBS = []
    JSON_AS_ASCII = False  # allow non-ASCII (e.g. Chinese) characters in JSON responses
    SCHEDULER_API_ENABLED = parser.config['scheduler']['api_enabled']
    SCHEDULER_JOBSTORES = {
        'default': MongoDBJobStore(
            database=parser.config['scheduler']['mongodb_db'],
            client=MongoClient(parser.config['scheduler']['mongodb_uri']))
    }
    SCHEDULER_EXECUTORS = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }
    SCHEDULER_JOB_DEFAULTS = parser.config['scheduler']['job_defaults']

    # mongodb settings
    MONGODB_SETTINGS = parser.config['mongodb']['mongodb_settings']

    @staticmethod
    def init_app(app):
        pass
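
The SCHEDULER_* keys above follow the Flask-APScheduler naming convention, so the config class can plausibly be wired up as sketched below; flask_apscheduler itself is an assumption, not shown in the original snippet:

from flask import Flask
from flask_apscheduler import APScheduler

app = Flask(__name__)
app.config.from_object(Config)  # Flask picks up the uppercase settings

scheduler = APScheduler()
scheduler.init_app(app)  # reads SCHEDULER_JOBSTORES, SCHEDULER_EXECUTORS, ...
scheduler.start()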
Example No. 5
def dealDataScheduleJob(mon_conn):
    # the job queue collection has to be cleared each time the program restarts
    mon_conn.kctest.dealDataJob.remove({})
    jobstores = {
        'mongo':
        MongoDBJobStore(collection='dealDataJob',
                        database='kctest',
                        client=mon_conn),
        'default':
        MemoryJobStore()
    }
    executors = {
        'default': ThreadPoolExecutor(4),
        'processpool': ProcessPoolExecutor(1)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 3  # avoid blocking when runs overlap
    }

    scheduler = BlockingScheduler(jobstores=jobstores,
                                  executors=executors,
                                  job_defaults=job_defaults)
    scheduler.add_job(readWriteBusRunInfo,
                      'interval',
                      seconds=15,
                      jobstore="mongo")
    #scheduler.add_job(distinctBusInfo,'interval',seconds=30, jobstore='mongo')
    # cron-style scheduled jobs (kept disabled)
    #scheduler.add_job(statistics,"cron",hour=23,minute=59,jobstore='mongo')
    #scheduler.add_job(statistics,"interval",seconds=30,jobstore='default')
    scheduler.start()
Example No. 6
 def __init__(self):
     self.jobstores = {
         'mongo':
         MongoDBJobStore(collection='job1',
                         database='saasjob',
                         client=_mongoclient),
         'default':
         MemoryJobStore()
     }
     self.executors = {
         'default': ThreadPoolExecutor(1),
         'processpool': ProcessPoolExecutor(1)
     }
     self.job_defaults = {
         'coalesce': False,
         'misfire_grace_time': 1,
         'max_instances': 1
     }
     self._sched = BackgroundScheduler(jobstores=self.jobstores,
                                       executors=self.executors,
                                       job_defaults=self.job_defaults)
     # listen for job-submitted events
     self._sched.add_listener(self.when_job_submitted, EVENT_JOB_SUBMITTED)
     # listen for job-executed (completed) events
     self._sched.add_listener(self.when_job_executed, EVENT_JOB_EXECUTED)
     # listen for job-error (crashed) events
     self._sched.add_listener(self.when_job_crashed, EVENT_JOB_ERROR)
     self._jobs = {}
     self._jobhandlers = {}  # format, key: jobid,  value: jobhandler
     self._jobs_key = ["name", "func", "args", "kwargs"]
     self.start()
Example No. 7
def get_mongo_job_stores():
    from pytz import utc
    from apscheduler.jobstores.mongodb import MongoDBJobStore
    from apscheduler.executors.pool import ProcessPoolExecutor
    from libs.db_context import get_mongo_client

    client = get_mongo_client()

    jobstores = {
        'mongo':
        MongoDBJobStore(collection='job',
                        database='apscheduler',
                        client=client),
        'default':
        MongoDBJobStore(collection='job',
                        database='apscheduler2',
                        client=client),
    }
    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        },
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    job_defaults = {'coalesce': False, 'max_instances': 3}
    scheduler = TornadoScheduler()
    scheduler.configure(jobstores=jobstores,
                        executors=executors,
                        job_defaults=job_defaults,
                        timezone=utc)

    return scheduler
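
A hedged usage sketch for the factory above: TornadoScheduler schedules jobs on Tornado's I/O loop, so the loop has to be started after the scheduler (tick is a placeholder job function):

from tornado.ioloop import IOLoop

def tick():
    print("tick")

scheduler = get_mongo_job_stores()
scheduler.add_job(tick, 'interval', seconds=30, jobstore='mongo')
scheduler.start()
IOLoop.current().start()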
Example No. 8
class Config:
    SECRET_KEY = 'QWERTYUIOPASDFGHJ'
    MAIL_SERVER = 'smtp.126.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = '******'
    MAIL_PASSWORD = '******'
    FLASKY_MAIL_SUBJECT_PREFIX = 'SUBJECT_PREFIX'
    FLASKY_MAIL_SENDER = 'MAIL_SENDER'
    FLASKY_ADMIN = 'ADMIN'
    SSL_REDIRECT = False

    # logging level
    LOGGING_LEVEL = logging.INFO
    AUTO_HOME = os.getcwd().replace('\\', '/') + '/.beats'

    AUTO_ROBOT = []

    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        },
        'processpool': ProcessPoolExecutor(max_workers=5)
    }

    job_defaults = {'coalesce': False, 'max_instances': 10}

    @staticmethod
    def init_app(app):
        pass
Example No. 9
def main():
    # Clean
    logger.info("Clean memcached before init")
    memcached_host, memcached_port = get_memcached_config()
    mem_nfv = MemcachedNFV(memcached_host, memcached_port)
    mem_nfv.clean_memcached()
    mem_nfv.disconnect()
    # Init server list
    init_server_cached_list_api()

    # Scheduler for get statistic
    data_config = read_config_json_file(NFV_CONFIG_PATH)
    interval = int(data_config['interval'])

    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        },
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    job_defaults = {'coalesce': False, 'max_instances': 3}
    scheduler = BlockingScheduler()
    scheduler.configure(executors=executors, job_defaults=job_defaults)
    scheduler.add_job(scheduler_get_statistic_job,
                      'interval',
                      seconds=interval)
    scheduler.start()
Example No. 10
    def __init__(self, background=False, daemon=True, **kwargs):
        logging.basicConfig(format="[%(asctime)s] %(message)s",
                            datefmt="%Y-%m-%d %H:%M:%S")
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

        if background:
            self.sched = BackgroundScheduler(daemon=daemon)  # background
        else:
            self.sched = BlockingScheduler(daemon=daemon)  # foreground

        # TODO: Read from configuration file.
        self.sched.configure(
            jobstores={
                # "sqlite": SQLAlchemyJobStore(url='sqlite:///app/database/example.db'),
                # "default": MemoryJobStore()
                "default":
                SQLAlchemyJobStore(url='sqlite:///app/database/example.db')
            },
            executors={
                'default': ThreadPoolExecutor(20),
                'processpool': ProcessPoolExecutor(5)
            },
            job_defaults={
                'coalesce': False,
                'max_instances': 3
            },
            timezone=get_localzone()  # Asia/Seoul
        )

        self.retried = 0
        self.logger = logging.getLogger('apscheduler')

        super(JobLauncher, self).__init__()
Example No. 11
def runScheduler():
    runProxyFetch()

    timezone = ConfigHandler().timezone
    scheduler_log = LogHandler("scheduler")
    scheduler = BlockingScheduler(logger=scheduler_log, timezone=timezone)

    scheduler.add_job(runProxyFetch,
                      'interval',
                      minutes=4,
                      id="proxy_fetch",
                      name="proxy fetch")
    scheduler.add_job(runProxyCheck,
                      'interval',
                      minutes=2,
                      id="proxy_check",
                      name="proxy check")

    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        },
        'processpool': ProcessPoolExecutor(max_workers=8)  # default 5
    }
    job_defaults = {'coalesce': False, 'max_instances': 10}

    scheduler.configure(executors=executors,
                        job_defaults=job_defaults,
                        timezone=timezone)

    scheduler.start()
Example No. 12
    def __init__(self,
                 elastic,
                 crawler_dir="crawlers",
                 crawler_args={},
                 **cron_defaults):
        """Initializes the scheduler by binding it to its Elasticsearch db.

        Args:
            elastic (elasticsearch.Elasticsearch): the ES client in which the
                crawling jobs are saved.
            crawler_dir (str): the directory where the crawlers are found.
                Defaults to "crawlers".
            crawler_args (dict): keyword arguments handed to the jobstore for
                constructing crawler jobs.
            **cron_defaults (dict): default cron trigger fields (e.g. hour,
                minute, second) used when scheduling jobs.

        Returns:
            Scheduler: a fresh Scheduler instance.
        """
        jobstores = {
            "default": {
                "type": "memory"
            },
            "elastic": InjectorJobStore(kwargs=crawler_args, client=elastic)
        }

        executors = {
            "default": ThreadPoolExecutor(10),
            "processpool": ProcessPoolExecutor(10)
        }

        job_defaults = {
            "misfire_grace_time": 5 * 60,  # 5min
            "coalesce": True,
        }

        self.cron_defaults = utility.DefaultDict(
            {
                # standard is every day at 00:00:00
                "hour": 0,
                "minute": 0,
                "second": 0
            },
            **cron_defaults)

        self.scheduler = BackgroundScheduler(jobstores=jobstores,
                                             executors=executors,
                                             job_defaults=job_defaults,
                                             timezone=utc)

        self.crawlers = _detect_crawlers()
        # set up the validator schema.
        self.job_validator = cerberus.Validator(
            SCHEMATA["job"]({"trigger_ids": list(self.TRIGGERS)}),
            allow_unknown=True)
        self.scheduler.start()
Example No. 13
 def __init__(self):
     self.run_date = datetime.datetime.now() + datetime.timedelta(seconds=3)
     self.run_date = self.run_date.strftime('%Y-%m-%d %H:%M:%S')
     self.tm = time.strftime('%Y%m%d%H%M%S',time.localtime())
     self.scheduler = BackgroundScheduler()
     self.executors = {
         'default': ThreadPoolExecutor(10),
         'processpool': ProcessPoolExecutor(5)
     }
     self.job_defaults = {'coalesce': False, 'max_instances': 1}
     self.scheduler.configure(timezone=pytz.timezone('Asia/Shanghai'),
                              job_defaults=self.job_defaults,
                              executors=self.executors)
Example No. 14
class ConfigTask(object):
    """
    Scheduled (timed) task configuration.
    """
    jobstores = {'default': SQLAlchemyJobStore(url="sqlite:///" + os.path.join(basedir, "data.sqlite"))}
    executors = {'default': ThreadPoolExecutor(10), 'processpool': ProcessPoolExecutor(3)}

    def __init__(self):
        self.scheduler = BackgroundScheduler(jobstores=self.jobstores, executors=self.executors)
Example No. 15
    def __init__(self, time_offset, sampling_freq=5):
        self.sampling_interval = 1.0 / sampling_freq  # 5 Hz by default
        self.total_mem = psutil.virtual_memory().total
        self.manager = Manager()
        self.samples = self.manager.list()
        self.time_offset = time_offset

        self.scheduler = BackgroundScheduler(
            executors={'default': ProcessPoolExecutor(1)})
        self.job = None
Example No. 16
 def run(self):
     executors = {
         'default': {'type': 'threadpool', 'max_workers': 20},
         'processpool': ProcessPoolExecutor(max_workers=5)
     }
     job_defaults = { 'coalesce': False, 'max_instances': 3 }
     self.scheduler = BackgroundScheduler()
     self.scheduler.configure(executors=executors, job_defaults=job_defaults)
     # run the backup every day at 00:10
     self.scheduler.add_job(self.backup, trigger='cron', hour=0, minute=10)
     self.scheduler.start()
Example No. 17
def getBlockingScheduler():
    executors = {
        'default': ThreadPoolExecutor(10),
        'processpool': ProcessPoolExecutor(5)
    }

    job_defaults = {'coalesce': False, 'max_instances': 5}
    scheduler = BlockingScheduler(executors=executors,
                                  job_defaults=job_defaults)
    return scheduler
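
Usage note for the helper above: BlockingScheduler.start() runs in the calling thread and blocks until shutdown, so it is typically the last call in a script (heartbeat is a placeholder job):

def heartbeat():
    print("still alive")

scheduler = getBlockingScheduler()
scheduler.add_job(heartbeat, 'interval', seconds=60)
scheduler.start()  # blocks here until shutdown() or process exit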
Example No. 18
 def getSchedule(self):
     executors = {
         'default': ThreadPoolExecutor(3),
         'processpool': ProcessPoolExecutor(1)
     }
     job_defaults = {'coalesce': True, 'max_instances': 1}
     scheduler = BackgroundScheduler(executors=executors,
                                     job_defaults=job_defaults,
                                     timezone=utc)
     return scheduler
Example No. 19
 def __init__(self, job_type, store_executor_alias, process_count):
     self.sche = TornadoScheduler()
     self.host = MONGO_CONFIG.get('host')
     self.mongo_client = MongoClient(self.host)
     self.job_type = job_type
     self.mongo_job_store = MongoDBJobStore(collection='job',
                                            database=DBNAME,
                                            client=self.mongo_client)
     self.store_executor_alias = store_executor_alias
     self.process_pool = ProcessPoolExecutor(process_count)
Example No. 20
def executor(request, mock_scheduler):
    if request.param == 'threadpool':
        from apscheduler.executors.pool import ThreadPoolExecutor
        executor_ = ThreadPoolExecutor()
    else:
        from apscheduler.executors.pool import ProcessPoolExecutor
        executor_ = ProcessPoolExecutor()

    executor_.start(mock_scheduler, 'dummy')
    yield executor_
    executor_.shutdown()
Example No. 21
 def run(self):
     executors = {
         'default': {'type': 'threadpool', 'max_workers': 20},
         'processpool': ProcessPoolExecutor(max_workers=5)
     }
     job_defaults = { 'coalesce': False, 'max_instances': 3 }
     self.scheduler = BackgroundScheduler()
     self.scheduler.configure(executors=executors, job_defaults=job_defaults)
     # this job runs on the first day of every month at 01:00:00
     self.scheduler.add_job(self.invoice_calc, trigger='cron', day=1, hour=1, minute=0)
     self.scheduler.start()
Example No. 22
def main():
    args = parse_args()

    print(args)

    logfile = path.expanduser(args.logfile)
    if not path.exists(path.dirname(logfile)):
        os.makedirs(path.dirname(logfile))

    root_logger = logging.getLogger()
    formatter = logging.Formatter(
        "%(asctime)s:%(levelname)s:%(name)s:%(message)s")
    handler = logging.handlers.RotatingFileHandler(logfile,
                                                   maxBytes=args.logsize,
                                                   backupCount=args.logcount)
    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
    root_logger.setLevel(args.loglevel)

    jobstores = {'memory': MemoryJobStore()}
    executors = {
        'default': ProcessPoolExecutor(args.processes),
        'threadpool': ThreadPoolExecutor(args.threads)
    }
    job_defaults = {'max_instances': 10000}
    scheduler = BackgroundScheduler(jobstores=jobstores,
                                    executors=executors,
                                    job_defaults=job_defaults)
    # Add jobs here: register 9999 interval jobs with randomized periods
    for x in range(1, 10000):
        interval = random.randint(30, 120)
        scheduler.add_job(handle_job,
                          'interval',
                          seconds=interval,
                          kwargs={
                              'id': str(x),
                              'interval': str(interval)
                          })

    print("\nStarting Scheduler...")

    scheduler.start()
    print("Scheduler started")

    try:
        while True:
            time.sleep(1)
    except (KeyboardInterrupt, SystemExit):
        print("Shutting down... please wait!")
        scheduler.shutdown()
        logging.shutdown()
Example No. 23
def executor(request, mock_scheduler):
    if request.param == 'threadpool':
        from apscheduler.executors.pool import ThreadPoolExecutor
        executor_ = ThreadPoolExecutor()
    else:
        from apscheduler.executors.pool import ProcessPoolExecutor
        executor_ = ProcessPoolExecutor()

    executor_.start(mock_scheduler, 'dummy')
    request.addfinalizer(executor_.shutdown)
    return executor_
Example No. 24
 def __init__(self):
     self.scheduler = BlockingScheduler(
         executors={
             'default': ThreadPoolExecutor(15),
             'processpool': ProcessPoolExecutor(1)
         },
         job_defaults={
             'coalesce': False,
             'max_instances': 1
         })
Example No. 25
    def __init__(self, parent=None):
        super(TimingTasksManager, self).__init__(parent)
        executors = {
            'default': ThreadPoolExecutor(10),
            'processpool': ProcessPoolExecutor(3)
        }

        self.scheduler = BackgroundScheduler(executors=executors)
        self._load_jobs()
        self.scheduler.start()
        log.info(u'Timed tasks started successfully')
Example No. 26
 def config_Scheduler(self, timezone):
     jobstores = {
         'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
     }
     executors = {'processpool': ProcessPoolExecutor(max_workers=5)}
     job_defaults = {'coalesce': False, 'max_instances': 3}
     scheduler = BackgroundScheduler()
     scheduler.configure(jobstores=jobstores,
                         executors=executors,
                         job_defaults=job_defaults,
                         timezone=timezone)
     scheduler.add_job(job_function, 'interval', seconds=1)
     return scheduler
Example No. 27
 def __init__(self, ctx):
     self.ctx = ctx
     log1.setLevel(ctx.logLevel)
     log2.setLevel(ctx.logLevel)
     self.executors = {
         'default': ThreadPoolExecutor(20),
         'processpool': ProcessPoolExecutor(5)
     }
     self.job_defaults = {'coalesce': False, 'max_instances': 1}
     self.scheduler = BackgroundScheduler(executors=self.executors,
                                          job_defaults=self.job_defaults)
     self.start()
Example No. 28
def init_scheduler():
    """Initialize the scheduler."""
    def my_listener(event):
        """Listen for job events and log the outcome."""
        if event.exception:
            logger.exception('========== The job crashed :( ==========')
            logger.exception(str(event.exception))
        else:
            logger.info('============ The job worked :) ===========')

    job_sqlite_path = os.path.join(CommonConf.SQLITE_DIR, 'jobs.sqlite')
    # delete the job database on every startup
    if os.path.exists(job_sqlite_path):
        os.remove(job_sqlite_path)
    jobstores = {
        'default': SQLAlchemyJobStore(
            url='sqlite:///' + job_sqlite_path)  # SQLAlchemyJobStore with its storage URL
    }
    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 10
        },  # at most 10 worker threads
        'processpool': ProcessPoolExecutor(max_workers=1)  # at most 1 worker process
    }
    job_defaults = {'coalesce': True, 'max_instances': 3}
    scheduler._logger = logger

    scheduler.configure(jobstores=jobstores,
                        executors=executors,
                        job_defaults=job_defaults)
    # re-upload records in the database that previously failed to upload
    scheduler.add_job(ThirdEtcApi.reupload_etc_deduct_from_db,
                      trigger='cron',
                      hour='*/1',
                      id='reupload_etc_deduct_from_db')
    # check the RSU (antenna) heartbeat; restart the antenna if it has been silent too long
    scheduler.add_job(RsuStatus.check_rsu_heartbeat,
                      trigger='cron',
                      minute='*/3',
                      id='check_rsu_heartbeat',
                      kwargs={'callback': ThirdEtcApi.tianxian_heartbeat},
                      max_instances=2)
    # platform parameter download: issuer blacklist API
    ThirdEtcApi.download_fxf_blacklist()  # run once immediately
    scheduler.add_job(ThirdEtcApi.download_fxf_blacklist,
                      trigger='cron',
                      hour='*/12',
                      id='download_fxf_blacklist')
    scheduler.add_listener(my_listener,
                           events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)
    logger.info("Starting the scheduler...")

    scheduler.start()
Example No. 29
    def __init__(self):
        jobstores = {'default': MemoryJobStore()}
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5)
        }
        job_defaults = {'coalesce': False, 'max_instances': 3}

        self.__sched = BackgroundScheduler(jobstores=jobstores,
                                           executors=executors,
                                           job_defaults=job_defaults,
                                           timezone=utc)
        self.__sched.start()
Example No. 30
class DevelopmentConfig(BaseConfig):
    # domain and port used by this project
    PROJECT_PORT = 5000
    PROJECT_DOMAIN = f"http://*****:*****"
    # database connection (credentials redacted)
    SQLALCHEMY_DATABASE_URI = '*****:*****@127.0.0.1:13306/common_web_service?charset=utf8mb4'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ENCODING = "utf8mb4"

    # Celery configuration
    broker_url = 'redis://localhost:6379'
    result_backend = 'redis://localhost:6379'
    imports = ('proStruct.services.flask_linux_crontab',)

    # Redis configuration
    REDIS_HOST = '127.0.0.1'
    REDIS_PORT = 6379
    REDIS_PASSWORD = None
    REDIS_DB = None

    # test account
    TEST_APP_ID = 'dc601e113be8a2e622f9f9a3f363eb93'
    TEST_ACCOUNT = '15845623256'
    TEST_PASSWORD = '******'  # aaasss123

    # logging configuration: adjust for production
    LOG_FILE_PATH = os.path.join(project_root_path,
                                 modifyPath('logs/web/web_common.log'))
    LOG_LEVEL = logging.INFO
    LOG_FILE_SIZE = 10 * 1024 * 1024  # 10 MB
    LOG_FILE_COUNT = 10

    # APScheduler timed-task configuration
    JOB_STORES = {
        # a job store named 'redis', backed by Redis
        "redis": RedisJobStore(host=REDIS_HOST, port=REDIS_PORT),
        # a job store named 'default', backed by a database (SQLite)
        # "default": SQLAlchemyJobStore(url="sqlite:///flask_linux_crontab.sqlite")
        "backend_db": SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
    JOB_EXECUTORS = {
        # a thread pool executor named 'default' with a single worker thread
        # TODO: with too many threads, the same job can end up being executed more than once
        "default": ThreadPoolExecutor(1),
        # a process pool executor named 'processpool' with a single worker process
        "processpool": ProcessPoolExecutor(1),
    }
    # job defaults: no coalescing, at most 3 concurrent instances per job
    JOB_DEFAULT = {'coalesce': False, 'max_instances': 3}
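
A sketch of how the JOB_STORES / JOB_EXECUTORS / JOB_DEFAULT values above might be handed to a scheduler; this wiring is an assumption and is not part of the original configuration class:

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler(
    jobstores=DevelopmentConfig.JOB_STORES,
    executors=DevelopmentConfig.JOB_EXECUTORS,
    job_defaults=DevelopmentConfig.JOB_DEFAULT)
scheduler.start()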