Example #1
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.events import (EVENT_JOB_EXECUTED, EVENT_JOB_ERROR, EVENT_JOB_ADDED,
                                EVENT_JOB_SUBMITTED, EVENT_JOB_REMOVED)


def get_scheduler(store_path=None, log_file=None):
    if store_path is None:
        store_path = r'jobstore.sqlite'
    if log_file is None:
        log_file = r'logger.log'
    scheduler = BackgroundScheduler({'apscheduler.timezone': 'Asia/Shanghai'})
    jobstores = {
        'default': SQLAlchemyJobStore(url='sqlite:///{0}'.format(store_path))
    }
    executors = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 1
    }
    scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
    # Event logging
    scheduler.add_listener(
        lambda event: event_listener(event, scheduler),
        EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_ADDED | EVENT_JOB_SUBMITTED | EVENT_JOB_REMOVED
    )
    # Customize the logger
    scheduler._logger = modify_logger(scheduler._logger, log_file=log_file)
    return scheduler
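
A hedged usage sketch for get_scheduler (event_listener and modify_logger are user-defined helpers referenced but not shown in this excerpt; the job below is a placeholder):

def nightly_cleanup():  # placeholder job for illustration
    print("cleaning up...")

scheduler = get_scheduler()
# An explicit id plus replace_existing=True avoids duplicate entries in the
# SQLite-backed job store across restarts.
scheduler.add_job(nightly_cleanup, 'cron', hour=3, id='nightly_cleanup',
                  replace_existing=True)
scheduler.start()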
Example #2
def doSchedulejob():
    # Create the scheduler: BackgroundScheduler
    scheduler = BackgroundScheduler()
    # Add a job with a 2-second interval
    scheduler.add_job(monitorSystem, 'interval', seconds=2, id='test_job1')
    # Add a job with a 3-second interval
    scheduler.add_job(monitorNetWork, 'interval', seconds=3, id='test_job2')
    scheduler.add_listener(apschedulerListener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler._logger = logging
    scheduler.start()
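
The apschedulerListener callback is referenced but not defined in this excerpt; a minimal sketch, assuming it only logs job outcomes:

import logging

def apschedulerListener(event):
    # Hypothetical implementation; the original listener is not shown.
    if event.exception:
        logging.error("Job %s raised %r", event.job_id, event.exception)
    else:
        logging.info("Job %s executed", event.job_id)

Note that BackgroundScheduler runs its jobs in a daemon thread, so the caller of doSchedulejob() must keep the main thread alive (for example with a sleep loop, as Examples #4 and #7 do) or the process exits before any job fires.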
Example #3
def make_scheduler(log):
    scheduler = BackgroundScheduler()

    job_defaults = {
        'coalesce': False,
        'max_instances': 50,
        'misfire_grace_time': 100,
    }
    scheduler.configure(job_defaults=job_defaults)
    scheduler._logger = log
    return scheduler
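
In these job_defaults, coalesce=False runs each missed occurrence separately rather than collapsing them into one, max_instances=50 allows up to 50 concurrent runs of the same job, and misfire_grace_time=100 lets a run start up to 100 seconds late before it counts as missed. A hedged usage sketch (the logger name and job are placeholders):

import logging
import time

def heartbeat():  # placeholder job
    print("tick")

sched = make_scheduler(logging.getLogger("jobs"))
sched.add_job(heartbeat, "interval", seconds=10)
sched.start()
time.sleep(35)    # keep the daemon thread alive long enough to see a few runs
sched.shutdown()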
Example #4
def daily_jobs():
    """seched of weekly jobs"""
    executors = {'default': ThreadPoolExecutor(max_workers=30), \
                 'processpool': ProcessPoolExecutor(max_workers=30)}
    sched = BackgroundScheduler(executors=executors)
    cf = localConfig.cf
    # Only 'regularClose_job' is in the list below, so the 'regularMonitor'
    # branch is unreachable until that name is added to the list.
    for job in ['regularClose_job']:
        if job == 'regularClose_job':
            sched.add_job(regularClose_job, cf.get(job, 'type'),
                          day_of_week=cf.get(job, 'day_of_week'),
                          hour=cf.get(job, 'hour'),
                          minute=cf.get(job, 'minute'),
                          second=cf.get(job, 'second'),
                          misfire_grace_time=int(cf.get(job, 'misfire_grace_time')))
        elif job == 'regularMonitor':
            sched.add_job(
                regularMonitor,
                cf.get(job, 'type'),
                hours=int(cf.get(job, 'hours')))
    sched.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    sched._logger = logging
    sched.start()
    while True:
        time.sleep(3)
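
localConfig.cf is assumed to be a ConfigParser-style object; a hypothetical [regularClose_job] section matching the keys read above might look like this (values are illustrative only):

import configparser

SAMPLE_INI = """
[regularClose_job]
type = cron
day_of_week = mon-fri
hour = 18
minute = 0
second = 0
misfire_grace_time = 300
"""

cf = configparser.ConfigParser()
cf.read_string(SAMPLE_INI)
assert cf.get('regularClose_job', 'type') == 'cron'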
Example #5
def start_apschedule():

    sched = BackgroundScheduler(daemonic=False)
    sched._logger = LOGGER
    # sched.add_job(func=scrapAndSaveFinMsg, replace_existing=True, misfire_grace_time=3,
    #               trigger='cron', name='FundQuery_Estimate', id='scrap_fin', second='0/30', hour='9-15', day_of_week='0-4')

    sched.add_job(func=scrapAndSaveFinMsg,
                  replace_existing=True,
                  misfire_grace_time=3,
                  trigger='cron',
                  name='FundQuery_Estimate_test',
                  id='scrap_fin',
                  second='0/30')

    # sched.add_job(func=scrapAndSenFinMsg, args=[nickNames], replace_existing=True, misfire_grace_time=3,
    #               trigger='cron', name='FundQuery_test', id='scrap_fin_test', minute='0/2')
    sched.start()
    global _scheduler
    _scheduler = sched
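
In the cron trigger above, second='0/30' fires at seconds 0 and 30 of every minute. A hedged companion sketch for shutting the scheduler down through the module-level handle (assumes a module-level "_scheduler = None" exists at import time):

def stop_apschedule():
    global _scheduler
    if _scheduler is not None:
        _scheduler.shutdown(wait=False)  # don't wait for running jobs
        _scheduler = None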
Example #6
    def _init_scheduler():
        redis_pool = RedisPipeline()

        job_stores: Dict = {
            "redis": RedisJobStore(
                db=1,
                jobs_key="blogs_crawler.jobs",
                run_times_key="blogs_crawler.run_times",
                connection_pool=redis_pool,
            )
        }
        executors = {
            "default": {"type": "threadpool", "max_workers": THREADS_NUM},
            "processpool": ProcessPoolExecutor(max_workers=PROCESS_NUM),
        }
        job_defaults = {"coalesce": False, "max_instances": 5, "misfire_grace_time": 60}
        background_scheduler = BackgroundScheduler(
            jobstores=job_stores, executors=executors, job_defaults=job_defaults
        )

        # Set the scheduler's logger
        background_scheduler._logger = logger

        # Register a job listener
        def init_scheduler_listener(event):
            if event.exception:
                logger.error("定时任务出现异常!")

        background_scheduler.add_listener(
            init_scheduler_listener, EVENT_JOB_ERROR | EVENT_JOB_EXECUTED
        )

        # Clear any previously persisted jobs
        background_scheduler.remove_all_jobs()

        # Start the scheduler
        background_scheduler.start()
        return background_scheduler
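
The executors dict above mixes a plain-dict configuration ({"type": "threadpool", ...}) with an instantiated ProcessPoolExecutor; APScheduler accepts both forms. A hedged usage sketch, assuming _init_scheduler is exposed as a static helper and with a placeholder crawler job:

def crawl_blogs():  # placeholder for the real crawler entry point
    print("crawling...")

scheduler = _init_scheduler()
# Persist the job in the "redis" store so its schedule survives restarts.
scheduler.add_job(crawl_blogs, "interval", hours=1,
                  id="blogs_crawler", jobstore="redis",
                  replace_existing=True)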
Example #7
def daily_jobs():
    """seched of weekly jobs"""
    executors = {'default': ThreadPoolExecutor(max_workers=30), \
                 'processpool': ProcessPoolExecutor(max_workers=30)}
    sched = BackgroundScheduler(executors=executors)
    cf = localConfig.cf
    job = 'regularClose_job'
    sched.add_job(regularClose_job, cf.get(job, 'type'),
                  day_of_week=cf.get(job, 'day_of_week'),
                  hour=cf.get(job, 'hour'), minute=cf.get(job, 'minute'),
                  second=cf.get(job, 'second'),
                  misfire_grace_time=int(cf.get(job, 'misfire_grace_time')))
    job = 'giteeMigrateIssue_job'
    sched.add_job(Main, cf.get(job, 'type'),
                  day_of_week=cf.get(job, 'day_of_week'),
                  hour=cf.get(job, 'hour'), minute=cf.get(job, 'minute'),
                  second=cf.get(job, 'second'),
                  misfire_grace_time=int(cf.get(job, 'misfire_grace_time')))
    job = 'giteeMergePR_job'
    sched.add_job(gitee_merge_pr, cf.get(job, 'type'),
                  day_of_week=cf.get(job, 'day_of_week'),
                  hour=cf.get(job, 'hour'), minute=cf.get(job, 'minute'),
                  second=cf.get(job, 'second'),
                  misfire_grace_time=int(cf.get(job, 'misfire_grace_time')))
    job = 'giteeMigratePR_job'
    sched.add_job(githubPrMigrateGitee().main, cf.get(job, 'type'),
                  day_of_week=cf.get(job, 'day_of_week'),
                  hour=cf.get(job, 'hour'), minute=cf.get(job, 'minute'),
                  second=cf.get(job, 'second'),
                  misfire_grace_time=int(cf.get(job, 'misfire_grace_time')))

    sched.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    sched._logger = logging
    sched.start()
    while True:
        time.sleep(3)
Example #8
3. Restart the task_manager.py process.

Parameter notes:
1. Scheduled-task module: all jobs run asynchronously and do not block one another.
2. trigger='interval' means repeated execution; seconds is the interval (minutes, hours and
   days also work). jitter adds a random offset, e.g. run every hour but drift up to 10 s
   either way to avoid concurrency spikes. next_run_time=now() runs the job immediately on
   startup; leave it unset for normal scheduling.
   trigger='cron', month='6-8,11-12', day='3rd fri', hour='0-3'
------------ Scheduled tasks: Start ------------'''
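
# Following the notes above, a hedged example registration (demo_task is a
# placeholder; the jitter and next_run_time values are illustrative):
from datetime import datetime

def demo_task():  # placeholder job body
    print("demo_task ran at", datetime.now())

# Run hourly, drift up to 10 s either way, and fire once immediately on startup.
scheduler.add_job(demo_task, trigger='interval', hours=1, jitter=10,
                  next_run_time=datetime.now(), id='demo_task')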

# ------------ Scheduled tasks: END ------------
# Scheduled-task definitions go above this line.
# Do not modify anything below.

scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
scheduler.add_listener(miss_listener, EVENT_JOB_MISSED)
scheduler._logger = logging


def query_task_status():
    scheduler.print_jobs()
    job_list = scheduler.get_jobs()
    for job in job_list:
        message = "任务状态展示: %s" % job
        print(message)
        record_info_log(task_log_file, message)


def scheduler_main():
    count = 1  # counter, used for timing control
    task_startup = False  # scheduler not started yet; guards the one-time scheduler.start()
    lock_status = False  # whether this host currently holds the lock
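    # The excerpt ends here; a hedged sketch of how the loop implied by the
    # flags above might continue (try_acquire_lock is a hypothetical helper
    # standing in for whatever distributed-lock check the original uses):
    while True:
        lock_status = try_acquire_lock()      # hypothetical lock probe
        if lock_status and not task_startup:
            scheduler.start()                 # start the scheduler exactly once
            task_startup = True
        count += 1
        time.sleep(10)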
Example #9
import datetime
import logging

from apscheduler.events import *
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.blocking import BlockingScheduler

logging.basicConfig()
logging.getLogger('apscheduler').setLevel(logging.ERROR)

jobstores = {
    'default':
    SQLAlchemyJobStore(url='mysql+pymysql://root:12345678@localhost:3306/test')
}

# sched = BlockingScheduler()
sched = BackgroundScheduler(jobstores=jobstores)
sched._logger = logging


def my_job():
    print(datetime.datetime.now())
    print(1 / 0)  # deliberately raises ZeroDivisionError to exercise the error listener


def append_file():
    with open('file.txt', 'a') as f:
        f.write(str(datetime.datetime.now()))
        f.write('\n')


def listener(event: SchedulerEvent):
    if isinstance(event, JobExecutionEvent) and event.exception:
        # The excerpt is truncated here; a minimal plausible body:
        print('Job %s raised: %r' % (event.job_id, event.exception))
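
A hedged wiring sketch for the pieces above (the schedules are illustrative):

sched.add_listener(listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
sched.add_job(my_job, 'interval', seconds=5, id='failing_job', replace_existing=True)
sched.add_job(append_file, 'interval', seconds=5, id='file_job', replace_existing=True)
sched.start()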
Example #10
# -*- coding: utf-8 -*-

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from tools import log
import events
from config import *

if __name__ == '__main__':
    # Startup initialization
    logger = log.logger_generator(logger_name='MusicClock')
    mk_dirs([excluded_file, tts_location, time_report_tts_location])

    # Launch/initialize tasks from the initiator
    events.schedules.initiator(logger_name='MusicClock')

    # Initialize the schedulers
    BlockScheduler = BlockingScheduler()
    BackScheduler = BackgroundScheduler()
    BackScheduler._logger = logger
    BlockScheduler._logger = logger
    logger.info(f'[ {logger.name} ] schedulers initialized')

    # Add jobs to the schedulers
    events.schedules.add_block_schedule_jobs(BlockScheduler)
    events.schedules.add_back_schedule_jobs(BackScheduler)
    # Collect all scheduled jobs and their timings, and send them via DingTalk
    BackScheduler.start()   # non-blocking; must start before the blocking scheduler
    BlockScheduler.start()  # blocks the main thread from here on
Example #11
from apscheduler.schedulers.background import BackgroundScheduler

from apps.manage.task.resolvePeaLog import resolvePeaLog
from apps.manage.task.resolveYyyLog import resolveYyyLog

from apps.manage.management.commands.monitor import Command as monitor
from apps.manage.management.commands.importYyyUser import Command as importYyyUser
from apps.manage.management.commands.importPeanutUser import Command as importPeanutUser
from apps.helper.timeHelper import timeHelper

import logging
scheduler = BackgroundScheduler()
scheduler._logger = logging.getLogger("apscheduler")

def resolveClickhouseLog():
    resolvePeaLog().resolveAll()
    resolveYyyLog().resolveAll()

def monitorTask():
    monitor().monitorTask()

def yiUsertask():
    yesterday = timeHelper.getFormatDate("none",timeHelper.getTimestamp(-86400))
    importYyyUser().doImport(str(yesterday), True)

def peanutUsertask():
    yesterday = timeHelper.getFormatDate("none", timeHelper.getTimestamp(-86400))
    importPeanutUser().doImport(str(yesterday), True)

# Used in production
scheduler.add_job(resolveClickhouseLog, 'cron', minute='*/10', id='test_job1', misfire_grace_time=3600)
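
The excerpt registers only the ClickHouse log-resolving job; a hedged sketch of wiring up the remaining helpers and starting the scheduler (the schedules are illustrative):

scheduler.add_job(monitorTask, 'interval', minutes=5, id='monitor_job', misfire_grace_time=600)
scheduler.add_job(yiUsertask, 'cron', hour=1, id='yyy_user_import')
scheduler.add_job(peanutUsertask, 'cron', hour=2, id='peanut_user_import')
scheduler.start()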