Example No. 1
def clean():
    log_file, log_level = log.get_log_args()
    logger = log.Logger(log_file, log_level)
    logger.logger.info("清理程序启动...")

    keep_days = conf.get("autocheck", "keep_days")[0]

    scheduler = BlockingScheduler()
    scheduler.add_job(clean_data,
                      'cron',
                      args=[logger, int(keep_days)],
                      day_of_week='0-6',
                      hour=1,
                      minute=10,
                      id='clean')
    scheduler.start()
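
For context, a minimal self-contained sketch of the wiring above, with a hypothetical stand-in for the project's clean_data (its real implementation is not shown here): the 'cron' trigger with day_of_week='0-6', hour=1, minute=10 fires every day at 01:10, and BlockingScheduler.start() then blocks the main thread.

from apscheduler.schedulers.blocking import BlockingScheduler

def clean_data(logger, keep_days):
    # Hypothetical stand-in: the real job purges records older than keep_days.
    print(f"Purging records older than {keep_days} days...")

scheduler = BlockingScheduler()
scheduler.add_job(clean_data, 'cron', args=[None, 30],
                  day_of_week='0-6', hour=1, minute=10, id='clean')
scheduler.start()  # blocks until the process is interrupted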
Example No. 2
def resource_show(hostname, check_dict, granularity_level, sender_alias,
                  receive, subject):
    log_file, log_level = log.get_log_args()
    logger = log.Logger(log_file, log_level)
    db = database.db()
    now_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
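    # SQLite datetime() modifier: "-24 hour" shifts now_time back one day, so
    # the queries below only pull records from the last 24 hours.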
    modifier = "-24 hour"
    message = ""

    # Reset the report directory
    report_dir = "report"
    shutil.rmtree(report_dir, ignore_errors=True)
    os.makedirs(report_dir, exist_ok=True)

    logger.logger.info("统计资源记录信息...")

    printf(f"统计开始时间: {now_time}")
    printf(f"主机名: {hostname}")
    printf("-" * 100)

    # System boot time
    sql = "select boot_time from boot_time order by record_time desc"
    boot_time = db.query_one(sql)[0]
    printf(f"系统启动时间: {boot_time}")
    printf("*" * 100)

    # Disk
    logger.logger.info("Summarizing disk records...")
    printf("Disk summary:")
    sql = "select distinct mounted from disk"
    disk_names = db.query_all(sql)
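    # Downsampling stride: 60 / host_check[0] estimates how many samples the
    # collector writes per hour (the unit of the configured interval is
    # assumed), and granularity_level widens the stride so only every N-th row
    # is printed; the clamp below keeps the stride >= 1 so the modulo filter
    # never evaluates `index % 0`.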
    disk_granularity_level = int(60 / int(check_dict['host_check'][0]) *
                                 granularity_level)
    disk_granularity_level = disk_granularity_level if disk_granularity_level != 0 else 1
    for i in disk_names:
        i = i[0]
        table = pt.PrettyTable(
            ["Record Time", "Mount Point", "Disk Name", "Total Size", "Used",
             "Used %", "Available"])
        sql = f"select record_time, name, total, used, used_percent, avail from disk "\
              f"where mounted=? "\
              f"and record_time > datetime('{now_time}', '{modifier}') "\
              f"order by record_time"
        disk_data = db.query_all(sql, (i, ))
        for index, item in enumerate(disk_data):
            if index % disk_granularity_level == 0 or index == 0:
                total = format_size(item[2])
                used = format_size(item[3])
                used_percent = f"{item[4]}%"
                avail = format_size(item[5])
                table.add_row(
                    (item[0], i, item[1], total, used, used_percent, avail))
        printf(f"{i}磁盘统计:")
        printf(table)
        printf("*" * 100)

    # CPU
    logger.logger.info("统计CPU记录信息...")
    printf("CPU统计:")
    cpu_granularity_level = int(60 / int(check_dict['host_check'][1]) *
                                granularity_level)
    cpu_granularity_level = cpu_granularity_level if cpu_granularity_level != 0 else 1
    table = pt.PrettyTable(["记录时间", "CPU核心数", "CPU使用率"])
    sql=f"select record_time, cpu_count, cpu_used_percent from cpu "\
            f"where record_time > datetime('{now_time}', '{modifier}') "\
            f"order by record_time"
    cpu_data = db.query_all(sql)
    for index, item in enumerate(cpu_data):
        if index % cpu_granularity_level == 0 or index == 0:
            used_percent = f"{item[2]}%"
            table.add_row((item[0], item[1], used_percent))
    printf(table)
    printf("*" * 100)

    # MEM
    logger.logger.info("统计Mem记录信息...")
    printf("内存统计:")
    mem_granularity_level = int(60 / int(check_dict['host_check'][2]) *
                                granularity_level)
    mem_granularity_level = mem_granularity_level if mem_granularity_level != 0 else 1
    table = pt.PrettyTable(
        ["Record Time", "Total", "Available", "Used", "Used %", "Free"])
    sql = f"select record_time, total, avail, used, used_percent, free from memory "\
          f"where record_time > datetime('{now_time}', '{modifier}') "\
          f"order by record_time"
    mem_data = db.query_all(sql)
    for index, item in enumerate(mem_data):
        if index % mem_granularity_level == 0 or index == 0:
            total = format_size(item[1])
            avail = format_size(item[2])
            used = format_size(item[3])
            used_percent = f"{item[4]}%"
            free = format_size(item[5])
            table.add_row((item[0], total, avail, used, used_percent, free))
    printf(table)
    printf("*" * 100)

    # Swap
    logger.logger.info("统计Swap记录信息...")
    printf("Swap统计:")
    swap_granularity_level = int(60 / int(check_dict['host_check'][3]) *
                                 granularity_level)
    swap_granularity_level = swap_granularity_level if swap_granularity_level != 0 else 1
    table = pt.PrettyTable(["记录时间", "Swap大小", "已使用", "已使用百分比", "剩余"])
    sql=f"select record_time, total, used, used_percent, free from swap "\
            f"where record_time > datetime('{now_time}', '{modifier}') "\
            f"order by record_time"
    swap_data = db.query_all(sql)
    for index, item in enumerate(swap_data):
        if index % swap_granularity_level == 0 or index == 0:
            total = format_size(item[1])
            used = format_size(item[2])
            used_percent = f"{item[3]}%"
            free = format_size(item[4])
            table.add_row((item[0], total, used, used_percent, free))
    printf(table)
    printf("*" * 100)

    # Tomcat
    if check_dict["tomcat_check"][0] == "1":
        logger.logger.info("统计Tomcat记录信息...")
        printf("Tomcat统计:")
        tomcat_granularity_level = int(
            60 / int(check_dict['tomcat_check'][1]) * granularity_level)
        tomcat_granularity_level = tomcat_granularity_level if tomcat_granularity_level != 0 else 1
        version = db.query_one("select version from tomcat_java_version")[0]
        printf(f"Java版本: {version}")
        printf("*" * 100)
        #sql="select distinct port from tomcat_constant"
        #tomcat_ports=db.query_all(sql)
        tomcat_ports = conf.get("tomcat", "tomcat_port")[0].split(",")
        tomcat_constant_data = []
        for i in tomcat_ports:
            port = int(i.strip())
            constant_sql=f"select record_time, pid, port, boot_time, cmdline from tomcat_constant "\
                    f"where port=? "\
                    f"and '{now_time}' >= record_time "\
                    f"order by record_time desc"
            variable_sql=f"select record_time, pid, men_used, mem_used_percent, connections, threads_num from tomcat_variable "\
                    f"where port=? "\
                    f"and record_time > datetime('{now_time}', '{modifier}') "\
                    f"order by record_time"
            if version == "8":
                jvm_sql=f"select record_time, S0, S1, E, O, M, CCS, YGC, YGCT, FGC, FGCT, GCT from tomcat_jstat8 "\
                        f"where port=? "\
                        f"and record_time > datetime('{now_time}', '{modifier}') "\
                        f"order by record_time"
                jvm_table = pt.PrettyTable([
                    "记录时间", "S0", "S1", "E", "O", "M", "CCS", "YGC", "YGCT",
                    "FGC", "FGCT", "GCT"
                ])
            elif version == "7":
                jvm_sql=f"select record_time, S0, S1, E, O, P, YGC, YGCT, FGC, FGCT, GCT from tomcat_jstat7 "\
                        f"where port=? "\
                        f"and record_time > datetime('{now_time}', '{modifier}') "\
                        f"order by record_time"
                jvm_table = pt.PrettyTable([
                    "记录时间", "S0", "S1", "E", "O", "P", "YGC", "YGCT", "FGC",
                    "FGCT", "GCT"
                ])

            constant_table = pt.PrettyTable(
                ["Record Time", "Pid", "Port", "Boot Time", "Command Line"])
            tomcat_constant_data = db.query_one(constant_sql, (port, ))
            constant_table.add_row(tomcat_constant_data)

            variable_table = pt.PrettyTable(
                ["Record Time", "Pid", "Memory Used", "Memory Used %",
                 "Connections", "Threads"])
            tomcat_variable_data = db.query_all(variable_sql, (port, ))
            for index, item in enumerate(tomcat_variable_data):
                if index % tomcat_granularity_level == 0 or index == 0:
                    mem_used = format_size(item[2])
                    mem_used_percent = f"{item[3]:.2f}%"
                    variable_table.add_row(
                        (item[0], item[1], mem_used, mem_used_percent, item[4],
                         item[5]))

            tomcat_jvm_data = db.query_all(jvm_sql, (port, ))
            for index, item in enumerate(tomcat_jvm_data):
                if index % tomcat_granularity_level == 0 or index == 0:
                    jvm_table.add_row(item)

            printf(f"Tomcat({port})统计信息:")
            printf("启动信息:")
            printf(constant_table)
            printf("运行信息:")
            printf(variable_table)
            printf("Jvm内存信息:")
            printf(jvm_table)
            printf("*" * 100)

    # Redis
    if check_dict["redis_check"][0] == "1":
        logger.logger.info("统计Redis记录信息...")
        printf("Redis统计:")
        redis_granularity_level = int(60 / int(check_dict['redis_check'][1]) *
                                      granularity_level)
        redis_granularity_level = redis_granularity_level if redis_granularity_level != 0 else 1
        printf("*" * 100)

        constant_sql=f"select record_time, pid, port, boot_time from redis_constant "\
                f"where '{now_time}' >= record_time "\
                f"order by record_time desc"
        variable_sql=f"select record_time, pid, mem_used, mem_used_percent, connections, threads_num from redis_variable "\
                f"where record_time > datetime('{now_time}', '{modifier}') "\
                f"order by record_time"

        # Startup info
        constant_table = pt.PrettyTable(["Record Time", "Pid", "Port", "Boot Time"])
        constant_data = db.query_one(constant_sql)
        constant_table.add_row(constant_data)

        # Runtime info
        variable_table = pt.PrettyTable(
            ["Record Time", "Pid", "Memory Used", "Memory Used %",
             "Connections", "Threads"])
        variable_data = db.query_all(variable_sql)
        for index, item in enumerate(variable_data):
            # Bug fix: the original filtered on tomcat_granularity_level,
            # which is unbound when the Tomcat check is disabled.
            if index % redis_granularity_level == 0 or index == 0:
                mem_used = format_size(item[2])
                mem_used_percent = f"{item[3]:.2f}%"
                variable_table.add_row((item[0], item[1], mem_used,
                                        mem_used_percent, item[4], item[5]))

        # Master/slave info
        role = db.query_one("select role from redis_role")[0]
        if role == "master":
            master_slave_sql = "select a.record_time, connected_slave, slave_ip, slave_port, slave_state from redis_master a ,redis_slaves_info b on a.record_time=b.record_time where a.record_time=(select max(record_time) from redis_master)"
            master_slave_table = pt.PrettyTable(
                ["Record Time", "Slave Count", "Slave IP", "Slave Port",
                 "Slave State"])
            master_slave_data = db.query_all(master_slave_sql)
            for i in master_slave_data:
                master_slave_table.add_row(i)
        elif role == "slave":
            master_slave_sql = "select record_time, pid, master_host, master_port, master_link_status from redis_slave order by record_time desc"
            master_slave_table = pt.PrettyTable(
                ["Record Time", "Pid", "Master Host", "Master Port",
                 "Master Link Status"])
            master_slave_data = db.query_one(master_slave_sql)
            master_slave_table.add_row(master_slave_data)

        # Sentinel monitoring info
        sentinel_sql = "select a.record_time, role, host, a.port from redis_sentinel a, redis_constant b on a.record_time=b.record_time where b.record_time=(select max(record_time) from redis_constant)"
        sentinel_table = pt.PrettyTable(["Record Time", "Role", "IP", "Port"])
        sentinel_data = db.query_all(sentinel_sql)
        for i in sentinel_data:
            sentinel_table.add_row(i)

        printf("启动信息:")
        printf(constant_table)
        printf("运行信息:")
        printf(variable_table)
        printf("集群信息:")
        printf(f"当前角色: {role}")
        printf(master_slave_table)
        printf("Sentinel监控信息:")
        printf(sentinel_table)
        printf("*" * 100)

    # backup
    if check_dict["backup_check"] == "1":
        logger.logger.info("统计备份记录信息...")
        printf("备份统计:")
        backup_dirs = conf.get("backup", "dir")[0].split(",")
        for i in backup_dirs:
            directory = i.strip()
            table = pt.PrettyTable(["记录时间", "备份文件", "大小", "创建时间"])
            sql=f"select record_time, filename, size, ctime from backup "\
                    f"where directory=?"\
                    f"order by ctime"
            backup_data = db.query_all(sql, (directory, ))
            for j in backup_data:
                if j[2] is not None:
                    size = format_size(j[2])
                    table.add_row((j[0], j[1], size, j[3]))

            printf(f"备份({directory})统计信息:")
            printf(table)
            printf("*" * 100)

    # MySQL
    if check_dict["mysql_check"][0] == "1":
        logger.logger.info("统计MySQL记录信息...")
        printf("MySQL统计:")
        mysql_granularity_level = int(60 / int(check_dict['mysql_check'][1]) *
                                      granularity_level)
        mysql_granularity_level = mysql_granularity_level if mysql_granularity_level != 0 else 1
        printf("*" * 100)

        constant_sql=f"select record_time, pid, port, boot_time from mysql_constant "\
                f"where '{now_time}' >= record_time "\
                f"order by record_time desc"
        variable_sql=f"select record_time, pid, mem_used, mem_used_percent, connections, threads_num from mysql_variable "\
                f"where record_time > datetime('{now_time}', '{modifier}') "\
                f"order by record_time"

        # Startup info
        constant_table = pt.PrettyTable(["Record Time", "Pid", "Port", "Boot Time"])
        constant_data = db.query_one(constant_sql)
        constant_table.add_row(constant_data)

        # Runtime info
        variable_table = pt.PrettyTable(
            ["Record Time", "Pid", "Memory Used", "Memory Used %",
             "Connections", "Threads"])
        variable_data = db.query_all(variable_sql)
        for index, item in enumerate(variable_data):
            if index % mysql_granularity_level == 0 or index == 0:
                mem_used = format_size(item[2])
                mem_used_percent = f"{item[3]:.2f}%"
                variable_table.add_row((item[0], item[1], mem_used,
                                        mem_used_percent, item[4], item[5]))

        # Master/slave info
        role = db.query_one("select role from mysql_role")[0]
        if role == "master":
            master_slave_sql = "select record_time, pid, slave_num, binlog_do_db, binlog_ignore_db from mysql_master order by record_time desc"
            master_slave_table = pt.PrettyTable(
                ["Record Time", "Pid", "Slave Count", "Binlog_do_db",
                 "Binlog_ignore_db"])
        elif role == "slave":
            master_slave_sql="select record_time, pid, master_host, master_port, replicate_do_db, replicate_ignore_db, "\
                    "slave_io_thread, slave_io_state, slave_sql_thread, slave_sql_state, "\
                    "master_uuid, retrieved_gtid_set, executed_gtid_set, seconds_behind_master "\
                    "from mysql_slave order by record_time desc"
            master_slave_table = pt.PrettyTable([
                "记录时间", "Pid", "Master主机", "Master端口", "同步数据库", "非同步数据库",
                "Slave_IO线程", "Slave_IO状态", "Slave_SQL线程", "Slave_SQL状态",
                "Master_UUID", "已接收的GTID集合", "已执行的GTID集合", "Slave落后Master的秒数"
            ])
        master_slave_data = (db.query_one(master_slave_sql))
        if master_slave_data is not None:
            master_slave_table.add_row(master_slave_data)

        printf("启动信息:")
        printf(constant_table)
        printf("运行信息:")
        printf(variable_table)
        printf("集群信息:")
        printf(f"当前角色: {role}")
        printf(master_slave_table)
        printf("*" * 100)

        # Slow query log
        printf("Slow query log info:")
        mysql_user, mysql_ip, mysql_port, mysql_password = conf.get(
            "mysql", "mysql_user", "mysql_ip", "mysql_port", "mysql_password")
        mysql_flag, msg = mysql.export_slow_log(
            logger, mysql_user, mysql_ip, mysql_password, mysql_port,
            f"{report_dir}/slow_analysis.log", f"{report_dir}/slow.log")
        if mysql_flag == 1:
            message = f"该附件存在MySQL慢日志"
        printf(msg)
        printf("*" * 100)

    # Oracle tablespaces
    if check_dict["oracle_check"][0] == "1":
        logger.logger.info("Summarizing Oracle tablespace records...")
        printf("Oracle tablespace summary:")
        oracle_granularity_level = int(
            60 / int(check_dict['oracle_check'][1]) * granularity_level)
        oracle_granularity_level = oracle_granularity_level if oracle_granularity_level != 0 else 1

        sql = "select distinct tablespace_name from oracle"
        tablespace_names = db.query_all(sql)
        for i in tablespace_names:
            i = i[0]
            table = pt.PrettyTable(
                ["Record Time", "Tablespace Name", "Total Size", "Used",
                 "Used %", "Available"])
            sql = f"select record_time, size, used, used_percent, free from oracle "\
                  f"where tablespace_name=? "\
                  f"and record_time > datetime('{now_time}', '{modifier}') "\
                  f"order by record_time"
            tablespace_data = db.query_all(sql, (i, ))
            for index, item in enumerate(tablespace_data):
                if index % oracle_granularity_level == 0 or index == 0:
                    total = format_size(item[1])
                    used = format_size(item[2])
                    used_percent = f"{item[3]}%"
                    free = format_size(item[4])
                    table.add_row(
                        (item[0], i, total, used, used_percent, free))
            printf(f"{i}表空间统计:")
            printf(table)
            printf("*" * 100)
        # awr
        logger.logger.info("Generating AWR report...")
        printf("AWR report info:")
        awr_hours = conf.get("oracle", "awr_hours")[0]
        if oracle.generate_awr(int(awr_hours), report_dir) == 0:
            printf("See the awr.html file in the attachment")
        else:
            printf("Failed to generate the AWR report, please generate it manually")

    logger.logger.info("统计资源结束...")
    printf("-" * 100)
    end_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    printf(f"统计结束时间: {end_time}")

    tar_file = tar_report(logger, report_dir)
    sender_alias, receive, subject = conf.get("mail", "sender", "receive",
                                              "subject")

    warning_msg = f"\n请查看统计报告.\n\n{message}"
    mail.send(logger,
              warning_msg,
              sender_alias,
              receive,
              subject,
              msg="report",
              attachment_file=tar_file)
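
This example leans on two helpers defined elsewhere in the project. Below is a minimal sketch of plausible implementations, inferred from the call sites rather than confirmed by the source (the report path is hypothetical): printf is assumed to echo to stdout and append to the report file, and format_size to render a byte count as a human-readable string.

import math

REPORT_FILE = "report/report.txt"  # hypothetical path

def printf(content):
    # Assumed behavior: echo to stdout and append to the report file.
    print(content)
    with open(REPORT_FILE, "a", encoding="utf-8") as f:
        f.write(f"{content}\n")

def format_size(num_bytes):
    # Render a byte count such as 1536 as "1.50KB".
    units = ["B", "KB", "MB", "GB", "TB"]
    if not num_bytes:
        return "0B"
    exp = min(int(math.log(num_bytes, 1024)), len(units) - 1)
    return f"{num_bytes / 1024 ** exp:.2f}{units[exp]}"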
Example No. 3
def analysis():
    log_file, log_level = log.get_log_args()
    logger = log.Logger(log_file, log_level)
    logger.logger.info("Starting resource analysis...")

    sender_alias, receive, subject = conf.get("mail", "sender", "receive",
                                              "subject")

    warning_percent, warning_interval, analysis_interval = conf.get(
        "autocheck", "warning_percent", "warning_interval",
        "analysis_interval")

    disk_interval, cpu_interval, memory_interval = conf.get(
        "host", "disk_interval", "cpu_interval", "memory_interval")

    min_value = 5  # note: unused in this function
    warning_percent = float(warning_percent)
    warning_interval = int(warning_interval)
    analysis_interval = int(analysis_interval)
    disk_interval = int(disk_interval) + analysis_interval
    cpu_interval = int(cpu_interval) + analysis_interval
    memory_interval = int(memory_interval) + analysis_interval

    max_threads = 20
    executors = {"default": ThreadPoolExecutor(max_threads)}
    job_defaults = {
        "coalesce": True,
        "max_instances": 1,
        "misfire_grace_time": 3,
    }
    scheduler = BlockingScheduler(job_defaults=job_defaults, executors=executors)
    # Host resource analysis
    logger.logger.info("Starting host resource analysis...")
    scheduler.add_job(host.disk_analysis, 'interval',
                      args=[log_file, log_level, warning_percent,
                            warning_interval, sender_alias, receive, subject],
                      seconds=disk_interval, id='disk_ana')
    scheduler.add_job(host.cpu_analysis, 'interval',
                      args=[log_file, log_level, warning_percent,
                            warning_interval, sender_alias, receive, subject],
                      seconds=cpu_interval, id='cpu_ana')
    scheduler.add_job(host.memory_analysis, 'interval',
                      args=[log_file, log_level, warning_percent,
                            warning_interval, sender_alias, receive, subject],
                      seconds=memory_interval, id='mem_ana')

    # users_limit
    logger.logger.info("Starting user resource analysis...")
    scheduler.add_job(user_resource.analysis, 'interval',
                      args=[log_file, log_level, 0, sender_alias, receive,
                            subject],
                      next_run_time=datetime.datetime.now() +
                      datetime.timedelta(seconds=15),
                      minutes=65, id='user_limit_ana')

    # Tomcat resources
    tomcat_check = conf.get("tomcat", "check")[0]
    if tomcat_check == '1':
        tomcat_interval = conf.get("tomcat", "tomcat_interval")[0]
        tomcat_interval = int(tomcat_interval) + analysis_interval
        logger.logger.info("Starting Tomcat resource analysis...")
        scheduler.add_job(tomcat.running_analysis, 'interval',
                          args=[log_file, log_level, warning_interval,
                                sender_alias, receive, subject],
                          seconds=tomcat_interval, id='tomcat_run_ana')
        scheduler.add_job(tomcat.jvm_analysis, 'interval',
                          args=[log_file, log_level, warning_interval,
                                sender_alias, receive, subject],
                          seconds=tomcat_interval, id='tomcat_jvm_ana')

    # Redis resources
    redis_check = conf.get("redis", "check")[0]
    if redis_check == "1":
        redis_interval = conf.get("redis", "redis_interval")[0]
        redis_interval = int(redis_interval) + analysis_interval
        logger.logger.info("Starting Redis resource analysis...")
        scheduler.add_job(redis.running_analysis, 'interval',
                          args=[log_file, log_level, warning_interval,
                                sender_alias, receive, subject],
                          seconds=redis_interval, id='redis_run_ana')
        scheduler.add_job(redis.master_slave_analysis, 'interval',
                          args=[log_file, log_level, warning_interval,
                                sender_alias, receive, subject],
                          seconds=redis_interval, id='redis_slave_ana')

    # MySQL resources
    mysql_check = conf.get("mysql", "check")[0]
    if mysql_check == "1":
        mysql_interval, seconds_behind_master = conf.get(
            "mysql", "mysql_interval", "seconds_behind_master")
        mysql_interval = int(mysql_interval) + analysis_interval
        logger.logger.info("Starting MySQL resource analysis...")
        scheduler.add_job(mysql.running_analysis, 'interval',
                          args=[log_file, log_level, warning_interval,
                                sender_alias, receive, subject],
                          seconds=mysql_interval, id='mysql_run_ana')
        scheduler.add_job(mysql.master_slave_analysis, 'interval',
                          args=[log_file, log_level,
                                int(seconds_behind_master), warning_interval,
                                sender_alias, receive, subject],
                          seconds=mysql_interval, id='mysql_slave_ana')

    # Oracle resources
    oracle_check = conf.get("oracle", "check")[0]
    if oracle_check == "1":
        oracle_interval = conf.get("oracle", "oracle_interval")[0]
        oracle_interval = int(oracle_interval) + analysis_interval
        logger.logger.info("Starting Oracle analysis...")
        scheduler.add_job(oracle.tablespace_analysis, 'interval',
                          args=[log_file, log_level, warning_percent,
                                warning_interval, sender_alias, receive,
                                subject],
                          seconds=oracle_interval, id='oracle_tablespace_ana')

    # backup
    backup_check, backup_dir, backup_cron_time = conf.get(
        "backup", "check", "dir", "cron_time")
    if backup_check == "1":
        dir_list = [i.strip() for i in backup_dir.split(",")]
        cron_time_list = [i.strip() for i in backup_cron_time.split(",")]

        for i in range(len(dir_list)):
            directory = dir_list[i]
            cron_time = cron_time_list[i].split(":")
            hour = cron_time[0].strip()
            minute = cron_time[1].strip()
            scheduler.add_job(backup.analysis, 'cron',
                              args=[log_file, log_level, directory, 0,
                                    sender_alias, receive, subject],
                              day_of_week='0-6', hour=int(hour),
                              minute=int(minute) + 1, id=f'backup{i}_ana')

    scheduler.start()
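
For reference, a minimal sketch of what the job_defaults above control in APScheduler: coalesce=True collapses a backlog of missed runs into a single run, max_instances=1 forbids two concurrent runs of the same job, and misfire_grace_time=3 skips any run that fires more than 3 seconds late.

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler(
    executors={"default": ThreadPoolExecutor(20)},
    job_defaults={
        "coalesce": True,           # merge missed runs into one catch-up run
        "max_instances": 1,         # never overlap runs of the same job
        "misfire_grace_time": 3,    # drop runs that are more than 3s late
    },
)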
Example No. 4
def record():
    log_file, log_level = log.get_log_args()
    logger = log.Logger(log_file, log_level)
    logger.logger.info("开始采集资源信息...")

    max_threads = 50
    executors = {"default": ThreadPoolExecutor(max_threads)}
    job_defaults = {
        "coalesce": True,
        "max_instances": 1,
        "misfire_grace_time": 3,
    }
    scheduler = BlockingScheduler(job_defaults=job_defaults,
                                  executors=executors)

    min_value = 10

    # Host resource collection
    logger.logger.info("Starting host resource collection...")
    disk_interval, cpu_interval, memory_interval, swap_interval, users_limit = conf.get(
        "host", "disk_interval", "cpu_interval", "memory_interval",
        "swap_interval", "users_limit")
    if int(disk_interval) < min_value:
        disk_interval = min_value
    if int(cpu_interval) < min_value:
        cpu_interval = min_value
    if int(memory_interval) < min_value:
        memory_interval = min_value
    if int(swap_interval) < min_value:
        swap_interval = min_value

    logger.logger.info("开始采集磁盘资源信息...")
    scheduler.add_job(host.disk_record,
                      'interval',
                      args=[log_file, log_level],
                      seconds=int(disk_interval),
                      id='disk_record')
    logger.logger.info("开始采集CPU资源信息...")
    scheduler.add_job(host.cpu_record,
                      'interval',
                      args=[log_file, log_level],
                      seconds=int(cpu_interval),
                      id='cpu_record')
    logger.logger.info("开始采集内存资源信息...")
    scheduler.add_job(host.memory_record,
                      'interval',
                      args=[log_file, log_level],
                      seconds=int(memory_interval),
                      id='memory_record')
    logger.logger.info("开始采集Swap资源信息...")
    scheduler.add_job(host.swap_record,
                      'interval',
                      args=[log_file, log_level],
                      seconds=int(swap_interval),
                      id='swap_record')
    logger.logger.info("开始采集启动时间资源信息...")
    #scheduler.add_job(host.boot_time_record, 'interval', args=[log_file, log_level], seconds=int(boot_time_interval), id='boot_time_record')
    host.boot_time_record(log_file, log_level)

    # User resource limits
    logger.logger.info("Starting user limit recording...")
    if users_limit is not None:
        users_limit_list = []
        for i in users_limit.split(","):
            users_limit_list.append(i.strip())

        for user in users_limit_list:
            scheduler.add_job(user_resource.record,
                              'interval',
                              args=[log_file, log_level, user],
                              next_run_time=datetime.datetime.now() +
                              datetime.timedelta(seconds=5),
                              minutes=60,
                              id=f'{user}_limit')

    # Tomcat resources
    tomcat_check, tomcat_interval, tomcat_port = conf.get(
        "tomcat",
        "check",
        "tomcat_interval",
        "tomcat_port",
    )
    if tomcat_check == '1':
        logger.logger.info("开始采集Tomcat资源信息...")
        tomcat_port_list = []  # 将tomcat_port参数改为列表
        for i in tomcat_port.split(","):
            tomcat_port_list.append(i.strip())
        if int(tomcat_interval) < min_value:
            tomcat_interval = min_value
        scheduler.add_job(tomcat.record,
                          'interval',
                          args=[log_file, log_level, tomcat_port_list],
                          seconds=int(tomcat_interval),
                          id='tomcat_record')

    # Redis resources
    redis_check, redis_interval, redis_password, redis_port, sentinel_port, sentinel_name, commands = conf.get(
        "redis", "check", "redis_interval", "password", "redis_port",
        "sentinel_port", "sentinel_name", "commands")
    if redis_check == "1":
        if int(redis_interval) < min_value:
            redis_interval = min_value
        logger.logger.info("开始采集Redis资源信息...")
        scheduler.add_job(redis.record, 'interval', args=[log_file, log_level, redis_password, redis_port, sentinel_port, sentinel_name, commands], \
                seconds=int(redis_interval), id='redis_record')

    # backup
    backup_check, backup_dir, backup_regular, backup_cron_time = conf.get(
        "backup", "check", "dir", "regular", "cron_time")
    if backup_check == "1":
        logger.logger.info("开始记录备份信息...")
        dir_list = []
        for i in backup_dir.split(","):
            dir_list.append(i.strip())

        regular_list = []
        for i in backup_regular.split(","):
            regular_list.append(i.strip())

        cron_time_list = []
        for i in backup_cron_time.split(","):
            cron_time_list.append(i.strip())

        for i in range(len(dir_list)):
            directory = dir_list[i]
            regular = regular_list[i]
            cron_time = cron_time_list[i].split(":")
            hour = cron_time[0].strip()
            minute = cron_time[1].strip()
            scheduler.add_job(backup.record,
                              'cron',
                              args=[log_file, log_level, directory, regular],
                              next_run_time=datetime.datetime.now(),
                              day_of_week='0-6',
                              hour=int(hour),
                              minute=int(minute),
                              id=f'backup{i}')

    # MySQL collection
    mysql_check, mysql_interval, mysql_user, mysql_ip, mysql_port, mysql_password = conf.get(
        "mysql", "check", "mysql_interval", "mysql_user", "mysql_ip",
        "mysql_port", "mysql_password")
    if mysql_check == "1":
        if int(mysql_interval) < min_value:
            mysql_interval = min_value
        logger.logger.info("开始采集MySQL资源信息...")
        scheduler.add_job(mysql.record,
                          'interval',
                          args=[
                              log_file, log_level, mysql_user, mysql_ip,
                              mysql_password, mysql_port
                          ],
                          seconds=int(mysql_interval),
                          id='mysql_record')

    # Oracle collection
    oracle_check, oracle_interval = conf.get("oracle", "check",
                                             "oracle_interval")
    if oracle_check == "1":
        if int(oracle_interval) < min_value:
            oracle_interval = min_value
        logger.logger.info("开始记录Oracle信息...")
        scheduler.add_job(oracle.record,
                          'interval',
                          args=[log_file, log_level],
                          seconds=int(oracle_interval),
                          id='oracle_record')

    # Record and analyze keyword matching
    matching_check, matching_files, matching_keys, matching_interval = conf.get(
        "matching", "check", "matching_files", "matching_keys",
        "matching_interval")
    if matching_check == "1":
        matching_min_value = 1
        # Bug fix: the original clamped matching_check (the enable flag)
        # instead of the interval; it also misspelled "matching_interal".
        if int(matching_interval) < matching_min_value:
            matching_interval = matching_min_value
        logger.logger.info("Starting match collection...")

        matching_dict = dict(
            zip([x.strip() for x in matching_files.split(",")],
                [x.strip() for x in matching_keys.split(",")]))
        record_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        db = database.db()
        # Iterate over a copy of the keys: entries for missing files are
        # popped from matching_dict inside the loop, which would otherwise
        # raise "dictionary changed size during iteration".
        for matching_file in list(matching_dict):
            if os.path.exists(matching_file):
                sql = "insert into matching values(?, ?, ?, ?, ?)"
                filesize = os.stat(matching_file).st_size
                db.update_one(sql,
                              (record_time, matching_file,
                               matching_dict[matching_file], "all", filesize))
            else:
                logger.logger.error(
                    f"Error: file {matching_file} in the [matching] config does not exist")
                matching_dict.pop(matching_file)
        scheduler.add_job(matching.matching_records,
                          'interval',
                          args=[log_file, log_level, matching_dict],
                          seconds=int(matching_interval),
                          id='matching')

    scheduler.start()
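
Since BlockingScheduler.start() never returns on its own, a caller would typically wrap record() to exit cleanly on Ctrl-C; a minimal usage sketch:

if __name__ == "__main__":
    try:
        record()  # blocks inside scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass  # interrupting start() lets the scheduler shut down its jobs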