def get_log_args():
    db = database.db()
    sql_log_file = "select value from status where section='logs' and option='log_file'"
    sql_log_level = "select value from status where section='logs' and option='log_level'"
    log_file = db.query_one(sql_log_file)[0]
    log_level = db.query_one(sql_log_level)[0]
    return (log_file, log_level)
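# A minimal usage sketch (assumed context): nearly every record/analysis function
# below resolves its logging arguments from the status table this way and then
# builds a Logger. `log.Logger` and `database.db` are this project's own helpers.
#
#   log_file, log_level = get_log_args()
#   logger = log.Logger(log_file, log_level)
#   logger.logger.info("...")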
def matching_analysis(log_file, log_level, warning_interval, matching_dict, notify_dict):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    for matching_file in matching_dict:
        sql = "select record_time, matching_context from matching \
                where record_time=( \
                    select max(record_time) from matching \
                    where matching_context!=? and matching_file=? and matching_key=? \
                )"
        data = db.query_one(sql, ("all", matching_file, matching_dict[matching_file]))
        logger.logger.debug("Analyzing log matches...")
        if data is not None:
            if data[1] != 'Nothing':
                warning_msg = f"Keyword \"{matching_dict[matching_file]}\" found in line \"{data[1].strip()}\" of file \"{matching_file}\""
                msg = f"{matching_file}_{matching_dict[matching_file]}"
                warning_flag = warning.non_remedial_warning(
                    logger, db, "matching", msg, warning_msg, data[0], warning_interval)
                if warning_flag:
                    warning_msg = f"Log analysis warning:\n{warning_msg}\n"
                    notification.send(logger, warning_msg, notify_dict, msg=msg)
def master_slave_analysis(log_file, log_level, warning_interval, sender_alias, receive, subject):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    sql = "select a.role, a.master_link_status, a.master_host from redis_slave as a, redis_role as b where a.record_time=b.record_time and a.role=b.role"
    data = db.query_one(sql)
    if data is not None:
        logger.logger.debug("Analyzing Redis master-slave status...")
        if data[1] == "up" or data[1] == "online":
            flag = 0
        else:
            flag = 1
    else:
        flag = 0
    warning_flag = warning.warning(logger, db, flag, "redis", "slave", warning_interval)
    if warning_flag:
        warning_msg = f"Redis warning:\nRedis slave cannot connect to master ({data[2]})\n"
        mail.send(logger, warning_msg, sender_alias, receive, subject, msg='redis_slave')
def running_analysis(log_file, log_level, warning_interval, sender_alias, receive, subject):
    logger = log.Logger(log_file, log_level)
    logger.logger.debug("Analyzing Tomcat running status...")
    db = database.db()
    """
    sql="select record_time, port, pid from tomcat_constant where (port,record_time) in (select port,max(record_time) from tomcat_constant group by port)"
    Older sqlite3 versions do not support multi-column IN queries, unfortunately...
    """
    sql = "select port, pid from tomcat_constant where record_time=(select max(record_time) from tomcat_constant)"
    data = db.query_all(sql)
    for i in data:
        flag = 0
        if i[1] == 0:
            flag = 1
        warning_flag = warning.warning(logger, db, flag, i[0], "running", warning_interval)
        if warning_flag:
            warning_msg = f"Tomcat warning:\nTomcat ({i[0]}) is not running\n"
            mail.send(logger, warning_msg, sender_alias, receive, subject, msg=f'tomcat{i[0]}_running')
def master_slave_analysis(log_file, log_level, seconds_behind_master, warning_interval, sender_alias, receive, subject):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    sql = "select role, slave_io_thread, slave_sql_thread, seconds_behind_master, slave_io_state, slave_sql_state from mysql_slave, mysql_role where mysql_role.record_time=mysql_slave.record_time"
    data = db.query_one(sql)
    conn_msg = "slave_conn"
    delay_msg = "slave_delay"
    if data is not None and data[0] == "slave":
        logger.logger.debug("Analyzing MySQL master-slave status...")
        if data[1].lower() == data[2].lower() == "yes":
            conn_flag = 0
            delay_flag = 1 if data[3] >= seconds_behind_master else 0
        else:
            conn_flag = 1
            delay_flag = None
        for flag, msg in [(conn_flag, conn_msg), (delay_flag, delay_msg)]:
            if flag is not None:
                warning_flag = warning.warning(logger, db, flag, "mysql", msg, warning_interval)
                if warning_flag:
                    warning_msg = "MySQL warning:\n" \
                        "MySQL master-slave connection:\n" \
                        f"Slave_IO_Running: {data[1]}\n" \
                        f"Slave_SQL_Running: {data[2]}\n" \
                        f"Slave_IO_State: {data[4]}\n" \
                        f"Slave_SQL_Running_State: {data[5]}\n" \
                        f"Seconds_Behind_Master: {data[3]}"
                    mail.send(logger, warning_msg, sender_alias, receive, subject, msg=msg)
def config_to_db(config_file):
    """Read the config file and write its values into the db."""
    logger.logger.debug("Reading config file...")
    db = database.db()
    sql = "select section, option from status where flag=1"
    config_init = db.query_all(sql)
    cfg = configparser.ConfigParser()
    cfg.read(config_file)
    config_list = []
    for i in config_init:
        section = i[0]
        option = i[1]
        value = get_config(cfg, section, option)
        config_list.append((value, section, option))
    try:
        logger.logger.debug("Writing config values to the database...")
        sql = "update status set value=? where section=? and option=?"
        db.update_all(sql, config_list)
        db.close()
    except Exception as e:
        print(f"Error: failed to write config file to the database ({e})")
        exit()
def get(section, *option):
    db = database.db()
    values = []
    sql = "select value from status where section=? and option=?"
    for i in option:
        values.append(db.query_one(sql, (section, i))[0])
    return values
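# A minimal usage sketch: callers elsewhere in this codebase import this module
# as `conf` and always get a list back, even for a single option, hence the
# trailing [0] (both calls below appear verbatim later in this section):
#
#   sender_alias, receive, subject = conf.get("mail", "sender", "receive", "subject")
#   backup_dirs = conf.get("backup", "dir")[0].split(",")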
def jvm_analysis(log_file, log_level, warning_interval, sender_alias, receive, subject):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    logger.logger.debug("Analyzing JVM memory usage...")
    java_version = db.query_one("select version from tomcat_java_version")[0]
    table_name = f"tomcat_jstat{java_version}"
    sql = f"select port, ygc, ygct, fgc, fgct from {table_name} where record_time=(select max(record_time) from {table_name})"
    data = db.query_all(sql)
    ygc_warning_time = 1
    fgc_warning_time = 10
    #ygc_warning_time=0.01
    #fgc_warning_time=0
    for i in data:
        port = i[0]
        # Average GC pause = total GC time / GC count; guard against zero counts.
        if i[1] == 0:
            ygc_time = 0
        else:
            ygc_time = i[2] / i[1]
        if i[3] == 0:
            fgc_time = 0
        else:
            fgc_time = i[4] / i[3]
        ygc_flag = 0
        if ygc_time >= ygc_warning_time:
            ygc_flag = 1
            logger.logger.warning(f"Average YGC time for Tomcat ({port}): {ygc_time}")
        warning_flag = warning.warning(logger, db, ygc_flag, port, "ygc", warning_interval)
        if warning_flag:
            warning_msg = f"Tomcat warning:\nAverage YGC time for Tomcat ({port}) is {ygc_time}\n"
            mail.send(logger, warning_msg, sender_alias, receive, subject, msg=f'tomcat{port}_ygc')
        fgc_flag = 0
        if fgc_time >= fgc_warning_time:
            fgc_flag = 1
            logger.logger.warning(f"Average FGC time for Tomcat ({port}): {fgc_time}")
        warning_flag = warning.warning(logger, db, fgc_flag, port, "fgc", warning_interval)
        if warning_flag:
            warning_msg = f"Tomcat warning:\nAverage FGC time for Tomcat ({port}) is {fgc_time}\n"
            mail.send(logger, warning_msg, sender_alias, receive, subject, msg=f'tomcat{port}_fgc')
def cpu_record(log_file, log_level):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    logger.logger.debug("Recording CPU info...")
    sql = "insert into cpu values(?, ?, ?)"
    record_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    cpu_count = psutil.cpu_count()
    cpu_used_percent = psutil.cpu_percent(interval=5)
    db.update_one(sql, (record_time, cpu_count, cpu_used_percent))
def memory_record(log_file, log_level):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    logger.logger.debug("Recording memory info...")
    record_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    mem = psutil.virtual_memory()
    sql = "insert into memory values(?, ?, ?, ?, ?, ?)"
    # psutil.virtual_memory() field order: total, available, percent, used, free
    total, avail, used, used_percent, free = mem[0], mem[1], mem[3], mem[2], mem[4]
    db.update_one(sql, (record_time, total, avail, used, used_percent, free))
def swap_record(log_file, log_level):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    logger.logger.debug("Recording swap info...")
    record_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    sql = "insert into swap values(?, ?, ?, ?, ?)"
    swap_mem = psutil.swap_memory()
    # psutil.swap_memory() field order: total, used, free, percent
    total, used, used_percent, free = swap_mem[0], swap_mem[1], swap_mem[3], swap_mem[2]
    db.update_one(sql, (record_time, total, used, used_percent, free))
def running_analysis(log_file, log_level, warning_interval, notify_dict):
    logger = log.Logger(log_file, log_level)
    logger.logger.debug("Analyzing Redis running status...")
    db = database.db()
    sql = "select port, pid from redis_constant where record_time=(select max(record_time) from redis_constant)"
    port, pid = db.query_one(sql)
    flag = 1 if pid == 0 else 0  # whether to warn
    warning_flag = warning.warning(logger, db, flag, "redis", "running", warning_interval)
    if warning_flag:
        warning_msg = f"Redis warning:\nRedis ({port}) is not running"
        notification.send(logger, warning_msg, notify_dict, msg='redis_running')
def load_bitcoin_price(row):
    query = sql.SQL("""
        INSERT INTO btc_price_data (time_stamp, id_date, open, high, low, close, volume)
        VALUES ({time}, {id_date}, {open}, {high}, {low}, {close}, {volume})""").format(
        time=sql.Literal(row['time']),
        id_date=sql.Literal(row['id_date']),
        open=sql.Literal(row['open']),
        high=sql.Literal(row['high']),
        low=sql.Literal(row['low']),
        close=sql.Literal(row['close']),
        volume=sql.Literal(row['volume']))
    return db().execute(query)
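# A minimal usage sketch (assumptions: `sql` is psycopg2's sql module, `db()`
# returns an object with an .execute() method, and the sample row values are
# made up for illustration):
#
#   row = {'time': 1617235200, 'id_date': '2021-04-01', 'open': 58800.0,
#          'high': 59500.0, 'low': 58000.0, 'close': 58900.0, 'volume': 350.25}
#   load_bitcoin_price(row)
#
# Note: sql.Literal inlines the values into the statement text; for untrusted
# input, query parameters (sql.Placeholder plus execute(query, params)) are the
# safer choice.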
def init(data_file, init_file):
    """Initialize the data file."""
    os.makedirs(os.path.dirname(data_file), exist_ok=True)
    if not os.path.exists(data_file):
        logger.logger.info("Initializing data...")
        db = database.db(data_file)
        with open(init_file, "r") as f:
            for sql in f.readlines():
                db.update_one(sql)
        logger.logger.info("Data initialization complete.")
def clean_data(logger, keep_days):
    logger.logger.info("Cleaning up old data...")
    db = database.db()
    all_tables = db.query_all("select name from sqlite_master where type='table'")
    now_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    for i in all_tables:
        table = i[0]
        sql = f"pragma table_info('{table}')"
        columns = db.query_all(sql)
        for j in columns:
            # Only prune tables that have a record_time column.
            if j[1] == "record_time":
                sql = f"delete from {table} where record_time < datetime('{now_time}', '-{keep_days} day')"
                db.update_one(sql)
    logger.logger.info("Data cleanup finished.")
def analysis(log_file, log_level, warning_interval, sender_alias, receive, subject):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    logger.logger.debug("Analyzing user resource limits...")
    sql = "select user, nofile, nproc from users_limit where record_time=(select max(record_time) from users_limit)"
    data = db.query_all(sql)
    min_limit = 5000
    for i in data:
        flag = 0
        arg = "nofile"
        if i[1].isdigit():
            if int(i[1]) < min_limit:
                flag = 1
                cmd = f"echo '{i[0]} - {arg} 65536' >> /etc/security/limits.conf"
                warning_msg = "User resource limit warning:\n" \
                    f"The {arg} value ({i[1]}) for user ({i[0]}) is too low.\n" \
                    f"Please run as root: {cmd}, then log in to the user again and restart the relevant software under that user"
            warning_flag = warning.warning(logger, db, flag, f"{i[0]}_limit", arg, warning_interval)
            if warning_flag:
                mail.send(logger, warning_msg, sender_alias, receive, subject, msg=f"{i[0]}_limit nofile")
        flag = 0
        arg = "nproc"
        if i[2].isdigit():
            if int(i[2]) < min_limit:
                flag = 1
                cmd = f"echo '{i[0]} - {arg} 65536' >> /etc/security/limits.conf"
                warning_msg = "User resource limit warning:\n" \
                    f"The {arg} value ({i[2]}) for user ({i[0]}) is too low.\n" \
                    f"Please run as root: {cmd}, then log in to the user again and restart the relevant software under that user"
            warning_flag = warning.warning(logger, db, flag, f"{i[0]}_nproc_limit", arg, warning_interval)
            if warning_flag:
                mail.send(logger, warning_msg, sender_alias, receive, subject, msg=f"{i[0]}_limit nproc")
def record(log_file, log_level, user):
    logger = log.Logger(log_file, log_level)
    logger.logger.info(f"Recording resource limits for user {user}...")
    record_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    cmd = f'su - {user} -c "ulimit -n -u"'
    (status, message) = subprocess.getstatusoutput(cmd)
    if status == 0:
        # Output is two lines: "open files ... N" and "max user processes ... N".
        message = message.splitlines()
        nofile = message[0].split()[-1]
        nproc = message[1].split()[-1]
        db = database.db()
        sql = "insert into users_limit values(?, ?, ?, ?)"
        db.update_one(sql, (record_time, user, nofile, nproc))
    else:
        logger.logger.error(f"Command '{cmd}' failed")
def memory_analysis(log_file, log_level, warning_percent, warning_interval, notify_dict):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    sql = "select record_time, used_percent from memory order by record_time desc"
    data = db.query_one(sql)
    mem_used_percent = float(data[1])
    logger.logger.debug("Analyzing memory...")
    flag = 0  # whether there is a warning
    if mem_used_percent > warning_percent:
        flag = 1
        logger.logger.warning(f"Current memory usage has reached {mem_used_percent}%")
    warning_flag = warning.warning(logger, db, flag, "mem", "used_percent", warning_interval)
    if warning_flag:
        warning_msg = f"Memory warning:\nCurrent memory usage has reached {mem_used_percent}%"
        notification.send(logger, warning_msg, notify_dict, msg='mem_used_percent')
def disk_analysis(log_file, log_level, warning_percent, warning_interval, notify_dict):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    sql = "select record_time, name, used_percent, mounted from disk where record_time=(select max(record_time) from disk)"
    data = db.query_all(sql)
    logger.logger.debug("Analyzing disks...")
    for i in data:
        flag = 0  # whether there is a warning
        if i[2] >= warning_percent:
            flag = 1
            logger.logger.warning(f"Mount point {i[3]} ({i[1]}) is {i[2]}% used")
        warning_flag = warning.warning(logger, db, flag, "disk", i[3], warning_interval)
        if warning_flag:
            warning_msg = f"Disk warning:\nMount point {i[3]} ({i[1]}) is {i[2]}% used\n"
            notification.send(logger, warning_msg, notify_dict, msg=i[3])
def boot_time_record(log_file, log_level):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    logger.logger.debug("Recording server boot time...")
    record_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    boot_time = datetime.datetime.fromtimestamp(
        psutil.boot_time()).strftime("%Y-%m-%d %H:%M:%S")
    """
    # Only insert when the boot time has changed:
    sql="select boot_time from boot_time order by record_time desc limit 1;"
    data=db.query_one(sql)
    if data is None or data[0]!=boot_time:
        sql="insert into boot_time values(?, ?)"
        db.update_one(sql, (record_time, boot_time))
    """
    sql = "insert into boot_time values(?, ?)"
    db.update_one(sql, (record_time, boot_time))
def cpu_analysis(log_file, log_level, warning_percent, warning_interval, notify_dict):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    sql = "select record_time, cpu_used_percent from cpu order by record_time desc"
    data = db.query_one(sql)
    cpu_used_percent = float(data[1])
    logger.logger.debug("Analyzing CPU...")
    flag = 0  # whether there is a warning
    if cpu_used_percent >= warning_percent:
        flag = 1
        logger.logger.warning(f"Current CPU usage has reached {cpu_used_percent}%")
    warning_flag = warning.warning(logger, db, flag, "cpu", "used_percent", warning_interval)
    if warning_flag:
        warning_msg = f"CPU warning:\nCurrent CPU usage has reached {cpu_used_percent}%"
        notification.send(logger, warning_msg, notify_dict, msg='cpu_used_percent')
def tablespace_analysis(log_file, log_level, warning_percent, warning_interval, notify_dict):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    sql = "select tablespace_name, used_percent from oracle where record_time=(select max(record_time) from oracle)"
    data = db.query_all(sql)
    logger.logger.debug("Analyzing tablespaces...")
    for i in data:
        flag = 0  # whether there is a warning
        if i[1] >= warning_percent:
            flag = 1
            logger.logger.warning(f"Tablespace {i[0]} is {i[1]}% used")
        warning_flag = warning.warning(logger, db, flag, "oracle", i[0], warning_interval)
        if warning_flag:
            warning_msg = f"Oracle tablespace warning:\nTablespace {i[0]} is {i[1]}% used"
            notification.send(logger, warning_msg, notify_dict, msg=i[0])
def running_analysis(log_file, log_level, warning_interval, sender_alias, receive, subject):
    logger = log.Logger(log_file, log_level)
    logger.logger.debug("Analyzing MySQL running status...")
    db = database.db()
    sql = "select port, pid from mysql_constant where record_time=(select max(record_time) from mysql_constant)"
    port, pid = db.query_one(sql)
    flag = 1 if pid == 0 else 0  # whether to warn
    warning_flag = warning.warning(logger, db, flag, "mysql", "running", warning_interval)
    if warning_flag:
        warning_msg = f"MySQL warning:\nMySQL ({port}) is not running"
        mail.send(logger, warning_msg, sender_alias, receive, subject, msg='mysql_running')
def analysis(log_file, log_level, directory, warning_interval, notify_dict):
    """Check backup files and warn when:
    1. the backup directory does not exist;
    2. today's backup file has not been generated;
    3. today's backup file is smaller than 99% of the previous one.
    """
    logger = log.Logger(log_file, log_level)
    db = database.db()
    logger.logger.info(f"Analyzing files in backup directory {directory}...")
    sql = "select record_time, directory, filename, size, ctime from backup where directory=? order by record_time, ctime desc limit 2"
    data = db.query_all(sql, (directory, ))
    now_time = datetime.datetime.now().strftime("%Y-%m-%d")
    flag = 0  # whether there is a warning
    value = None
    if len(data) < 2:
        if data[0][2] is None:
            flag = 1
            value = "dir_is_None"
            warning_msg = f"Backup warning:\nBackup directory ({directory}) does not exist"
        else:
            if now_time not in data[0][4]:
                flag = 1
                warning_msg = f"Backup warning:\nToday's backup file in ({directory}) has not been generated"
                value = "file_is_None"
    else:
        if now_time not in data[0][4]:
            flag = 1
            warning_msg = f"Backup warning:\nToday's backup file in ({directory}) has not been generated"
            value = "file_is_None"
        elif data[0][3] < data[1][3] * 0.99:
            flag = 1
            warning_msg = f"Backup warning:\nToday's backup file in ({directory}) ({format_size(data[0][3])}) differs considerably from the previous one ({format_size(data[1][3])})"
            value = "file_is_small"
    warning_flag = warning.warning(logger, db, flag, f"backup {directory}", value, warning_interval)
    if warning_flag:
        notification.send(logger, warning_msg, notify_dict, msg=f"{directory} {value}")
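# `format_size` is used above and throughout the report code but is not defined
# in this section. A minimal sketch of what such a helper might look like (an
# assumption, not the project's actual implementation): convert a byte count
# into a human-readable string.
def format_size(size):
    """Convert a size in bytes to a human-readable string, e.g. 1536 -> '1.50K'."""
    for unit in ("B", "K", "M", "G", "T"):
        if abs(size) < 1024 or unit == "T":
            return f"{size:.2f}{unit}"
        size /= 1024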
def disk_record(log_file, log_level):
    logger = log.Logger(log_file, log_level)
    db = database.db()
    logger.logger.debug("Recording disk info...")
    record_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    disk_list = []
    all_disk = psutil.disk_partitions()
    for i in all_disk:
        disk_name = i[0]  # device
        mounted = i[1]    # mount point
        size = psutil.disk_usage(mounted)
        # psutil.disk_usage() field order: total, used, free, percent
        total = size[0]
        used = size[1]
        used_percent = size[3]
        free = size[2]
        disk_list.append((record_time, disk_name, total, used, used_percent, free, mounted))
    sql = "insert into disk values(?, ?, ?, ?, ?, ?, ?)"
    db.update_all(sql, disk_list)
def record(log_file, log_level, directory, regular):
    logger = log.Logger(log_file, log_level)
    logger.logger.info(f"Recording info for backup directory {directory}...")
    record_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    backup_info = []
    if os.path.exists(directory):
        flag = 0
        for i in os.listdir(directory):
            filename = f"{directory}/{i}"
            if os.path.isfile(filename) and filename.endswith(regular):
                size = os.path.getsize(filename)
                ctime = datetime.datetime.fromtimestamp(
                    os.path.getctime(filename)).strftime("%Y-%m-%d %H:%M:%S")
                backup_info.append((record_time, directory, i, size, ctime))
                flag = 1
        if flag == 0:
            # Directory exists but contains no matching backup files.
            backup_info.append((record_time, directory, None, None, None))
    else:
        backup_info.append((record_time, directory, None, None, None))
    db = database.db()
    delete_sql = "delete from backup where directory=?"
    db.update_one(delete_sql, [directory])
    sql = "insert into backup values(?, ?, ?, ?, ?)"
    db.update_all(sql, backup_info)
def record(log_file, log_level):
    logger = log.Logger(log_file, log_level)
    logger.logger.debug("Recording tablespace info...")
    db = database.db()
    record_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Tablespace usage
    sql = """
    set heading off trimspool on feedback off pagesize 0 linesize 1000
    SELECT a.tablespace_name,
           a.bytes,
           ( a.bytes - b.bytes ),
           b.bytes,
           Round(( ( a.bytes - b.bytes ) / a.bytes ) * 100, 2)
    FROM   (SELECT tablespace_name, SUM(bytes) bytes
            FROM dba_data_files
            GROUP BY tablespace_name) a,
           (SELECT tablespace_name, SUM(bytes) bytes, Max(bytes) largest
            FROM dba_free_space
            GROUP BY tablespace_name) b
    WHERE  a.tablespace_name = b.tablespace_name
    ORDER  BY ( ( a.bytes - b.bytes ) / a.bytes ) DESC;
    """
    cmd = f"su - oracle -c 'sqlplus -S / as sysdba <<EOF\n{sql}\nEOF'"
    (status, message) = subprocess.getstatusoutput(cmd)
    if status == 0:
        data_list = []
        for i in message.splitlines():
            i = i.split()
            data_list.append((record_time, i[0], i[1], i[2], i[4], i[3]))
        sql = "insert into oracle values(?, ?, ?, ?, ?, ?)"
        db.update_all(sql, data_list)
    else:
        sql = "insert into error values(?, ?, ?, ?, ?)"
        db.update_one(sql, (record_time, 'Oracle', 'connect', 'Cannot connect to Oracle', 0))
def main():
    # Initialization: if the data file does not exist, initialize it; otherwise start normally.
    data_file = "./data/auto.db"
    if not os.path.exists(data_file):
        init_file = "./share/init.sql"
        os.makedirs(os.path.dirname(data_file), exist_ok=True)
        logger.logger.info("Initializing data...")
        db = database.db(data_file)
        with open(init_file, "r") as f:
            for sql in f.readlines():
                db.update_one(sql)
        logger.logger.info("Data initialization complete.")
    """
    try:
        db=database.db(data_file)
        with open(init_file, "r") as f:
            for sql in f.readlines():
                db.update_one(sql)
        logger.logger.info("Data initialization complete.")
    except Exception as e:
        print(f"Error: initialization failed ({e})")
        exit()
    """
    config_to_db("./conf/autocheck.conf")
    daemonize('logs/autocheck.pid', rootdir)
    check_item = [record.record, show.show, analysis.analysis, clean.clean]
    gevent_list = []
    for i in check_item:
        g = gevent.spawn(i)
        gevent_list.append(g)
    gevent.joinall(gevent_list)
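# `daemonize` is called above but defined elsewhere in the project. A minimal
# sketch of a conventional double-fork daemonizer (an assumption about what the
# helper does; the real implementation may differ):
import os
import sys

def daemonize(pid_file, workdir):
    """Detach from the terminal, switch to workdir, and write the daemon's pid."""
    if os.fork() > 0:   # first fork: the parent exits
        sys.exit(0)
    os.setsid()         # become session leader, detach from the controlling tty
    if os.fork() > 0:   # second fork: ensure we can never re-acquire a tty
        sys.exit(0)
    os.chdir(workdir)
    os.umask(0)
    with open(pid_file, "w") as f:
        f.write(str(os.getpid()))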
from flask import Blueprint
import ConfigParser
from StringIO import StringIO
import os
import random

from models.messages import PostForm
from decorator import login_required
from lib import s3
from lib.database import db
from lib import form
from lib import pagination

flash_config = ConfigParser.ConfigParser()
flash_config.read('config/flash.cfg')

database = db()
database.init_db()

home_blueprint = Blueprint('home', __name__, template_folder='templates', static_folder='static')
msgs_in_each_page = pagination.msgs_in_each_page

# Characters that must be escaped in HTML output.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&#39;",
    ">": "&gt;",
    "<": "&lt;",
}

def html_escape(text):
    return "".join(html_escape_table.get(c, c) for c in text)
def resource_show(hostname, check_dict, granularity_level, sender_alias, receive, subject):
    log_file, log_level = log.get_log_args()
    logger = log.Logger(log_file, log_level)
    db = database.db()
    now_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    modifier = "-24 hour"
    message = ""

    # Reset the report directory
    report_dir = "report"
    shutil.rmtree(report_dir, ignore_errors=True)
    os.makedirs(report_dir, exist_ok=True)

    logger.logger.info("Compiling resource statistics...")
    printf(f"Report start time: {now_time}")
    printf(f"Hostname: {hostname}")
    printf("-" * 100)

    # System boot time
    sql = "select boot_time from boot_time order by record_time desc"
    boot_time = db.query_one(sql)[0]
    printf(f"System boot time: {boot_time}")
    printf("*" * 100)

    # Disk
    logger.logger.info("Compiling disk records...")
    printf("Disk statistics:")
    sql = "select distinct mounted from disk"
    disk_names = db.query_all(sql)
    disk_granularity_level = int(60 / int(check_dict['host_check'][0]) * granularity_level)
    disk_granularity_level = disk_granularity_level if disk_granularity_level != 0 else 1
    for i in disk_names:
        i = i[0]
        table = pt.PrettyTable(
            ["Record Time", "Mount Point", "Disk Name", "Disk Size", "Used", "Used %", "Available"])
        sql = f"select record_time, name, total, used, used_percent, avail from disk " \
              f"where mounted=? " \
              f"and record_time > datetime('{now_time}', '{modifier}') " \
              f"order by record_time"
        disk_data = db.query_all(sql, (i, ))
        for index, item in enumerate(disk_data):
            if index % disk_granularity_level == 0 or index == 0:
                total = format_size(item[2])
                used = format_size(item[3])
                used_percent = f"{item[4]}%"
                avail = format_size(item[5])
                table.add_row((item[0], i, item[1], total, used, used_percent, avail))
        printf(f"Disk statistics for {i}:")
        printf(table)
        printf("*" * 100)

    # CPU
    logger.logger.info("Compiling CPU records...")
    printf("CPU statistics:")
    cpu_granularity_level = int(60 / int(check_dict['host_check'][1]) * granularity_level)
    cpu_granularity_level = cpu_granularity_level if cpu_granularity_level != 0 else 1
    table = pt.PrettyTable(["Record Time", "CPU Cores", "CPU Usage"])
    sql = f"select record_time, cpu_count, cpu_used_percent from cpu " \
          f"where record_time > datetime('{now_time}', '{modifier}') " \
          f"order by record_time"
    cpu_data = db.query_all(sql)
    for index, item in enumerate(cpu_data):
        if index % cpu_granularity_level == 0 or index == 0:
            used_percent = f"{item[2]}%"
            table.add_row((item[0], item[1], used_percent))
    printf(table)
    printf("*" * 100)

    # Memory
    logger.logger.info("Compiling memory records...")
    printf("Memory statistics:")
    mem_granularity_level = int(60 / int(check_dict['host_check'][2]) * granularity_level)
    mem_granularity_level = mem_granularity_level if mem_granularity_level != 0 else 1
    table = pt.PrettyTable(
        ["Record Time", "Total Memory", "Available (avail)", "Used", "Used %", "Free"])
    sql = f"select record_time, total, avail, used, used_percent, free from memory " \
          f"where record_time > datetime('{now_time}', '{modifier}') " \
          f"order by record_time"
    mem_data = db.query_all(sql)
    for index, item in enumerate(mem_data):
        if index % mem_granularity_level == 0 or index == 0:
            total = format_size(item[1])
            avail = format_size(item[2])
            used = format_size(item[3])
            used_percent = f"{item[4]}%"
            free = format_size(item[5])
            table.add_row((item[0], total, avail, used, used_percent, free))
    printf(table)
    printf("*" * 100)

    # Swap
    logger.logger.info("Compiling swap records...")
    printf("Swap statistics:")
    swap_granularity_level = int(60 / int(check_dict['host_check'][3]) * granularity_level)
    swap_granularity_level = swap_granularity_level if swap_granularity_level != 0 else 1
    table = pt.PrettyTable(["Record Time", "Swap Size", "Used", "Used %", "Free"])
    sql = f"select record_time, total, used, used_percent, free from swap " \
          f"where record_time > datetime('{now_time}', '{modifier}') " \
          f"order by record_time"
    swap_data = db.query_all(sql)
    for index, item in enumerate(swap_data):
        if index % swap_granularity_level == 0 or index == 0:
            total = format_size(item[1])
            used = format_size(item[2])
            used_percent = f"{item[3]}%"
            free = format_size(item[4])
            table.add_row((item[0], total, used, used_percent, free))
    printf(table)
    printf("*" * 100)

    # Tomcat
    if check_dict["tomcat_check"][0] == "1":
        logger.logger.info("Compiling Tomcat records...")
        printf("Tomcat statistics:")
        tomcat_granularity_level = int(60 / int(check_dict['tomcat_check'][1]) * granularity_level)
        tomcat_granularity_level = tomcat_granularity_level if tomcat_granularity_level != 0 else 1
        version = db.query_one("select version from tomcat_java_version")[0]
        printf(f"Java version: {version}")
        printf("*" * 100)
        #sql="select distinct port from tomcat_constant"
        #tomcat_ports=db.query_all(sql)
        tomcat_ports = conf.get("tomcat", "tomcat_port")[0].split(",")
        tomcat_constant_data = []
        for i in tomcat_ports:
            port = int(i.strip())
            constant_sql = f"select record_time, pid, port, boot_time, cmdline from tomcat_constant " \
                           f"where port=? " \
                           f"and '{now_time}' >= record_time " \
                           f"order by record_time desc"
            variable_sql = f"select record_time, pid, men_used, mem_used_percent, connections, threads_num from tomcat_variable " \
                           f"where port=? " \
                           f"and record_time > datetime('{now_time}', '{modifier}') " \
                           f"order by record_time"
            if version == "8":
                jvm_sql = f"select record_time, S0, S1, E, O, M, CCS, YGC, YGCT, FGC, FGCT, GCT from tomcat_jstat8 " \
                          f"where port=? " \
                          f"and record_time > datetime('{now_time}', '{modifier}') " \
                          f"order by record_time"
                jvm_table = pt.PrettyTable([
                    "Record Time", "S0", "S1", "E", "O", "M", "CCS",
                    "YGC", "YGCT", "FGC", "FGCT", "GCT"
                ])
            elif version == "7":
                jvm_sql = f"select record_time, S0, S1, E, O, P, YGC, YGCT, FGC, FGCT, GCT from tomcat_jstat7 " \
                          f"where port=? " \
                          f"and record_time > datetime('{now_time}', '{modifier}') " \
                          f"order by record_time"
                jvm_table = pt.PrettyTable([
                    "Record Time", "S0", "S1", "E", "O", "P",
                    "YGC", "YGCT", "FGC", "FGCT", "GCT"
                ])
            constant_table = pt.PrettyTable(
                ["Record Time", "Pid", "Port", "Boot Time", "Command Line"])
            tomcat_constant_data = db.query_one(constant_sql, (port, ))
            constant_table.add_row(tomcat_constant_data)
            variable_table = pt.PrettyTable(
                ["Record Time", "Pid", "Memory Used", "Memory Used %", "Connections", "Threads"])
            tomcat_variable_data = db.query_all(variable_sql, (port, ))
            for index, item in enumerate(tomcat_variable_data):
                if index % tomcat_granularity_level == 0 or index == 0:
                    mem_used = format_size(item[2])
                    mem_used_percent = f"{item[3]:.2f}%"
                    variable_table.add_row(
                        (item[0], item[1], mem_used, mem_used_percent, item[4], item[5]))
            tomcat_jvm_data = db.query_all(jvm_sql, (port, ))
            for index, item in enumerate(tomcat_jvm_data):
                if index % tomcat_granularity_level == 0 or index == 0:
                    jvm_table.add_row(item)
            printf(f"Statistics for Tomcat ({port}):")
            printf("Startup info:")
            printf(constant_table)
            printf("Runtime info:")
            printf(variable_table)
            printf("JVM memory info:")
            printf(jvm_table)
            printf("*" * 100)

    # Redis
    if check_dict["redis_check"][0] == "1":
        logger.logger.info("Compiling Redis records...")
        printf("Redis statistics:")
        redis_granularity_level = int(60 / int(check_dict['redis_check'][1]) * granularity_level)
        redis_granularity_level = redis_granularity_level if redis_granularity_level != 0 else 1
        printf("*" * 100)
        constant_sql = f"select record_time, pid, port, boot_time from redis_constant " \
                       f"where '{now_time}' >= record_time " \
                       f"order by record_time desc"
        variable_sql = f"select record_time, pid, mem_used, mem_used_percent, connections, threads_num from redis_variable " \
                       f"where record_time > datetime('{now_time}', '{modifier}') " \
                       f"order by record_time"
        # Startup info
        constant_table = pt.PrettyTable(["Record Time", "Pid", "Port", "Boot Time"])
        constant_data = db.query_one(constant_sql)
        constant_table.add_row(constant_data)
        # Runtime info
        variable_table = pt.PrettyTable(
            ["Record Time", "Pid", "Memory Used", "Memory Used %", "Connections", "Threads"])
        variable_data = db.query_all(variable_sql)
        for index, item in enumerate(variable_data):
            if index % redis_granularity_level == 0 or index == 0:
                mem_used = format_size(item[2])
                mem_used_percent = f"{item[3]:.2f}%"
                variable_table.add_row((item[0], item[1], mem_used, mem_used_percent, item[4], item[5]))
        # Master-slave info
        role = db.query_one("select role from redis_role")[0]
        if role == "master":
            master_slave_sql = "select a.record_time, connected_slave, slave_ip, slave_port, slave_state from redis_master a, redis_slaves_info b on a.record_time=b.record_time where a.record_time=(select max(record_time) from redis_master)"
            master_slave_table = pt.PrettyTable(
                ["Record Time", "Slave Count", "Slave IP", "Slave Port", "Slave State"])
            master_slave_data = db.query_all(master_slave_sql)
            for i in master_slave_data:
                master_slave_table.add_row(i)
        elif role == "slave":
            master_slave_sql = "select record_time, pid, master_host, master_port, master_link_status from redis_slave order by record_time desc"
            master_slave_table = pt.PrettyTable(
                ["Record Time", "Pid", "Master Host", "Master Port", "Master Link Status"])
            master_slave_data = db.query_one(master_slave_sql)
            master_slave_table.add_row(master_slave_data)
        # Sentinel monitoring info
        sentinel_sql = "select a.record_time, role, host, a.port from redis_sentinel a, redis_constant b on a.record_time=b.record_time where b.record_time=(select max(record_time) from redis_constant)"
        sentinel_table = pt.PrettyTable(["Record Time", "Role", "IP", "Port"])
        sentinel_data = db.query_all(sentinel_sql)
        for i in sentinel_data:
            sentinel_table.add_row(i)
        printf("Startup info:")
        printf(constant_table)
        printf("Runtime info:")
        printf(variable_table)
        printf("Cluster info:")
        printf(f"Current role: {role}")
        printf(master_slave_table)
        printf("Sentinel monitoring info:")
        printf(sentinel_table)
        printf("*" * 100)

    # Backup
    if check_dict["backup_check"] == "1":
        logger.logger.info("Compiling backup records...")
        printf("Backup statistics:")
        backup_dirs = conf.get("backup", "dir")[0].split(",")
        for i in backup_dirs:
            directory = i.strip()
            table = pt.PrettyTable(["Record Time", "Backup File", "Size", "Created"])
            sql = f"select record_time, filename, size, ctime from backup " \
                  f"where directory=? " \
                  f"order by ctime"
            backup_data = db.query_all(sql, (directory, ))
            for j in backup_data:
                if j[2] is not None:
                    size = format_size(j[2])
                    table.add_row((j[0], j[1], size, j[3]))
            printf(f"Statistics for backup ({directory}):")
            printf(table)
            printf("*" * 100)

    # MySQL
    if check_dict["mysql_check"][0] == "1":
        logger.logger.info("Compiling MySQL records...")
        printf("MySQL statistics:")
        mysql_granularity_level = int(60 / int(check_dict['mysql_check'][1]) * granularity_level)
        mysql_granularity_level = mysql_granularity_level if mysql_granularity_level != 0 else 1
        printf("*" * 100)
        constant_sql = f"select record_time, pid, port, boot_time from mysql_constant " \
                       f"where '{now_time}' >= record_time " \
                       f"order by record_time desc"
        variable_sql = f"select record_time, pid, mem_used, mem_used_percent, connections, threads_num from mysql_variable " \
                       f"where record_time > datetime('{now_time}', '{modifier}') " \
                       f"order by record_time"
        # Startup info
        constant_table = pt.PrettyTable(["Record Time", "Pid", "Port", "Boot Time"])
        constant_data = db.query_one(constant_sql)
        constant_table.add_row(constant_data)
        # Runtime info
        variable_table = pt.PrettyTable(
            ["Record Time", "Pid", "Memory Used", "Memory Used %", "Connections", "Threads"])
        variable_data = db.query_all(variable_sql)
        for index, item in enumerate(variable_data):
            if index % mysql_granularity_level == 0 or index == 0:
                mem_used = format_size(item[2])
                mem_used_percent = f"{item[3]:.2f}%"
                variable_table.add_row((item[0], item[1], mem_used, mem_used_percent, item[4], item[5]))
        # Master-slave info
        role = db.query_one("select role from mysql_role")[0]
        if role == "master":
            master_slave_sql = "select record_time, pid, slave_num, binlog_do_db, binlog_ignore_db from mysql_master order by record_time desc"
            master_slave_table = pt.PrettyTable(
                ["Record Time", "Pid", "Slave Count", "Binlog_do_db", "Binlog_ignore_db"])
        elif role == "slave":
            master_slave_sql = "select record_time, pid, master_host, master_port, replicate_do_db, replicate_ignore_db, " \
                               "slave_io_thread, slave_io_state, slave_sql_thread, slave_sql_state, " \
                               "master_uuid, retrieved_gtid_set, executed_gtid_set, seconds_behind_master " \
                               "from mysql_slave order by record_time desc"
            master_slave_table = pt.PrettyTable([
                "Record Time", "Pid", "Master Host", "Master Port", "Replicated DBs", "Ignored DBs",
                "Slave_IO Thread", "Slave_IO State", "Slave_SQL Thread", "Slave_SQL State",
                "Master_UUID", "Retrieved GTID Set", "Executed GTID Set", "Seconds Behind Master"
            ])
        master_slave_data = db.query_one(master_slave_sql)
        if master_slave_data is not None:
            master_slave_table.add_row(master_slave_data)
        printf("Startup info:")
        printf(constant_table)
        printf("Runtime info:")
        printf(variable_table)
        printf("Cluster info:")
        printf(f"Current role: {role}")
        printf(master_slave_table)
        printf("*" * 100)
        # Slow query log
        printf("Slow query log info:")
        mysql_user, mysql_ip, mysql_port, mysql_password = conf.get(
            "mysql", "mysql_user", "mysql_ip", "mysql_port", "mysql_password")
        mysql_flag, msg = mysql.export_slow_log(
            logger, mysql_user, mysql_ip, mysql_password, mysql_port,
            f"{report_dir}/slow_analysis.log", f"{report_dir}/slow.log")
        if mysql_flag == 1:
            message = "The attachment contains the MySQL slow query log"
        printf(msg)
        printf("*" * 100)

    # Oracle tablespaces
    if check_dict["oracle_check"][0] == "1":
        logger.logger.info("Compiling Oracle tablespace records...")
        printf("Oracle tablespace statistics:")
        oracle_granularity_level = int(60 / int(check_dict['oracle_check'][1]) * granularity_level)
        oracle_granularity_level = oracle_granularity_level if oracle_granularity_level != 0 else 1
        sql = "select distinct tablespace_name from oracle"
        tablespace_names = db.query_all(sql)
        for i in tablespace_names:
            i = i[0]
            table = pt.PrettyTable(
                ["Record Time", "Tablespace Name", "Tablespace Size", "Used", "Used %", "Available"])
            sql = f"select record_time, size, used, used_percent, free from oracle " \
                  f"where tablespace_name=? " \
                  f"and record_time > datetime('{now_time}', '{modifier}') " \
                  f"order by record_time"
            tablespace_data = db.query_all(sql, (i, ))
            for index, item in enumerate(tablespace_data):
                if index % oracle_granularity_level == 0 or index == 0:
                    total = format_size(item[1])
                    used = format_size(item[2])
                    used_percent = f"{item[3]}%"
                    free = format_size(item[4])
                    table.add_row((item[0], i, total, used, used_percent, free))
            printf(f"Statistics for tablespace {i}:")
            printf(table)
            printf("*" * 100)
        # AWR report
        logger.logger.info("Generating AWR report...")
        printf("AWR report info:")
        awr_hours = conf.get("oracle", "awr_hours")[0]
        if oracle.generate_awr(int(awr_hours), report_dir) == 0:
            printf("See the awr.html file in the attachment")
        else:
            printf("Failed to generate the AWR report; please generate it manually")

    logger.logger.info("Finished compiling resource statistics...")
    printf("-" * 100)
    end_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    printf(f"Report end time: {end_time}")
    tar_file = tar_report(logger, report_dir)
    sender_alias, receive, subject = conf.get("mail", "sender", "receive", "subject")
    warning_msg = f"\nPlease see the attached statistics report.\n\n{message}"
    mail.send(logger, warning_msg, sender_alias, receive, subject, msg="report", attachment_file=tar_file)
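# `printf` and `tar_report` are used above but defined elsewhere in the project.
# A minimal sketch of what `printf` plausibly does (an assumption, not the
# project's actual implementation): append each statistic, including rendered
# PrettyTable objects, to the report file that later gets tarred and mailed.
def printf(content, report_file="report/report.txt"):
    """Append one item of report output to the report file."""
    with open(report_file, "a", encoding="utf-8") as f:
        f.write(f"{content}\n")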
from flask import Blueprint, render_template, request, redirect, url_for, session, flash
import hashlib
import time
import ConfigParser

from models.users import LoginForm
from decorator import login_not_required
from lib.database import db
from lib import form
from lib import google_openid

flash_config = ConfigParser.ConfigParser()
flash_config.read('config/flash.cfg')

database = db()
database.init_db()

login_blueprint = Blueprint('login', __name__, template_folder='templates', static_folder='static')


@login_blueprint.route("/google_login")
def google_login():
    auth_uri = google_openid.flow_step_1()
    return redirect(auth_uri)


@login_blueprint.route("/oauth2cb")
def oauth2cb():