Code Example #1
def check_linux(tags, linux_params):
    check_time = now()
    host = linux_params['hostname']
    port = linux_params['port']
    # create connection
    linux_conn, _ = LinuxBase(linux_params).connection()
    if linux_conn:
        checklog.logger.info('{}:开始获取Linux主机监控信息'.format(tags))
        # get linuxstat data
        linuxstat = LinuxStat(linux_params, linux_conn).get_linux()
        hostinfo = linuxstat['hostinfo']
        cpuinfo = linuxstat['cpuinfo']
        memtotal = linuxstat['Memtotal']
        ipinfo = linuxstat['ipinfo']
        load = linuxstat['load']
        cpustat = linuxstat['cpu']
        iostat = linuxstat['iostat']
        memstat = linuxstat['mem']
        vmstat = linuxstat['vmstat']
        tcpstat = linuxstat['tcpstat']
        netstat = linuxstat['net']
        procstat = linuxstat['proc']

        # total network in/out
        recv_kbps = round(sum([d['recv'] for d in netstat]), 2)
        send_kbps = round(sum([d['send'] for d in netstat]), 2)
        # total io
        read_mb = round(sum([d['rd_m_s'] for d in iostat]), 2)
        write_mb = round(sum([d['wr_m_s'] for d in iostat]), 2)
        iops = round(sum([d['io_s'] for d in iostat]), 2)
        # cpu used percent
        cpu_used = round(100 - cpustat['cpu_idle'], 2)
        # memory used percent
        mem_used = round((float(memstat['mem_used_mb']) /
                          (float(memtotal['memtotal']) / 1024)) * 100, 2)

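        # merge the local scalars (tags, host, port, check_time and the
        # derived totals above) with the collected stat dicts so they can
        # feed the SQL format template below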
        insert_data_values = {
            **locals(),
            **hostinfo,
            **cpuinfo,
            **memtotal,
            **ipinfo,
            **load,
            **cpustat,
            **memstat,
            **vmstat,
            **tcpstat,
            **procstat
        }

        print("cpu_speed:")

        insert_data_sql = "insert into linux_stat(tags,host,port,hostname,ipinfo,linux_version,updays,kernel,frame,cpu_mode,cpu_cache,processor,cpu_speed," \
                          "recv_kbps,send_kbps,load1,load5,load15,cpu_sys,cpu_iowait,cpu_user,cpu_used,memtotal,mem_used,mem_cache,mem_buffer,mem_free,mem_used_mb," \
                          "swap_used,swap_free,swapin,swapout,pgin,pgout,pgfault,pgmjfault,tcp_close,tcp_timewait,tcp_connected,tcp_syn,tcp_listen,iops,read_mb,write_mb," \
                          "proc_new,proc_running,proc_block,intr,ctx,softirq,status,check_time) " \
                          "values ('{tags}','{host}',{port},'{hostname}','{ipinfo}','{linux_version}',{updays},'{kernel}','{frame}','{cpu_mode}','{cpu_cache}','{processor}','{cpu_speed}'," \
                          "{recv_kbps},{send_kbps},{load1},{load5},{load15},{cpu_sys},{cpu_iowait},{cpu_user},{cpu_used},{memtotal},{mem_used},{mem_cache},{mem_buffer},{mem_free},{mem_used_mb}," \
                          "{swap_used},{swap_free},{swapin},{swapout},{pgin},{pgout},{pgfault},{pgmjfault},{tcp_close},{tcp_timewait},{tcp_connected},{tcp_syn},{tcp_listen},{iops},{read_mb},{write_mb}," \
                          "{proc_new},{proc_running},{proc_block},{intr},{ctx},{softirq},0,'{check_time}')"

        clear_table(tags, 'linux_stat')
        insert_sql = insert_data_sql.format(**insert_data_values)
        mysql_exec(insert_sql)
        archive_table(tags, 'linux_stat')

        # disk free
        clear_table(tags, 'linux_disk')
        diskfree_list = LinuxStat(linux_params, linux_conn).get_diskfree()
        for each in diskfree_list:
            dev, total_size, used_size, free_size, used_percent, mount_point = each
            print("used_percent>>>")
            used_percent = float(used_percent.replace('%', ''))

            insert_data_sql = '''insert into linux_disk(tags,host,dev,total_size,used_size,free_size,used_percent,mount_point,check_time) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)'''

            values = (tags, host, dev,
                      round(float(total_size) / 1024, 2),
                      round(float(used_size) / 1024, 2),
                      round(float(free_size) / 1024, 2),
                      used_percent, mount_point, now())
            mysql_exec(insert_data_sql, values)
        archive_table(tags, 'linux_disk')

        # io stat
        clear_table(tags, 'linux_io_stat')
        for each in iostat:
            insert_data_sql = "insert into linux_io_stat(tags,host,dev,rd_s,rd_avgkb,rd_m_s,rd_mrg_s,rd_cnc,rd_rt,wr_s,wr_avgkb,wr_m_s,wr_mrg_s,wr_cnc,wr_rt,busy,in_prg,io_s,qtime,stime,check_time)" \
                              " values ('{tags}','{host}','{dev}',{rd_s},{rd_avgkb},{rd_m_s},{rd_mrg_s},{rd_cnc},{rd_rt},{wr_s},{wr_avgkb},{wr_m_s},{wr_mrg_s},{wr_cnc},{wr_rt},{busy},{in_prg},{io_s},{qtime},{stime},'{check_time}')"
            insert_data_values = {**locals(), **each}
            insert_sql = insert_data_sql.format(**insert_data_values)
            mysql_exec(insert_sql)
        archive_table(tags, 'linux_io_stat')
    else:
        error_msg = "{}:linux主机连接失败".format(tags)
        checklog.logger.error(error_msg)
        checklog.logger.info('{}:写入linux_stat采集数据'.format(tags))
        clear_table(tags, 'linux_stat')
        sql = "insert into linux_stat(tags,host,port,status,check_time) values('{tags}','{host}',{port},1,'{check_time}')"
        sql = sql.format(**locals())
        mysql_exec(sql)
        archive_table(tags, 'linux_stat')
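A note on the SQL building above: the linux_stat insert interpolates values with str.format, while the linux_disk insert already passes a parameter tuple to mysql_exec. A minimal sketch of the status-row insert rewritten in that parameterized style, assuming mysql_exec(sql, values) binds %s placeholders exactly as it does in the linux_disk branch (insert_linux_status_row is a hypothetical helper, not part of the project):

def insert_linux_status_row(tags, host, port, check_time, status=0):
    # mirrors the columns used by the failure branch of check_linux
    sql = ("insert into linux_stat(tags,host,port,status,check_time) "
           "values (%s,%s,%s,%s,%s)")
    mysql_exec(sql, (tags, host, port, status, check_time))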
Code Example #2
File: check_mysql.py  Project: zjh1015/db_monitor
def check_mysql(tags, mysql_params):
    check_time = now()
    host = mysql_params['host']
    port = mysql_params['port']
    linux_params = {
        'hostname': mysql_params['host'],
        'port': mysql_params['sshport_os'],
        'username': mysql_params['user_os'],
        'password': mysql_params['password_os']
    }
    # create connection
    mysql_conn = MysqlBase(mysql_params).connection()

    if mysql_conn:
        checklog.logger.info('{}:开始获取MySQL数据库监控信息'.format(tags))
        # get mysqlstat data
        mysqlstat = MySQLStat(mysql_params, mysql_conn)
        mysqlstat.init_stat_vals()
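        # sample the status counters twice, one second apart, so that
        # format_stat() can presumably derive per-second rates (qps, tps, ...)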
        mysqlstat.get_mysql_stat()
        time.sleep(1)
        mysqlstat.get_mysql_stat()
        mysqldata = mysqlstat.format_stat()
        # get mysql params
        mysqlparams = mysqlstat.get_mysql_params()
        # derived metrics
        updays = round(float(mysqldata['uptime']) / 86400, 2)
        threads_waited = mysqlstat.get_threads_waited()
        total_rows, data_size, index_size = mysqlstat.get_totalsize()
        innodb_buffer_pool_read_requests = mysqldata[
            'innodb_buffer_pool_read_requests']
        innodb_buffer_pool_reads = mysqldata['innodb_buffer_pool_reads']
        innodb_buffer_pool_hit = (
            1 - innodb_buffer_pool_reads / innodb_buffer_pool_read_requests
        ) * 100 if innodb_buffer_pool_read_requests != 0 else 100

        checklog.logger.info('{}:写入mysql_stat采集数据'.format(tags))
        clear_table(tags, 'mysql_stat')

        insert_data_sql = "insert into mysql_stat(tags,host,port,version,updays,basedir,datadir,slow_query_log,slow_query_log_file,log_bin," \
                          "max_connections,max_connect_errors,total_rows,data_size,index_size,threads_connected,threads_running,threads_waited,threads_created,threads_cached," \
                          "qps,tps,bytes_received,bytes_sent,open_files_limit,open_files,table_open_cache,open_tables,key_buffer_size,sort_buffer_size,join_buffer_size," \
                          "key_blocks_unused,key_blocks_used,key_blocks_not_flushed,mysql_sel,mysql_ins,mysql_upd,mysql_del,select_scan,slow_queries," \
                          "key_read_requests,key_reads,key_write_requests,Key_writes,innodb_buffer_pool_size,innodb_buffer_pool_pages_total,innodb_buffer_pool_pages_data," \
                          "innodb_buffer_pool_pages_dirty,innodb_buffer_pool_pages_free,innodb_buffer_pool_hit,innodb_io_capacity," \
                          "innodb_read_io_threads,innodb_write_io_threads,innodb_rows_deleted,innodb_rows_inserted,innodb_rows_read,innodb_rows_updated," \
                          "innodb_row_lock_waits,innodb_row_lock_time_avg,innodb_buffer_pool_pages_flushed,innodb_data_read,innodb_data_written,innodb_data_reads," \
                          "innodb_data_writes,innodb_log_writes,innodb_data_fsyncs,innodb_os_log_written,status,check_time) " \
                          "values ('{tags}','{host}','{port}','{version}','{updays}','{basedir}','{datadir}','{slow_query_log}','{slow_query_log_file}','{log_bin}'," \
                          "{max_connections},{max_connect_errors},{total_rows},{data_size},{index_size},{threads_connected},{threads_running},{threads_waited},{threads_created},{threads_cached}," \
                          "{qps},{tps},{bytes_received},{bytes_sent},{open_files_limit},{open_files},{table_open_cache},{open_tables},{key_buffer_size},{sort_buffer_size},{join_buffer_size}," \
                          "{key_blocks_unused},{key_blocks_used},{key_blocks_not_flushed},{mysql_sel},{mysql_ins},{mysql_upd},{mysql_del},{select_scan},{slow_queries}," \
                          "{key_read_requests},{key_reads},{key_write_requests},{Key_writes},{innodb_buffer_pool_size},{innodb_buffer_pool_pages_total},{innodb_buffer_pool_pages_data}," \
                          "{innodb_buffer_pool_pages_dirty},{innodb_buffer_pool_pages_free},{innodb_buffer_pool_hit},{innodb_io_capacity}," \
                          "{innodb_read_io_threads},{innodb_write_io_threads},{innodb_rows_deleted},{innodb_rows_inserted},{innodb_rows_read},{innodb_rows_updated}," \
                          "{innodb_row_lock_waits},{innodb_row_lock_time_avg},{innodb_buffer_pool_pages_flushed},{innodb_data_read},{innodb_data_written},{innodb_data_reads}," \
                          "{innodb_data_writes},{innodb_log_writes},{innodb_data_fsyncs},{innodb_os_log_written},0,'{check_time}' )"

        insert_data_values = {**mysqldata, **mysqlparams, **locals()}
        insert_sql = insert_data_sql.format(**insert_data_values)
        mysql_exec(insert_sql)
        archive_table(tags, 'mysql_stat')

        # parse the error (alert) log
        get_mysql_alert(tags, mysql_params, linux_params)

        # parse the slow query log
        get_mysql_slowquery(tags, mysql_params, linux_params)
    else:
        error_msg = "{}:mysql数据库连接失败".format(tags)
        checklog.logger.error(error_msg)
        checklog.logger.info('{}:写入mysql_stat采集数据'.format(tags))
        clear_table(tags, 'mysql_stat')
        sql = "insert into mysql_stat(tags,host,port,status,check_time) values('{tags}','{host}',{port},1,'{check_time}')"
        sql = sql.format(**locals())
        mysql_exec(sql)
        archive_table(tags, 'mysql_stat')
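The InnoDB buffer pool hit rate computed near the top of check_mysql guards against a zero read-requests counter. The same computation as a small stand-alone helper (a sketch only; the argument names simply mirror the status variables read from mysqldata):

def buffer_pool_hit_percent(read_requests, physical_reads):
    # hit rate = 1 - physical reads / logical read requests, as a percentage;
    # an idle pool with no read requests yet is treated as a 100% hit rate
    if read_requests == 0:
        return 100.0
    return (1 - physical_reads / read_requests) * 100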
Code Example #3
File: check_oracle.py  Project: zxb1987/db_monitor
def check_oracle(tags, oracle_params):
    db_version = oracle_params['db_version']
    host = oracle_params['host']
    port = oracle_params['port']
    service_name = oracle_params['service_name']
    linux_params = {
        'hostname': oracle_params['host'],
        'port': oracle_params['sshport_os'],
        'username': oracle_params['user_os'],
        'password': oracle_params['password_os']
    }
    check_time = now()
    db_conn = OracleBase(oracle_params).connection()
    db_conn_cdb = (OracleBase(oracle_params).connection_cdb()
                   if db_version == 'Oracle12c' else db_conn)

    if db_conn and db_conn_cdb:
        # database monitoring info
        checklog.logger.info('{}:开始获取Oracle数据库监控信息'.format(tags))
        # database
        dbname, db_unique_name, database_role, open_mode, log_mode, dbid, flashback_on, platform, created = database_info(
            db_conn)
        # storage sizes
        datafile_size = round(get_datafile_size(db_conn)[0], 2)
        tempfile_size = round(get_tempfile_size(db_conn)[0], 2)
        archivelog_size = round(get_archivelog_size(db_conn)[0], 2)
        # instance
        inst_id, instance_name, hostname, startup_time, version = instance_info(
            db_conn)
        updays = (datetime.now() - startup_time).days
        # process / connection count
        max_process, current_process, process_used_percent = process_info(
            db_conn_cdb)
        # archive log usage
        archive_used_percent = get_archived(db_conn)
        # audit
        audit_trail = para(db_conn, 'audit_trail')
        is_rac = para(db_conn, 'cluster_database')
        # default undo tablespace
        undo_tablespace = para(db_conn, 'undo_tablespace')
        # Oraclestat
        oraclestat = OracleStat(oracle_params, db_conn)
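        # sample twice, one second apart; the second get_oracle_stat() call
        # presumably yields per-second deltas for the rate metrics (qps, tps, ...)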
        oraclestat.get_oracle_stat()
        time.sleep(1)
        oracle_data = oraclestat.get_oracle_stat()
        # status data
        oracle_osstat = oracle_data['os']
        oracle_stat = oracle_data['stat']
        oracle_wait = oracle_data['wait']
        oracle_sess = oracle_data['sess']
        oracle_mem = oracle_data['mem']
        oracle_load = oracle_data['load']
        # PGA usage percent
        pga_target_size, pga_used_size, pga_used_percent = pga(db_conn)
        if database_role == 'PHYSICAL STANDBY':
            adg_trans_lag, adg_trans_value = adg_trans(db_conn)
            adg_apply_lag, adg_apply_value = adg_apply(db_conn)
        else:
            adg_trans_lag = 'None'
            adg_apply_lag = 'None'
            adg_trans_value = 0
            adg_apply_value = 0

        # lock wait info
        lock_wait_res = get_lockwait_count(db_conn)
        dic_lock_wait = {each[0]: each[1] for each in lock_wait_res}
        enq_tx_row_lock_contention = dic_lock_wait.get(
            'enq: TX - row lock contention', 0)
        enq_tm_contention = dic_lock_wait.get('enq: TM - contention', 0)
        row_cache_lock = dic_lock_wait.get('row cache lock', 0)
        library_cache_lock = dic_lock_wait.get('library cache lock', 0)
        enq_tx_contention = dic_lock_wait.get('enq: TX - contention', 0)
        lock_wait_others = sum(each[1] for each in lock_wait_res) - (
            enq_tx_row_lock_contention + enq_tm_contention + row_cache_lock +
            library_cache_lock + enq_tx_contention)

        checklog.logger.info('{}:写入oracle_stat采集数据'.format(tags))
        clear_table(tags, 'oracle_stat')

        insert_data_values = {
            **locals(),
            **oracle_osstat,
            **oracle_wait,
            **oracle_load,
            **oracle_sess,
            **oracle_stat,
            **oracle_mem
        }

        insert_data_sql = "insert into oracle_stat(tags,host,port,service_name,hostname,platform,num_cpus,physical_memory,inst_id,instance_name,db_version," \
                          "dbid,created,dbname,db_unique_name,database_role,open_mode,updays,audit_trail,log_mode,is_rac,undo_tablespace,flashback_on," \
                          "datafile_size,tempfile_size,archivelog_size," \
                          "archive_used_percent,max_process,current_process,process_used_percent,pga_target_size,pga_used_size," \
                          "pga_used_percent,pga_size,sga_size,memory_used_percent,logons_cumulative,qps,tps,exec_count,user_commits,user_rollbacks," \
                          "consistent_gets,logical_reads,physical_reads,physical_writes,block_changes,redo_size,redo_writes,total_parse_count," \
                          "hard_parse_count,bytes_received,bytes_sent,io_throughput,total_sessions,active_sessions,active_trans_sessions," \
                          "blocked_sessions,dbtime,dbcpu,log_parallel_write_wait,log_file_sync_wait,log_file_sync_count," \
                          "db_file_scattered_read_wait,db_file_scattered_read_count,db_file_sequential_read_wait,db_file_sequential_read_count," \
                          "row_lock_wait_count,enq_tx_row_lock_contention,enq_tm_contention,row_cache_lock,library_cache_lock,enq_tx_contention,lock_wait_others," \
                          "adg_trans_lag,adg_apply_lag,adg_trans_value,adg_apply_value,status,check_time) " \
                          "values('{tags}','{host}',{port},'{service_name}','{hostname}','{platform}',{num_cpus},{physical_memory},{inst_id},'{instance_name}','{version}'," \
                          "{dbid},'{created}','{dbname}','{db_unique_name}','{database_role}','{open_mode}',{updays},'{audit_trail}','{log_mode}','{is_rac}','{undo_tablespace}','{flashback_on}'," \
                          "{datafile_size},{tempfile_size},{archivelog_size}," \
                          "{archive_used_percent},{max_process},{current_process},{process_used_percent},{pga_target_size},{pga_used_size}," \
                          "{pga_used_percent},{pga_size},{sga_size},{memory_used_percent},{logons_cumulative},{qps},{tps},{exec_count},{user_commits},{user_rollbacks}," \
                          "{consistent_gets},{logical_reads},{physical_reads},{physical_writes},{block_changes},{redo_size},{redo_writes},{total_parse_count}," \
                          "{hard_parse_count},{bytes_received},{bytes_sent},{io_throughput},{total_sessions},{active_sessions},{active_trans_sessions}," \
                          "{blocked_sessions},{dbtime},{dbcpu},{log_parallel_write_wait},{log_file_sync_wait},{log_file_sync_count}," \
                          "{db_file_scattered_read_wait},{db_file_scattered_read_count},{db_file_sequential_read_wait},{db_file_sequential_read_count}," \
                          "{row_lock_wait_count},{enq_tx_row_lock_contention},{enq_tm_contention},{row_cache_lock},{library_cache_lock},{enq_tx_contention},{lock_wait_others}," \
                          "'{adg_trans_lag}','{adg_apply_lag}',{adg_trans_value},{adg_apply_value},0,'{check_time}' )"

        insert_sql = insert_data_sql.format(**insert_data_values)
        mysql_exec(insert_sql)
        checklog.logger.info(
            '{}:获取Oracle数据库监控数据(数据库名:{} 数据库角色:{} 数据库状态:{})'.format(
                tags, dbname, database_role, open_mode))
        archive_table(tags, 'oracle_stat')

        # control file
        clear_table(tags, 'oracle_controlfile')
        controlfile_list = get_controlfile(db_conn)
        for each in controlfile_list:
            control_name, size = each
            insert_data_sql = "insert into oracle_controlfile(tags,host,port,service_name,name,size,check_time)" \
                              "values('{tags}','{host}',{port},'{service_name}','{control_name}',{size},'{check_time}')"
            insert_sql = insert_data_sql.format(**locals())
            mysql_exec(insert_sql)

        # redolog
        clear_table(tags, 'oracle_redolog')
        redolog_list = get_redolog(db_conn)
        for each in redolog_list:
            group_no, thread_no, type, sequence_no, size, archived, status, member = each
            insert_data_sql = "insert into oracle_redolog(tags,host,port,service_name,group_no,thread_no,type,sequence_no," \
                              "size,archived,status,member,check_time)" \
                                  "values('{tags}','{host}',{port},'{service_name}',{group_no},{thread_no},'{type}',{sequence_no}," \
                              "{size},'{archived}','{status}','{member}','{check_time}')"
            insert_sql = insert_data_sql.format(**locals())
            mysql_exec(insert_sql)

        # tablespaces
        clear_table(tags, 'oracle_tablespace')
        tbsinfo_list = tablespace(db_conn)
        for tbsinfo in tbsinfo_list:
            tablespace_name, datafile_count, total_size, free_size, used_size, max_free_size, percent_used, percent_free, used_mb = tbsinfo
            insert_data_sql = "insert into oracle_tablespace(tags,host,port,service_name,tablespace_name,datafile_count,total_size,free_size," \
                              "used_size,max_free_size,percent_used,percent_free,used_mb,check_time)" \
                              "values('{tags}','{host}',{port},'{service_name}','{tablespace_name}',{datafile_count},{total_size},{free_size}," \
                              "{used_size},{max_free_size},{percent_used},{percent_free},{used_mb},'{check_time}')"
            insert_sql = insert_data_sql.format(**locals())
            mysql_exec(insert_sql)
        archive_table(tags, 'oracle_tablespace')

        # temp tablespaces
        clear_table(tags, 'oracle_temp_tablespace')
        temptbsinfo_list = temp_tablespace(db_conn)
        for temptbsinfo in temptbsinfo_list:
            temptablespace_name, total_size, used_size, percent_used = temptbsinfo
            insert_data_sql = "insert into oracle_temp_tablespace(tags,host,port,service_name,temptablespace_name,total_size,used_size," \
                              "percent_used,check_time)" \
                              "values('{tags}','{host}',{port},'{service_name}','{temptablespace_name}',{total_size}," \
                              "{used_size},{percent_used},'{check_time}')"
            insert_sql = insert_data_sql.format(**locals())
            mysql_exec(insert_sql)
        archive_table(tags, 'oracle_temp_tablespace')

        # undo tablespaces
        clear_table(tags, 'oracle_undo_tablespace')
        undotbsinfo_list = get_undo_tablespace(db_conn)
        for undotbsinfo in undotbsinfo_list:
            undotablespace_name, total_size, used_size, percent_used = undotbsinfo
            insert_data_sql = "insert into oracle_undo_tablespace(tags,host,port,service_name,undotablespace_name,total_size,used_size," \
                              "percent_used,check_time)" \
                              "values('{tags}','{host}',{port},'{service_name}','{undotablespace_name}',{total_size}," \
                              "{used_size},{percent_used},'{check_time}')"
            insert_sql = insert_data_sql.format(**locals())
            mysql_exec(insert_sql)
        archive_table(tags, 'oracle_undo_tablespace')

        # table statistics analysis
        clear_table(tags, 'oracle_table_stats')
        oracletablestats_list = get_tab_stats(db_conn)
        for each in oracletablestats_list:
            owner, table_name, num_rows, change_pct, last_analyzed = each
            insert_data_sql = "insert into oracle_table_stats(tags,host,port,service_name,owner,table_name,num_rows,change_pct,last_analyzed,check_time) " \
                              "values('{tags}','{host}',{port},'{service_name}','{owner}','{table_name}',{num_rows},{change_pct},'{last_analyzed}','{check_time}')"
            insert_sql = insert_data_sql.format(**locals())
            mysql_exec(insert_sql)

        # parse the alert log
        get_oracle_alert(tags, db_conn, oracle_params, linux_params)

        db_conn.close()
    else:
        error_msg = "{}:数据库连接失败".format(tags)
        checklog.logger.error(error_msg)
        clear_table(tags, 'oracle_stat')
        checklog.logger.info('{}:写入oracle_stat采集数据'.format(tags))
        sql = "insert into oracle_stat(tags,host,port,service_name,status,check_time) values(%s,%s,%s,%s,%s,%s)"
        value = (tags, oracle_params['host'], oracle_params['port'],
                 oracle_params['service_name'], 1, check_time)
        mysql_exec(sql, value)
        archive_table(tags, 'oracle_stat')
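All three checkers end with the same failure path: log the error, clear the *_stat table, insert a single row with status=1, then archive it. A hedged sketch of how that could be factored into one helper (record_connect_failure is hypothetical, not part of the project), assuming mysql_exec accepts a parameter tuple as it does elsewhere in these examples:

def record_connect_failure(tags, table, columns, values, check_time):
    # columns/values cover the identifying fields (tags, host, port, ...);
    # status=1 and check_time are appended here
    all_values = tuple(values) + (1, check_time)
    placeholders = ','.join(['%s'] * len(all_values))
    sql = "insert into {}({},status,check_time) values({})".format(
        table, ','.join(columns), placeholders)
    clear_table(tags, table)
    mysql_exec(sql, all_values)
    archive_table(tags, table)

For example, the MySQL branch would reduce to record_connect_failure(tags, 'mysql_stat', ['tags', 'host', 'port'], (tags, host, port), check_time).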