Example #1
def sqlserver_performance(database):
    query = "\nselect COUNTER_NAME,CNTR_VALUE from sys.dm_os_performance_counters where\n(object_name like '%sql statistics%' and counter_name = 'batch requests/sec') or\n(object_name like '%sql statistics%' and counter_name = 'sql compilations/sec') or\n(object_name like '%sql statistics%' and counter_name = 'sql re-compilations/sec') or\n(object_name like '%buffer manager%' and counter_name = 'lazy writes/sec') or\n(object_name like '%buffer manager%' and counter_name = 'page life expectancy') or\n(object_name like '%memory manager%' and counter_name = 'connection memory (kb)') or\n(object_name like '%memory manager%' and counter_name = 'memory grants pending') or\n(object_name like '%memory manager%' and counter_name = 'sql cache memory (kb)') or\n(object_name like '%memory manager%' and counter_name = 'target server memory (kb)') or\n(object_name like '%memory manager%' and counter_name = 'total server memory (kb)') or\n(object_name like '%access methods%' and counter_name = 'full scans/sec') or\n(object_name like '%access methods%' and counter_name = 'forwarded records/sec') or\n(object_name like '%access methods%' and counter_name = 'mixed page allocations/sec') or\n(object_name like '%access methods%' and counter_name = 'page splits/sec') or\n(object_name like '%access methods%' and counter_name = 'table lock escalations/sec') or\n(object_name like '%general statistics%' and counter_name = 'logins/sec') or\n(object_name like '%general statistics%' and counter_name = 'logouts/sec') or\n(object_name like '%general statistics%' and counter_name = 'user connections') or\n(object_name like '%general statistics%' and counter_name = 'processes blocked') or\n(object_name like '%latches%' and counter_name = 'latch waits/sec') or\n(object_name like '%latches%' and counter_name = 'average latch wait time (ms)') or\n(object_name like '%access methods%' and counter_name = 'workfiles created/sec') or\n(object_name like '%access methods%' and counter_name = 'worktables created/sec') or\n(object_name like '%general statistics%' and counter_name = 'active temp tables') or\n(object_name like '%general statistics%' and counter_name = 'temp tables creation rate') or\n(object_name like '%general statistics%' and counter_name = 'temp tables for destruction') or\n(object_name like '%databases%' and counter_name ='active transactions' and instance_name = '_Total') or\n(object_name like '%databases%' and counter_name ='Transactions/sec' and instance_name = '_Total') or\n(object_name like '%databases%' and counter_name ='log flushes/sec' and instance_name = '_Total') or\n(object_name like '%databases%' and counter_name ='cache hit ratio' and instance_name = '_Total') or\n(object_name like '%SQLServer:Locks%' and counter_name like '%Lock%' and instance_name = '_Total')"
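    # counter names containing '/sec' are cumulative and are turned into
    # per-second rates below; everything else is stored as a gauge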
    match_pattern = re.compile('/sec', re.IGNORECASE)
    date_current = get_10s_time()
    flag, json_data_current = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data_current)))
        return
    key = str(database.id) + ':performance'
    date_key = str(database.id) + ':performance_date'
    json_data_str_prev = redis.get(key)
    date_prev = redis.get(date_key)
    keys = ['COUNTER_NAME', 'CNTR_VALUE']
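    # cache the current sample and its timestamp so the next run can compute
    # deltas; samples older than MAX_INTERVAL are ignored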
    redis.set(key, json.dumps(json_data_current))
    redis.set(date_key, str(date_current))
    if json_data_str_prev and date_prev and (
            date_current - to_date(date_prev)).total_seconds() < MAX_INTERVAL:
        json_data_prev = json.loads(json_data_str_prev)
        for idx, obj in enumerate(json_data_current):
            name = obj.get(keys[0])
            value = obj.get(keys[1])
            p = Performance()
            p.name = name
            p.database = database
            p.created_at = date_current
            if match_pattern.search(name):
                p.value = round(
                    (float(value) - float(json_data_prev[idx].get(keys[1]))) /
                    INTERVAL, 1)
            else:
                p.value = float(value)
            p.save()
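These examples rely on a handful of helpers from the surrounding project (run_sql, redis, INTERVAL, MAX_INTERVAL, get_10s_time, to_date) that are not shown. A minimal sketch of the timing helpers, assuming a 10-second scheduler tick and timestamps that round-trip through str():

from datetime import datetime

INTERVAL = 10       # assumed collection period in seconds
MAX_INTERVAL = 120  # assumed staleness bound for cached samples

def get_10s_time():
    # current wall-clock time rounded down to the nearest 10-second boundary
    now = datetime.now()
    return now.replace(second=now.second - now.second % 10, microsecond=0)

def to_date(s):
    # parse the string written by str(get_10s_time()); Redis may hand back bytes
    if isinstance(s, bytes):
        s = s.decode()
    return datetime.strptime(s, '%Y-%m-%d %H:%M:%S')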
Example #2
def oracle_activity(database):
    if database.version == '10':
        query = "\n            select /*+ leading(b a)*/\n                a.inst_id,\n                SESSION_ID sid,\n                SESSION_SERIAL# serial,\n                SESSION_ID || ',' || SESSION_SERIAL# || '@'|| a.inst_id SESSION_ID,\n                (select username from dba_users u where u.user_id = a.user_id) username,\n                '' machine,\n                program,\n                --status,\n                case SQL_OPCODE\n                    when 1 then 'CREATE TABLE'\n                    when 2 then 'INSERT'\n                    when 3 then 'SELECT'\n                    when 6 then 'UPDATE'\n                    when 7 then 'DELETE'\n                    when 9 then 'CREATE INDEX'\n                    when 11 then 'ALTER INDEX'\n                    when 15 then 'ALTER INDEX' else 'Others' end command,\n                SQL_ID,\n                SQL_PLAN_HASH_VALUE,\n                nvl(event, 'ON CPU') event,\n                p1,\n                p2,\n                p3,\n                nvl(wait_class, 'ON CPU') wait_class ,\n                module,\n                action,\n                (select name from V$ACTIVE_SERVICES s where s.NAME_HASH = a.SERVICE_HASH) service_name,\n                '' plsql_object_name,\n                '' plsql_entry_object_name,\n                BLOCKING_SESSION,\n                BLOCKING_SESSION_SERIAL# BLOCKING_SESSION_SERIAL,\n                null SQL_PLAN_LINE_ID,\n                '' SQL_PLAN_OPERATION,\n                SESSION_TYPE,\n                (select SQL_TEXT from v$sql b where b.sql_id = a.sql_id and rownum =1) SQL_TEXT\n            from gv$ACTIVE_SESSION_HISTORY a\n            where a.SAMPLE_TIME between systimestamp - numtodsinterval(2,'SECOND') and systimestamp - numtodsinterval(1,'SECOND')\n            and nvl(a.wait_class,'ON CPU') <> 'Idle'"
    elif database.version >= '11':
        query = "\n            select /*+ leading(b a)*/\n                a.inst_id,\n                SESSION_ID sid,\n                SESSION_SERIAL# serial,\n                SESSION_ID || ',' || SESSION_SERIAL# || '@'|| a.inst_id SESSION_ID,\n                round((cast(sample_time as date)-a.sql_exec_start)*24*3600) SQL_ELAPSED_TIME,\n                (select username from dba_users u where u.user_id = a.user_id) username,\n                machine,\n                program,\n                --status,\n                case SQL_OPCODE\n                    when 1 then 'CREATE TABLE'\n                    when 2 then 'INSERT'\n                    when 3 then 'SELECT'\n                    when 6 then 'UPDATE'\n                    when 7 then 'DELETE'\n                    when 9 then 'CREATE INDEX'\n                    when 11 then 'ALTER INDEX'\n                    when 15 then 'ALTER INDEX' else 'Others' end command,\n                SQL_ID,\n                SQL_PLAN_HASH_VALUE,\n                nvl(event, 'ON CPU') event,\n                p1,\n                p2,\n                p3,\n                nvl(wait_class, 'ON CPU') wait_class,\n                module,\n                action,\n                (select name from V$ACTIVE_SERVICES s where s.NAME_HASH = a.SERVICE_HASH) SERVICE_NAME,\n                -- (select object_name from dba_objects s where s.object_id = a.PLSQL_OBJECT_ID) plsql_object_name,\n                -- (select object_name from dba_objects s where s.object_id = a.PLSQL_ENTRY_OBJECT_ID) plsql_entry_object_name,\n                '' plsql_object_name,\n                '' plsql_entry_object_name,\n                BLOCKING_SESSION,\n                BLOCKING_SESSION_SERIAL# BLOCKING_SESSION_SERIAL,\n                SQL_PLAN_LINE_ID,\n                SQL_PLAN_OPERATION || ' ' || SQL_PLAN_OPTIONS SQL_PLAN_OPERATION,\n                SESSION_TYPE,\n                (select sql_fulltext from v$sql b where b.sql_id = a.sql_id and rownum =1) SQL_TEXT\n            from gv$ACTIVE_SESSION_HISTORY a\n            where a.SAMPLE_TIME between systimestamp - numtodsinterval(2,'SECOND') and systimestamp - numtodsinterval(1,'SECOND')\n            and nvl(a.wait_class,'ON CPU') <> 'Idle'\n        "
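    # both queries sample gv$ACTIVE_SESSION_HISTORY rows from 1-2 seconds ago
    # and exclude idle waits (nvl(wait_class,'ON CPU') <> 'Idle')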
    ash_date = get_10s_time()
    flag, json_data = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return
    for x in json_data:
        ash = Oracle_ASH()
        ash.inst_id = x.get('INST_ID')
        ash.sid = x.get('SID')
        ash.serial = x.get('SERIAL')
        ash.username = x.get('USERNAME')
        ash.db_name = x.get('USERNAME')
        ash.machine = x.get('MACHINE')
        ash.program = x.get('PROGRAM')
        ash.status = x.get('STATUS')
        ash.command = x.get('COMMAND')
        ash.sql_hash_value = x.get('SQL_HASH_VALUE')
        ash.sql_id = x.get('SQL_ID')
        ash.sql_text = x.get('SQL_TEXT')
        ash.sql_plan_hash_value = x.get('SQL_PLAN_HASH_VALUE')
        ash.event = x.get('EVENT')
        ash.p1 = x.get('P1')
        ash.p2 = x.get('P2')
        ash.p3 = x.get('P3')
        ash.wait_class = x.get('WAIT_CLASS')
        ash.module = x.get('MODULE')
        ash.action = x.get('ACTION')
        ash.service_name = x.get('SERVICE_NAME')
        ash.plsql_object_name = x.get('PLSQL_OBJECT_NAME')
        ash.plsql_entry_object_name = x.get('PLSQL_ENTRY_OBJECT_NAME')
        ash.blocking_session = x.get('BLOCKING_SESSION')
        ash.blocking_session_serial = x.get('BLOCKING_SESSION_SERIAL')
        ash.sql_plan_line_id = x.get('SQL_PLAN_LINE_ID')
        ash.sql_plan_operation = x.get('SQL_PLAN_OPERATION')
        ash.session_type = x.get('SESSION_TYPE')
        ash.session_id = x.get('SESSION_ID')
        ash.sql_elapsed_time = x.get('SQL_ELAPSED_TIME')
        ash.created_at = ash_date
        ash.database = database
        try:
            ash.save()
        except Exception as e:
            logger.error(str(e))

    warn = WARN_ENUM.get(database.db_type).Active_Session_Warn
    p = Performance(inst_id=database.db_name,
                    name=warn.name,
                    value=len(json_data),
                    created_at=ash_date)
    customized_warn_scanner(warn, p, database, False)
Example #3
def db2_activity(database):
    query1 = "\n    SELECT distinct rtrim(app.db_name) DB_NAME,\n                app.agent_id,\n                app.appl_id,\n                app.appl_name,\n                app.appl_status,\n                app.authid,\n                t.activity_type,\n  (select cast(p.stmt_text as varchar(2000)) from  table(mon_get_pkg_cache_stmt(NULL, t.executable_id, NULL, -2)) as p FETCH FIRST 1 ROWS ONLY) stmt_text,\n                hex(t.EXECUTABLE_ID) EXECUTABLE_ID,\n                uow.ELAPSED_TIME_SEC,\n                round(uow.TOTAL_CPU_TIME/1000000) TOTAL_CPU_TIME,\n                uow.TOTAL_ROWS_READ,\n                uow.TOTAL_ROWS_RETURNED\nFROM table(wlm_get_workload_occurrence_activities(NULL, -2)) as t,\n     sysibmadm.applications app,\n     SYSIBMADM.MON_CURRENT_UOW uow\nWHERE\n  app.agent_id = t.application_handle\n  and t.application_handle = uow.application_handle\n  and app.appl_id != (values application_id())\n  and app.appl_status not in ('CONNECTED',\n                              'UOWWAIT')"
    query1_v97_base = "\n        SELECT\n        distinct rtrim(app.db_name) DB_NAME, app.agent_id, app.appl_id, app.appl_name, app.appl_status, app.authid,\n        t.activity_type, cast(p.stmt_text as varchar(2000)) stmt_text, hex(t.EXECUTABLE_ID) EXECUTABLE_ID\n    FROM table(wlm_get_workload_occurrence_activities_v97(NULL, -2)) as t,\n         table(mon_get_pkg_cache_stmt(NULL, NULL, NULL, -2)) as p,\n         sysibmadm.applications app\n    WHERE t.executable_id = p.executable_id\n        and app.agent_id = t.application_handle\n        and app.appl_id != (values application_id())\n        and app.appl_status not in ('CONNECTED','UOWWAIT')"
    query1_v97 = "\n    SELECT distinct rtrim(app.db_name) DB_NAME,\n                app.agent_id,\n                app.appl_id,\n                app.appl_name,\n                app.appl_status,\n                app.authid,\n                t.activity_type,\n  (select cast(p.stmt_text as varchar(2000)) from  table(mon_get_pkg_cache_stmt(NULL, t.executable_id, NULL, -2)) as p FETCH FIRST 1 ROWS ONLY) stmt_text,\n                hex(t.EXECUTABLE_ID) EXECUTABLE_ID,\n                uow.ELAPSED_TIME_SEC,\n                round(uow.TOTAL_CPU_TIME/1000000) TOTAL_CPU_TIME,\n                uow.TOTAL_ROWS_READ,\n                uow.TOTAL_ROWS_RETURNED\nFROM table(wlm_get_workload_occurrence_activities_v97(NULL, -2)) as t,\n     sysibmadm.applications app,\n     SYSIBMADM.MON_CURRENT_UOW uow\nWHERE\n  app.agent_id = t.application_handle\n  and t.application_handle = uow.application_handle\n  and app.appl_id != (values application_id())\n  and app.appl_status not in ('CONNECTED',\n                              'UOWWAIT')"
    query2 = "\n    SELECT\n        app.db_name, app.agent_id, app.appl_id, app.appl_name, app.appl_status, app.authid,\n        t.activity_type, (select VALUE from table(WLM_GET_ACTIVITY_DETAILS(t.application_handle,t.uow_id,t.activity_id,-2)) where name = 'STMT_TEXT') STMT_TEXT\n    FROM table(wlm_get_workload_occurrence_activities(cast(null as bigint), -1)) as t,\n         sysibmadm.applications app\n    WHERE app.agent_id = t.application_handle\n        and app.appl_id != (values application_id())\n        and app.appl_status not in ('CONNECTED','UOWWAIT')"
    ash_date = get_10s_time()
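    # pick the query variant for this DB2 release: v9.7 tries the UOW-enriched
    # query first and falls back to the simpler v9.7 form if it fails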
    if not database.is_v95_base():
        if database.is_v97():
            flag, json_data = run_sql(database, query1_v97)
            if not flag:
                flag, json_data = run_sql(database, query1_v97_base)
        else:
            flag, json_data = run_sql(database, query1)
    else:
        flag, json_data = run_sql(database, query2)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return
    for x in json_data:
        ash = DB2_ASH()
        ash.db_name = x.get('DB_NAME')
        ash.session_id = x.get('AGENT_ID')
        ash.machine = x.get('APPL_ID')
        ash.program = x.get('APPL_NAME')
        ash.appl_status = x.get('APPL_STATUS')
        ash.username = x.get('AUTHID').strip()
        ash.command = x.get('ACTIVITY_TYPE')
        ash.sql_text = x.get('STMT_TEXT')
        if not database.is_v95_base():
            ash.sql_id = x.get('EXECUTABLE_ID')
            ash.sql_elapsed_time = x.get('ELAPSED_TIME_SEC')
            ash.total_cpu_time = x.get('TOTAL_CPU_TIME')
            ash.rows_read = x.get('TOTAL_ROWS_READ')
            ash.rows_returned = x.get('TOTAL_ROWS_RETURNED')
        else:
            ash.sql_id = gen_sql_id(
                x.get('STMT_TEXT')) if x.get('STMT_TEXT') else None
        ash.created_at = ash_date
        ash.database = database
        ash.save()

    warn = WARN_ENUM.get(database.db_type).Active_Session_Warn
    p = Performance(inst_id=database.db_name,
                    name=warn.name,
                    value=len(json_data),
                    created_at=ash_date)
    customized_warn_scanner(warn, p, database, False)
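gen_sql_id (also used in Example #7) is another project helper that is not shown; a plausible stand-in, assuming it only needs to derive a stable id from the statement text:

import hashlib

def gen_sql_id(sql_text):
    # hypothetical sketch: stable id hashed from the normalized statement text
    return hashlib.md5(sql_text.strip().encode('utf-8')).hexdigest()[:16]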
Example #4
def db2_performance(database):
    query = '\n    select\n    TOTAL_CONS, APPLS_CUR_CONS, APPLS_IN_DB2, LOCKS_WAITING, NUM_ASSOC_AGENTS, ACTIVE_SORTS,\n    LOCKS_HELD, LOCK_WAITS,\n    TOTAL_SORTS, SORT_OVERFLOWS,\n    POOL_DATA_L_READS, POOL_TEMP_DATA_L_READS, POOL_INDEX_L_READS, POOL_TEMP_INDEX_L_READS, POOL_XDA_L_READS, POOL_TEMP_XDA_L_READS, POOL_DATA_P_READS, POOL_TEMP_DATA_P_READS,\n    POOL_INDEX_P_READS, POOL_TEMP_INDEX_P_READS, POOL_XDA_P_READS, POOL_TEMP_XDA_P_READS,\n    POOL_DATA_WRITES, POOL_INDEX_WRITES, POOL_XDA_WRITES,\n    DIRECT_READS, DIRECT_WRITES,\n    COMMIT_SQL_STMTS, ROLLBACK_SQL_STMTS, DYNAMIC_SQL_STMTS, STATIC_SQL_STMTS, FAILED_SQL_STMTS, SELECT_SQL_STMTS, UID_SQL_STMTS, DDL_SQL_STMTS,\n    ROWS_DELETED, ROWS_INSERTED, ROWS_UPDATED, ROWS_SELECTED, ROWS_READ,\n    LOG_READS, LOG_WRITES\n    from sysibmadm.snapdb'
    stats_list_realtime = [
        'APPLS_CUR_CONS', 'LOCKS_HELD', 'APPLS_IN_DB2', 'LOCKS_WAITING',
        'NUM_ASSOC_AGENTS', 'ACTIVE_SORTS'
    ]
    stats_list_delta = [
        'TOTAL_CONS', 'LOCK_WAITS', 'TOTAL_SORTS', 'SORT_OVERFLOWS',
        'POOL_DATA_L_READS', 'POOL_TEMP_DATA_L_READS', 'POOL_INDEX_L_READS',
        'POOL_TEMP_INDEX_L_READS', 'POOL_XDA_L_READS', 'POOL_TEMP_XDA_L_READS',
        'POOL_DATA_P_READS', 'POOL_TEMP_DATA_P_READS', 'POOL_INDEX_P_READS',
        'POOL_TEMP_INDEX_P_READS', 'POOL_XDA_P_READS', 'POOL_TEMP_XDA_P_READS',
        'POOL_DATA_WRITES', 'POOL_INDEX_WRITES', 'POOL_XDA_WRITES',
        'DIRECT_READS', 'DIRECT_WRITES', 'COMMIT_SQL_STMTS',
        'ROLLBACK_SQL_STMTS', 'DYNAMIC_SQL_STMTS', 'STATIC_SQL_STMTS',
        'FAILED_SQL_STMTS', 'SELECT_SQL_STMTS', 'UID_SQL_STMTS',
        'DDL_SQL_STMTS', 'ROWS_DELETED', 'ROWS_INSERTED', 'ROWS_UPDATED',
        'ROWS_SELECTED', 'ROWS_READ', 'LOG_READS', 'LOG_WRITES'
    ]
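    # stats_list_realtime entries are gauges saved as-is; stats_list_delta
    # entries are cumulative counters converted to per-second rates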
    date_current = get_10s_time()
    flag, json_data_current = run_sql(database, query)
    if not flag or not json_data_current:
        print(str(build_exception_from_java(json_data_current)))
        return
    json_data_current = json_data_current[0]
    key = str(database.id) + ':performance'
    date_key = str(database.id) + ':performance_date'
    json_data_str_prev = redis.get(key)
    date_prev = redis.get(date_key)
    redis.setex(key, MAX_INTERVAL, json.dumps(json_data_current))
    redis.setex(date_key, MAX_INTERVAL, str(date_current))
    if json_data_str_prev and date_prev and (
            date_current - to_date(date_prev)).total_seconds() < MAX_INTERVAL:
        json_data_prev = json.loads(json_data_str_prev)
        for name, value in json_data_current.items():
            p = Performance()
            p.name = name
            p.created_at = date_current
            p.database = database
            if name in stats_list_realtime:
                p.value = value
            elif name in stats_list_delta:
                p.value = (float(value) -
                           float(json_data_prev.get(name))) / INTERVAL
            else:
                # column belongs to neither list; skip instead of saving
                # a Performance row without a value
                continue
            p.save()
Example #5
def get_sqlserver_activity(databases):
    query = "\n            SELECT /* sample_query */\n            req.SESSION_ID,\n            convert(varchar(25), req.START_TIME, 120) START_TIME,\n            req.STATUS,\n            req.COMMAND,\n            (select name from master..sysdatabases where dbid = req.database_id) DB_NAME,\n            ses.LOGIN_NAME,\n            ses.HOST_NAME,\n            ses.PROGRAM_NAME,\n            req.BLOCKING_SESSION_ID,\n            req.WAIT_TYPE,\n            req.WAIT_TIME,\n            req.WAIT_RESOURCE,\n            req.TOTAL_ELAPSED_TIME,\n            req.ROW_COUNT,\n            sqltext.TEXT SQLTEXT,\n            substring(sys.fn_sqlvarbasetostr(req.sql_handle),3,1000) SQL_HANDLE,\n            con.CLIENT_NET_ADDRESS,\n            case when req.wait_resource like '%SPID%' then SUBSTRING(wait_resource, 1, CHARINDEX(' ', wait_resource)-1) else '' end LINKED_IP,\n            cast(case when req.wait_resource like '%SPID%' then SUBSTRING(wait_resource, CHARINDEX('=', wait_resource)+1, CHARINDEX(')', wait_resource)-CHARINDEX('=', wait_resource)-1) else '0' end as int) LINKED_SPID,\n            DATEDIFF(SECOND, req.START_TIME, getdate()) TIME\n            FROM sys.dm_exec_requests req\n            inner join sys.dm_exec_sessions ses on req.session_id = ses.session_id\n            inner join sys.dm_exec_connections con on ses.session_id = con.session_id\n            CROSS APPLY sys.dm_exec_sql_text(sql_handle) AS sqltext\n            where sqltext.TEXT not like '%sample_query%'"
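    # the /* sample_query */ marker lets the final WHERE clause exclude this
    # monitoring query itself from the captured activity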
    ash_date = get_10s_time()
    result_set = {}
    db_set = {}
    for db in databases:
        flag, json_data = run_sql(db, query)
        if not flag:
            print(str(build_exception_from_java(json_data)))
            continue
        result_set[str(db.id)] = json_data
        db_set[str(db.id)] = db

    for db_id, ash_data in result_set.items():
        for x in ash_data:
            ash = MSSQL_ASH()
            ash.session_id = x.get('SESSION_ID')
            ash.start_time = x.get('START_TIME')
            ash.status = x.get('STATUS').upper()
            ash.command = x.get('COMMAND')
            ash.db_name = x.get('DB_NAME')
            ash.username = x.get('LOGIN_NAME')
            ash.machine = x.get('HOST_NAME')
            ash.program = x.get('PROGRAM_NAME')
            ash.b_blocker = x.get('BLOCKING_SESSION_ID')
            ash.wait_type = x.get('WAIT_TYPE')
            ash.wait_time = x.get('WAIT_TIME')
            ash.wait_resource = x.get('WAIT_RESOURCE')
            ash.total_elapsed_time = x.get('TOTAL_ELAPSED_TIME')
            ash.row_count = x.get('ROW_COUNT')
            ash.sql_text = x.get('SQLTEXT')
            ash.sql_id = x.get('SQL_HANDLE')
            ash.client_net_address = x.get('CLIENT_NET_ADDRESS')
            ash.linked_ip = x.get('LINKED_IP')
            ash.linked_spid = x.get('LINKED_SPID')
            ash.sql_elapsed_time = x.get('TIME')
            ash.created_at = ash_date
            ash.database = db_set.get(db_id)
            ash.save()

        database = db_set.get(db_id)
        warn = WARN_ENUM.get(database.db_type).Active_Session_Warn
        p = Performance(inst_id=database.db_name,
                        name=warn.name,
                        value=len(ash_data),
                        created_at=ash_date)
        customized_warn_scanner(warn, p, database, False)
Example #6
def mysql_performance(database):
    query = "show global status where VARIABLE_NAME in (\n        'Queries', 'Questions','Com_delete','Com_insert','Com_select','Com_update',\n        'Bytes_received','Bytes_sent',\n        'Threads_connected','Connections',\n        'Select_full_join', 'Select_full_range_join', 'Select_range', 'Select_range_check', 'Select_scan',\n        'Sort_merge_passes','Sort_scan','Sort_range','Sort_rows',\n        'Created_tmp_disk_tables','Created_tmp_files','Created_tmp_tables',\n        'Innodb_data_writes','Innodb_log_writes','Innodb_os_log_written',\n        'Innodb_rows_read','Innodb_rows_inserted','Innodb_rows_updated','Innodb_rows_deleted',\n        'Innodb_data_reads','Innodb_buffer_pool_read_requests'\n    )"
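    # all of these status variables except Threads_connected are cumulative
    # counters and are converted to per-second rates below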
    none_delta_stats = ('Threads_connected',)
    date_current = get_10s_time()
    flag, json_data_current = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data_current)))
        return
    key = str(database.id) + ':performance'
    date_key = str(database.id) + ':performance_date'
    json_data_str_prev = redis.get(key)
    date_prev = redis.get(date_key)
    k1, k2 = json_data_current[0].keys()
    if re.search('name', k1, re.IGNORECASE):
        keys = [k1, k2]
    else:
        keys = [k2, k1]
    redis.setex(key, MAX_INTERVAL, json.dumps(json_data_current))
    redis.setex(date_key, MAX_INTERVAL, str(date_current))
    if json_data_str_prev and date_prev and (
            date_current - to_date(date_prev)).total_seconds() < MAX_INTERVAL:
        json_data_prev = json.loads(json_data_str_prev)
        for idx, obj in enumerate(json_data_current):
            name = obj.get(keys[0])
            value = obj.get(keys[1])
            p = Performance()
            p.name = name
            p.database = database
            p.created_at = date_current
            if name in none_delta_stats:
                if value is None:
                    print('value is None: ' + keys[1])
                    continue
                p.value = float(value)
            else:
                p.value = round(
                    (float(value) - float(json_data_prev[idx].get(keys[1]))) /
                    INTERVAL, 1)
            p.save()
Example #7
def mysql_activity(database):
    query = "SELECT * FROM information_schema.processlist\n        WHERE command != 'Sleep' and id != CONNECTION_ID()\n        and state not in\n          ('Master has sent all binlog to slave; waiting for binlog to be up','Slave has read all relay log; waiting for the slave I/O thread t','Waiting for master to send event')\n        ORDER BY id"
    state_list = [
        ('optimizing', 'preparing', 'statistics'),
        ('copy to tmp table', 'Copying to tmp table',
         'Copying to tmp table on disk', 'Creating tmp table',
         'removing tmp table'),
        ('Opening table', 'Opening tables', 'Reopen tables', 'Checking table',
         'closing tables', 'creating table', 'discard_or_import_tablespace',
         'Flushing tables'),
        ('Copying to group table', 'Sorting for group', 'Sorting for order',
         'Sorting index', 'Sorting result'),
        ('update', 'updating', 'updating main table',
         'updating reference tables', 'deleting from main table',
         'deleting from reference tables'),
        ('System lock', 'User lock', 'Waiting for commit lock',
         'Waiting for global read lock', 'Waiting for event metadata lock',
         'Waiting for schema metadata lock',
         'Waiting for stored function metadata lock',
         'Waiting for stored procedure metadata lock',
         'Waiting for table level lock', 'Waiting for table metadata lock',
         'Waiting for trigger metadata lock'),
        ('checking privileges on cached query',
         'checking query cache for query', 'invalidating query cache entries',
         'sending cached result to client', 'storing result in query cache',
         'Waiting for query cache lock'),
        ('Reading from net', 'Writing to net', 'Sending data'),
        ('Finished reading one binlog; switching to next binlog',
         'Sending binlog event to slave',
         'Master has sent all binlog to slave; waiting for binlog to be up',
         'Waiting to finalize termination'),
        ('Waiting for master update', 'Connecting to master',
         'Checking master version', 'Registering slave on master',
         'Requesting binlog dump',
         'Waiting to reconnect after a failed binlog dump request',
         'Reconnecting after a failed binlog dump request',
         'Waiting for master to send event',
         'Queueing master event to the relay log',
         'Waiting to reconnect after a failed master event read',
         'Reconnecting after a failed master event read',
         'Waiting for the slave SQL thread to free enough relay log space',
         'Waiting for slave mutex on exit', 'Waiting for its turn to commit'),
        ('Making temp file', 'Waiting for the next event in relay log',
         'Reading event from the relay log',
         'Slave has read all relay log; waiting for the slave I/O thread t',
         'Waiting for slave mutex on exit')
    ]
    wait_classes = [
        'Optimization', 'Tmp Table', 'Table Operation', 'Sort',
        'Update/Delete', 'Lock', 'Query Cache', 'Network', 'Master Thread',
        'I/O Thread', 'SQL Thread', 'Others'
    ]
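    # state_list[i] maps onto wait_classes[i]; any state not matched below
    # falls through to the final 'Others' class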
    ash_date = get_10s_time()
    flag, json_data = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return
    for x in json_data:
        ash = MySQL_ASH()
        ash.session_id = x.get('ID')
        ash.username = x.get('USER')
        ash.machine = x.get('HOST')
        ash.db_name = x.get('DB')
        ash.command = x.get('COMMAND')
        ash.sql_elapsed_time = x.get('TIME')
        ash.state = x.get('STATE')
        ash.sql_text = x.get('INFO')
        ash.sql_id = gen_sql_id(x.get('INFO')) if x.get('INFO') else None
        ash.created_at = ash_date
        ash.database = database
        others_flag = True
        for idx, val in enumerate(state_list):
            if ash.state in val:
                ash.wait_class = wait_classes[idx]
                others_flag = False
                break

        if others_flag:
            ash.wait_class = wait_classes[-1]  # 'Others'
        ash.save()

    warn = WARN_ENUM.get(database.db_type).Active_Session_Warn
    p = Performance(inst_id=database.db_name,
                    name=warn.name,
                    value=len(json_data),
                    created_at=ash_date)
    customized_warn_scanner(warn, p, database, False)
Example #8
def oracle_performance(database):
    query = {
        'stats':
        "\nselect inst_id, name, value\nfrom\n    (\n        select ss.inst_id\n        ,      sn.name\n        ,      ss.value\n        from   v$statname sn\n        ,      gv$sysstat  ss\n        where  sn.statistic# = ss.statistic#\n        and    sn.name in (\n        'execute count', 'logons cumulative',\n        'parse count (hard)', 'parse count (total)', 'parse count (failures)',\n        'physical read total IO requests', 'physical read total bytes',\n        'physical write total IO requests', 'physical write total bytes',\n        'redo size', --'session cursor cache hits',\n        'session logical reads', 'user calls', 'user commits', 'user rollbacks','logons current',\n        'gc cr blocks received','gc current blocks received',\n        'gc cr block receive time', 'gc current block receive time')\n        union all\n        select inst_id\n        ,      STAT_NAME\n        ,      VALUE\n        from gv$osstat\n        where STAT_NAME in ('BUSY_TIME','IDLE_TIME')\n        union all\n        select\n          (select min(INSTANCE_NUMBER) from gv$instance),\n          'SCN GAP Per Minute',\n          current_scn\n        from v$database\n    )\norder by 1,2",
        'wait':
        "\n        select inst_id, event, TIME_WAITED, TOTAL_WAITS\n        from gv$system_event\n        where\n        event in (\n            'log file sync',\n            'log file parallel write',\n            'db file parallel write',\n            'db file sequential read',\n            'db file scattered read',\n            'direct path read',\n            'direct path read temp'\n            )\n            order by 1,2",
        'dg':
        "\n        select\n            INST_ID,\n            NAME,\n            VALUE\n        from gv$dataguard_stats\n        where name in ('apply lag','transport lag')"
    }
    stats_list = {
        'session cursor cache hits': 'session cursor cache hits',
        'BUSY_TIME': 'Host CPU Utilization (%)',
        'physical write total IO requests':
        'Physical Write IO Requests Per Sec',
        'physical write total bytes': 'Physical Write Total Bytes Per Sec',
        'physical read total IO requests': 'Physical Read IO Requests Per Sec',
        'physical read total bytes': 'Physical Read Total Bytes Per Sec',
        'SCN GAP Per Minute': 'SCN GAP Per Minute',
        'execute count': 'Executions Per Sec',
        'logons cumulative': 'Logons Per Sec',
        'logons current': 'Session Count',
        'parse count (failures)': 'Parse Failure Count Per Sec',
        'parse count (hard)': 'Hard Parse Count Per Sec',
        'parse count (total)': 'Total Parse Count Per Sec',
        'redo size': 'Redo Generated Per Sec',
        'session logical reads': 'Logical Reads Per Sec',
        'user rollbacks': 'User Rollbacks Per Sec',
        'user calls': 'User Calls Per Sec',
        'user commits': 'User Commits Per Sec',
        'gc cr blocks received': 'GC CR Block Received Per Second',
        'gc current blocks received': 'GC Current Block Received Per Second',
        'gc cr block receive time': 'Global Cache Average CR Get Time',
        'gc current block receive time':
        'Global Cache Average Current Get Time'
    }
    none_delta_stats = ('BUSY_TIME', 'IDLE_TIME', 'gc cr block receive time',
                        'gc current block receive time', 'logons current')
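    # names in none_delta_stats are not plain per-second deltas: BUSY_TIME is
    # turned into a CPU percentage, the gc receive times into per-block
    # averages, and 'logons current' is stored as a raw gauge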
    date_current = get_10s_time()
    if not database.dg_stats:
        query.pop('dg')
    flag, json_data = run_batch_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return
    json_data_1_current = json_data.get('stats')
    json_data_2_current = json_data.get('wait')
    json_data_dg = json_data.get('dg')
    key1 = str(database.id) + ':performance1'
    key2 = str(database.id) + ':performance2'
    date_key = str(database.id) + ':performance_date'
    json_data_str_1 = redis.get(key1)
    json_data_str_2 = redis.get(key2)
    date_prev = redis.get(date_key)
    keys1 = ['NAME', 'VALUE']
    keys2 = ['EVENT', 'TIME_WAITED', 'TOTAL_WAITS']
    redis.setex(key1, MAX_INTERVAL, json.dumps(json_data_1_current))
    redis.setex(key2, MAX_INTERVAL, json.dumps(json_data_2_current))
    redis.setex(date_key, MAX_INTERVAL, str(date_current))
    if json_data_str_1 and json_data_str_2 and date_prev and (
            date_current - to_date(date_prev)).total_seconds() < MAX_INTERVAL:
        json_data_1_prev = json.loads(json_data_str_1)
        json_data_2_prev = json.loads(json_data_str_2)
        for idx, obj in enumerate(json_data_1_current):
            name = obj.get(keys1[0])
            if name == 'IDLE_TIME':
                # consumed together with BUSY_TIME below
                continue
            value = obj.get(keys1[1])
            inst_id = obj.get('INST_ID')
            p = Performance()
            p.name = stats_list.get(name)
            p.database = database
            p.created_at = date_current
            p.inst_id = inst_id
            delta = float(value) - float(json_data_1_prev[idx].get(keys1[1]))
            if name in none_delta_stats:
                if name == 'BUSY_TIME':
                    # host CPU %: busy delta over (busy + idle) delta;
                    # IDLE_TIME is the next row in the ordered result set
                    total = delta + float(
                        json_data_1_current[idx + 1].get(keys1[1])) - float(
                            json_data_1_prev[idx + 1].get(keys1[1]))
                    value = round(delta / (total if total != 0 else 1) * 100, 1)
                elif name in ('gc cr block receive time',
                              'gc current block receive time'):
                    # avg receive time in ms: times are in centiseconds, so
                    # 10 * time delta / blocks-received delta (the next row)
                    delta = 10.0 * delta
                    total = float(
                        json_data_1_current[idx + 1].get(keys1[1])) - float(
                            json_data_1_prev[idx + 1].get(keys1[1]))
                    value = round(delta / (total if total != 0 else 1), 1)
                p.value = value
            else:
                p.value = round(delta / INTERVAL, 1)
            p.save()

        for idx, obj in enumerate(json_data_2_current):
            name = obj.get(keys2[0])
            value1 = obj.get(keys2[1])
            value2 = obj.get(keys2[2])
            inst_id = obj.get('INST_ID')
            # avg wait per event in ms: TIME_WAITED is in centiseconds,
            # so 10 * TIME_WAITED delta / TOTAL_WAITS delta
            delta = 10.0 * (float(value1) - float(
                json_data_2_prev[idx].get(keys2[1])))
            total = float(value2) - float(
                json_data_2_prev[idx].get(keys2[2]))
            value = round(delta / (total if total != 0 else 1), 1)
            p = Performance()
            p.name = name
            p.database = database
            p.created_at = date_current
            p.inst_id = inst_id
            p.value = value
            p.save()

        if json_data_dg:
            for x in json_data_dg:
                p = Performance()
                p.name = x.get('NAME')
                p.inst_id = x.get('INST_ID')
                p.value = convert_oracle_interval_to_secodns(x.get('VALUE'))
                p.database = database
                p.created_at = date_current
                p.save()
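convert_oracle_interval_to_secodns (name kept as spelled in the source) is a project helper that turns the 'apply lag'/'transport lag' values from gv$dataguard_stats into seconds. A minimal sketch, assuming the usual '+DD HH:MI:SS' interval format:

def convert_oracle_interval_to_secodns(val):
    # assumes values like '+00 00:01:30'; returns None for missing lag values
    if not val:
        return None
    days, hms = val.strip().lstrip('+').split(' ')
    h, m, s = hms.split(':')
    return int(days) * 86400 + int(h) * 3600 + int(m) * 60 + int(s)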