def get_sql_text(database, sql_id):
    """Return a ``(sql_text, db_name)`` tuple for *sql_id* on *database*.

    Tries the cached ASH record first; on a miss, queries the database
    directly (Oracle, SQL Server or DB2).  ``db_name`` is only populated
    when the text comes from the ASH cache.
    """
    sql_text = ''
    db_type = database.db_type
    # Per-db-type ASH model; assumes db_type is always a key of ASH_DICT
    # (a miss would make `model` None and crash below) -- TODO confirm.
    model = ASH_DICT.get(db_type)
    # Most recent cached sample that already carries the SQL text.
    obj = (((model.objects.filter(database=database)).filter(
        sql_id=sql_id)).filter(
        sql_text__isnull=False)).order_by('-created_at').first()
    if obj:
        return (obj.sql_text, obj.db_name)
    # 'null' is the sentinel used upstream for "no sql_id" -- TODO confirm.
    if sql_id != 'null':
        if db_type == 'oracle':
            instance_id_list = database.instance_id_list
            # NOTE(review): sql_id/instance_id_list are interpolated into the
            # SQL text directly; safe only if these values are trusted --
            # consider bind variables if run_sql supports them.
            query_sqltext_realtime = f'''select sql_fulltext SQL_TEXT from gv$sql where inst_id in ({instance_id_list}) and sql_id= '{sql_id}' and rownum=1'''
            query_sqltext_hist = f'''select SQL_TEXT from DBA_HIST_SQLTEXT where sql_id='{sql_id}' and rownum=1'''
            flag, sql_text = run_sql(database, query_sqltext_realtime)
            if not flag:
                raise build_exception_from_java(sql_text)
            else:
                if not sql_text:
                    # Aged out of the shared pool -- fall back to AWR history.
                    flag, sql_text = run_sql(database, query_sqltext_hist)
                    if not flag:
                        raise build_exception_from_java(sql_text)
            return (sql_text[0].get('SQL_TEXT') if sql_text else '', None)
        if db_type == 'sqlserver':
            # Here sql_id is a hex sql_handle, converted back to varbinary.
            query = f''' select top 1 TEXT as SQL_TEXT from sys.dm_exec_sql_text(cast('' as xml).value('xs:hexBinary("{sql_id}")', 'varbinary(max)'))'''
            flag, sql_text = run_sql(database, query)
            if not flag:
                raise build_exception_from_java(sql_text)
            # NOTE(review): falls back to the database name when no text was
            # found -- looks intentional but verify against callers.
            return (sql_text[0].get('SQL_TEXT') if sql_text else database.db_name, None)
        if db_type == 'db2':
            # Here sql_id is the hex EXECUTABLE_ID of a package-cache entry.
            query = f''' select stmt_text SQL_TEXT from TABLE(MON_GET_PKG_CACHE_STMT(null, null, null, -2)) cache where EXECUTABLE_ID = x'{sql_id}' fetch first 1 rows only '''
            flag, sql_text = run_sql(database, query)
            if not flag:
                raise build_exception_from_java(sql_text)
            return (sql_text[0].get('SQL_TEXT') if sql_text else database.db_name, None)
    return ('', None)
def db2_activity(database):
    """Sample currently-active DB2 sessions into DB2_ASH rows.

    Chooses the activity query by server version: >= v9.7 uses the *_v97
    WLM table functions (falling back to a variant without MON_CURRENT_UOW
    on older fixpacks), v9.5-base uses WLM_GET_ACTIVITY_DETAILS.  One
    DB2_ASH row is saved per active application, then the active-session
    count is fed to the Active_Session_Warn scanner.

    Fixes vs. previous revision: removed the unused ``padding_str`` local;
    guarded ``AUTHID`` against None before ``.strip()``.
    """
    query1 = "\n SELECT distinct rtrim(app.db_name) DB_NAME,\n app.agent_id,\n app.appl_id,\n app.appl_name,\n app.appl_status,\n app.authid,\n t.activity_type,\n (select cast(p.stmt_text as varchar(2000)) from table(mon_get_pkg_cache_stmt(NULL, t.executable_id, NULL, -2)) as p FETCH FIRST 1 ROWS ONLY) stmt_text,\n hex(t.EXECUTABLE_ID) EXECUTABLE_ID,\n uow.ELAPSED_TIME_SEC,\n round(uow.TOTAL_CPU_TIME/1000000) TOTAL_CPU_TIME,\n uow.TOTAL_ROWS_READ,\n uow.TOTAL_ROWS_RETURNED\nFROM table(wlm_get_workload_occurrence_activities(NULL, -2)) as t,\n sysibmadm.applications app,\n SYSIBMADM.MON_CURRENT_UOW uow\nWHERE\n app.agent_id = t.application_handle\n and t.application_handle = uow.application_handle\n and app.appl_id != (values application_id())\n and app.appl_status not in ('CONNECTED',\n 'UOWWAIT')"
    query1_v97_base = "\n SELECT\n distinct rtrim(app.db_name) DB_NAME, app.agent_id, app.appl_id, app.appl_name, app.appl_status, app.authid,\n t.activity_type, cast(p.stmt_text as varchar(2000)) stmt_text, hex(t.EXECUTABLE_ID) EXECUTABLE_ID\n FROM table(wlm_get_workload_occurrence_activities_v97(NULL, -2)) as t,\n table(mon_get_pkg_cache_stmt(NULL, NULL, NULL, -2)) as p,\n sysibmadm.applications app\n WHERE t.executable_id = p.executable_id\n and app.agent_id = t.application_handle\n and app.appl_id != (values application_id())\n and app.appl_status not in ('CONNECTED','UOWWAIT')"
    query1_v97 = "\n SELECT distinct rtrim(app.db_name) DB_NAME,\n app.agent_id,\n app.appl_id,\n app.appl_name,\n app.appl_status,\n app.authid,\n t.activity_type,\n (select cast(p.stmt_text as varchar(2000)) from table(mon_get_pkg_cache_stmt(NULL, t.executable_id, NULL, -2)) as p FETCH FIRST 1 ROWS ONLY) stmt_text,\n hex(t.EXECUTABLE_ID) EXECUTABLE_ID,\n uow.ELAPSED_TIME_SEC,\n round(uow.TOTAL_CPU_TIME/1000000) TOTAL_CPU_TIME,\n uow.TOTAL_ROWS_READ,\n uow.TOTAL_ROWS_RETURNED\nFROM table(wlm_get_workload_occurrence_activities_v97(NULL, -2)) as t,\n sysibmadm.applications app,\n SYSIBMADM.MON_CURRENT_UOW uow\nWHERE\n app.agent_id = t.application_handle\n and t.application_handle = uow.application_handle\n and app.appl_id != (values application_id())\n and app.appl_status not in ('CONNECTED',\n 'UOWWAIT')"
    query2 = "\n SELECT\n app.db_name, app.agent_id, app.appl_id, app.appl_name, app.appl_status, app.authid,\n t.activity_type, (select VALUE from table(WLM_GET_ACTIVITY_DETAILS(t.application_handle,t.uow_id,t.activity_id,-2)) where name = 'STMT_TEXT') STMT_TEXT\n FROM table(wlm_get_workload_occurrence_activities(cast(null as bigint), -1)) as t,\n sysibmadm.applications app\n WHERE app.agent_id = t.application_handle\n and app.appl_id != (values application_id())\n and app.appl_status not in ('CONNECTED','UOWWAIT')"
    ash_date = get_10s_time()
    if not database.is_v95_base():
        if database.is_v97():
            flag, json_data = run_sql(database, query1_v97)
            if not flag:
                # Older v9.7 fixpacks lack MON_CURRENT_UOW -- retry without it.
                flag, json_data = run_sql(database, query1_v97_base)
        else:
            flag, json_data = run_sql(database, query1)
    else:
        flag, json_data = run_sql(database, query2)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return
    for x in json_data:
        ash = DB2_ASH()
        # NOTE(review): db_name is filled from AUTHID rather than the DB_NAME
        # column the queries select -- confirm this is intentional.
        ash.db_name = (x.get('AUTHID') or '').strip()
        ash.session_id = x.get('AGENT_ID')
        ash.machine = x.get('APPL_ID')
        ash.program = x.get('APPL_NAME')
        ash.appl_status = x.get('APPL_STATUS')
        ash.username = (x.get('AUTHID') or '').strip()
        ash.command = x.get('ACTIVITY_TYPE')
        ash.sql_text = x.get('STMT_TEXT')
        if not database.is_v95_base():
            ash.sql_id = x.get('EXECUTABLE_ID')
            ash.sql_elapsed_time = x.get('ELAPSED_TIME_SEC')
            ash.total_cpu_time = x.get('TOTAL_CPU_TIME')
            ash.rows_read = x.get('TOTAL_ROWS_READ')
            ash.rows_returned = x.get('TOTAL_ROWS_RETURNED')
        else:
            # v9.5 has no EXECUTABLE_ID; derive a synthetic id from the text.
            ash.sql_id = gen_sql_id(
                x.get('STMT_TEXT')) if x.get('STMT_TEXT') else None
        ash.created_at = ash_date
        ash.database = database
        ash.save()
    warn = WARN_ENUM.get(database.db_type).Active_Session_Warn
    p = Performance(inst_id=database.db_name, name=warn.name,
                    value=len(json_data), created_at=ash_date)
    customized_warn_scanner(warn, p, database, False)
def sqlserver_performance(database):
    """Collect SQL Server performance counters and store Performance rows.

    Point-in-time counters are stored as-is; ``.../sec`` rate counters are
    stored as a per-second delta against the previous sample cached in
    redis (only when the previous sample is recent enough).

    Fixes vs. previous revision: the delta path matched previous values
    purely by list position with no bounds check, so a changed counter set
    between samples could raise IndexError or silently diff unrelated
    counters.  Rows whose previous value cannot be matched are now skipped.
    """
    query = "\nselect COUNTER_NAME,CNTR_VALUE from sys.dm_os_performance_counters where\n(object_name like '%sql statistics%' and counter_name = 'batch requests/sec') or\n(object_name like '%sql statistics%' and counter_name = 'sql compilations/sec') or\n(object_name like '%sql statistics%' and counter_name = 'sql re-compilations/sec') or\n(object_name like '%buffer manager%' and counter_name = 'lazy writes/sec') or\n(object_name like '%buffer manager%' and counter_name = 'page life expectancy') or\n(object_name like '%memory manager%' and counter_name = 'connection memory (kb)') or\n(object_name like '%memory manager%' and counter_name = 'memory grants pending') or\n(object_name like '%memory manager%' and counter_name = 'sql cache memory (kb)') or\n(object_name like '%memory manager%' and counter_name = 'target server memory (kb)') or\n(object_name like '%memory manager%' and counter_name = 'total server memory (kb)') or\n(object_name like '%access methods%' and counter_name = 'full scans/sec') or\n(object_name like '%access methods%' and counter_name = 'forwarded records/sec') or\n(object_name like '%access methods%' and counter_name = 'mixed page allocations/sec') or\n(object_name like '%access methods%' and counter_name = 'page splits/sec') or\n(object_name like '%access methods%' and counter_name = 'table lock escalations/sec') or\n(object_name like '%general statistics%' and counter_name = 'logins/sec') or\n(object_name like '%general statistics%' and counter_name = 'logouts/sec') or\n(object_name like '%general statistics%' and counter_name = 'user connections') or\n(object_name like '%general statistics%' and counter_name = 'processes blocked') or\n(object_name like '%latches%' and counter_name = 'latch waits/sec') or\n(object_name like '%latches%' and counter_name = 'average latch wait time (ms)') or\n(object_name like '%access methods%' and counter_name = 'workfiles created/sec') or\n(object_name like '%access methods%' and counter_name = 'worktables created/sec') or\n(object_name like '%general statistics%' and counter_name = 'active temp tables') or\n(object_name like '%general statistics%' and counter_name = 'temp tables creation rate') or\n(object_name like '%general statistics%' and counter_name = 'temp tables for destruction') or\n(object_name like '%databases%' and counter_name ='active transactions' and instance_name = '_Total') or\n(object_name like '%databases%' and counter_name ='Transactions/sec' and instance_name = '_Total') or\n(object_name like '%databases%' and counter_name ='log flushes/sec' and instance_name = '_Total') or\n(object_name like '%databases%' and counter_name ='cache hit ratio' and instance_name = '_Total') or\n(object_name like '%SQLServer:Locks%' and counter_name like '%Lock%' and instance_name = '_Total')"
    # Counters whose value is a rate and must be diffed against the
    # previous sample.
    match_pattern = re.compile('/sec', re.IGNORECASE)
    date_current = get_10s_time()
    flag, json_data_current = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data_current)))
        return
    key = str(database.id) + ':performance'
    date_key = str(database.id) + ':performance_date'
    json_data_str_prev = redis.get(key)
    date_prev = redis.get(date_key)
    keys = ['COUNTER_NAME', 'CNTR_VALUE']
    # Cache the current sample for the next run before computing deltas.
    redis.set(key, json.dumps(json_data_current))
    redis.set(date_key, str(date_current))
    if json_data_str_prev and date_prev and (
            date_current - to_date(date_prev)).total_seconds() < MAX_INTERVAL:
        json_data_prev = json.loads(json_data_str_prev)
        for idx, obj in enumerate(json_data_current):
            name = obj.get(keys[0])
            value = obj.get(keys[1])
            is_rate = re.search(match_pattern, name)
            if is_rate:
                # Positional match against the previous sample; skip the row
                # if the counter set shifted (no trustworthy previous value).
                if idx >= len(json_data_prev) or \
                        json_data_prev[idx].get(keys[0]) != name:
                    continue
            p = Performance()
            p.name = name
            p.database = database
            p.created_at = date_current
            if is_rate:
                p.value = round(
                    (float(value) - float(json_data_prev[idx].get(keys[1])))
                    / INTERVAL, 1)
            else:
                p.value = float(value)
            p.save()
def create_index_fts(database, plans, full_plan=None): result = [] for x in plans: filter = x.get('FILTER_PREDICATES') access = x.get('ACCESS_PREDICATES') card = x.get('CARDINALITY') owner = x.get('OBJECT_OWNER') name = x.get('OBJECT_NAME') num_rows = 0 query = f'''select NUM_ROWS from dba_tables where owner = '{owner}' and table_name = '{name}'''' flag, table_data = run_sql(database, query) if flag: if table_data: num_rows = table_data[0].get('NUM_ROWS') if card == 1 or card <= num_rows * 0.05: col_str = '' if filter or access: if filter: col_str = col_str + filter if access: col_str = col_str + access owner = x.get('OBJECT_OWNER') name = x.get('OBJECT_NAME') create_index_stmt = gen_create_index_stmt(database, owner, name, col_str) if create_index_stmt: result.append(create_index_stmt) return list(set(result))
def get_table_rows(database):
    """Snapshot per-table row counts into Table_Rows (best effort)."""
    try:
        rows_query = Rows_Query.get(database.db_type)
        rows_data = []
        if database.db_type == 'sqlserver':
            # SQL Server requires a per-database walk.
            rows_data = sqlserver_rows_data(database)
        else:
            flag, rows_data = run_sql(database, rows_query)
            if not flag:
                raise build_exception_from_java(rows_data)
        created_at = datetime.now().replace(microsecond=0)
        pending = [
            Table_Rows(database=database,
                       owner=r.get('OWNER'),
                       table_name=r.get('TABLE_NAME'),
                       rows=r.get('ROWS'),
                       created_at=created_at)
            for r in rows_data
        ]
        Table_Rows.objects.bulk_create(pending)
    except Exception as err:
        # Deliberate best-effort: a failed snapshot must not kill the caller.
        print(err)
def get_space(database):
    """Snapshot tablespace usage into Space rows and raise space warnings.

    Also refreshes the single Space_Detail raw-snapshot record for the
    database.  Tablespace warnings are skipped for mysql/sqlserver, whose
    space model has no tablespaces.

    Fixes vs. previous revision: dropped the unused ``space_detail``
    binding (``update_or_create`` is called purely for its side effect).
    """
    query = Space_Query.get(database.db_type)
    flag, space_data = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(space_data)))
        return
    created_at = datetime.now().replace(microsecond=0)
    # Keep the latest raw detail snapshot; return value intentionally unused.
    Space_Detail.objects.update_or_create(
        database=database,
        defaults={'detail': space_data, 'created_at': created_at})
    for x in space_data:
        space = Space()
        space.database = database
        space.name = x.get('TABLESPACE_NAME')
        space.total_mb = x.get('TOTAL_MB')
        space.free = x.get('FREE')
        space.used = x.get('USED')
        space.type = x.get('CONTENTS')
        space.used_pct = x.get('USED_PCT')
        space.created_at = created_at
        space.save()
        options = {'name': x.get('TABLESPACE_NAME'),
                   'total': x.get('TOTAL_MB'),
                   'used': x.get('USED'),
                   'used_pct': x.get('USED_PCT')}
        if database.db_type not in ('mysql', 'sqlserver'):
            warn = WARN_ENUM.get(database.db_type).Tablespace_Warn
            customized_warn_scanner(warn, space, database, False, options)
def sqlserver_rows_data(database):
    """Collect row-count data from every user database on a SQL Server."""
    query = "\n SELECT NAME FROM master.dbo.sysdatabases where name not in ('master', 'tempdb', 'model', 'msdb')"
    ok, payload = run_sql(database, query)
    if not ok:
        raise build_exception_from_java(payload)
    db_list = [entry.get('NAME') for entry in payload]
    schema_data = []
    # Same per-database query, executed once per user database.
    query = Rows_Query.get(database.db_type)
    for db in db_list:
        ok, payload = run_sql(database, query, db)
        if not ok:
            raise build_exception_from_java(payload)
        schema_data.extend(payload)
    return schema_data
def get_sqlmon(database):
    """Harvest recent completed SQL Monitor reports into SQLMON rows.

    Pulls up to ``database.num_sqlmon_per_minute`` finished statements from
    gv$sql_monitor (last 10 minutes, ordered by elapsed time) together with
    a dbms_sqltune report rendered in ``database.sqlmon_format``.
    """
    query_sqlmon = (
        "\n select a.*,dbms_sqltune.report_sql_monitor(type=>'{}', sql_id=>a.sql_id, sql_exec_id=>a.sql_exec_id, report_level=>'ALL') SQLMON\n from (select\n KEY, STATUS,SQL_ID,round((LAST_REFRESH_TIME-SQL_EXEC_START)*24*3600) ELAPSED_TIME,\n round(ELAPSED_TIME/1e6) DB_TIME,round(CPU_TIME/1e6) DB_CPU,\n SQL_EXEC_ID,to_char(sql_exec_start,'YYYY-MM-DD HH24:MI:SS') SQL_EXEC_START,\n SQL_PLAN_HASH_VALUE,INST_ID, USERNAME,\n SQL_TEXT\n from Gv$sql_Monitor\n where --(LAST_REFRESH_TIME-SQL_EXEC_START)*24*3600>60\n sql_plan_hash_value >0 and\n status like 'DONE%'\n and LAST_REFRESH_TIME>=sysdate - 600/3600/24\n and LAST_REFRESH_TIME<=sysdate\n and sql_text is not null\n order by elapsed_time desc\n ) a where rownum<={} "
    ).format(database.sqlmon_format, database.num_sqlmon_per_minute)
    # NOTE(review): query_sqlmon_plan is never used in this function --
    # possibly dead code left over from a refactor (or lost logic in a
    # decompile step); verify before removing.
    query_sqlmon_plan = "select\n INST_ID,\n STATUS,\n to_char(FIRST_REFRESH_TIME,'YYYY-MM-DD HH24:MI:SS') FIRST_REFRESH_TIME,\n to_char(LAST_REFRESH_TIME,'YYYY-MM-DD HH24:MI:SS') LAST_REFRESH_TIME,\n SID,\n SQL_ID,\n to_char(SQL_EXEC_START,'YYYY-MM-DD HH24:MI:SS') SQL_EXEC_START,\n SQL_EXEC_ID,\n SQL_PLAN_HASH_VALUE,\n PLAN_PARENT_ID,\n PLAN_LINE_ID,\n PLAN_OPERATION,\n PLAN_OPTIONS,\n PLAN_OBJECT_OWNER,\n PLAN_OBJECT_NAME,\n PLAN_OBJECT_TYPE,\n PLAN_DEPTH,\n PLAN_POSITION,\n PLAN_COST,\n PLAN_CARDINALITY,\n PLAN_TEMP_SPACE,\n STARTS,\n OUTPUT_ROWS,\n PHYSICAL_READ_REQUESTS,\n PHYSICAL_READ_BYTES,\n PHYSICAL_WRITE_REQUESTS,\n PHYSICAL_WRITE_BYTES\nfrom gv$sql_plan_monitor\nwhere\n key in ({})"
    flag, sqlmon_data = run_sql(database, query_sqlmon)
    if not flag:
        print(str(build_exception_from_java(sqlmon_data)))
        return sqlmon_data
    sqlmon_time = datetime.now().replace(microsecond=0)
    for x in sqlmon_data:
        m = SQLMON()
        m.inst_id = x.get('INST_ID')
        m.sql_id = x.get('SQL_ID')
        m.status = x.get('STATUS')
        m.username = x.get('USERNAME')
        m.elapsed_time = x.get('ELAPSED_TIME')
        m.db_time = x.get('DB_TIME')
        m.db_cpu = x.get('DB_CPU')
        m.sql_exec_id = x.get('SQL_EXEC_ID')
        m.sql_exec_start = x.get('SQL_EXEC_START')
        m.sql_plan_hash_value = x.get('SQL_PLAN_HASH_VALUE')
        m.sql_text = x.get('SQL_TEXT')
        m.sqlmon = x.get('SQLMON')
        m.database = database
        m.created_at = sqlmon_time
        m.save()
def diskgroup_warn(database):
    """Scan ASM disk groups and raise offline/status/usage warnings.

    One Performance probe per disk group is fed to the warn scanner for
    each of the three warn types.

    Fixes vs. previous revision: the Used_Percent warn was fed
    ``x.get('STATE')`` (a copy-paste of the status warn) instead of the
    actual used percentage.
    """
    query = '\n SELECT\n NAME,\n STATE,\n round(TOTAL_MB/1024) TOTAL_GB,\n round((TOTAL_MB-FREE_MB)/1024) USED_GB,\n round((TOTAL_MB-FREE_MB)/TOTAL_MB*100) USED_PCT,\n OFFLINE_DISKS\n FROM\n V$ASM_DISKGROUP'
    flag, json_data = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return json_data
    created_at = datetime.now().replace(microsecond=0)
    # Offline-disk count per disk group.
    warn = WARN_ENUM.get(database.db_type).DiskGroup_Offline_Disks_Warn
    for x in json_data:
        options = {'name': x.get('NAME')}
        p = Performance(inst_id=database.db_name, name=warn.name,
                        value=x.get('OFFLINE_DISKS'), created_at=created_at)
        customized_warn_scanner(warn, p, database, False, options)
    # Disk group state (MOUNTED / DISMOUNTED / ...).
    warn = WARN_ENUM.get(database.db_type).DiskGroup_Status_Warn
    for x in json_data:
        options = {'name': x.get('NAME')}
        p = Performance(inst_id=database.db_name, name=warn.name,
                        value=x.get('STATE'), created_at=created_at)
        customized_warn_scanner(warn, p, database, False, options)
    # Space usage percentage.
    warn = WARN_ENUM.get(database.db_type).DiskGroup_Used_Percent_Warn
    for x in json_data:
        options = {'name': x.get('NAME'),
                   'total': x.get('TOTAL_GB'),
                   'used': x.get('USED_GB'),
                   'used_pct': x.get('USED_PCT')}
        p = Performance(inst_id=database.db_name, name=warn.name,
                        value=x.get('USED_PCT'), created_at=created_at)
        customized_warn_scanner(warn, p, database, False, options)
def get_ash_report(pk, instance_id, begin_time, end_time):
    """Build an ASH report (HTML) for one database/instance/time window.

    Returns ``{'report_html': ...}`` on success or ``{'error_message': ...}``
    on failure (empty message when the database does not exist).

    Fixes vs. previous revision: the shared ``ASH_Query`` template dict was
    mutated in place (``query_ash['report'] = ...format(...)``), so a second
    call re-formatted already-formatted SQL.  The formatted statement now
    goes into a shallow copy.
    """
    try:
        database = Database.objects.get(pk=pk)
        flag, json_data = run_sql(database, DBID_Query)
        if not flag:
            raise build_exception_from_java(json_data)
        db_id = json_data[0].get('DBID')
        key, inst_str = get_key_inst_str(database, instance_id)
        query_ash = ASH_Query.get(key)
        options = {
            'db_id': db_id,
            'inst_str': inst_str,
            'begin_time': timestamp_to_char(begin_time),
            'end_time': timestamp_to_char(end_time)
        }
        # Work on a copy so the module-level template stays pristine.
        batch = dict(query_ash)
        batch['report'] = query_ash.get('report').format(**options)
        flag, report_data = run_batch_sql(database, batch)
        if not flag:
            raise build_exception_from_java(report_data)
        report_html = ('').join([
            x.get('OUTPUT') for x in report_data.get('report')
            if x.get('OUTPUT')
        ]) if report_data.get('report') else ''
        return {'report_html': report_html}
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
def get_object_type(database, owner, object_name, options, db_name=None):
    """Resolve an object's type via the per-db-type lookup query.

    The query template is selected by ``database.db_type`` and formatted
    with *options*.  Returns the OBJECT_TYPE of the first row, or the raw
    (empty) result when nothing matched.

    NOTE(review): *owner* and *object_name* are unused here -- presumably
    kept for interface compatibility with callers; the real identifiers
    travel inside *options*.  Confirm.
    """
    query = Object_Type_Query.get(database.db_type).format(**options)
    ok, rows = run_sql(database, query, db_name)
    if not ok:
        raise build_exception_from_java(rows)
    return rows[0].get('OBJECT_TYPE') if rows else rows
def create_index_gap(database, plans, full_plan=None):
    """Suggest indexes covering predicate columns missing from a used index.

    For "TABLE ACCESS BY INDEX ROWID" parents the owning table is resolved
    from dba_indexes; for hash-joined table scans the line's own table is
    used.  Predicates from the plan line and its parent become candidate
    index columns.

    NOTE(review): the previous body looks like a mangled decompilation:
    ``query`` was built as a boolean ``and``-chain (so ``run_sql`` could
    receive ``False``), the lookup string lacked its ``f`` prefix (the
    ``{object_owner}``/``{object_name}`` placeholders were never
    substituted), and ``row.get`` could run on ``False``.  The control flow
    below is a best-effort reconstruction -- verify against the original
    source.
    """
    result = []
    for x in plans:
        sql_plan_hash_value = x.get('SQL_PLAN_HASH_VALUE')
        plan_line_id = x.get('PLAN_LINE_ID')
        plan_operation = x.get('PLAN_OPERATION')
        object_name = x.get('OBJECT_NAME')
        object_owner = x.get('OBJECT_OWNER')
        parent_plan_operation = x.get('PARENT_PLAN_OPERATION')
        parent_plan_options = x.get('PARENT_PLAN_OPTIONS')
        plan_parent_id = x.get('PLAN_PARENT_ID')
        object_type = x.get('OBJECT_TYPE')
        parent_object_object_type = x.get('PARENT_PLAN_OBJECT_TYPE')
        parent_object_object_name = x.get('PARENT_PLAN_OBJECT_NAME')
        parent_object_object_owner = x.get('PARENT_PLAN_OBJECT_OWNER')
        owner = ''
        table_name = ''
        if (object_type == 'INDEX'
                and parent_object_object_type == 'TABLE'
                and parent_plan_operation == 'TABLE ACCESS'
                and parent_plan_options == 'BY INDEX ROWID'):
            # Resolve the table owning the accessed index.
            query = (f"select TABLE_OWNER, TABLE_NAME from dba_indexes "
                     f"where owner = '{object_owner}' and index_name = '{object_name}'")
            flag, table_data = run_sql(database, query)
            if not (flag and table_data):
                continue
            row = table_data[0]
            owner, table_name = row.get('TABLE_OWNER'), row.get('TABLE_NAME')
            # Skip when the resolved table matches neither the parent's
            # owner nor its object name.
            if owner != parent_object_object_owner:
                if parent_object_object_name != table_name:
                    continue
        elif (object_type == 'TABLE'
                and plan_operation == 'TABLE ACCESS'
                and parent_plan_options == 'HASH JOIN'):
            owner, table_name = object_owner, object_name
        else:
            continue
        plan = full_plan.get(str(sql_plan_hash_value)) if full_plan else None
        if plan:
            line = plan[plan_line_id]
            parent_line = plan[plan_parent_id]
            col_str = ''
            # Concatenate the line's and the parent line's predicates.
            for pred in (line.get('FILTER_PREDICATES'),
                         line.get('ACCESS_PREDICATES'),
                         parent_line.get('FILTER_PREDICATES'),
                         parent_line.get('ACCESS_PREDICATES')):
                if pred:
                    col_str = col_str + ' ' + pred
            create_index_stmt = gen_create_index_stmt(database, owner,
                                                      table_name, col_str)
            if create_index_stmt:
                result.append(create_index_stmt)
    return list(set(result))
def oracle_activity(database):
    """Sample gv$active_session_history (1-2s ago) into Oracle_ASH rows.

    A version-specific query is chosen (10g lacks sql_exec_start and the
    PL/SQL columns).  One row is saved per active session sample, then the
    active session count is fed to the Active_Session_Warn scanner.
    """
    if database.version == '10':
        query = "\n select /*+ leading(b a)*/\n a.inst_id,\n SESSION_ID sid,\n SESSION_SERIAL# serial,\n SESSION_ID || ',' || SESSION_SERIAL# || '@'|| a.inst_id SESSION_ID,\n (select username from dba_users u where u.user_id = a.user_id) username,\n '' machine,\n program,\n --status,\n case SQL_OPCODE\n when 1 then 'CREATE TABLE'\n when 2 then 'INSERT'\n when 3 then 'SELECT'\n when 6 then 'UPDATE'\n when 7 then 'DELETE'\n when 9 then 'CREATE INDEX'\n when 11 then 'ALTER INDEX'\n when 15 then 'ALTER INDEX' else 'Others' end command,\n SQL_ID,\n SQL_PLAN_HASH_VALUE,\n nvl(event, 'ON CPU') event,\n p1,\n p2,\n p3,\n nvl(wait_class, 'ON CPU') wait_class ,\n module,\n action,\n (select name from V$ACTIVE_SERVICES s where s.NAME_HASH = a.SERVICE_HASH) service_name,\n '' plsql_object_name,\n '' plsql_entry_object_name,\n BLOCKING_SESSION,\n BLOCKING_SESSION_SERIAL# BLOCKING_SESSION_SERIAL,\n null SQL_PLAN_LINE_ID,\n '' SQL_PLAN_OPERATION,\n SESSION_TYPE,\n (select SQL_TEXT from v$sql b where b.sql_id = a.sql_id and rownum =1) SQL_TEXT\n from gv$ACTIVE_SESSION_HISTORY a\n where a.SAMPLE_TIME between systimestamp - numtodsinterval(2,'SECOND') and systimestamp - numtodsinterval(1,'SECOND')\n and nvl(a.wait_class,'ON CPU') <> 'Idle'"
    else:
        # NOTE(review): version is compared lexically as a string, and no
        # branch assigns `query` for versions that are neither '10' nor
        # >= '11' -- run_sql below would then raise NameError.  Confirm the
        # supported version set.
        if database.version >= '11':
            query = "\n select /*+ leading(b a)*/\n a.inst_id,\n SESSION_ID sid,\n SESSION_SERIAL# serial,\n SESSION_ID || ',' || SESSION_SERIAL# || '@'|| a.inst_id SESSION_ID,\n round((cast(sample_time as date)-a.sql_exec_start)*24*3600) SQL_ELAPSED_TIME,\n (select username from dba_users u where u.user_id = a.user_id) username,\n machine,\n program,\n --status,\n case SQL_OPCODE\n when 1 then 'CREATE TABLE'\n when 2 then 'INSERT'\n when 3 then 'SELECT'\n when 6 then 'UPDATE'\n when 7 then 'DELETE'\n when 9 then 'CREATE INDEX'\n when 11 then 'ALTER INDEX'\n when 15 then 'ALTER INDEX' else 'Others' end command,\n SQL_ID,\n SQL_PLAN_HASH_VALUE,\n nvl(event, 'ON CPU') event,\n p1,\n p2,\n p3,\n nvl(wait_class, 'ON CPU') wait_class,\n module,\n action,\n (select name from V$ACTIVE_SERVICES s where s.NAME_HASH = a.SERVICE_HASH) SERVER_NAME ,\n -- (select object_name from dba_objects s where s.object_id = a.PLSQL_OBJECT_ID) plsql_object_name,\n -- (select object_name from dba_objects s where s.object_id = a.PLSQL_ENTRY_OBJECT_ID) plsql_entry_object_name,\n '' plsql_object_name,\n '' plsql_entry_object_name,\n BLOCKING_SESSION,\n BLOCKING_SESSION_SERIAL# BLOCKING_SESSION_SERIAL,\n SQL_PLAN_LINE_ID,\n SQL_PLAN_OPERATION || ' ' || SQL_PLAN_OPTIONS SQL_PLAN_OPERATION,\n SESSION_TYPE,\n (select sql_fulltext from v$sql b where b.sql_id = a.sql_id and rownum =1) SQL_TEXT\n from gv$ACTIVE_SESSION_HISTORY a\n where a.SAMPLE_TIME between systimestamp - numtodsinterval(2,'SECOND') and systimestamp - numtodsinterval(1,'SECOND')\n and nvl(a.wait_class,'ON CPU') <> 'Idle'\n "
    ash_date = get_10s_time()
    flag, json_data = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return
    for x in json_data:
        ash = Oracle_ASH()
        ash.inst_id = x.get('INST_ID')
        ash.sid = x.get('SID')
        ash.serial = x.get('SERIAL')
        ash.username = x.get('USERNAME')
        # db_name mirrors the username -- TODO confirm intent.
        ash.db_name = x.get('USERNAME')
        ash.machine = x.get('MACHINE')
        ash.program = x.get('PROGRAM')
        # NOTE(review): STATUS and SQL_HASH_VALUE are not selected by either
        # query, so these two fields are always None -- confirm.
        ash.status = x.get('STATUS')
        ash.command = x.get('COMMAND')
        ash.sql_hash_value = x.get('SQL_HASH_VALUE')
        ash.sql_id = x.get('SQL_ID')
        ash.sql_text = x.get('SQL_TEXT')
        ash.sql_plan_hash_value = x.get('SQL_PLAN_HASH_VALUE')
        ash.event = x.get('EVENT')
        ash.p1 = x.get('P1')
        ash.p2 = x.get('P2')
        ash.p3 = x.get('P3')
        ash.wait_class = x.get('WAIT_CLASS')
        ash.module = x.get('MODULE')
        ash.action = x.get('ACTION')
        # NOTE(review): the 11g query aliases the service as SERVER_NAME, so
        # SERVICE_NAME is only populated on the 10g path -- confirm.
        ash.service_name = x.get('SERVICE_NAME')
        ash.plsql_object_name = x.get('PLSQL_OBJECT_NAME')
        ash.plsql_entry_object_name = x.get('PLSQL_ENTRY_OBJECT_NAME')
        ash.blocking_session = x.get('BLOCKING_SESSION')
        ash.blocking_session_serial = x.get('BLOCKING_SESSION_SERIAL')
        ash.sql_plan_line_id = x.get('SQL_PLAN_LINE_ID')
        ash.sql_plan_operation = x.get('SQL_PLAN_OPERATION')
        ash.session_type = x.get('SESSION_TYPE')
        ash.session_id = x.get('SESSION_ID')
        ash.sql_elapsed_time = x.get('SQL_ELAPSED_TIME')
        ash.created_at = ash_date
        ash.database = database
        try:
            ash.save()
        except Exception as e:
            # One bad sample must not abort the whole batch.
            logger.error(str(e))
    warn = WARN_ENUM.get(database.db_type).Active_Session_Warn
    p = Performance(inst_id=database.db_name, name=warn.name,
                    value=len(json_data), created_at=ash_date)
    customized_warn_scanner(warn, p, database, False)
def get_mysql_session_detail(database, session_id):
    """Fetch one processlist row for *session_id* into the detail layout."""
    layout = get_default_detail_format()
    query = f'''SELECT * FROM information_schema.processlist WHERE id = {session_id}'''
    ok, rows = run_sql(database, query)
    if not ok:
        raise build_exception_from_java(rows)
    if rows:
        # Single un-captioned detail section.
        layout['detail'][''] = rows[0]
    return layout
def all_sessions(pk):
    """List current sessions for database *pk* using a per-dialect query.

    Returns the raw row list from run_sql; raises when the query fails.
    """
    # One session-listing statement per supported db_type.
    query = {'oracle': "\n select\n s.sid || ',' || s.serial# || '@' || s.inst_id session_id,\n s.username,\n s.status,\n s.sql_id,\n case when s.state = 'WAITING' then s.event else 'ON CPU' end event,\n machine,\n s.program,\n to_char(s.logon_time,'YYYY-MON-DD HH24:MI') logon_time,\n round(Value / 1024 / 1024,1) PGA_MB\n from\n gv$session s, V$sesstat St, V$statname Sn\n Where St.Sid = s.Sid\n And St.Statistic# = Sn.Statistic#\n And Sn.Name Like 'session pga memory'",
             'mysql': '\n SELECT * FROM\n information_schema.processlist',
             'db2': 'select agent_id, db_name, appl_name, authid, appl_id,\n appl_status, client_nname MACHINE\n FROM SYSIBMADM.APPLICATIONS',
             'sqlserver': '\n SELECT\n ses.SESSION_ID,\n (select name from master..sysdatabases where dbid = req.database_id) DB_NAME,\n ses.LOGIN_NAME,\n CONVERT(VARCHAR(24), ses.LOGIN_TIME, 120) LOGON_TIME,\n ses.HOST_NAME,\n ses.PROGRAM_NAME,\n --application\n ses.status STATUS,\n --current request\n req.STATUS REQ_STATUS,\n CONVERT(VARCHAR(24), req.start_time, 120) START_TIME,\n req.ROW_COUNT REQ_ROW_COUNT,\n con.CLIENT_NET_ADDRESS,\n substring(sys.fn_sqlvarbasetostr(req.sql_handle),3,1000) SQL_ID\n FROM sys.dm_exec_sessions ses\n inner join sys.dm_exec_connections con on ses.session_id = con.session_id\n left join sys.dm_exec_requests req on req.session_id = ses.session_id\n outer APPLY sys.dm_exec_sql_text(sql_handle) AS sqltext'}
    database = Database.objects.get(pk=pk)
    detail_query = query.get(database.db_type)
    flag, json_data = run_sql(database, detail_query)
    if not flag:
        raise build_exception_from_java(json_data)
    return json_data
def plan_change_warn(database):
    """Warn on SQL whose multiple plans diverge widely in elapsed time.

    For every sql_id with more than one plan_hash_value, the ratio between
    its slowest and fastest per-execution elapsed time is fed to the
    Plan_Change_Warn scanner.
    """
    query = '\nselect sql_id,\n round(max(elapsed_time/decode(executions,0,1,executions))/min(elapsed_time/decode(executions,0,1,executions))) DIFF,\n min(inst_id) INST_ID\nfrom\n gv$sql\nwhere elapsed_time > 0\ngroup by sql_id\nhaving count(distinct plan_hash_value) > 1'
    ok, rows = run_sql(database, query)
    if not ok:
        print(str(build_exception_from_java(rows)))
        return rows
    created_at = datetime.now().replace(microsecond=0)
    warn = WARN_ENUM.get(database.db_type).Plan_Change_Warn
    for row in rows:
        perf = Performance(inst_id=row.get('INST_ID'), name=warn.name,
                           value=row.get('DIFF'), created_at=created_at)
        customized_warn_scanner(warn, perf, database, True,
                                {'sql_id': row.get('SQL_ID')})
def job_failure_warn(database):
    """Warn on failed dba_jobs and recently-failed scheduler jobs.

    Legacy dba_jobs contribute their cumulative failure count; scheduler
    jobs contribute the number of non-SUCCEEDED log entries from the last
    hour.
    """
    query = "\n select SCHEMA_USER OWNER, job || ' '|| what JOB_NAME, failures from dba_jobs where failures > 0\nunion all\nselect OWNER, JOB_NAME, count(*)\nFROM dba_scheduler_job_log\nwhere\nlog_date > sysdate - 1/24 and\nSTATUS != 'SUCCEEDED'\ngroup by OWNER, job_name"
    ok, rows = run_sql(database, query)
    if not ok:
        print(str(build_exception_from_java(rows)))
        return rows
    created_at = datetime.now().replace(microsecond=0)
    warn = WARN_ENUM.get(database.db_type).Job_Warn
    for row in rows:
        options = {'name': row.get('JOB_NAME'), 'schema': row.get('OWNER')}
        perf = Performance(inst_id=database.db_name, name=warn.name,
                           value=row.get('FAILURES'), created_at=created_at)
        customized_warn_scanner(warn, perf, database, True, options)
def verify_columns(database, owner, table_name, col_list): query = f''' select COLUMN_NAME FROM dba_tab_cols where OWNER = '{owner}' and TABLE_NAME = '{table_name}'''' flag, table_data = run_sql(database, query) if flag: if table_data: table_colums = [x.get('COLUMN_NAME') for x in table_data] return [c for c in col_list if c in table_colums] return []
def get_snapshot(pk, snapshot_limit=1000):
    """Return up to *snapshot_limit* snapshots for database *pk*.

    On any failure a ``{'error_message': ...}`` dict is returned instead
    (empty message when the database row does not exist).
    """
    try:
        database = Database.objects.get(pk=pk)
        query = Snapshot_Query.format(snapshot_limit)
        ok, payload = run_sql(database, query)
        if not ok:
            raise build_exception_from_java(payload)
        return payload
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
def object_change_warn(database):
    """Warn on non-system objects whose DDL changed within the last hour."""
    query = "\nselect object_name,owner, to_char(last_ddl_time, 'yyyy-mm-dd hh24:mi:ss') last_ddl_time\nfrom dba_objects\nwhere last_ddl_time > sysdate - 1/24\nand owner not in ('SCOTT','MGMT_VIEW','MDDATA','MDSYS','SI_INFORMTN_SCHEMA','ORDPLUGINS','ORDSYS','OLAPSYS','SYSMAN','ANONYMOUS','XDB','CTXSYS','EXFSYS','WMSYS','ORACLE_OCM','DBSNMP','TSMSYS','DMSYS','DIP','OUTLN','SYSTEM','SYS') "
    ok, rows = run_sql(database, query)
    if not ok:
        print(str(build_exception_from_java(rows)))
        return rows
    created_at = datetime.now().replace(microsecond=0)
    warn = WARN_ENUM.get(database.db_type).DB_Object_Change_Warn
    for row in rows:
        options = {'schema': row.get('OWNER'),
                   'object_name': row.get('OBJECT_NAME'),
                   'last_ddl_time': row.get('LAST_DDL_TIME')}
        # Value 1 == "a change happened"; details travel in options.
        perf = Performance(inst_id=database.db_name, name=warn.name,
                           value=1, created_at=created_at)
        customized_warn_scanner(warn, perf, database, True, options)
def oracle_standby_warn(database):
    """Warn when standby log apply lags behind archived-log generation.

    Per thread/destination, the gap between the highest generated sequence
    and the highest applied sequence is fed to the Standby_Gap_Warn
    scanner.
    """
    query = "\nSELECT a.thread#, b.last_seq, a.applied_seq, a. last_app_timestamp, b.last_seq-a.applied_seq ARC_DIFF, dest_name\nFROM\n (SELECT thread#, dest_name, MAX(sequence#) applied_seq, MAX(next_time) last_app_timestamp\n FROM gv$archived_log log,\n v$ARCHIVE_DEST dest WHERE log.applied = 'YES' and dest.dest_name is not null and log.dest_id = dest.dest_id GROUP BY dest.dest_name, thread#) a,\n (SELECT thread#, MAX (sequence#) last_seq FROM gv$archived_log GROUP BY thread#) b\nWHERE a.thread# = b.thread#"
    ok, rows = run_sql(database, query)
    if not ok:
        print(str(build_exception_from_java(rows)))
        return rows
    created_at = datetime.now().replace(microsecond=0)
    warn = WARN_ENUM.get(database.db_type).Standby_Gap_Warn
    for row in rows:
        options = {'name': row.get('DEST_NAME'),
                   'applied_seq': row.get('APPLIED_SEQ'),
                   'max_seq': row.get('LAST_SEQ'),
                   'thread': row.get('THREAD#')}
        perf = Performance(inst_id=database.db_name, name=warn.name,
                           value=row.get('ARC_DIFF'), created_at=created_at)
        customized_warn_scanner(warn, perf, database, False, options)
def _check(self, database):
    """Run this health check's SQL against *database* and record the result.

    Any failure is logged and swallowed so one broken check cannot stop
    the rest of the health-check run.
    """
    try:
        ok, rows = run_sql(database, self.check_sql)
        if ok and rows:
            # Flatten each row dict into a plain value list.
            self.set_result_raw([list(row.values()) for row in rows])
            self.set_result()
            if self.is_failed():
                self.set_score()
                self.resize_result_raw()
                self.set_advise()
    except Exception as e:
        logger.exception('health check queries, error: %s', e)
def get_db2_session_detail(database, session_id):
    """Build the session-detail payload for one DB2 application (agent_id)."""
    layout = get_default_detail_format()
    query = f'''select agent_id, db_name, appl_name, authid, appl_id, appl_status,client_prdid, client_pid, client_platform,client_protocol, client_nname FROM SYSIBMADM.APPLICATIONS WHERE agent_id = {session_id}'''
    ok, rows = run_sql(database, query)
    if not ok:
        raise build_exception_from_java(rows)
    if rows:
        record = rows[0]
        # NOTE(review): CLIENT_* columns land under the "connection info"
        # heading and the rest under "client info" -- looks swapped, but
        # preserved as-is; confirm intent.
        layout['detail'] = {
            u'\u8fde\u63a5\u4fe1\u606f':
                {col: record[col] for col in record if 'CLIENT' in col},
            u'\u5ba2\u6237\u7aef\u4fe1\u606f':
                {col: record[col] for col in record if 'CLIENT' not in col},
        }
    return layout
def REDUNDANT_INDEX(sqltext, pred=0, schema=None, database=None):
    """Check whether a CREATE INDEX statement duplicates an existing index.

    Parses ``ON table(col, ...)`` out of *sqltext*; when it matches, the
    static REDUNDANT_INDEX query is run with the normalized column list
    (upper-cased, space-stripped, colon-joined).  Returns ``(True, None)``
    when a redundant index exists, ``(False, None)`` otherwise.
    """
    prog = re.compile('ON ([\\w$]+) ?\\(([^)]+)\\)', re.IGNORECASE)
    match = prog.search(sqltext)
    if match:
        table_name = match.group(1).upper()
        cols = match.group(2).upper().replace(' ', '')
        columns = ':'.join(cols.split(','))
        query = build_static_query(StaticSQLJson.get('REDUNDANT_INDEX'),
                                   schema, table_name, columns)
        ok, rows = run_sql(database, query)
        if ok and rows:
            return (True, None)
    return (False, None)
def db2_performance(database):
    """Sample DB2 snapshot counters and persist them as Performance rows.

    Reads sysibmadm.snapdb once per call, caches the raw counters in redis
    keyed by database id, and — when a previous sample exists within
    MAX_INTERVAL seconds — stores gauge values directly and converts
    cumulative counters into per-second rates against the previous sample.
    """
    query = '\n select\n TOTAL_CONS, APPLS_CUR_CONS, APPLS_IN_DB2, LOCKS_WAITING, NUM_ASSOC_AGENTS, ACTIVE_SORTS,\n LOCKS_HELD, LOCK_WAITS,\n TOTAL_SORTS, SORT_OVERFLOWS,\n POOL_DATA_L_READS, POOL_TEMP_DATA_L_READS, POOL_INDEX_L_READS, POOL_TEMP_INDEX_L_READS, POOL_XDA_L_READS, POOL_TEMP_XDA_L_READS, POOL_DATA_P_READS, POOL_TEMP_DATA_P_READS,\n POOL_INDEX_P_READS, POOL_TEMP_INDEX_P_READS, POOL_XDA_P_READS, POOL_TEMP_XDA_P_READS,\n POOL_DATA_WRITES, POOL_INDEX_WRITES, POOL_XDA_WRITES,\n DIRECT_READS, DIRECT_WRITES,\n COMMIT_SQL_STMTS, ROLLBACK_SQL_STMTS, DYNAMIC_SQL_STMTS, STATIC_SQL_STMTS, FAILED_SQL_STMTS, SELECT_SQL_STMTS, UID_SQL_STMTS, DDL_SQL_STMTS,\n ROWS_DELETED, ROWS_INSERTED, ROWS_UPDATED, ROWS_SELECTED, ROWS_READ,\n LOG_READS, LOG_WRITES\n from sysibmadm.snapdb'
    # Gauges: stored as-is (point-in-time values).
    stats_list_realtime = [
        'APPLS_CUR_CONS', 'LOCKS_HELD', 'APPLS_IN_DB2', 'LOCKS_WAITING',
        'NUM_ASSOC_AGENTS', 'ACTIVE_SORTS'
    ]
    # Cumulative counters: converted to per-second rates via the previous sample.
    stats_list_delta = [
        'TOTAL_CONS', 'LOCK_WAITS', 'TOTAL_SORTS', 'SORT_OVERFLOWS',
        'POOL_DATA_L_READS', 'POOL_TEMP_DATA_L_READS', 'POOL_INDEX_L_READS',
        'POOL_TEMP_INDEX_L_READS', 'POOL_XDA_L_READS', 'POOL_TEMP_XDA_L_READS',
        'POOL_DATA_P_READS', 'POOL_TEMP_DATA_P_READS', 'POOL_INDEX_P_READS',
        'POOL_TEMP_INDEX_P_READS', 'POOL_XDA_P_READS', 'POOL_TEMP_XDA_P_READS',
        'POOL_DATA_WRITES', 'POOL_INDEX_WRITES', 'POOL_XDA_WRITES',
        'DIRECT_READS', 'DIRECT_WRITES', 'COMMIT_SQL_STMTS',
        'ROLLBACK_SQL_STMTS', 'DYNAMIC_SQL_STMTS', 'STATIC_SQL_STMTS',
        'FAILED_SQL_STMTS', 'SELECT_SQL_STMTS', 'UID_SQL_STMTS',
        'DDL_SQL_STMTS', 'ROWS_DELETED', 'ROWS_INSERTED', 'ROWS_UPDATED',
        'ROWS_SELECTED', 'ROWS_READ', 'LOG_READS', 'LOG_WRITES'
    ]
    date_current = get_10s_time()
    flag, json_data_current = run_sql(database, query)
    if not flag or not json_data_current:
        # Best-effort collector: report and bail out rather than raise.
        print(str(build_exception_from_java(json_data_current)))
        return
    json_data_current = json_data_current[0]
    key = str(database.id) + ':performance'
    date_key = str(database.id) + ':performance_date'
    # Fetch the previous sample before overwriting it; both entries expire
    # after MAX_INTERVAL so stale samples are never used for deltas.
    json_data_str_prev = redis.get(key)
    date_prev = redis.get(date_key)
    redis.setex(key, MAX_INTERVAL, json.dumps(json_data_current))
    redis.setex(date_key, MAX_INTERVAL, str(date_current))
    if json_data_str_prev:
        if date_prev:
            # Only compute rates when the previous sample is recent enough.
            if (date_current - to_date(date_prev)).total_seconds() < MAX_INTERVAL:
                json_data_prev = json.loads(json_data_str_prev)
                # NOTE: loop variable shadows the redis `key` above; harmless
                # here because the redis writes are already done.
                for key, value in json_data_current.items():
                    p = Performance()
                    p.name = key
                    p.created_at = date_current
                    p.database = database
                    if key in stats_list_realtime:
                        p.value = value
                    else:
                        if key in stats_list_delta:
                            # Per-second rate over the fixed INTERVAL; assumes
                            # the counter did not reset between samples.
                            p.value = (float(value) - float(
                                json_data_prev.get(key))) / INTERVAL
                    p.save()
def get_dbsummary(pk):
    """Run monreport.dbsummary on database *pk*, then archive and cache the text report."""
    database = Database.objects.get(pk=pk)
    query = 'call monreport.dbsummary(10)'
    flag, json_data = run_sql(database, query)
    created_at = datetime.now().replace(microsecond=0)
    if not flag:
        # Best-effort: just surface the failure payload.
        print(json_data)
        return
    report_text = '\n'.join(row.get('TEXT') for row in json_data)
    summary = DB2_Summary()
    summary.database = database
    summary.summary = report_text
    summary.created_at = created_at
    summary.save()
    cache_key = 'dbsummary:{}'.format(str(database.id))
    redis.set(cache_key, report_text)
def get_sqlserver_activity(databases):
    """Snapshot the currently-executing SQL Server requests of every database.

    Each active request becomes an MSSQL_ASH row; afterwards an
    Active_Session_Warn is evaluated per database using the number of
    active sessions sampled.
    """
    query = "\n SELECT /* sample_query */\n req.SESSION_ID,\n convert(varchar(25), req.START_TIME, 120) START_TIME,\n req.STATUS,\n req.COMMAND,\n (select name from master..sysdatabases where dbid = req.database_id) DB_NAME,\n ses.LOGIN_NAME,\n ses.HOST_NAME,\n ses.PROGRAM_NAME,\n req.BLOCKING_SESSION_ID,\n req.WAIT_TYPE,\n req.WAIT_TIME,\n req.WAIT_RESOURCE,\n req.TOTAL_ELAPSED_TIME,\n req.ROW_COUNT,\n sqltext.TEXT SQLTEXT,\n substring(sys.fn_sqlvarbasetostr(req.sql_handle),3,1000) SQL_HANDLE,\n con.CLIENT_NET_ADDRESS,\n case when req.wait_resource like '%SPID%' then SUBSTRING(wait_resource, 1, CHARINDEX(' ', wait_resource)-1) else '' end LINKED_IP,\n cast(case when req.wait_resource like '%SPID%' then SUBSTRING(wait_resource, CHARINDEX('=', wait_resource)+1, CHARINDEX(')', wait_resource)-CHARINDEX('=', wait_resource)-1) else '0' end as int) LINKED_SPID,\n DATEDIFF(SECOND, req.START_TIME, getdate()) TIME\n FROM sys.dm_exec_requests req\n inner join sys.dm_exec_sessions ses on req.session_id = ses.session_id\n inner join sys.dm_exec_connections con on ses.session_id = con.session_id\n CROSS APPLY sys.dm_exec_sql_text(sql_handle) AS sqltext\n where sqltext.TEXT not like '%sample_query%'"
    ash_date = get_10s_time()
    # Phase 1: collect activity from every reachable database.
    activity_by_db = {}
    db_by_id = {}
    for db in databases:
        ok, rows = run_sql(db, query)
        if not ok:
            print(str(build_exception_from_java(rows)))
            continue
        activity_by_db[str(db.id)] = rows
        db_by_id[str(db.id)] = db
    # Straight attribute <- result-column copies.
    column_map = {
        'session_id': 'SESSION_ID',
        'start_time': 'START_TIME',
        'command': 'COMMAND',
        'db_name': 'DB_NAME',
        'username': 'LOGIN_NAME',
        'machine': 'HOST_NAME',
        'program': 'PROGRAM_NAME',
        'b_blocker': 'BLOCKING_SESSION_ID',
        'wait_type': 'WAIT_TYPE',
        'wait_time': 'WAIT_TIME',
        'wait_resource': 'WAIT_RESOURCE',
        'total_elapsed_time': 'TOTAL_ELAPSED_TIME',
        'row_count': 'ROW_COUNT',
        'sql_text': 'SQLTEXT',
        'sql_id': 'SQL_HANDLE',
        'client_net_address': 'CLIENT_NET_ADDRESS',
        'linked_ip': 'LINKED_IP',
        'linked_spid': 'LINKED_SPID',
        'sql_elapsed_time': 'TIME',
    }
    # Phase 2: persist samples and evaluate the active-session warning.
    for db_id, rows in activity_by_db.items():
        for row in rows:
            ash = MSSQL_ASH()
            for attr, column in column_map.items():
                setattr(ash, attr, row.get(column))
            ash.status = row.get('STATUS').upper()
            ash.created_at = ash_date
            ash.database = db_by_id.get(db_id)
            ash.save()
        database = db_by_id.get(db_id)
        warn = WARN_ENUM.get(database.db_type).Active_Session_Warn
        p = Performance(inst_id=database.db_name, name=warn.name,
                        value=len(rows), created_at=ash_date)
        customized_warn_scanner(warn, p, database, False)
def collect_sql_text(database, schema):
    """Harvest SQL text for *schema* and store it as Audit_SQL_Text rows.

    Builds the COLLECT_SQL_TEXT query with schema and instance-id predicates,
    runs it, and persists one Audit_SQL_Text per returned statement.
    Raises the java-bridge exception if the query fails.
    """
    schema_name = COLLECT_SQL_TEXT.get('schema_name')
    instance_id_list = database.instance_id_list
    predicates = {
        'schema_pred': f'''{schema_name} in ('{schema}')''',
        'inst_id_pred': f'''inst_id in ({instance_id_list})''',
    }
    statement = COLLECT_SQL_TEXT.get('sql').format(**predicates)
    ok, rows = run_sql(database, statement)
    if not ok:
        raise build_exception_from_java(rows)
    for row in rows:
        record = Audit_SQL_Text()
        record.sql_id = row.get('SQL_ID')
        record.force_matching_signature = row.get('FORCE_MATCHING_SIGNATURE')
        record.sql_text = row.get('SQL_TEXT')
        record.save()
def get_database_schema_list(database):
    """Return the schema/user names of *database*, selected by its db_type.

    Oracle system accounts are filtered out in the query itself; DB2 returns
    catalog schemata and SQL Server returns database names. Returns an empty
    list when the query fails.
    """
    schema_query = {
        'oracle': "select\n username\n from\n dba_users\n where\n username not in ('MGMT_VIEW','MDDATA','MDSYS','SI_INFORMTN_SCHEMA','ORDPLUGINS','ORDSYS','OLAPSYS','SYSMAN','ANONYMOUS','XDB','CTXSYS','EXFSYS','WMSYS','ORACLE_OCM','DBSNMP','TSMSYS','DMSYS','DIP','OUTLN','SYSTEM','SYS','APPQOSSYS', 'FLOWS_FILES', 'JWT', 'ORDDATA', 'OWBSYS', 'OWBSYS_AUDIT', 'SCOTT', 'SPATIAL_CSW_ADMIN_USR', 'SPATIAL_WFS_ADMIN_USR', 'XS$NULL', 'YUNQU') and username not like 'APEX%'\n order by username",
        'db2': '\n select rtrim(schemaname) username from syscat.schemata order by schemaname',
        'sqlserver': '\n SELECT NAME [USERNAME] FROM master.dbo.sysdatabases',
    }
    ok, rows = run_sql(database, schema_query.get(database.db_type))
    if not ok:
        return []
    return [row['USERNAME'] for row in rows]
def create_snapshot(pk):
    """Force an AWR snapshot on database *pk* and return the newest snapshot row.

    Returns the first row of Max_Snapshot_Query on success, ``{}`` when no row
    comes back, and ``{'error_message': ...}`` on any failure (empty message
    when the database record does not exist).
    """
    query = 'begin sys.dbms_workload_repository.create_snapshot; end;'
    try:
        database = Database.objects.get(pk=pk)
        json_data = []
        ok, json_data = run_plsql(database, query)
        if not ok:
            raise build_exception_from_java(json_data)
        ok, json_data = run_sql(database, Max_Snapshot_Query)
        if not ok:
            raise build_exception_from_java(json_data)
        return json_data[0] if json_data else {}
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}