def get_ash_report(pk, instance_id, begin_time, end_time):
    """Render an ASH report (HTML) for one database instance and time window.

    Looks up the DBID, formats the instance-appropriate ASH report query with
    the window bounds, runs it, and joins the OUTPUT rows into one HTML string.
    Returns {'report_html': ...} on success, {'error_message': ...} on failure.
    """
    try:
        database = Database.objects.get(pk=pk)
        ok, dbid_rows = run_sql(database, DBID_Query)
        if not ok:
            raise build_exception_from_java(dbid_rows)
        key, inst_str = get_key_inst_str(database, instance_id)
        query_ash = ASH_Query.get(key)
        query_ash['report'] = query_ash.get('report').format(
            db_id=dbid_rows[0].get('DBID'),
            inst_str=inst_str,
            begin_time=timestamp_to_char(begin_time),
            end_time=timestamp_to_char(end_time),
        )
        ok, report_data = run_batch_sql(database, query_ash)
        if not ok:
            raise build_exception_from_java(report_data)
        rows = report_data.get('report')
        report_html = ''.join(r.get('OUTPUT') for r in rows if r.get('OUTPUT')) if rows else ''
        return {'report_html': report_html}
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
def sqlserver_analysis(audit_job):
    """Run a SQL Server audit job and return the assembled audit result.

    Builds one total query plus per-rule detail/problem queries from the job's
    strategy, executes them against the job's database/schema, and feeds the
    results to build_audit_result. On any failure the exception is logged and
    None is returned (mirrors oracle_analysis).
    """
    try:
        schema, max_rows, order_by_pred, time_span = (audit_job.schema, audit_job.max_rows, audit_job.order_by, audit_job.time_span)
        if time_span:
            # BUG FIX: original assigned begin_time=now and end_time=now-1h,
            # i.e. an inverted (empty) window. The rolling window is the
            # last hour: [now - 1h, now].
            now = datetime.now()
            begin_time, end_time = now - timedelta(hours=1), now
        else:
            begin_time, end_time = audit_job.snapshot_begin_time, audit_job.snapshot_end_time
        # NOTE(review): begin_time/end_time are never referenced below --
        # confirm whether they were meant to reach the query builders.
        rule_list = []
        database = audit_job.database
        strategy_dict = audit_job.strategy
        audit_rule_queryset = Audit_Rule.objects.filter(database=database).filter(enabled=True)
        for k, v in strategy_dict.items():
            rule_list += list(audit_rule_queryset.filter(audit_type=k).filter(target__in=v))
        logger.error('sql_audit_analysis begin build query')
        query_total_json = build_total_query(TotalTemplateJSON.get(database.db_type), database, schema)
        query_detail_json = {}
        query_problem_json = {}
        # NOTE(review): only rules whose audit_type is the empty string get
        # detail/problem queries here (oracle_analysis uses every rule) --
        # confirm this is intentional for sqlserver.
        object_rules_list = [rule for rule in rule_list if rule.audit_type == '']
        for rule in set(object_rules_list):
            query_detail_json[rule.name] = build_rule_query(DetailTemplateJson, database, rule, schema, max_rows, order_by_pred)
            query_problem_json[rule.name] = build_rule_query(ProblemTemplateJSON, database, rule, schema, max_rows, order_by_pred)
        logger.error('sql_audit_analysis begin run total')
        flag, total_result = run_batch_sql(database, query_total_json, schema)
        if not flag:
            logger.error(total_result)
            raise build_exception_from_java(total_result)
        logger.error('sql_audit_analysis begin run detail')
        flag, detail_result = run_batch_sql(database, query_detail_json, schema)
        if not flag:
            logger.error(detail_result)
            raise build_exception_from_java(detail_result)
        logger.error('sql_audit_analysis begin run problem')
        flag, problem_result = run_batch_sql(database, query_problem_json, schema)
        if not flag:
            logger.error(problem_result)
            raise build_exception_from_java(problem_result)
        logger.error('sql_audit_analysis begin build result')
        return build_audit_result(database, rule_list, total_result, problem_result, detail_result, audit_job)
    except Exception as e:
        logger.error(str(e))
def get_sql_text(database, sql_id):
    """Resolve the full SQL text for *sql_id*.

    Checks the locally persisted ASH rows first; on a miss, queries the
    database directly (oracle: gv$sql then DBA_HIST_SQLTEXT fallback;
    sqlserver / db2: their plan-cache views). Returns a pair
    (sql_text, db_name_or_None).
    """
    db_type = database.db_type
    model = ASH_DICT.get(db_type)
    cached = model.objects.filter(database=database).filter(sql_id=sql_id).filter(sql_text__isnull=False).order_by('-created_at').first()
    if cached:
        return (cached.sql_text, cached.db_name)
    if sql_id == 'null':
        # Placeholder id -- nothing to look up.
        return ('', None)
    if db_type == 'oracle':
        instance_id_list = database.instance_id_list
        realtime_query = f'''select sql_fulltext SQL_TEXT from gv$sql where inst_id in ({instance_id_list}) and sql_id= '{sql_id}' and rownum=1'''
        hist_query = f'''select SQL_TEXT from DBA_HIST_SQLTEXT where sql_id='{sql_id}' and rownum=1'''
        ok, rows = run_sql(database, realtime_query)
        if not ok:
            raise build_exception_from_java(rows)
        if not rows:
            # Not in the shared pool any more -- fall back to AWR history.
            ok, rows = run_sql(database, hist_query)
            if not ok:
                raise build_exception_from_java(rows)
        return (rows[0].get('SQL_TEXT') if rows else '', None)
    if db_type == 'sqlserver':
        query = f''' select top 1 TEXT as SQL_TEXT from sys.dm_exec_sql_text(cast('' as xml).value('xs:hexBinary("{sql_id}")', 'varbinary(max)'))'''
        ok, rows = run_sql(database, query)
        if not ok:
            raise build_exception_from_java(rows)
        return (rows[0].get('SQL_TEXT') if rows else database.db_name, None)
    if db_type == 'db2':
        query = f''' select stmt_text SQL_TEXT from TABLE(MON_GET_PKG_CACHE_STMT(null, null, null, -2)) cache where EXECUTABLE_ID = x'{sql_id}' fetch first 1 rows only '''
        ok, rows = run_sql(database, query)
        if not ok:
            raise build_exception_from_java(rows)
        return (rows[0].get('SQL_TEXT') if rows else database.db_name, None)
    return ('', None)
def get_table_rows(database):
    """Collect per-table row counts for *database* and bulk-insert Table_Rows.

    sqlserver requires a per-db crawl (sqlserver_rows_data); every other db
    type answers a single query. Best-effort: failures are logged, not raised.
    """
    try:
        rows_query = Rows_Query.get(database.db_type)
        if database.db_type != 'sqlserver':
            flag, rows_data = run_sql(database, rows_query)
            if not flag:
                raise build_exception_from_java(rows_data)
        else:
            rows_data = sqlserver_rows_data(database)
        created_at = datetime.now().replace(microsecond=0)
        table_rows_save_list = [
            Table_Rows(database=database, owner=r.get('OWNER'),
                       table_name=r.get('TABLE_NAME'), rows=r.get('ROWS'),
                       created_at=created_at)
            for r in rows_data
        ]
        Table_Rows.objects.bulk_create(table_rows_save_list)
    except Exception as err:
        # FIX: was print(err); use the module logger like the other collectors.
        logger.error(str(err))
def diskgroup_warn(database):
    """Scan V$ASM_DISKGROUP and fire offline-disk, state, and usage warnings.

    Returns the raw rows when the query fails (after printing the error),
    otherwise None.
    """
    query = '\n SELECT\n NAME,\n STATE,\n round(TOTAL_MB/1024) TOTAL_GB,\n round((TOTAL_MB-FREE_MB)/1024) USED_GB,\n round((TOTAL_MB-FREE_MB)/TOTAL_MB*100) USED_PCT,\n OFFLINE_DISKS\n FROM\n V$ASM_DISKGROUP'
    flag, json_data = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return json_data
    created_at = datetime.now().replace(microsecond=0)
    warn = WARN_ENUM.get(database.db_type).DiskGroup_Offline_Disks_Warn
    for x in json_data:
        options = {'name': x.get('NAME')}
        p = Performance(inst_id=database.db_name, name=warn.name, value=x.get('OFFLINE_DISKS'), created_at=created_at)
        customized_warn_scanner(warn, p, database, False, options)
    warn = WARN_ENUM.get(database.db_type).DiskGroup_Status_Warn
    for x in json_data:
        options = {'name': x.get('NAME')}
        p = Performance(inst_id=database.db_name, name=warn.name, value=x.get('STATE'), created_at=created_at)
        customized_warn_scanner(warn, p, database, False, options)
    warn = WARN_ENUM.get(database.db_type).DiskGroup_Used_Percent_Warn
    for x in json_data:
        options = {'name': x.get('NAME'), 'total': x.get('TOTAL_GB'), 'used': x.get('USED_GB'), 'used_pct': x.get('USED_PCT')}
        # BUG FIX: the used-percent warning previously carried value=STATE
        # (copy-paste from the status loop above); a threshold scan on usage
        # needs the numeric USED_PCT.
        p = Performance(inst_id=database.db_name, name=warn.name, value=x.get('USED_PCT'), created_at=created_at)
        customized_warn_scanner(warn, p, database, False, options)
def sqlserver_performance(database):
    """Sample sys.dm_os_performance_counters and persist Performance rows.

    Rate counters (name matching '/sec') are stored as deltas against the
    previous sample cached in redis, divided by INTERVAL; gauges are stored
    as-is. Rows are only written when a previous sample exists and is less
    than MAX_INTERVAL seconds old.
    """
    # Fixed list of SQL statistics / buffer / memory / access-method /
    # general-statistics / latch / database / lock counters to sample.
    query = "\nselect COUNTER_NAME,CNTR_VALUE from sys.dm_os_performance_counters where\n(object_name like '%sql statistics%' and counter_name = 'batch requests/sec') or\n(object_name like '%sql statistics%' and counter_name = 'sql compilations/sec') or\n(object_name like '%sql statistics%' and counter_name = 'sql re-compilations/sec') or\n(object_name like '%buffer manager%' and counter_name = 'lazy writes/sec') or\n(object_name like '%buffer manager%' and counter_name = 'page life expectancy') or\n(object_name like '%memory manager%' and counter_name = 'connection memory (kb)') or\n(object_name like '%memory manager%' and counter_name = 'memory grants pending') or\n(object_name like '%memory manager%' and counter_name = 'sql cache memory (kb)') or\n(object_name like '%memory manager%' and counter_name = 'target server memory (kb)') or\n(object_name like '%memory manager%' and counter_name = 'total server memory (kb)') or\n(object_name like '%access methods%' and counter_name = 'full scans/sec') or\n(object_name like '%access methods%' and counter_name = 'forwarded records/sec') or\n(object_name like '%access methods%' and counter_name = 'mixed page allocations/sec') or\n(object_name like '%access methods%' and counter_name = 'page splits/sec') or\n(object_name like '%access methods%' and counter_name = 'table lock escalations/sec') or\n(object_name like '%general statistics%' and counter_name = 'logins/sec') or\n(object_name like '%general statistics%' and counter_name = 'logouts/sec') or\n(object_name like '%general statistics%' and counter_name = 'user connections') or\n(object_name like '%general statistics%' and counter_name = 'processes blocked') or\n(object_name like '%latches%' and counter_name = 'latch waits/sec') or\n(object_name like '%latches%' and counter_name = 'average latch wait time (ms)') or\n(object_name like '%access methods%' and counter_name = 'workfiles created/sec') or\n(object_name like '%access methods%' and counter_name = 'worktables created/sec') or\n(object_name like '%general statistics%' and counter_name = 'active temp tables') or\n(object_name like '%general statistics%' and counter_name = 'temp tables creation rate') or\n(object_name like '%general statistics%' and counter_name = 'temp tables for destruction') or\n(object_name like '%databases%' and counter_name ='active transactions' and instance_name = '_Total') or\n(object_name like '%databases%' and counter_name ='Transactions/sec' and instance_name = '_Total') or\n(object_name like '%databases%' and counter_name ='log flushes/sec' and instance_name = '_Total') or\n(object_name like '%databases%' and counter_name ='cache hit ratio' and instance_name = '_Total') or\n(object_name like '%SQLServer:Locks%' and counter_name like '%Lock%' and instance_name = '_Total')"
    # Counters named '.../sec' are cumulative in SQL Server -> delta required.
    match_patern = re.compile('/sec', re.IGNORECASE)
    date_current = get_10s_time()
    flag, json_data_current = run_sql(database, query)
    if not flag:
        # Best-effort collector: report and bail without raising.
        print(str(build_exception_from_java(json_data_current)))
        return
    key = str(database.id) + ':performance'
    date_key = str(database.id) + ':performance_date'
    json_data_str_prev = redis.get(key)
    date_prev = redis.get(date_key)
    keys = ['COUNTER_NAME', 'CNTR_VALUE']
    # Cache the fresh sample before computing deltas against the old one.
    redis.set(key, json.dumps(json_data_current))
    redis.set(date_key, str(date_current))
    if json_data_str_prev and date_prev and (
            date_current - to_date(date_prev)).total_seconds() < MAX_INTERVAL:
        json_data_prev = json.loads(json_data_str_prev)
        # NOTE(review): rows are matched to the previous sample by list
        # position (json_data_prev[idx]); this assumes the counter set and
        # ordering are stable between samples -- confirm.
        for idx, obj in enumerate(json_data_current):
            name = obj.get(keys[0])
            value = obj.get(keys[1])
            p = Performance()
            p.name = name
            p.database = database
            p.created_at = date_current
            if re.search(match_patern, name):
                # Per-second rate from the cumulative counter delta; INTERVAL
                # is the nominal sampling period, not the measured elapsed time.
                p.value = round((float(value) - float(json_data_prev[idx].get(keys[1]))) / INTERVAL, 1)
            else:
                p.value = float(value)
            p.save()
def get_sqlmon(database):
    """Capture recently completed SQL Monitor reports into SQLMON rows.

    Pulls up to database.num_sqlmon_per_minute DONE statements refreshed in
    the last 10 minutes from gv$sql_monitor, renders each report with
    dbms_sqltune.report_sql_monitor, and persists one SQLMON row per entry.
    Returns the error payload when the query fails, else None.
    """
    query_sqlmon = (
        "\n select a.*,dbms_sqltune.report_sql_monitor(type=>'{}', sql_id=>a.sql_id, sql_exec_id=>a.sql_exec_id, report_level=>'ALL') SQLMON\n from (select\n KEY, STATUS,SQL_ID,round((LAST_REFRESH_TIME-SQL_EXEC_START)*24*3600) ELAPSED_TIME,\n round(ELAPSED_TIME/1e6) DB_TIME,round(CPU_TIME/1e6) DB_CPU,\n SQL_EXEC_ID,to_char(sql_exec_start,'YYYY-MM-DD HH24:MI:SS') SQL_EXEC_START,\n SQL_PLAN_HASH_VALUE,INST_ID, USERNAME,\n SQL_TEXT\n from Gv$sql_Monitor\n where --(LAST_REFRESH_TIME-SQL_EXEC_START)*24*3600>60\n sql_plan_hash_value >0 and\n status like 'DONE%'\n and LAST_REFRESH_TIME>=sysdate - 600/3600/24\n and LAST_REFRESH_TIME<=sysdate\n and sql_text is not null\n order by elapsed_time desc\n ) a where rownum<={} "
    ).format(database.sqlmon_format, database.num_sqlmon_per_minute)
    # NOTE: the original also built a gv$sql_plan_monitor query here but never
    # executed it; that unused local has been removed.
    flag, sqlmon_data = run_sql(database, query_sqlmon)
    if not flag:
        print(str(build_exception_from_java(sqlmon_data)))
        return sqlmon_data
    sqlmon_time = datetime.now().replace(microsecond=0)
    for x in sqlmon_data:
        m = SQLMON()
        m.inst_id = x.get('INST_ID')
        m.sql_id = x.get('SQL_ID')
        m.status = x.get('STATUS')
        m.username = x.get('USERNAME')
        m.elapsed_time = x.get('ELAPSED_TIME')
        m.db_time = x.get('DB_TIME')
        m.db_cpu = x.get('DB_CPU')
        m.sql_exec_id = x.get('SQL_EXEC_ID')
        m.sql_exec_start = x.get('SQL_EXEC_START')
        m.sql_plan_hash_value = x.get('SQL_PLAN_HASH_VALUE')
        m.sql_text = x.get('SQL_TEXT')
        m.sqlmon = x.get('SQLMON')
        m.database = database
        m.created_at = sqlmon_time
        m.save()
def get_space(database):
    """Snapshot tablespace usage: refresh Space_Detail and append Space rows.

    Also fires the tablespace-usage warning for db types that define one
    (mysql/sqlserver have none). Best-effort: prints and returns on failure.
    """
    query = Space_Query.get(database.db_type)
    flag, space_data = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(space_data)))
        return
    created_at = datetime.now().replace(microsecond=0)
    # One up-to-date detail row per database (return value intentionally
    # discarded -- the original bound it to an unused local).
    Space_Detail.objects.update_or_create(database=database, defaults={'detail': space_data, 'created_at': created_at})
    for x in space_data:
        space = Space()
        space.database = database
        space.name = x.get('TABLESPACE_NAME')
        space.total_mb = x.get('TOTAL_MB')
        space.free = x.get('FREE')
        space.used = x.get('USED')
        space.type = x.get('CONTENTS')
        space.used_pct = x.get('USED_PCT')
        space.created_at = created_at
        space.save()
        options = {'name': x.get('TABLESPACE_NAME'), 'total': x.get('TOTAL_MB'), 'used': x.get('USED'), 'used_pct': x.get('USED_PCT')}
        if database.db_type not in ('mysql', 'sqlserver'):
            warn = WARN_ENUM.get(database.db_type).Tablespace_Warn
            customized_warn_scanner(warn, space, database, False, options)
def sqlserver_rows_data(database):
    """Gather table row counts across every user database on a SQL Server.

    Enumerates the non-system databases, runs the db-type Rows_Query against
    each, and returns the concatenated result rows. Raises the translated
    Java exception on any query failure.
    """
    db_list_query = "\n SELECT NAME FROM master.dbo.sysdatabases where name not in ('master', 'tempdb', 'model', 'msdb')"
    ok, db_rows = run_sql(database, db_list_query)
    if not ok:
        raise build_exception_from_java(db_rows)
    rows_query = Rows_Query.get(database.db_type)
    collected = []
    for db_name in (row.get('NAME') for row in db_rows):
        ok, rows = run_sql(database, rows_query, db_name)
        if not ok:
            raise build_exception_from_java(rows)
        collected.extend(rows)
    return collected
def get_mysql_summary(pk):
    """Build the MySQL summary payload: space totals plus selected variables.

    Reads cached space totals (collecting them first if absent), then runs the
    batch summary query and splits the 'database' rows into general-database
    and memory-related variable groups.
    """
    conn = Database.objects.get(pk=pk)
    db_type = conn.db_type
    space_query = reprocess_query(Space_Total_Query, {'pk': pk})
    total_space = execute_return_json(space_query)
    if not total_space:
        # Nothing cached yet -- collect space once, then re-read.
        get_space(conn)
        total_space = execute_return_json(space_query)
    ok, batch = run_batch_sql(conn, SummaryQuery.get(db_type))
    if not ok:
        raise build_exception_from_java(batch)

    def _pick(names):
        # Project the SHOW VARIABLES-style rows down to the requested names.
        return {
            row.get('Variable_name'): row.get('Value')
            for row in batch.get('database')
            if row.get('Variable_name') in names
        }

    database_json = _pick(('version', 'version_comment', 'version_compile_machine', 'version_compile_os', 'default_storage_engine', 'general_log', 'log_bin', 'slow_query_log'))
    memory_json = _pick(('innodb_buffer_pool_size', 'join_buffer_size', 'key_buffer_size', 'query_cache_size', 'sort_buffer_size', 'thread_cache_size'))
    return {'space': total_space, 'database': database_json, 'memory': memory_json}
def oracle_analysis(audit_job):
    """Run an Oracle audit job and return the assembled audit result.

    Selects the enabled, non-static rules matching the job strategy, builds
    the total and per-rule detail/problem queries, executes each batch, and
    hands the results to build_audit_result. Any failure is logged and the
    function returns None.
    """
    try:
        schema = audit_job.schema
        max_rows = audit_job.max_rows
        order_by_pred = audit_job.order_by
        database = audit_job.database
        candidates = Audit_Rule.objects.filter(database=database).filter(enabled=True).filter(is_static_rule=False)
        rule_list = []
        for audit_type, targets in audit_job.strategy.items():
            rule_list.extend(candidates.filter(audit_type=audit_type).filter(target__in=targets))
        logger.error('sql_audit_analysis begin build query')
        query_total_json = build_total_query(TotalTemplateJSON.get(database.db_type), database, schema)
        query_detail_json = {}
        query_problem_json = {}
        for rule in set(rule_list):
            query_detail_json[rule.name] = build_rule_query(DetailTemplateJson, database, rule, schema, max_rows, order_by_pred)
            query_problem_json[rule.name] = build_rule_query(ProblemTemplateJSON, database, rule, schema, max_rows, order_by_pred)

        def _run_step(step_msg, queries):
            # Log the step marker, execute the batch, and raise on failure.
            logger.error(step_msg)
            ok, payload = run_batch_sql(database, queries)
            if not ok:
                logger.error(payload)
                raise build_exception_from_java(payload)
            return payload

        total_result = _run_step('sql_audit_analysis begin run total', query_total_json)
        detail_result = _run_step('sql_audit_analysis begin run detail', query_detail_json)
        problem_result = _run_step('sql_audit_analysis begin run problem', query_problem_json)
        logger.error('sql_audit_analysis begin build result')
        return build_audit_result(database, rule_list, total_result, problem_result, detail_result, audit_job)
    except Exception as e:
        logger.error(str(e))
def get_object_type(database, owner, object_name, options, db_name=None):
    """Look up the OBJECT_TYPE of a database object.

    Formats the db-type-specific Object_Type_Query with *options* and runs it.
    Returns the first row's OBJECT_TYPE, or the (empty) result set when
    nothing matched. Raises the translated Java exception on query failure.
    """
    type_query = Object_Type_Query.get(database.db_type).format(**options)
    ok, rows = run_sql(database, type_query, db_name)
    if not ok:
        raise build_exception_from_java(rows)
    return rows[0].get('OBJECT_TYPE') if rows else rows
def create_snapshot(pk):
    """Force an AWR snapshot on database *pk* and return the newest snapshot row.

    Returns {} when no snapshot row comes back, and {'error_message': ...}
    on lookup/query failure.
    """
    plsql = 'begin sys.dbms_workload_repository.create_snapshot; end;'
    try:
        database = Database.objects.get(pk=pk)
        ok, payload = run_plsql(database, plsql)
        if not ok:
            raise build_exception_from_java(payload)
        ok, payload = run_sql(database, Max_Snapshot_Query)
        if not ok:
            raise build_exception_from_java(payload)
        return payload[0] if payload else {}
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
def oracle_activity(database):
    """Sample the last ~1s of gv$ACTIVE_SESSION_HISTORY into Oracle_ASH rows.

    Chooses a version-specific query (10g lacks sql_exec_start / plan line
    columns, so they are stubbed), persists one Oracle_ASH row per sample,
    and finally fires the active-session-count warning.
    """
    # NOTE(review): if database.version is neither '10' nor >= '11' (string
    # comparison), `query` is never assigned and run_sql below raises
    # NameError -- confirm version values are constrained upstream.
    if database.version == '10':
        query = "\n select /*+ leading(b a)*/\n a.inst_id,\n SESSION_ID sid,\n SESSION_SERIAL# serial,\n SESSION_ID || ',' || SESSION_SERIAL# || '@'|| a.inst_id SESSION_ID,\n (select username from dba_users u where u.user_id = a.user_id) username,\n '' machine,\n program,\n --status,\n case SQL_OPCODE\n when 1 then 'CREATE TABLE'\n when 2 then 'INSERT'\n when 3 then 'SELECT'\n when 6 then 'UPDATE'\n when 7 then 'DELETE'\n when 9 then 'CREATE INDEX'\n when 11 then 'ALTER INDEX'\n when 15 then 'ALTER INDEX' else 'Others' end command,\n SQL_ID,\n SQL_PLAN_HASH_VALUE,\n nvl(event, 'ON CPU') event,\n p1,\n p2,\n p3,\n nvl(wait_class, 'ON CPU') wait_class ,\n module,\n action,\n (select name from V$ACTIVE_SERVICES s where s.NAME_HASH = a.SERVICE_HASH) service_name,\n '' plsql_object_name,\n '' plsql_entry_object_name,\n BLOCKING_SESSION,\n BLOCKING_SESSION_SERIAL# BLOCKING_SESSION_SERIAL,\n null SQL_PLAN_LINE_ID,\n '' SQL_PLAN_OPERATION,\n SESSION_TYPE,\n (select SQL_TEXT from v$sql b where b.sql_id = a.sql_id and rownum =1) SQL_TEXT\n from gv$ACTIVE_SESSION_HISTORY a\n where a.SAMPLE_TIME between systimestamp - numtodsinterval(2,'SECOND') and systimestamp - numtodsinterval(1,'SECOND')\n and nvl(a.wait_class,'ON CPU') <> 'Idle'"
    else:
        if database.version >= '11':
            # 11g+ adds sql_exec_start-based elapsed time and plan line info.
            query = "\n select /*+ leading(b a)*/\n a.inst_id,\n SESSION_ID sid,\n SESSION_SERIAL# serial,\n SESSION_ID || ',' || SESSION_SERIAL# || '@'|| a.inst_id SESSION_ID,\n round((cast(sample_time as date)-a.sql_exec_start)*24*3600) SQL_ELAPSED_TIME,\n (select username from dba_users u where u.user_id = a.user_id) username,\n machine,\n program,\n --status,\n case SQL_OPCODE\n when 1 then 'CREATE TABLE'\n when 2 then 'INSERT'\n when 3 then 'SELECT'\n when 6 then 'UPDATE'\n when 7 then 'DELETE'\n when 9 then 'CREATE INDEX'\n when 11 then 'ALTER INDEX'\n when 15 then 'ALTER INDEX' else 'Others' end command,\n SQL_ID,\n SQL_PLAN_HASH_VALUE,\n nvl(event, 'ON CPU') event,\n p1,\n p2,\n p3,\n nvl(wait_class, 'ON CPU') wait_class,\n module,\n action,\n (select name from V$ACTIVE_SERVICES s where s.NAME_HASH = a.SERVICE_HASH) SERVER_NAME ,\n -- (select object_name from dba_objects s where s.object_id = a.PLSQL_OBJECT_ID) plsql_object_name,\n -- (select object_name from dba_objects s where s.object_id = a.PLSQL_ENTRY_OBJECT_ID) plsql_entry_object_name,\n '' plsql_object_name,\n '' plsql_entry_object_name,\n BLOCKING_SESSION,\n BLOCKING_SESSION_SERIAL# BLOCKING_SESSION_SERIAL,\n SQL_PLAN_LINE_ID,\n SQL_PLAN_OPERATION || ' ' || SQL_PLAN_OPTIONS SQL_PLAN_OPERATION,\n SESSION_TYPE,\n (select sql_fulltext from v$sql b where b.sql_id = a.sql_id and rownum =1) SQL_TEXT\n from gv$ACTIVE_SESSION_HISTORY a\n where a.SAMPLE_TIME between systimestamp - numtodsinterval(2,'SECOND') and systimestamp - numtodsinterval(1,'SECOND')\n and nvl(a.wait_class,'ON CPU') <> 'Idle'\n "
    ash_date = get_10s_time()
    flag, json_data = run_sql(database, query)
    if not flag:
        # Best-effort sampler: report and bail without raising.
        print(str(build_exception_from_java(json_data)))
        return
    for x in json_data:
        ash = Oracle_ASH()
        ash.inst_id = x.get('INST_ID')
        ash.sid = x.get('SID')
        ash.serial = x.get('SERIAL')
        ash.username = x.get('USERNAME')
        # db_name mirrors USERNAME here (no db name column in the query).
        ash.db_name = x.get('USERNAME')
        ash.machine = x.get('MACHINE')
        ash.program = x.get('PROGRAM')
        # NOTE(review): STATUS and SQL_HASH_VALUE are commented out of /
        # absent from both queries, so these two fields store None.
        ash.status = x.get('STATUS')
        ash.command = x.get('COMMAND')
        ash.sql_hash_value = x.get('SQL_HASH_VALUE')
        ash.sql_id = x.get('SQL_ID')
        ash.sql_text = x.get('SQL_TEXT')
        ash.sql_plan_hash_value = x.get('SQL_PLAN_HASH_VALUE')
        ash.event = x.get('EVENT')
        ash.p1 = x.get('P1')
        ash.p2 = x.get('P2')
        ash.p3 = x.get('P3')
        ash.wait_class = x.get('WAIT_CLASS')
        ash.module = x.get('MODULE')
        ash.action = x.get('ACTION')
        # NOTE(review): the 11g+ query aliases the service column SERVER_NAME,
        # so SERVICE_NAME is only populated on the 10g path -- confirm.
        ash.service_name = x.get('SERVICE_NAME')
        ash.plsql_object_name = x.get('PLSQL_OBJECT_NAME')
        ash.plsql_entry_object_name = x.get('PLSQL_ENTRY_OBJECT_NAME')
        ash.blocking_session = x.get('BLOCKING_SESSION')
        ash.blocking_session_serial = x.get('BLOCKING_SESSION_SERIAL')
        ash.sql_plan_line_id = x.get('SQL_PLAN_LINE_ID')
        ash.sql_plan_operation = x.get('SQL_PLAN_OPERATION')
        ash.session_type = x.get('SESSION_TYPE')
        ash.session_id = x.get('SESSION_ID')
        ash.sql_elapsed_time = x.get('SQL_ELAPSED_TIME')
        ash.created_at = ash_date
        ash.database = database
        try:
            # Per-row save so one bad sample doesn't drop the whole batch.
            ash.save()
        except Exception as e:
            logger.error(str(e))
    # One active-session-count warning per sample batch.
    warn = WARN_ENUM.get(database.db_type).Active_Session_Warn
    p = Performance(inst_id=database.db_name, name=warn.name, value=len(json_data), created_at=ash_date)
    customized_warn_scanner(warn, p, database, False)
def accept_sql_profile(pk, action):
    """Execute the caller-supplied accept-profile PL/SQL block on database *pk*.

    Returns {'OK': True} on success, {'error_message': ...} otherwise.
    """
    try:
        target = Database.objects.get(pk=pk)
        ok, payload = run_plsql(target, action)
        if not ok:
            raise build_exception_from_java(payload)
        return {'OK': True}
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
def get_mysql_session_detail(database, session_id):
    """Fetch one MySQL session's processlist row into the default detail format.

    The first matching row (if any) is stored under the '' key of the
    payload's 'detail' section. Raises the translated Java exception on
    query failure.
    """
    payload = get_default_detail_format()
    session_query = f'''SELECT * FROM information_schema.processlist WHERE id = {session_id}'''
    ok, rows = run_sql(database, session_query)
    if not ok:
        raise build_exception_from_java(rows)
    if rows:
        payload['detail'][''] = rows[0]
    return payload
def all_sessions(pk):
    """List current sessions for database *pk*, using a per-db-type query.

    Returns the raw result rows; raises the translated Java exception on
    query failure, and ObjectDoesNotExist propagates for an unknown pk.
    """
    # One session-listing query per supported db type; keys must match
    # Database.db_type values.
    query = {'oracle': "\n select\n s.sid || ',' || s.serial# || '@' || s.inst_id session_id,\n s.username,\n s.status,\n s.sql_id,\n case when s.state = 'WAITING' then s.event else 'ON CPU' end event,\n machine,\n s.program,\n to_char(s.logon_time,'YYYY-MON-DD HH24:MI') logon_time,\n round(Value / 1024 / 1024,1) PGA_MB\n from\n gv$session s, V$sesstat St, V$statname Sn\n Where St.Sid = s.Sid\n And St.Statistic# = Sn.Statistic#\n And Sn.Name Like 'session pga memory'", 'mysql': '\n SELECT * FROM\n information_schema.processlist', 'db2': 'select agent_id, db_name, appl_name, authid, appl_id,\n appl_status, client_nname MACHINE\n FROM SYSIBMADM.APPLICATIONS', 'sqlserver': '\n SELECT\n ses.SESSION_ID,\n (select name from master..sysdatabases where dbid = req.database_id) DB_NAME,\n ses.LOGIN_NAME,\n CONVERT(VARCHAR(24), ses.LOGIN_TIME, 120) LOGON_TIME,\n ses.HOST_NAME,\n ses.PROGRAM_NAME,\n --application\n ses.status STATUS,\n --current request\n req.STATUS REQ_STATUS,\n CONVERT(VARCHAR(24), req.start_time, 120) START_TIME,\n req.ROW_COUNT REQ_ROW_COUNT,\n con.CLIENT_NET_ADDRESS,\n substring(sys.fn_sqlvarbasetostr(req.sql_handle),3,1000) SQL_ID\n FROM sys.dm_exec_sessions ses\n inner join sys.dm_exec_connections con on ses.session_id = con.session_id\n left join sys.dm_exec_requests req on req.session_id = ses.session_id\n outer APPLY sys.dm_exec_sql_text(sql_handle) AS sqltext'}
    database = Database.objects.get(pk=pk)
    detail_query = query.get(database.db_type)
    flag, json_data = run_sql(database, detail_query)
    if not flag:
        raise build_exception_from_java(json_data)
    return json_data
def get_oracle_session_detail(database, session_id):
    """Return grouped session details plus open-cursor rows for an Oracle session.

    *session_id* is the 'sid,serial@inst_id' string built elsewhere in this
    module; when it does not match that shape the lookup falls back to
    (sid=1, serial=2, inst_id=3). Detail fields are grouped into labelled
    (Chinese) sections for the UI. Raises the translated Java exception on
    query failure.
    """
    detail_format = get_default_detail_format()
    import re
    prog = re.compile('([0-9]+),([0-9]+)@([0-9]+)')
    m = prog.search(session_id)
    sid, serial, inst_id = (1, 2, 3)
    if m:
        sid, serial, inst_id = m.group(1), m.group(2), m.group(3)
    detail_format['instance_id'] = inst_id
    options = {'inst_id': inst_id, 'sid': sid, 'serial': serial}
    detail_query = Detai_Query.format(**options)
    cursor_query = Old_Cursor_Query.format(**options)
    query = {'detail': detail_query, 'cursor': cursor_query}
    flag, json_data = run_batch_sql(database, query)
    if not flag:
        raise build_exception_from_java(json_data)
    detail_data = json_data.get('detail')
    cursor_data = json_data.get('cursor')
    if detail_data is None:  # FIX: was `== None`; identity test is the idiom
        # Retry with the variant that skips the IP lookup.
        detail_query = Detai_Query_Without_IP.format(**options)
        flag, json_data = run_sql(database, detail_query)
        if not flag:
            raise build_exception_from_java(json_data)
        detail_data = json_data
    if detail_data:
        detail_data = detail_data[0]
        # Section labels are Chinese UI strings: connection / client /
        # application / wait / blocking-session / transaction info.
        detail_info = {
            u'\u8fde\u63a5\u4fe1\u606f': {x: detail_data[x] for x in ('SID', 'SERIAL#', 'STATUS', 'USERNAME', 'SPID', 'LOGON_TIME', 'SERVER') if x in detail_data},
            u'\u5ba2\u6237\u7aef\u4fe1\u606f': {x: detail_data[x] for x in ('OSUSER', 'PROCESS', 'MACHINE', 'IP') if x in detail_data},
            u'\u5e94\u7528\u4fe1\u606f': {x: detail_data[x] for x in ('SQL_ID', 'PREV_SQL_ID', 'LAST_CALL_ET', 'PROGRAM', 'MODULE', 'ACTION', 'SERVICE_NAME') if x in detail_data},
            u'\u7b49\u5f85\u4fe1\u606f': {x: detail_data[x] for x in ('EVENT', 'WAIT_CLASS', 'P1', 'P2', 'P3') if x in detail_data},
            u'\u963b\u585e\u4f1a\u8bdd': {x: detail_data[x] for x in ('BLOCKING_INSTANCE', 'BLOCKING_SESSION') if x in detail_data},
            u'\u4e8b\u52a1\u4fe1\u606f': {x: detail_data[x] for x in ('XIDUSN', 'XIDSLOT', 'XIDSQN', 'TRX_STARTED', 'USED_UBLK', 'USED_UREC') if x in detail_data},
        }
        detail_format['detail'] = detail_info
    detail_format['cursor'] = cursor_data
    return detail_format
def execute_sqltuning_task(pk, sql_id, timeout):
    """Create and run a SQL Tuning Advisor task for *sql_id*, return its report.

    Submits the tuning task (named sql_id + timestamp), fetches the rendered
    report plus the OPT_ESTIMATE benefit hints, and extracts any
    `accept_sql_profile` statements from the report, rewrapping each as an
    anonymous `begin ... end;` block. Returns a dict with 'report', 'action'
    and 'accept_sql_profile' keys, or {'error_message': ...} on failure.
    """
    try:
        database = Database.objects.get(pk=pk)
        import re
        dt_postfix = datetime.now().strftime('%Y-%m-%d_%H:%M')
        taskname = sql_id + '_' + dt_postfix
        # NOTE(review): the inner execute_tuning_task('%s') looks like a
        # leftover %-format placeholder; its failure is swallowed by the
        # EXCEPTION handler, so it is effectively dead -- confirm intent.
        query_submit_job = f''' declare a varchar2(100); begin BEGIN dbms_sqltune.execute_tuning_task('%s'); EXCEPTION when others then null; end; a := dbms_sqltune.create_tuning_task( task_name=>'{taskname}', description=>'{taskname}', scope=>dbms_sqltune.scope_comprehensive, time_limit=>{timeout}, sql_id=>'{sql_id}' ); dbms_sqltune.execute_tuning_task('{taskname}'); end;'''
        # BUG FIX: the regexp_replace backreference was written '\1', which
        # Python reads as the octal escape chr(1); '\\1' sends a literal \1.
        query_report = {'report': f'''select dbms_sqltune.report_tuning_task('{taskname}') report FROM dual''', 'benefit': f''' select hint, benefit from ( select case when attr5 like 'OPT_ESTIMATE%' then cast(attr5 as varchar2(4000)) when attr1 like 'OPT_ESTIMATE%' then attr1 end hint,benefit from dba_advisor_recommendations t join dba_advisor_rationale r using (task_id,rec_id) where t.task_name = '{taskname}' and t.type='SQL PROFILE' --and r.message='This attribute adjusts optimizer estimates.' ) where hint is not null order by to_number(regexp_replace(hint,'^.*=([0-9.]+)[^0-9]*$','\\1'))'''}
        flag, result = run_plsql(database, query_submit_job)
        if not flag:
            raise build_exception_from_java(result)
        flag, sqltune_data = run_batch_sql(database, query_report)
        if not flag:
            # BUG FIX: this flag was previously unchecked, so a failed report
            # query crashed below on the error payload instead of raising.
            raise build_exception_from_java(sqltune_data)
        # Renamed from `accept_sql_profile`, which shadowed the module-level
        # function of that name.
        can_accept_profile = False
        sql_list = None
        report_data = sqltune_data.get('report')
        sqltune_report = report_data[0].get('REPORT') if report_data else ''
        if re.search('accept_sql_profile', sqltune_report, re.IGNORECASE):
            sql_list = re.findall('execute dbms_sqltune.accept_sql_profile[^;]+;', sqltune_report, re.IGNORECASE)
            for idx, val in enumerate(sql_list):
                # Turn the SQL*Plus 'execute ...' one-liner into a PL/SQL block.
                sql_list[idx] = re.sub('$', '\n end;', re.sub('execute', 'begin \n', val))
            can_accept_profile = True
        sqltune_result = {'report': report_data, 'action': sql_list, 'accept_sql_profile': can_accept_profile}
        return sqltune_result
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
def job_failure_warn(database):
    """Warn on failing dba_jobs entries and recent non-SUCCEEDED scheduler runs.

    Returns the raw rows when the query itself fails (after printing the
    error), otherwise None.
    """
    # Legacy dba_jobs failures, unioned with the last hour of
    # dba_scheduler_job_log runs that did not succeed.
    query = "\n select SCHEMA_USER OWNER, job || ' '|| what JOB_NAME, failures from dba_jobs where failures > 0\nunion all\nselect OWNER, JOB_NAME, count(*)\nFROM dba_scheduler_job_log\nwhere\nlog_date > sysdate - 1/24 and\nSTATUS != 'SUCCEEDED'\ngroup by OWNER, job_name"
    flag, json_data = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return json_data
    created_at = datetime.now().replace(microsecond=0)
    warn = WARN_ENUM.get(database.db_type).Job_Warn
    for x in json_data:
        options = {'name': x.get('JOB_NAME'), 'schema': x.get('OWNER')}
        # NOTE(review): the union's scheduler branch selects count(*) with no
        # FAILURES alias, so x.get('FAILURES') is presumably None for those
        # rows -- confirm against run_sql's column naming.
        p = Performance(inst_id=database.db_name, name=warn.name, value=x.get('FAILURES'), created_at=created_at)
        customized_warn_scanner(warn, p, database, True, options)
def get_backup(pk):
    """Collect RMAN backup state for database *pk*.

    Runs three queries as one batch: persistent RMAN configuration, the last
    7 days of backup job history (with piece counts per backup kind), and
    currently running RMAN long operations. Returns the batch result dict, or
    {'error_message': ...} on failure.
    """
    # History window in days; interpolated into the f-string below.
    days = 7
    query = {
        'config': 'select * from v$rman_configuration',
        # Per-job history joined with piece counts: CF=controlfile, DF=full
        # datafile, I0/I1=incremental level 0/1, L=archivelog pieces.
        'history': f'''select j.session_recid, --j.session_stamp, to_char(j.start_time, 'yyyy-mm-dd hh24:mi:ss') start_time, to_char(j.end_time, 'yyyy-mm-dd hh24:mi:ss') end_time, round((j.output_bytes/1024/1024),2) output_mbytes, j.status, j.input_type, decode(to_char(j.start_time, 'd'), 1, 'Sunday', 2, 'Monday', 3, 'Tuesday', 4, 'Wednesday', 5, 'Thursday', 6, 'Friday', 7, 'Saturday') WEEK, round(j.elapsed_seconds,-1) ELAPSED_TIME_SEC, j.TIME_TAKEN_DISPLAY, x.cf, x.df, x.i0, x.i1, x.l, ro.inst_id output_instance,x.device_type from V$RMAN_BACKUP_JOB_DETAILS j left outer join (select d.session_recid, d.session_stamp, sum(case when d.controlfile_included = 'YES' then d.pieces else 0 end) CF, sum(case when d.controlfile_included = 'NO' and d.backup_type||d.incremental_level = 'D' then d.pieces else 0 end) DF, sum(case when d.backup_type||d.incremental_level = 'D0' then d.pieces else 0 end) I0, sum(case when d.backup_type||d.incremental_level = 'I1' then d.pieces else 0 end) I1, sum(case when d.backup_type = 'L' then d.pieces else 0 end) L,d.device_type from V$BACKUP_SET_DETAILS d join V$BACKUP_SET s on s.set_stamp = d.set_stamp and s.set_count = d.set_count where s.input_file_scan_only = 'NO' group by d.session_recid, d.session_stamp,d.device_type) x on x.session_recid = j.session_recid and x.session_stamp = j.session_stamp left outer join (select o.session_recid, o.session_stamp, min(inst_id) inst_id from GV$RMAN_OUTPUT o group by o.session_recid, o.session_stamp) ro on ro.session_recid = j.session_recid and ro.session_stamp = j.session_stamp where j.start_time > trunc(sysdate)-{days} order by j.start_time''',
        # In-flight RMAN work from gv$session_longops (excludes aggregates).
        'long': 'select sid || \',\'|| serial# || \'@\' || inst_id as SESSION_ID,to_char(start_time, \'yyyy-mm-dd hh24:mi:ss\') start_time,ELAPSED_SECONDS,sofar,totalwork,\nopname, round(sofar/totalwork*100,-1) "PCT"\n from gv$session_longops\n where opname like \'RMAN:%\'\n and opname not like \'RMAN: aggregate%\'\n and totalwork!=0'
    }
    try:
        database = Database.objects.get(pk=pk)
        flag, json_data = run_batch_sql(database, query)
        if not flag:
            raise build_exception_from_java(json_data)
        else:
            return json_data
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
def plan_change_warn(database):
    """Warn on cached SQL whose multiple plans show divergent per-execution time.

    DIFF is the ratio of worst to best average elapsed time per execution
    across plans for a sql_id with more than one distinct plan_hash_value.
    Returns the raw rows when the query fails, else None.
    """
    query = '\nselect sql_id,\n round(max(elapsed_time/decode(executions,0,1,executions))/min(elapsed_time/decode(executions,0,1,executions))) DIFF,\n min(inst_id) INST_ID\nfrom\n gv$sql\nwhere elapsed_time > 0\ngroup by sql_id\nhaving count(distinct plan_hash_value) > 1'
    flag, json_data = run_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return json_data
    created_at = datetime.now().replace(microsecond=0)
    warn = WARN_ENUM.get(database.db_type).Plan_Change_Warn
    for x in json_data:
        options = {'sql_id': x.get('SQL_ID')}
        p = Performance(inst_id=x.get('INST_ID'), name=warn.name, value=x.get('DIFF'), created_at=created_at)
        customized_warn_scanner(warn, p, database, True, options)
def object_change_warn(database): query = "\nselect object_name,owner, to_char(last_ddl_time, 'yyyy-mm-dd hh24:mi:ss') last_ddl_time\nfrom dba_objects\nwhere last_ddl_time > sysdate - 1/24\nand owner not in ('SCOTT','MGMT_VIEW','MDDATA','MDSYS','SI_INFORMTN_SCHEMA','ORDPLUGINS','ORDSYS','OLAPSYS','SYSMAN','ANONYMOUS','XDB','CTXSYS','EXFSYS','WMSYS','ORACLE_OCM','DBSNMP','TSMSYS','DMSYS','DIP','OUTLN','SYSTEM','SYS') " flag, json_data = run_sql(database, query) if not flag: print(str(build_exception_from_java(json_data))) return json_data created_at = datetime.now().replace(microsecond=0) warn = WARN_ENUM.get(database.db_type).DB_Object_Change_Warn for x in json_data: options = {'schema':x.get('OWNER'), 'object_name':x.get('OBJECT_NAME'), 'last_ddl_time':x.get('LAST_DDL_TIME')} p = Performance(inst_id=database.db_name, name=warn.name, value=1, created_at=created_at) customized_warn_scanner(warn, p, database, True, options)
def get_snapshot(pk, snapshot_limit=1000): try: database = Database.objects.get(pk=pk) json_data = [] query = Snapshot_Query.format(snapshot_limit) flag, json_data = run_sql(database, query) if not flag: raise build_exception_from_java(json_data) return json_data except ObjectDoesNotExist: return {'error_message': ''} except Exception as err: return {'error_message': str(err)}
def analysis_from_post(request): database_id = request.data.get('database_id') schema = request.data.get('schema', None) max_rows = request.data.get('max_rows', None) order_by_pred = request.data.get('order_by_pred', '') audit_result = {} try: database = Database.objects.get(pk=database_id) rule_list = ((Audit_Rule.objects.filter(database=database)).filter( enabled=True)).filter(is_static_rule=False) query_total_json = build_total_query(TotalTemplateJSON, database, schema) query_detail_json = {} query_problem_json = {} for rule in rule_list: query_detail_json[rule.name] = build_rule_query( DetailTemplateJson, database, rule, schema, max_rows, order_by_pred) query_problem_json[rule.name] = build_rule_query( ProblemTemplateJSON, database, rule, schema, max_rows, order_by_pred) flag, detail_result = run_batch_sql(database, query_detail_json) if not flag: raise build_exception_from_java(detail_result) flag, problem_result = run_batch_sql(database, query_problem_json) if not flag: raise build_exception_from_java(problem_result) flag, total_result = run_batch_sql(database, query_total_json) if not flag: raise build_exception_from_java(total_result) audit_result = build_audit_result(database, rule_list, total_result, problem_result, detail_result, None) collect_sql_text(database, schema) return Response(audit_result, status=status.HTTP_200_OK) except ObjectDoesNotExist: return Response({'error_message': ''}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def oracle_standby_warn(database): query = "\nSELECT a.thread#, b.last_seq, a.applied_seq, a. last_app_timestamp, b.last_seq-a.applied_seq ARC_DIFF, dest_name\nFROM\n (SELECT thread#, dest_name, MAX(sequence#) applied_seq, MAX(next_time) last_app_timestamp\n FROM gv$archived_log log,\n v$ARCHIVE_DEST dest WHERE log.applied = 'YES' and dest.dest_name is not null and log.dest_id = dest.dest_id GROUP BY dest.dest_name, thread#) a,\n (SELECT thread#, MAX (sequence#) last_seq FROM gv$archived_log GROUP BY thread#) b\nWHERE a.thread# = b.thread#" flag, json_data = run_sql(database, query) if not flag: print(str(build_exception_from_java(json_data))) return json_data created_at = datetime.now().replace(microsecond=0) warn = WARN_ENUM.get(database.db_type).Standby_Gap_Warn for x in json_data: options = {'name':x.get('DEST_NAME'), 'applied_seq':x.get('APPLIED_SEQ'), 'max_seq':x.get('LAST_SEQ'), 'thread':x.get('THREAD#')} p = Performance(inst_id=database.db_name, name=warn.name, value=x.get('ARC_DIFF'), created_at=created_at) customized_warn_scanner(warn, p, database, False, options)
def get_db2_session_detail(database, session_id): detail_format = get_default_detail_format() detai_query = f'''select agent_id, db_name, appl_name, authid, appl_id, appl_status,client_prdid, client_pid, client_platform,client_protocol, client_nname FROM SYSIBMADM.APPLICATIONS WHERE agent_id = {session_id}''' flag, json_data = run_sql(database, detai_query) if not flag: raise build_exception_from_java(json_data) if json_data: detail_data = json_data[0] detail_info = {u'\u8fde\u63a5\u4fe1\u606f':{x:detail_data[x] for x in detail_data if 'CLIENT' in x}, u'\u5ba2\u6237\u7aef\u4fe1\u606f':{x:detail_data[x] for x in detail_data if 'CLIENT' not in x}} detail_format['detail'] = detail_info return detail_format
def get_sqlserver_summary(pk): conn = Database.objects.get(pk=pk) db_type = conn.db_type query = reprocess_query(SQLServer_Space_Total_Query, {'pk': pk}) total_space = execute_return_json(query) if not total_space: get_space(conn) total_space = execute_return_json(query) query = SummaryQuery.get(db_type) flag, json_data = run_batch_sql(conn, query) if not flag: raise build_exception_from_java(json_data) space = {'space': total_space} summary_data = {**space, **json_data} return summary_data
def get_lock_session(pk, time_span=None): try: database = Database.objects.get(pk=pk) db_type = database.db_type json_data = {} command_list = [] if time_span == 'realtime': query = get_lock_query(database) flag, json_data = run_batch_sql(database, query) if not flag: raise build_exception_from_java(json_data) cmd_data = get_unlock_data(database) command_list = [x.get('CMD') for x in cmd_data] else: query_lock = Lock_History_Local_Query[ database.db_type].value.format(pk, time_span) query_trans = Local_Transaction_Query.format(pk, time_span) json_data['lock'] = execute_return_json(query_lock) trans = execute_return_json(query_trans) if trans: json_data['transaction'] = trans[0].get('TRANSACTIONS') return { 'lock': { 'blocker_header': Blocker_Header[database.db_type].value, 'waiter_header': Waiter_Header[database.db_type].value, 'blocker_id': ['B_BLOCKER', 'W_WAITER'], 'waiter_id': 'W_WAITER', 'session_detail_keys': ['B_BLOCKER', 'W_WAITER'], 'sql_detail_keys': ['B_SQL_ID', 'B_PREV_SQL_ID', 'W_SQL_ID', 'W_PREV_SQL_ID'], 'data': json_data.get('lock'), 'advice': command_list }, 'transaction': { 'data': json_data.get('transaction'), 'sql_detail_keys': ['SQL_ID'], 'session_detail_keys': ['SESSION_ID'] } } except ObjectDoesNotExist: return {'error_message': ''} except Exception as err: return {'error_message': str(err)}
def db2_activity(database):
    """Sample active DB2 sessions into DB2_ASH rows and raise an activity warn.

    Picks a version-appropriate activity query (v9.7+, v9.7 fallback without
    MON_CURRENT_UOW, pre-9.7 via WLM_GET_ACTIVITY_DETAILS), saves one DB2_ASH
    row per active application, then records the active-session count as an
    Active_Session_Warn Performance metric.
    """
    # NOTE(review): padding_str is computed but never used below.
    padding_str = '_v97' if database.is_v97() else ''
    # Full activity query for DB2 >= 9.7: joins MON_CURRENT_UOW for elapsed
    # time, CPU and row counters.
    query1 = "\n SELECT distinct rtrim(app.db_name) DB_NAME,\n app.agent_id,\n app.appl_id,\n app.appl_name,\n app.appl_status,\n app.authid,\n t.activity_type,\n (select cast(p.stmt_text as varchar(2000)) from table(mon_get_pkg_cache_stmt(NULL, t.executable_id, NULL, -2)) as p FETCH FIRST 1 ROWS ONLY) stmt_text,\n hex(t.EXECUTABLE_ID) EXECUTABLE_ID,\n uow.ELAPSED_TIME_SEC,\n round(uow.TOTAL_CPU_TIME/1000000) TOTAL_CPU_TIME,\n uow.TOTAL_ROWS_READ,\n uow.TOTAL_ROWS_RETURNED\nFROM table(wlm_get_workload_occurrence_activities(NULL, -2)) as t,\n sysibmadm.applications app,\n SYSIBMADM.MON_CURRENT_UOW uow\nWHERE\n app.agent_id = t.application_handle\n and t.application_handle = uow.application_handle\n and app.appl_id != (values application_id())\n and app.appl_status not in ('CONNECTED',\n 'UOWWAIT')"
    # Reduced v9.7 query without MON_CURRENT_UOW — used as a fallback when
    # query1_v97 fails (e.g. the UOW admin view is unavailable).
    query1_v97_base = "\n SELECT\n distinct rtrim(app.db_name) DB_NAME, app.agent_id, app.appl_id, app.appl_name, app.appl_status, app.authid,\n t.activity_type, cast(p.stmt_text as varchar(2000)) stmt_text, hex(t.EXECUTABLE_ID) EXECUTABLE_ID\n FROM table(wlm_get_workload_occurrence_activities_v97(NULL, -2)) as t,\n table(mon_get_pkg_cache_stmt(NULL, NULL, NULL, -2)) as p,\n sysibmadm.applications app\n WHERE t.executable_id = p.executable_id\n and app.agent_id = t.application_handle\n and app.appl_id != (values application_id())\n and app.appl_status not in ('CONNECTED','UOWWAIT')"
    # Same as query1 but using the *_v97 table function name.
    query1_v97 = "\n SELECT distinct rtrim(app.db_name) DB_NAME,\n app.agent_id,\n app.appl_id,\n app.appl_name,\n app.appl_status,\n app.authid,\n t.activity_type,\n (select cast(p.stmt_text as varchar(2000)) from table(mon_get_pkg_cache_stmt(NULL, t.executable_id, NULL, -2)) as p FETCH FIRST 1 ROWS ONLY) stmt_text,\n hex(t.EXECUTABLE_ID) EXECUTABLE_ID,\n uow.ELAPSED_TIME_SEC,\n round(uow.TOTAL_CPU_TIME/1000000) TOTAL_CPU_TIME,\n uow.TOTAL_ROWS_READ,\n uow.TOTAL_ROWS_RETURNED\nFROM table(wlm_get_workload_occurrence_activities_v97(NULL, -2)) as t,\n sysibmadm.applications app,\n SYSIBMADM.MON_CURRENT_UOW uow\nWHERE\n app.agent_id = t.application_handle\n and t.application_handle = uow.application_handle\n and app.appl_id != (values application_id())\n and app.appl_status not in ('CONNECTED',\n 'UOWWAIT')"
    # Pre-9.7 query: statement text comes from WLM_GET_ACTIVITY_DETAILS.
    query2 = "\n SELECT\n app.db_name, app.agent_id, app.appl_id, app.appl_name, app.appl_status, app.authid,\n t.activity_type, (select VALUE from table(WLM_GET_ACTIVITY_DETAILS(t.application_handle,t.uow_id,t.activity_id,-2)) where name = 'STMT_TEXT') STMT_TEXT\n FROM table(wlm_get_workload_occurrence_activities(cast(null as bigint), -1)) as t,\n sysibmadm.applications app\n WHERE app.agent_id = t.application_handle\n and app.appl_id != (values application_id())\n and app.appl_status not in ('CONNECTED','UOWWAIT')"
    # Snapshot timestamp rounded to a 10-second bucket by the helper.
    ash_date = get_10s_time()
    if not database.is_v95_base():
        if database.is_v97():
            flag, json_data = run_sql(database, query1_v97)
            if not flag:
                # Retry with the reduced query if the full v9.7 query failed.
                flag, json_data = run_sql(database, query1_v97_base)
        else:
            flag, json_data = run_sql(database, query1)
    else:
        flag, json_data = run_sql(database, query2)
    if not flag:
        # Best-effort sampler: log the failure and skip this cycle.
        print(str(build_exception_from_java(json_data)))
        return
    for x in json_data:
        ash = DB2_ASH()
        # NOTE(review): db_name is populated from AUTHID, not DB_NAME — looks
        # like a possible copy/paste slip; confirm before relying on it.
        # Also, .strip() will raise if AUTHID is absent from the row.
        ash.db_name = x.get('AUTHID').strip()
        ash.session_id = x.get('AGENT_ID')
        ash.machine = x.get('APPL_ID')
        ash.program = x.get('APPL_NAME')
        ash.appl_status = x.get('APPL_STATUS')
        ash.username = x.get('AUTHID').strip()
        ash.command = x.get('ACTIVITY_TYPE')
        ash.sql_text = x.get('STMT_TEXT')
        if not database.is_v95_base():
            # v9.7+: the executable id doubles as the sql id; UOW counters
            # are only present when query1/query1_v97 succeeded.
            ash.sql_id = x.get('EXECUTABLE_ID')
            ash.sql_elapsed_time = x.get('ELAPSED_TIME_SEC')
            ash.total_cpu_time = x.get('TOTAL_CPU_TIME')
            ash.rows_read = x.get('TOTAL_ROWS_READ')
            ash.rows_returned = x.get('TOTAL_ROWS_RETURNED')
        else:
            # Pre-9.7 has no executable id: hash the statement text instead.
            ash.sql_id = gen_sql_id(
                x.get('STMT_TEXT')) if x.get('STMT_TEXT') else None
        ash.created_at = ash_date
        ash.database = database
        ash.save()
    # Record the number of active sessions as a warn-scanned metric.
    warn = WARN_ENUM.get(database.db_type).Active_Session_Warn
    p = Performance(inst_id=database.db_name, name=warn.name,
                    value=len(json_data), created_at=ash_date)
    customized_warn_scanner(warn, p, database, False)