Example #1
def sqlserver_analysis(audit_job):
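    """Build and run rule-based SQL-audit queries for a SQL Server audit job, then assemble the audit result."""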
    try:
        schema, max_rows, order_by_pred, time_span = (audit_job.schema,
                                                      audit_job.max_rows,
                                                      audit_job.order_by,
                                                      audit_job.time_span)
        if time_span:
            # the window is the last hour: begin must precede end
            begin_time, end_time = (datetime.now() - timedelta(hours=1),
                                    datetime.now())
        else:
            begin_time, end_time = audit_job.snapshot_begin_time, audit_job.snapshot_end_time
        rule_list = []
        database = audit_job.database
        strategy_dict = audit_job.strategy
        audit_rule_queryset = (Audit_Rule.objects.filter(
            database=database)).filter(enabled=True)
        for k, v in strategy_dict.items():
            rule_list += list((audit_rule_queryset.filter(
                audit_type=k)).filter(target__in=v))

        logger.info('sql_audit_analysis begin build query')
        query_total_json = build_total_query(
            TotalTemplateJSON.get(database.db_type), database, schema)
        query_detail_json = {}
        query_problem_json = {}
        object_rules_list = [
            rule for rule in rule_list if rule.audit_type == ''
        ]
        for rule in set(object_rules_list):
            query_detail_json[rule.name] = build_rule_query(
                DetailTemplateJson, database, rule, schema, max_rows,
                order_by_pred)
            query_problem_json[rule.name] = build_rule_query(
                ProblemTemplateJSON, database, rule, schema, max_rows,
                order_by_pred)

        logger.info('sql_audit_analysis begin run total')
        flag, total_result = run_batch_sql(database, query_total_json, schema)
        if not flag:
            logger.error(total_result)
            raise build_exception_from_java(total_result)
        logger.info('sql_audit_analysis begin run detail')
        flag, detail_result = run_batch_sql(database, query_detail_json,
                                            schema)
        if not flag:
            logger.error(detail_result)
            raise build_exception_from_java(detail_result)
        logger.info('sql_audit_analysis begin run problem')
        flag, problem_result = run_batch_sql(database, query_problem_json,
                                             schema)
        if not flag:
            logger.error(problem_result)
            raise build_exception_from_java(problem_result)
        logger.info('sql_audit_analysis begin build result')
        return build_audit_result(database, rule_list, total_result,
                                  problem_result, detail_result, audit_job)
    except Exception as e:
        logger.error(str(e))
Example #2
def get_mysql_summary(pk):
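    """Collect space totals plus selected server variables (version, logging, buffer sizes) for a MySQL instance summary."""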
    conn = Database.objects.get(pk=pk)
    db_type = conn.db_type
    query = reprocess_query(Space_Total_Query, {'pk': pk})
    total_space = execute_return_json(query)
    if not total_space:
        get_space(conn)
        total_space = execute_return_json(query)
    query = SummaryQuery.get(db_type)
    flag, json_data = run_batch_sql(conn, query)
    if not flag:
        raise build_exception_from_java(json_data)
    database_json = {
        x.get('Variable_name'): x.get('Value')
        for x in json_data.get('database')
        if x.get('Variable_name') in ('version', 'version_comment',
                                      'version_compile_machine',
                                      'version_compile_os',
                                      'default_storage_engine', 'general_log',
                                      'log_bin', 'slow_query_log')
    }
    memory_json = {
        x.get('Variable_name'): x.get('Value')
        for x in json_data.get('database')
        if x.get('Variable_name') in ('innodb_buffer_pool_size',
                                      'join_buffer_size', 'key_buffer_size',
                                      'query_cache_size', 'sort_buffer_size',
                                      'thread_cache_size')
    }
    summary_data = {
        'space': total_space,
        'database': database_json,
        'memory': memory_json
    }
    return summary_data
Example #3
def get_ash_report(pk, instance_id, begin_time, end_time):
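    """Generate an ASH report as HTML for the given instance and time window."""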
    try:
        database = Database.objects.get(pk=pk)
        json_data = []
        flag, json_data = run_sql(database, DBID_Query)
        if not flag:
            raise build_exception_from_java(json_data)
        db_id = json_data[0].get('DBID')
        key, inst_str = get_key_inst_str(database, instance_id)
        query_ash = ASH_Query.get(key)
        options = {
            'db_id': db_id,
            'inst_str': inst_str,
            'begin_time': timestamp_to_char(begin_time),
            'end_time': timestamp_to_char(end_time)
        }
        query_ash['report'] = query_ash.get('report').format(**options)
        flag, report_data = run_batch_sql(database, query_ash)
        if not flag:
            raise build_exception_from_java(report_data)
        report_html = ''.join([
            x.get('OUTPUT') for x in report_data.get('report')
            if x.get('OUTPUT')
        ]) if report_data.get('report') else ''
        return {'report_html': report_html}
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
Example #4
def get_sqlmon_report(pk, sql_id, inst_id, sql_exec_id, report_type, time_span):
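    """Return a cached SQL Monitor report, or fetch one via DBMS_SQLTUNE.REPORT_SQL_MONITOR."""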
    try:
        database = Database.objects.get(pk=pk)
        sqlmon = None
        sqlmon_data = SQLMON.objects.filter(
            database=database, sql_id=sql_id, inst_id=inst_id,
            sql_exec_id=sql_exec_id).first()
        if sqlmon_data:
            sqlmon = sqlmon_data.sqlmon
        else:
            query_json = {'alter_session':"ALTER SESSION SET EVENTS '31156 trace name context forever, level 0x400'", 
             'sqlmon':f'''
            select
                dbms_sqltune.report_sql_monitor(
                type=>'{report_type}',
                inst_id=>{inst_id},
                sql_id=>'{sql_id}',
                sql_exec_id=>{sql_exec_id},
                report_level=>'ALL')
                MONITOR_REPORT
            from dual'''}
            flag, sqlmon_data = run_batch_sql(database, query_json)
            if not flag:
                raise build_exception_from_java(sqlmon_data)
            sqlmon = sqlmon_data.get('sqlmon')[0].get('MONITOR_REPORT') if sqlmon_data.get('sqlmon') else ''
        sqlmon = sqlmon.replace('http://download.oracle.com', '/static') if sqlmon else ''
        return {'report_html': sqlmon}
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
Example #5
def oracle_analysis(audit_job):
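    """Build and run rule-based SQL-audit queries for an Oracle audit job, then assemble the audit result."""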
    try:
        schema, max_rows, order_by_pred = audit_job.schema, audit_job.max_rows, audit_job.order_by
        rule_list = []
        database = audit_job.database
        strategy_dict = audit_job.strategy
        audit_rule_queryset = ((Audit_Rule.objects.filter(
            database=database)).filter(enabled=True)).filter(
                is_static_rule=False)
        for k, v in strategy_dict.items():
            rule_list += list((audit_rule_queryset.filter(
                audit_type=k)).filter(target__in=v))

        logger.info('sql_audit_analysis begin build query')
        query_total_json = build_total_query(
            TotalTemplateJSON.get(database.db_type), database, schema)
        query_detail_json = {}
        query_problem_json = {}
        for rule in set(rule_list):
            query_detail_json[rule.name] = build_rule_query(
                DetailTemplateJson, database, rule, schema, max_rows,
                order_by_pred)
            query_problem_json[rule.name] = build_rule_query(
                ProblemTemplateJSON, database, rule, schema, max_rows,
                order_by_pred)

        logger.info('sql_audit_analysis begin run total')
        flag, total_result = run_batch_sql(database, query_total_json)
        if not flag:
            logger.error(total_result)
            raise build_exception_from_java(total_result)
        logger.info('sql_audit_analysis begin run detail')
        flag, detail_result = run_batch_sql(database, query_detail_json)
        if not flag:
            logger.error(detail_result)
            raise build_exception_from_java(detail_result)
        logger.info('sql_audit_analysis begin run problem')
        flag, problem_result = run_batch_sql(database, query_problem_json)
        if not flag:
            logger.error(problem_result)
            raise build_exception_from_java(problem_result)
        logger.info('sql_audit_analysis begin build result')
        return build_audit_result(database, rule_list, total_result,
                                  problem_result, detail_result, audit_job)
    except Exception as e:
        logger.error(str(e))
Example #6
def detail_info(pk, name, days=7, limit=200):
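    """Return space detail, growth trend, and realtime segment/temp data for one tablespace."""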
    try:
        database = Database.objects.get(pk=pk)
        db_type = database.db_type
        options = {'pk': pk, 'days': days, 'limit': limit, 'name': name}
        space_detail = (Space_Detail.objects.filter(database=database)).first()
        if not space_detail:
            get_space(database)
            space_detail = (Space_Detail.objects.filter(
                database=database)).first()
        name_detail = {}
        for x in space_detail.detail:
            if x.get('TABLESPACE_NAME') == name:
                name_detail = x
                break

        query = reprocess_query(Space_Detail_Lag_Query, options)
        space_trend = execute_return_json(query)
        query = reprocess_query(Space_Detail_Realtime_Query.get(db_type),
                                options)
        if not is_temp(db_type, name_detail) and db_type in ('db2', 'oracle'):
            query.pop('temp')
        if db_type == 'sqlserver':
            flag, json_data = run_batch_sql(database, query, name)
        else:
            flag, json_data = run_batch_sql(database, query)
        if not flag:
            raise build_exception_from_java(json_data)
        detail_data = {
            'space_detail': name_detail,
            'space_trend': {
                'name': '(MB)',
                'data': [[x.get('CREATED_AT'), x.get('DELTA')]
                         for x in space_trend]
            },
            'table_data': json_data.get('segment') if json_data else [],
            'temp': json_data.get('temp') if json_data else []
        }
        if db_type != 'mysql':
            detail_data['datafile'] = json_data.get('datafile') or []
        return detail_data
    except ObjectDoesNotExist:
        return {'error_message': ''}
Example #7
def get_backup(pk):
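    """Fetch RMAN configuration, recent backup-job history, and running RMAN long operations."""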
    days = 7
    query = {
        'config':
        'select * from v$rman_configuration',
        'history':
        f'''select
  j.session_recid, --j.session_stamp,
  to_char(j.start_time, 'yyyy-mm-dd hh24:mi:ss') start_time,
  to_char(j.end_time, 'yyyy-mm-dd hh24:mi:ss') end_time,
  round((j.output_bytes/1024/1024),2) output_mbytes, j.status, j.input_type,
  decode(to_char(j.start_time, 'd'), 1, 'Sunday', 2, 'Monday',
                                     3, 'Tuesday', 4, 'Wednesday',
                                     5, 'Thursday', 6, 'Friday',
                                     7, 'Saturday') WEEK,
  round(j.elapsed_seconds,-1) ELAPSED_TIME_SEC, j.TIME_TAKEN_DISPLAY,
  x.cf, x.df, x.i0, x.i1, x.l,
  ro.inst_id output_instance,x.device_type
from V$RMAN_BACKUP_JOB_DETAILS j
  left outer join (select
                     d.session_recid, d.session_stamp,
                     sum(case when d.controlfile_included = 'YES' then d.pieces else 0 end) CF,
                     sum(case when d.controlfile_included = 'NO'
                               and d.backup_type||d.incremental_level = 'D' then d.pieces else 0 end) DF,
                     sum(case when d.backup_type||d.incremental_level = 'D0' then d.pieces else 0 end) I0,
                     sum(case when d.backup_type||d.incremental_level = 'I1' then d.pieces else 0 end) I1,
                     sum(case when d.backup_type = 'L' then d.pieces else 0 end) L,d.device_type
                   from
                     V$BACKUP_SET_DETAILS d
                     join V$BACKUP_SET s on s.set_stamp = d.set_stamp and s.set_count = d.set_count
                   where s.input_file_scan_only = 'NO'
                   group by d.session_recid, d.session_stamp,d.device_type) x
    on x.session_recid = j.session_recid and x.session_stamp = j.session_stamp
  left outer join (select o.session_recid, o.session_stamp, min(inst_id) inst_id
                   from GV$RMAN_OUTPUT o
                   group by o.session_recid, o.session_stamp)
    ro on ro.session_recid = j.session_recid and ro.session_stamp = j.session_stamp
where j.start_time > trunc(sysdate)-{days}
order by j.start_time''',
        'long':
        '''select sid || ',' || serial# || '@' || inst_id as SESSION_ID,
  to_char(start_time, 'yyyy-mm-dd hh24:mi:ss') start_time,
  ELAPSED_SECONDS, sofar, totalwork, opname,
  round(sofar/totalwork*100,-1) "PCT"
from gv$session_longops
where opname like 'RMAN:%'
  and opname not like 'RMAN: aggregate%'
  and totalwork != 0'''
    }
    try:
        database = Database.objects.get(pk=pk)
        flag, json_data = run_batch_sql(database, query)
        if not flag:
            raise build_exception_from_java(json_data)
        return json_data
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
Example #8
def execute_sqltuning_task(pk, sql_id, timeout):
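    """Create and execute a DBMS_SQLTUNE tuning task for sql_id, then return the report plus any accept_sql_profile actions."""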
    try:
        database = Database.objects.get(pk=pk)
        import re
        dt_postfix = datetime.now().strftime('%Y-%m-%d_%H:%M')
        taskname = sql_id + '_' + dt_postfix
        query_submit_job = f'''
        declare
        a varchar2(100);
        begin
            BEGIN
            dbms_sqltune.execute_tuning_task('%s');
            EXCEPTION when others then
                null;
            end;
            a := dbms_sqltune.create_tuning_task(
                 task_name=>'{taskname}',
                 description=>'{taskname}',
                 scope=>dbms_sqltune.scope_comprehensive,
                 time_limit=>{timeout},
                 sql_id=>'{sql_id}'
             );
             dbms_sqltune.execute_tuning_task('{taskname}');
        end;'''
        # '\\1' keeps the regexp backreference literal inside the f-string
        query_report = {
            'report': f'''select dbms_sqltune.report_tuning_task('{taskname}') report FROM dual''',
            'benefit': f'''
            select hint, benefit from (
            select case when attr5 like 'OPT_ESTIMATE%' then cast(attr5 as varchar2(4000)) when attr1 like 'OPT_ESTIMATE%' then attr1 end hint, benefit
            from dba_advisor_recommendations t join dba_advisor_rationale r using (task_id, rec_id)
            where t.task_name = '{taskname}' and t.type = 'SQL PROFILE'
            --and r.message='This attribute adjusts optimizer estimates.'
        ) where hint is not null order by to_number(regexp_replace(hint, '^.*=([0-9.]+)[^0-9]*$', '\\1'))'''}
        flag, result = run_plsql(database, query_submit_job)
        if not flag:
            raise build_exception_from_java(result)
        flag, sqltune_data = run_batch_sql(database, query_report)
        if not flag:
            raise build_exception_from_java(sqltune_data)
        accept_sql_profile = False
        sql_list = None
        report_data = sqltune_data.get('report')
        sqltune_report = report_data[0].get('REPORT') if report_data else ''
        if re.search('accept_sql_profile', sqltune_report, re.IGNORECASE):
            sql_list = re.findall('execute dbms_sqltune.accept_sql_profile[^;]+;', sqltune_report, re.IGNORECASE)
            for idx, val in enumerate(sql_list):
                sql_list[idx] = re.sub('$', '\n end;', re.sub('execute', 'begin \n', val))

            accept_sql_profile = True
        sqltune_result = {'report': report_data,
                          'action': sql_list,
                          'accept_sql_profile': accept_sql_profile}
        return sqltune_result
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
Example #9
def analysis_from_post(request):
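    """Run an ad-hoc SQL audit for the database given in the POST payload and return the result as a DRF Response."""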
    database_id = request.data.get('database_id')
    schema = request.data.get('schema', None)
    max_rows = request.data.get('max_rows', None)
    order_by_pred = request.data.get('order_by_pred', '')
    audit_result = {}
    try:
        database = Database.objects.get(pk=database_id)
        rule_list = ((Audit_Rule.objects.filter(database=database)).filter(
            enabled=True)).filter(is_static_rule=False)
        query_total_json = build_total_query(TotalTemplateJSON, database,
                                             schema)
        query_detail_json = {}
        query_problem_json = {}
        for rule in rule_list:
            query_detail_json[rule.name] = build_rule_query(
                DetailTemplateJson, database, rule, schema, max_rows,
                order_by_pred)
            query_problem_json[rule.name] = build_rule_query(
                ProblemTemplateJSON, database, rule, schema, max_rows,
                order_by_pred)

        flag, detail_result = run_batch_sql(database, query_detail_json)
        if not flag:
            raise build_exception_from_java(detail_result)
        flag, problem_result = run_batch_sql(database, query_problem_json)
        if not flag:
            raise build_exception_from_java(problem_result)
        flag, total_result = run_batch_sql(database, query_total_json)
        if not flag:
            raise build_exception_from_java(total_result)
        audit_result = build_audit_result(database, rule_list, total_result,
                                          problem_result, detail_result, None)
        collect_sql_text(database, schema)
        return Response(audit_result, status=status.HTTP_200_OK)
    except ObjectDoesNotExist:
        return Response({'error_message': ''},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
Example #10
def get_sqlserver_summary(pk):
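    """Collect space totals and summary query results for a SQL Server instance."""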
    conn = Database.objects.get(pk=pk)
    db_type = conn.db_type
    query = reprocess_query(SQLServer_Space_Total_Query, {'pk': pk})
    total_space = execute_return_json(query)
    if not total_space:
        get_space(conn)
        total_space = execute_return_json(query)
    query = SummaryQuery.get(db_type)
    flag, json_data = run_batch_sql(conn, query)
    if not flag:
        raise build_exception_from_java(json_data)
    space = {'space': total_space}
    summary_data = {**space, **json_data}
    return summary_data
Example #11
def get_lock_session(pk, time_span=None):
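    """Return current (realtime) or historical lock and transaction information for a database."""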
    try:
        database = Database.objects.get(pk=pk)
        db_type = database.db_type
        json_data = {}
        command_list = []
        if time_span == 'realtime':
            query = get_lock_query(database)
            flag, json_data = run_batch_sql(database, query)
            if not flag:
                raise build_exception_from_java(json_data)
            cmd_data = get_unlock_data(database)
            command_list = [x.get('CMD') for x in cmd_data]
        else:
            query_lock = Lock_History_Local_Query[
                database.db_type].value.format(pk, time_span)
            query_trans = Local_Transaction_Query.format(pk, time_span)
            json_data['lock'] = execute_return_json(query_lock)
            trans = execute_return_json(query_trans)
            if trans:
                json_data['transaction'] = trans[0].get('TRANSACTIONS')
        return {
            'lock': {
                'blocker_header': Blocker_Header[database.db_type].value,
                'waiter_header': Waiter_Header[database.db_type].value,
                'blocker_id': ['B_BLOCKER', 'W_WAITER'],
                'waiter_id': 'W_WAITER',
                'session_detail_keys': ['B_BLOCKER', 'W_WAITER'],
                'sql_detail_keys':
                ['B_SQL_ID', 'B_PREV_SQL_ID', 'W_SQL_ID', 'W_PREV_SQL_ID'],
                'data': json_data.get('lock'),
                'advice': command_list
            },
            'transaction': {
                'data': json_data.get('transaction'),
                'sql_detail_keys': ['SQL_ID'],
                'session_detail_keys': ['SESSION_ID']
            }
        }
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
Example #12
def get_db2_sql_audit(database, sql_id, sql_text=None, plans=[]):
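    """Run single-SQL audit rules for DB2 against the objects referenced in `plans`, merging object, plan, and text audit results."""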
    schema_list = ["('{}','{}')".format(x.get('OBJECT_OWNER'), x.get('OBJECT_NAME')) for x in plans if x.get('OBJECT_NAME')]
    schema_list_str = ','.join(schema_list) if schema_list else "('','')"
    audit_rule_queryset = Audit_Rule.objects.filter(Q(database=database) & Q(enabled=True) & Q(audit_type='') & Q(single=True))
    query_detail_json = {}
    for rule in audit_rule_queryset:
        query = build_db2_rule_single_query(SingleTemplateJson.get(database.db_type), database, rule, schema_list_str)
        if query:
            query_detail_json[rule.name] = query

    flag, audit_data = run_batch_sql(database, query_detail_json)
    if not flag:
        raise build_exception_from_java(audit_data)
    audit_result = build_single_audit_result(audit_rule_queryset, audit_data)
    sql_audit_plan_result = get_db2_single_sql_plan_audit(database, sql_text, plans)
    sql_audit_text_result = get_db2_single_sql_text_audit(database, sql_text, plans)
    audit_result = {**audit_result, **sql_audit_plan_result, **sql_audit_text_result}
    print(audit_result)
    return audit_result
Example #13
def space_info(pk, days=7):
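    """Return tablespace detail, total space, and growth-trend data for a database."""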
    try:
        database = Database.objects.get(pk=pk)
        db_type = database.db_type
        options = {'pk': pk, 'days': days}
        space_detail = (Space_Detail.objects.filter(database=database)).first()
        if not space_detail:
            get_space(database)
            space_detail = (Space_Detail.objects.filter(
                database=database)).first()
        space_detail = space_detail.detail if space_detail else {}
        space_total_query = Space_Total_Query if db_type != 'sqlserver' else SQLServer_Space_Total_Query
        query = reprocess_query(space_total_query, options)
        total_space = execute_return_json(query)
        query = reprocess_query(Space_Total_Lag_Query, options)
        total_trend = execute_return_json(query)
        if db_type == 'oracle':
            query = reprocess_query(Space_Realtime_Query.get(db_type), options)
            flag, json_data = run_batch_sql(database, query)
            if not flag:
                raise build_exception_from_java(json_data)
        else:
            json_data = {}
        local_data = {
            'space_detail': space_detail,
            'total_space': total_space[0] if total_space else {},
            'total_trend': {
                'name': '(MB)',
                'data': [[x.get('CREATED_AT'), x.get('DELTA')]
                         for x in total_trend]
            }
        }
        json_data['switch_trend'] = {
            'name': '',
            'data': [[x.get('TIME'), x.get('COUNT')]
                     for x in json_data.get('switch_trend', [])]
        }
        space_summary = {**local_data, **json_data}
        return space_summary
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
Example #14
def gen_sql_mononitor_and_binds(database, sqlmon_list):
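    """Persist SQL Monitor rows (when a 'sqlmon' result set is present) and return bind values parsed from gv$sql_monitor binds_xml."""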
    sqlmon_filter_list = ','.join([
        "({},'{}',{})".format(x.get('INST_ID'), x.get('SQL_ID'),
                              x.get('SQL_EXEC_ID')) for x in sqlmon_list
    ])
    query = {
        'binds':
        f'''
        SELECT xt.*, (select child_number from gv$sql sql where sql.inst_id = x.inst_id and sql.address = x.SQL_CHILD_ADDRESS) CHILD_NUMBER, to_char(x.SQL_EXEC_START, 'yyyy-mm-dd hh24:mi:ss')LAST_CAPTURED, null REAL_DATA
FROM   (select xmltype(binds_xml) xml_data, SQL_EXEC_START, inst_id, SQL_CHILD_ADDRESS, sql_id from gv$sql_monitor
where binds_xml is not null and (inst_id, sql_id, sql_exec_id) in ({sqlmon_filter_list})) x,
       XMLTABLE('/binds/bind'
         PASSING x.xml_data
         COLUMNS
           name     VARCHAR2(100)  PATH '@name',
           pos     number PATH '@pos',
           DATATYPE_STRING       VARCHAR2(100)  PATH '@dtystr',
           VALUE_STRING  VARCHAR2(100) PATH '/'
         ) xt
        '''
    }
    flag, sqlmon_data = run_batch_sql(database, query)
    if not flag:
        return sqlmon_data
    if sqlmon_data.get('sqlmon'):
        for x in sqlmon_data.get('sqlmon'):
            m = SQLMON()
            m.inst_id = x.get('INST_ID')
            m.sql_id = x.get('SQL_ID')
            m.status = x.get('STATUS')
            m.username = x.get('USERNAME')
            m.elapsed_time = x.get('ELAPSED_TIME')
            m.db_time = x.get('DB_TIME')
            m.db_cpu = x.get('DB_CPU')
            m.sql_exec_id = x.get('SQL_EXEC_ID')
            m.sql_exec_start = x.get('SQL_EXEC_START')
            m.sql_plan_hash_value = x.get('SQL_PLAN_HASH_VALUE')
            m.sql_text = x.get('SQL_TEXT')
            m.sqlmon = x.get('SQLMON')
            m.database = database
            m.created_at = datetime.now().replace(microsecond=0)
            m.save()

    return sqlmon_data.get('binds')
Example #15
def get_oracle_session_detail(database, session_id):
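    """Return session detail and open cursors for an Oracle session identified as 'sid,serial@inst_id'."""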
    detail_format = get_default_detail_format()
    import re
    prog = re.compile('([0-9]+),([0-9]+)@([0-9]+)')
    m = prog.search(session_id)
    sid, serial, inst_id = (1, 2, 3)
    if m:
        sid, serial, inst_id = m.group(1), m.group(2), m.group(3)
    detail_format['instance_id'] = inst_id
    options = {'inst_id':inst_id, 
     'sid':sid, 
     'serial':serial}
    detail_query = Detai_Query.format(**options)
    cursor_query = Old_Cursor_Query.format(**options)
    query = {'detail': detail_query,
             'cursor': cursor_query}
    flag, json_data = run_batch_sql(database, query)
    if not flag:
        raise build_exception_from_java(json_data)
    detail_data = json_data.get('detail')
    cursor_data = json_data.get('cursor')
    if detail_data is None:
        detail_query = Detai_Query_Without_IP.format(**options)
        flag, json_data = run_sql(database, detail_query)
        if not flag:
            raise build_exception_from_java(json_data)
        detail_data = json_data
    if detail_data:
        detail_data = detail_data[0]
        detail_info = {
            '连接信息': {x: detail_data[x] for x in ('SID', 'SERIAL#', 'STATUS', 'USERNAME', 'SPID', 'LOGON_TIME', 'SERVER') if x in detail_data},  # connection info
            '客户端信息': {x: detail_data[x] for x in ('OSUSER', 'PROCESS', 'MACHINE', 'IP') if x in detail_data},  # client info
            '应用信息': {x: detail_data[x] for x in ('SQL_ID', 'PREV_SQL_ID', 'LAST_CALL_ET', 'PROGRAM', 'MODULE', 'ACTION', 'SERVICE_NAME') if x in detail_data},  # application info
            '等待信息': {x: detail_data[x] for x in ('EVENT', 'WAIT_CLASS', 'P1', 'P2', 'P3') if x in detail_data},  # wait info
            '阻塞会话': {x: detail_data[x] for x in ('BLOCKING_INSTANCE', 'BLOCKING_SESSION') if x in detail_data},  # blocking session
            '事务信息': {x: detail_data[x] for x in ('XIDUSN', 'XIDSLOT', 'XIDSQN', 'TRX_STARTED', 'USED_UBLK', 'USED_UREC') if x in detail_data}  # transaction info
        }
        detail_format['detail'] = detail_info
        detail_format['cursor'] = cursor_data
    return detail_format
Example #16
def get_oracle_summary(pk):
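    """Return a cached Oracle instance summary, rebuilding it from the space and summary queries on cache miss."""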
    key = 'oracle:%s:index' % pk
    read_data = redis.get(key)
    if read_data:
        return json.loads(read_data)
    else:
        conn = Database.objects.get(pk=pk)
        db_type = conn.db_type
        query = reprocess_query(Space_Total_Query, {'pk': pk})
        total_space = execute_return_json(query)
        if not total_space:
            get_space(conn)
            total_space = execute_return_json(query)
        query = SummaryQuery.get(db_type)
        flag, json_data = run_batch_sql(conn, query)
        if not flag:
            raise build_exception_from_java(json_data)
        space = {'space': total_space}
        summary_data = {**space, **json_data}
        redis.setex(key, 86400, json.dumps(summary_data))
        return summary_data
Example #17
def get_oracle_sql_audit(database, sql_id, only_tune=False):
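    """Run enabled single-SQL audit rules for one Oracle sql_id, optionally restricted to tuning rules."""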
    audit_rule_queryset = []
    if only_tune:
        audit_rule_queryset = Audit_Rule.objects.filter(
            Q(database=database) & Q(enabled=True) & Q(single=True)
            & Q(is_static_rule=False) & Q(name__in=['', '', '']))
    else:
        audit_rule_queryset = Audit_Rule.objects.filter(
            Q(database=database) & Q(enabled=True) & Q(single=True)
            & Q(is_static_rule=False))
    tables_query = build_tables_query(database, sql_id)
    flag, tables = run_sql(database, tables_query)
    if not flag:
        return tables
    schema_list = ["('{}','{}')".format(x.get('OBJECT_OWNER'), x.get('OBJECT_NAME')) for x in tables]
    schema_list_str = ','.join(schema_list) if schema_list else "('','')"
    query_detail_json = {}
    for rule in audit_rule_queryset:
        query = build_rule_single_query(SingleTemplateJson.get(database.db_type), database, rule, sql_id, schema_list_str)
        if query:
            query_detail_json[rule.name] = query

    flag, audit_data = run_batch_sql(database, query_detail_json)
    if not flag:
        raise build_exception_from_java(audit_data)
    audit_result = build_single_audit_result(audit_rule_queryset, audit_data)
    return audit_result
Example #18
def oracle_sql_detail(pk, sql_id, sql_text=None, instance_id=None, time_span=None, begin_time=None, end_time=None, cache=True, activity=True, sql_audit=True, only_tune=False):
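    """Assemble SQL detail (stats, plans, binds, SQL Monitor, audit and tune data) for an Oracle sql_id, realtime or for a historical window."""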
    database = Database.objects.get(pk=pk)
    if instance_id == 'null':
        instance_id = database.db_name
    inst_id = database.instance_id_list.split(',')[0] if not instance_id or instance_id == database.db_name or instance_id == '0' else instance_id
    if sql_audit:
        inst_id = database.instance_id_list
    key_audit = f'''{pk}:sql_detail:{sql_id}:audit'''
    audit_data = None
    audit_data_json = {}
    if cache:
        audit_data = redis.get(key_audit)
        if audit_data is not None:
            audit_data_json = json.loads(audit_data)
    sql_detail = get_default_sql_detail_format(database.db_type)
    if sql_id != 'null':
        if time_span == 'realtime':
            sqldetail_sql = get_realtime_sql(sql_id, inst_id)
            if only_tune:
                sqldetail_sql.pop('binds')
            flag, sqldetail_data = run_batch_sql(database, sqldetail_sql)
            if not flag:
                return sqldetail_data
            stat_data = sqldetail_data.get('stats')
            plan_data = sqldetail_data.get('plans')
            sqlmon_data = sqldetail_data.get('sqlmon')
            bind_data = [] if only_tune else sqldetail_data.get('binds')
            for x in stat_data:
                key = ('{}-{}-{}').format(x.get('INST_ID'), x.get('CHILD_NUMBER'), x.get('PLAN_HASH_VALUE'))
                child_summary = {k:v for k, v in x.items() if k in ('CHILD_NUMBER',
                                                                    'PLAN_HASH_VALUE',
                                                                    'PARSING_SCHEMA_NAME',
                                                                    'LAST_LOAD_TIME',
                                                                    'MODULE', 'ACTION',
                                                                    'SERVICE')}
                pie_chart_data = {k:v for k, v in x.items() if k in ('ON CPU', 'Application',
                                                                     'Cluster', 'Concurrency',
                                                                     'User I/O')}
                execution_stats = {k:v for k, v in x.items() if k in ('EXECUTIONS',
                                                                      'ELAPSED_TIME',
                                                                      'CPU_TIME',
                                                                      'BUFFER_GETS',
                                                                      'DISK_READS',
                                                                      'DIRECT_WRITES',
                                                                      'ROWS_PROCESSED',
                                                                      'FETCHES')}
                metric_dict = {'EXECUTIONS':'', 
                 'ELAPSED_TIME':'()', 
                 'CPU_TIME':'CPU()', 
                 'BUFFER_GETS':'', 
                 'DISK_READS':'', 
                 'DIRECT_WRITES':'', 
                 'ROWS_PROCESSED':'', 
                 'FETCHES':''}
                total_executions = execution_stats.get('EXECUTIONS') if execution_stats.get('EXECUTIONS') != 0 else 1
                total_rows = execution_stats.get('ROWS_PROCESSED') if execution_stats.get('ROWS_PROCESSED') != 0 else 1
                execution_data = [{'指标': metric_dict.get(k), '总数': v,  # metric / total
                                   '平均每次执行': round(v / total_executions),  # avg per execution
                                   '平均每行记录': round(v / total_rows)}  # avg per row
                                  for k, v in execution_stats.items()]
                sql_detail['stats'][key] = {'child_summary':child_summary, 
                 'pie_chart_data':pie_chart_data, 
                 'execution_stats':{'header':[
                   '', '', '', ''], 
                  'data':execution_data}}

            plan_dic = defaultdict(list)
            for x in plan_data:
                key = ('{}-{}-{}').format(x.get('INST_ID'), x.get('CHILD_NUMBER'), x.get('PLAN_HASH_VALUE'))
                x.pop('INST_ID')
                plan_dic[key].append(x)

            sql_detail['plans']['data'] = plan_dic
            if sql_audit:
                if sqlmon_data:
                    sqlmon_data = sqlmon_data[:MAX_SQLMON_FOR_SQL_AUDIT]
                    binds_from_sqlmon = gen_sql_mononitor_and_binds(database, sqlmon_data)
                    bind_data = bind_data + binds_from_sqlmon
            sql_detail['sqlmon']['data'] = sqlmon_data
            sql_detail['binds']['data'] = bind_data
        else:
            sqldetail_sql = get_hist_sql(sql_id, inst_id, begin_time, end_time)
            query_sqlmon = f'''
                select
                ID,
                STATUS,
                SQL_ID,
                ELAPSED_TIME,
                DB_TIME,
                DB_CPU,
                SQL_EXEC_ID,
                SQL_EXEC_START,
                SQL_PLAN_HASH_VALUE,
                INST_ID,
                USERNAME
                from monitor_sqlmon
                where created_at BETWEEN to_timestamp({begin_time}) and to_timestamp({end_time})
                and sql_id = '{sql_id}' and database_id = '{pk}'
            '''
            flag, sqldetail_data = run_batch_sql(database, sqldetail_sql)
            if not flag:
                return sqldetail_data
            stat_data = sqldetail_data.get('stats')
            plan_data = sqldetail_data.get('plans')
            bind_data = sqldetail_data.get('binds')
            sqlmon_data = execute_return_json(query_sqlmon)
            exec_delta = defaultdict(list)
            avg_elapse_time = defaultdict(list)
            avg_cpu_time = defaultdict(list)
            avg_crs = defaultdict(list)
            avg_reads = defaultdict(list)
            plan_dic = defaultdict(list)
            stats_dict = defaultdict(dict)
            for x in stat_data:
                phv = str(x.get('PLAN_HASH_VALUE'))
                snap_time = x.get('SNAP_TIME')
                exec_delta[phv].append([snap_time, x.get('EXEC_DELTA')])
                avg_elapse_time[phv].append([snap_time, x.get('AVG_ELAPSE_TIME')])
                avg_cpu_time[phv].append([snap_time, x.get('AVG_CPU_TIME')])
                avg_crs[phv].append([snap_time, x.get('AVG_CRS')])
                avg_reads[phv].append([snap_time, x.get('AVG_READS')])

            stats_dict[''] = exec_delta
            stats_dict['(s)'] = avg_elapse_time
            stats_dict['CPU(s)'] = avg_cpu_time  # was avg_elapse_time: use the CPU series here
            stats_dict[''] = avg_crs
            stats_dict[''] = avg_reads
            for x in plan_data:
                phv = str(x.get('PLAN_HASH_VALUE'))
                plan_dic[phv].append(x)

            sql_detail['stats'] = stats_dict
            sql_detail['plans']['data'] = plan_dic
            sql_detail['sqlmon']['data'] = sqlmon_data
            sql_detail['binds']['data'] = bind_data
        if cache:
            if audit_data is not None:
                if not only_tune:
                    sql_detail['audit'] = audit_data_json
            else:
                # cache miss: run a fresh audit and store it
                audit_data_json = get_sql_audit(pk, sql_id, only_tune=only_tune)
                sql_detail['audit'] = audit_data_json
                redis.setex(key_audit, SQLTEXT_RETENTION, json.dumps(audit_data_json))
            if audit_data_json:
                new_plan_dict = {}
                if plan_dic:
                    new_plan_dict = {k[k.rfind('-') + 1:]: v for k, v in plan_dic.items()}
                tune_data = get_sql_tune(database, audit_data_json, new_plan_dict)
                if tune_data:
                    sql_detail['tune'] = tune_data
        return sql_detail
Example #19
def lock_history(database):
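    """Snapshot current lock/transaction state, persist it, and raise configured blocking and transaction warnings."""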
    query = get_lock_query(database)
    flag, json_data = run_batch_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return
    created_at = datetime.now().replace(microsecond=0)
    session_list = []
    if json_data.get('lock'):
        session_list = get_blocking_session_detail(database)
    lock_list = json_data.get('lock') if json_data.get('lock') else []
    save_lock_history(database, lock_list, created_at)
    transactions = json_data.get('transaction')
    trans = Transaction()
    trans.database = database
    trans.created_at = created_at
    trans.transactions = transactions
    trans.save()
    db_type = database.db_type
    locks = len(json_data.get('lock')) if json_data.get('lock') else 0
    warn = WARN_ENUM.get(db_type).Blocking_Session_Warn
    p = Performance(inst_id=database.db_name, name=warn.name, value=locks, created_at=created_at)
    customized_warn_scanner(warn, p, database, False)
    if transactions:
        warn = WARN_ENUM.get(db_type).Long_Transaction_Warn
        p = Performance(inst_id=database.db_name, name=warn.name, created_at=created_at)
        for t in transactions:
            options = {'SESSION_ID': t.get('SESSION_ID'), 'MACHINE': t.get('MACHINE'),
                       'TRX_STARTED': t.get('TRX_STARTED')}
            if t.get('INST_ID'):
                p.inst_id = t.get('INST_ID')
            p.value = t.get('TRX_SECONDS')
            if customized_warn_scanner(warn, p, database, False, options, True):
                session_list.append(t.get('SESSION_ID'))

        warn = WARN_ENUM.get(db_type).Transaction_Warn
        p = Performance(inst_id=database.db_name, name=warn.name, value=len(transactions), created_at=created_at)
        customized_warn_scanner(warn, p, database, False, {}, True)
        warn = WARN_ENUM.get(db_type).Big_Transaction_Warn
        p = Performance(inst_id=database.db_name, name=warn.name, created_at=created_at)
        big_transaction_key_dict = {'oracle':'USED_UBLK', 
         'mysql':'TRX_ROWS_MODIFIED', 
         'db2':'UOW_LOG_SPACE_USED', 
         'sqlserver':'LOG_BYTES'}
        for x in transactions:
            options = {'session_id': x.get('SESSION_ID'),
                       'start_time': x.get('TRX_STARTED')}
            if x.get('INST_ID'):
                p.inst_id = x.get('INST_ID')
            p.value = x.get(big_transaction_key_dict.get(db_type))
            if customized_warn_scanner(warn, p, database, False, options, True):
                session_list.append(x.get('SESSION_ID'))

    save_session_detail_list(database, list(set(session_list)))
    if db_type == 'mysql':
        lock_tables = json_data.get('tables')
        if lock_tables:
            warn = WARN_ENUM.get(database.db_type).Locked_Table_Warn
            p = Performance(inst_id=database.db_name, name=warn.name, value=len(lock_tables), created_at=created_at)
            table_list = ['{}.{}'.format(x.get('Database'), x.get('Table')) for x in lock_tables]
            options = {'table_list': ' '.join(table_list)}
            customized_warn_scanner(warn, p, database, False, options)
Example #20
def apply_sql_profile(pk, sql_id, plan_hash_value):
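    """Build and import a COE-style SQL profile that pins the outline hints of plan_hash_value to sql_id."""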
    import re
    try:
        database = Database.objects.get(pk=pk)
        instance_id_list = database.instance_id_list
        query_json = {'SQLTEXT':f'''select * from (select REPLACE(sql_fulltext, CHR(00), ' ') SQLTEXT from gv$sql where inst_id in ({instance_id_list}) and sql_id='{sql_id}' and rownum = 1 union all
        select REPLACE(SQL_TEXT, CHR(00), ' ') from DBA_HIST_SQLTEXT where sql_id='{sql_id}' and rownum = 1) where rownum=1''',  'HINT':f'''
        select /*+ opt_param('parallel_execution_enabled', 'false') */ 'q''[' || SUBSTR(EXTRACTVALUE(VALUE(h), '/hint'), 1, 4000) || ']'',' HINT
        from
        (
            select other_xml from
            (
                SELECT other_xml
                FROM gv$sql_plan
                WHERE sql_id = '{sql_id}'
                and inst_id in ({instance_id_list})
                AND plan_hash_value = {plan_hash_value}
                AND other_xml IS NOT NULL
                and rownum = 1
                union all
                SELECT other_xml
                FROM dba_hist_sql_plan
                WHERE sql_id = '{sql_id}'
                AND plan_hash_value = {plan_hash_value}
                AND other_xml IS NOT NULL
                and rownum = 1
            ) where rownum = 1
        ) p, table(xmlsequence(extract(xmltype(p.other_xml),'/*/outline_data/hint'))) h'''}
        flag, result = run_batch_sql(database, query_json)
        if not flag:
            raise build_exception_from_java(result)
        sqltext = result.get('SQLTEXT')[0].get('SQLTEXT') if result.get('SQLTEXT') else ''
        sqltext = '\n'.join(line.strip() for line in re.findall('.{1,160}(?:\\s+|$)', sqltext))
        profile_name = 'coe_' + sql_id + '_' + str(plan_hash_value)
        outlines = []
        outlines.append('DECLARE')
        outlines.append('sql_txt CLOB;')
        outlines.append('h       SYS.SQLPROF_ATTR;')
        outlines.append('BEGIN')
        outlines.append("sql_txt := q'[")
        outlines.append(sqltext)
        outlines.append("]';")
        outlines.append('h := SYS.SQLPROF_ATTR(')
        outlines.append("q'[BEGIN_OUTLINE_DATA]',")
        for hint in result.get('HINT') or []:
            if len(hint.get('HINT')) > 500:
                tmp = "]',\nq'[".join(line.strip() for line in re.findall('.{1,160}(?:\\s+|$)', hint.get('HINT')))
            else:
                tmp = hint.get('HINT')
            outlines.append(tmp)

        outlines.append("q'[END_OUTLINE_DATA]');")
        outlines.append('DBMS_SQLTUNE.IMPORT_SQL_PROFILE (')
        outlines.append('sql_text    => sql_txt,')
        outlines.append('profile     => h,')
        outlines.append("name        => '" + profile_name + "',")
        outlines.append("description => '" + profile_name + "',")
        outlines.append("category    => 'DEFAULT',")
        outlines.append('validate    => FALSE,')
        outlines.append('replace     => TRUE,')
        outlines.append('force_match => FALSE /* TRUE:FORCE (match even when different literals in SQL). FALSE:EXACT (similar to CURSOR_SHARING) */ );')
        outlines.append('END;')
        import_sql_profile = '\n'.join(outlines)
        flag, result = run_plsql(database, import_sql_profile)
        if not flag:
            raise build_exception_from_java(result)
        return {'OK': True}
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
Example #21
def db2_analysis(audit_job):
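    """Build and run rule-based SQL-audit queries for a DB2 audit job, including per-SQL plan/text audits from DB2_ASH samples."""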
    try:
        schema, max_rows, order_by_pred, time_span = (audit_job.schema,
                                                      audit_job.max_rows,
                                                      audit_job.order_by,
                                                      audit_job.time_span)
        if time_span:
            # the window is the last hour: begin must precede end
            begin_time, end_time = (datetime.now() - timedelta(hours=1),
                                    datetime.now())
        else:
            begin_time, end_time = audit_job.snapshot_begin_time, audit_job.snapshot_end_time
        rule_list = []
        database = audit_job.database
        strategy_dict = audit_job.strategy
        audit_rule_queryset = (Audit_Rule.objects.filter(
            database=database)).filter(enabled=True)
        for k, v in strategy_dict.items():
            rule_list += list((audit_rule_queryset.filter(
                audit_type=k)).filter(target__in=v))

        logger.info('sql_audit_analysis begin build query')
        query_total_json = build_total_query(
            TotalTemplateJSON.get(database.db_type), database, schema)
        query_detail_json = {}
        query_problem_json = {}
        object_rules_list = [
            rule for rule in rule_list if rule.audit_type == ''
        ]
        for rule in set(object_rules_list):
            query_detail_json[rule.name] = build_rule_query(
                DetailTemplateJson, database, rule, schema, max_rows,
                order_by_pred)
            query_problem_json[rule.name] = build_rule_query(
                ProblemTemplateJSON, database, rule, schema, max_rows,
                order_by_pred)

        logger.info('sql_audit_analysis begin run total')
        flag, total_result = run_batch_sql(database, query_total_json)
        if not flag:
            logger.error(total_result)
            raise build_exception_from_java(total_result)
        logger.info('sql_audit_analysis begin run detail')
        flag, detail_result = run_batch_sql(database, query_detail_json)
        if not flag:
            logger.error(detail_result)
            raise build_exception_from_java(detail_result)
        logger.info('sql_audit_analysis begin run problem')
        flag, problem_result = run_batch_sql(database, query_problem_json)
        if not flag:
            logger.error(problem_result)
            raise build_exception_from_java(problem_result)
        logger.info('sql_audit_analysis begin build result')
        sql_list = (DB2_ASH.objects.filter(database=database)
                    .filter(created_at__range=(begin_time, end_time))
                    .filter(db_name__in=schema.split(','))
                    .values('sql_id', 'sql_text', 'db_name')
                    .annotate(total=Count('sql_id'))
                    .order_by('-total'))[:MAX_SQL_LIMIT]
        sql_count = sql_list.count()
        sql_rules_list = [
            rule for rule in rule_list if rule.audit_type == 'SQL'
        ]
        for rule in sql_rules_list:
            total_result[rule.target] = [{'COUNT': sql_count}]
            problem_result[rule.name] = [{'COUNT': 0}]

        for x in sql_list:
            sql_id = x.get('sql_id')
            sql_text = x.get('sql_text')
            sql_schema = x.get('db_name')
            json_data = None
            if sql_id and sql_text:
                flag, json_data = get_sql_plan(database, sql_text,
                                               sql_schema)
                if not flag:
                    print(sql_text)
                    print(str(build_exception_from_java(json_data)))
                    json_data = None
            if json_data:
                try:
                    plan_audit_data = get_db2_single_sql_plan_audit(
                        database, sql_text, json_data)
                    text_audit_data = get_db2_single_sql_text_audit(
                        database, sql_text, json_data)
                    audit_data = {**plan_audit_data, **text_audit_data}
                    for k, v in plan_audit_data.items():
                        if not detail_result.get(k):
                            detail_result[k] = []
                        detail_result[k].append({
                            'SQL_ID': sql_id,
                            'SQL_TEXT': sql_text,
                            'SCHEMA': sql_schema
                        })
                        current_problem_count = problem_result[k][0].get(
                            'COUNT')
                        problem_result[k] = [{
                            'COUNT': current_problem_count + 1
                        }]

                except Exception as e:
                    print(e)

        return build_audit_result(database, rule_list, total_result,
                                  problem_result, detail_result, audit_job)
    except Exception as e:
        logger.error(str(e))
Example #22
def object_detail(pk,
                  owner,
                  object_name,
                  object_type=None,
                  subobject_name=None,
                  cache=False):
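    """Return detail, DDL, and row-count trend for a schema object, with optional Redis caching."""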
    try:
        schema_name = owner
        key = f'''{pk}:schema:{owner}:{object_name}:{subobject_name}'''
        # only the cache lookup is gated on `cache`; everything else must always run
        if cache:
            cache_data = redis.get(key)
            if cache_data:
                return json.loads(cache_data)
        database = Database.objects.get(pk=pk)
        db_type = database.db_type
        type_map = Type_TO_CN.get(database.db_type)
        db_name = None
        if db_type == 'sqlserver':
            db_name, owner = owner.split('.')
        options = {
            'OWNER': owner,
            'OBJECT_NAME': object_name,
            'SUBOBJECT_NAME': subobject_name
        }
        if not object_type:
            object_type = get_object_type(database, owner, object_name,
                                          options, db_name)
            if not object_type:
                raise Exception('.')
            object_type = type_map.get(object_type)
        options['OBJECT_TYPE'] = CN_TO_Type.get(db_type).get(object_type)
        detail_query = {}
        if Object_Detail_Query.get(db_type):
            if Object_Detail_Query.get(db_type).get(object_type):
                detail_query = Object_Detail_Query.get(db_type).get(
                    object_type)
        ddl_query = DDL_Query.get(db_type) if DDL_Query.get(db_type) else {}
        if not subobject_name:
            query = {**detail_query, **ddl_query}
        else:
            query = detail_query
        if db_type == 'sqlserver' and detail_query:
            query.pop('DDL')
        query = {k: v.format(**options) for k, v in query.items()}
        flag, schema_data = run_batch_sql(database, query, db_name)
        if not flag:
            raise build_exception_from_java(schema_data)
        if schema_data.get('DDL'):
            if db_type != 'mysql':
                schema_data['DDL'] = schema_data.get('DDL')[0].get(
                    'DDL') if schema_data.get('DDL') else ''
            else:
                ddl_data = schema_data.get('DDL')[0]
                schema_data['DDL'] = None
                for k, v in ddl_data.items():
                    if 'create ' in k.lower():
                        schema_data['DDL'] = v

                if not schema_data['DDL']:
                    for k, v in ddl_data.items():
                        if 'SQL Original Statement' in k:
                            schema_data['DDL'] = v

        delta_list = []
        total_list = []
        if object_type == '':
            query_delta = f'''
    select extract(epoch from created_at)*1000 created_at, rows - lag(rows) over (order by created_at) as rows
    from monitor_table_rows where database_id = '{pk}'
    and owner = '{schema_name}' and table_name = '{object_name}'
    order by created_at
    '''
            query_total = f'''
    select extract(epoch from created_at)*1000 created_at, rows
    from monitor_table_rows where database_id = '{pk}'
    and owner = '{schema_name}' and table_name = '{object_name}'
    order by created_at
    '''
            delta_list = execute_return_json(query_delta)
            total_list = execute_return_json(query_total)
        new_schema = OrderedDict()
        for x in Ordered_List:
            if x in schema_data:
                new_schema[x] = schema_data.get(x)

        if delta_list:
            new_schema[''] = {
                'delta': [[x.get('CREATED_AT'), x.get('ROWS')]
                          for x in delta_list if x.get('ROWS') is not None],
                'total': [[x.get('CREATED_AT'), x.get('ROWS')]
                          for x in total_list if x.get('ROWS') is not None]
            }
        redis.set(key, json.dumps(new_schema))
        return new_schema
    except ObjectDoesNotExist:
        return {'error_message': ''}
    except Exception as err:
        return {'error_message': str(err)}
Example #23
def sqlserver_gen_sql_detail(database, sql_id, sql_text=None, schema=None, get_detail=True):
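    """Build and persist SQL detail (execution stats and query plan) for a SQL Server sql_handle."""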
    sql_detail = get_default_sql_detail_format(database.db_type)
    time_str = get_10s_time_str()
    plan_dic = {}
    query = {'detail':f'''
                select
            CONVERT(VARCHAR(24), creation_time, 120) CREATION_TIME,
            CONVERT(VARCHAR(24), last_execution_time, 120) LAST_EXECUTION_TIME,
            EXECUTION_COUNT,
            convert(bigint, total_elapsed_time/1000) TOTAL_ELAPSED_TIME,
            convert(bigint, total_worker_time/1000) TOTAL_WORKER_TIME,
            TOTAL_LOGICAL_READS,
            TOTAL_PHYSICAL_READS,
            TOTAL_LOGICAL_WRITES
            --total_rows
        from
            sys.dm_exec_query_stats
        where sql_handle = cast('' as xml).value('xs:hexBinary("{sql_id}")', 'varbinary(max)')
        order by total_elapsed_time desc
        ''',  'plan':f'''
            select
                QUERY_PLAN
            from
                sys.dm_exec_query_stats
                CROSS APPLY sys.dm_exec_query_plan(plan_handle)
            where sql_handle = cast('' as xml).value('xs:hexBinary("{sql_id}")', 'varbinary(max)') '''}
    if sql_text and schema:
        flag, json_data = run_batch_sql(database, query)
        if not flag:
            print(str(build_exception_from_java(json_data)))
            return sql_detail
        plan_data = json_data.get('plan')
        detail_data = json_data.get('detail')
        plan_dic = plan_data[0].get('QUERY_PLAN') if plan_data else ''
        if detail_data:
            metric_dict = {'TOTAL_ELAPSED_TIME':'()',  'TOTAL_WORKER_TIME':'()', 
             'TOTAL_LOGICAL_READS':'', 
             'TOTAL_PHYSICAL_READS':'', 
             'TOTAL_LOGICAL_WRITES':'', 
             'TOTAL_ROWS':'', 
             'EXECUTION_COUNT':''}
            for idx, x in enumerate(detail_data):
                pie_chart_data = {k:v for k, v in x.items() if k in ('TOTAL_ELAPSED_TIME',
                                                                     'TOTAL_WORKER_TIME')}
                total_executions = x.get('EXECUTION_COUNT') if x.get('EXECUTION_COUNT') != 0 else 1
                execution_data = [{'指标': metric_dict.get(k), '总数': v,  # metric / total
                                   '平均每次执行': round(v / total_executions)}  # avg per execution
                                  for k, v in x.items() if k in metric_dict]
                key = f'''{time_str}-{idx}'''
                sql_detail['stats'][key] = {
                    'child_summary': [{'CREATION_TIME': x.get('CREATION_TIME'),
                                       'LAST_EXECUTION_TIME': x.get('LAST_EXECUTION_TIME')}],
                    'pie_chart_data': pie_chart_data,
                    'execution_stats': {'header': ['', '', '', ''],
                                        'data': execution_data}}

    sql_detail['sql_text'] = sql_text
    if plan_dic:
        sql_detail['plans'] = plan_dic
    detail = SQL_Detail()
    detail.created_at = datetime.now().replace(microsecond=0)
    detail.sql_detail = sql_detail
    detail.sql_id = sql_id
    detail.database = database
    detail.save()
    return sql_detail
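
A minimal usage sketch, assuming a stored Database row and a sql_handle captured as hex text from sys.dm_exec_query_stats (the primary key and handle below are hypothetical):

db = Database.objects.get(pk=1)  # hypothetical primary key
detail = sqlserver_gen_sql_detail(
    db,
    sql_id='0200000063FC7D052E09844778CDD615CFE7A637D109D0AC0000000000000000',  # hypothetical
    sql_text='select * from dbo.orders',
    schema='dbo')
print(detail.get('plans'))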
Example #24
def oracle_performance(database):
    query = {
        'stats': '''
            select inst_id, name, value
            from
                (
                    select ss.inst_id
                    ,      sn.name
                    ,      ss.value
                    from   v$statname sn
                    ,      gv$sysstat  ss
                    where  sn.statistic# = ss.statistic#
                    and    sn.name in (
                    'execute count', 'logons cumulative',
                    'parse count (hard)', 'parse count (total)', 'parse count (failures)',
                    'physical read total IO requests', 'physical read total bytes',
                    'physical write total IO requests', 'physical write total bytes',
                    'redo size', --'session cursor cache hits',
                    'session logical reads', 'user calls', 'user commits', 'user rollbacks', 'logons current',
                    'gc cr blocks received', 'gc current blocks received',
                    'gc cr block receive time', 'gc current block receive time')
                    union all
                    select inst_id
                    ,      STAT_NAME
                    ,      VALUE
                    from gv$osstat
                    where STAT_NAME in ('BUSY_TIME', 'IDLE_TIME')
                    union all
                    select
                        (select min(INSTANCE_NUMBER) from gv$instance),
                        'SCN GAP Per Minute',
                        current_scn
                    from v$database
                )
            order by 1, 2''',
        'wait': '''
            select inst_id, event, TIME_WAITED, TOTAL_WAITS
            from gv$system_event
            where
            event in (
                'log file sync',
                'log file parallel write',
                'db file parallel write',
                'db file sequential read',
                'db file scattered read',
                'direct path read',
                'direct path read temp'
                )
            order by 1, 2''',
        'dg': '''
            select
                INST_ID,
                NAME,
                VALUE
            from gv$dataguard_stats
            where name in ('apply lag', 'transport lag')'''
    }
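    # Three snapshots per pass: cumulative stats (gv$sysstat and gv$osstat plus
    # the current SCN), wait events (gv$system_event) and, when Data Guard
    # stats are enabled, apply/transport lag (gv$dataguard_stats).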
    stats_list = {
        'session cursor cache hits': 'session cursor cache hits',
        'BUSY_TIME': 'Host CPU Utilization (%)',
        'physical write total IO requests':
        'Physical Write IO Requests Per Sec',
        'physical write total bytes': 'Physical Write Total Bytes Per Sec',
        'physical read total IO requests': 'Physical Read IO Requests Per Sec',
        'physical read total bytes': 'Physical Read Total Bytes Per Sec',
        'SCN GAP Per Minute': 'SCN GAP Per Minute',
        'execute count': 'Executions Per Sec',
        'logons cumulative': 'Logons Per Sec',
        'logons current': 'Session Count',
        'parse count (failures)': 'Parse Failure Count Per Sec',
        'parse count (hard)': 'Hard Parse Count Per Sec',
        'parse count (total)': 'Total Parse Count Per Sec',
        'redo size': 'Redo Generated Per Sec',
        'session logical reads': 'Logical Reads Per Sec',
        'user rollbacks': 'User Rollbacks Per Sec',
        'user calls': 'User Calls Per Sec',
        'user commits': 'User Commits Per Sec',
        'gc cr blocks received': 'GC CR Block Received Per Second',
        'gc current blocks received': 'GC Current Block Received Per Second',
        'gc cr block receive time': 'Global Cache Average CR Get Time',
        'gc current block receive time':
        'Global Cache Average Current Get Time'
    }
    none_delta_stats = ('BUSY_TIME', 'IDLE_TIME', 'gc cr block receive time',
                        'gc current block receive time', 'logons current')
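    # Stats listed here are reported as levels or ratios, not per-second deltas.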
    date_current = get_10s_time()
    if not database.dg_stats:
        query.pop('dg')
    flag, json_data = run_batch_sql(database, query)
    if not flag:
        print(str(build_exception_from_java(json_data)))
        return
    json_data_1_current = json_data.get('stats')
    json_data_2_current = json_data.get('wait')
    json_data_dg = json_data.get('dg')
    key1 = str(database.id) + ':performance1'
    key2 = str(database.id) + ':performance2'
    date_key = str(database.id) + ':performance_date'
    json_data_str_1 = redis.get(key1)
    json_data_str_2 = redis.get(key2)
    date_prev = redis.get(date_key)
    keys1 = ['NAME', 'VALUE']
    keys2 = ['EVENT', 'TIME_WAITED', 'TOTAL_WAITS']
    redis.setex(key1, MAX_INTERVAL, json.dumps(json_data_1_current))
    redis.setex(key2, MAX_INTERVAL, json.dumps(json_data_2_current))
    redis.setex(date_key, MAX_INTERVAL, str(date_current))
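    # Persist the current snapshot first; deltas are only computed when the
    # previous snapshot is recent enough (within MAX_INTERVAL seconds).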
    if json_data_str_1:
        if json_data_str_2:
            if date_prev:
                if (date_current -
                        to_date(date_prev)).total_seconds() < MAX_INTERVAL:
                    json_data_1_prev = json.loads(json_data_str_1)
                    json_data_2_prev = json.loads(json_data_str_2)
                    for idx, obj in enumerate(json_data_1_current):
                        name = obj.get(keys1[0])
                        if name == 'IDLE_TIME':
                            continue
                        value = obj.get(keys1[1])
                        inst_id = obj.get('INST_ID')
                        p = Performance()
                        p.name = stats_list.get(name)
                        p.database = database
                        p.created_at = date_current
                        p.inst_id = inst_id
                        delta = float(value) - float(json_data_1_prev[idx].get(
                            keys1[1]))
                        total = 1
                        if name in none_delta_stats:
                            if name == 'BUSY_TIME':
                                total = delta + float(
                                    json_data_1_current[idx + 1].get(
                                        keys1[1])) - float(
                                            json_data_1_prev[idx + 1].get(
                                                keys1[1]))
                                value = round(
                                    delta / (total if total != 0 else 1) * 100,
                                    1)
                            else:
                                if name in ('gc cr block receive time',
                                            'gc current block receive time'):
                                    delta = 10.0 * delta
                                    total = float(
                                        json_data_1_current[idx + 1].get(
                                            keys1[1])) - float(
                                                json_data_1_prev[idx + 1].get(
                                                    keys1[1]))
                                    value = round(
                                        delta / (total if total != 0 else 1),
                                        1)
                            p.value = value
                        else:
                            p.value = round(delta / INTERVAL, 1)
                        p.save()

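                    # TIME_WAITED in gv$system_event is in centiseconds; the 10x
                    # factor turns the delta into average milliseconds per wait.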
                    for idx, obj in enumerate(json_data_2_current):
                        name = obj.get(keys2[0])
                        value1 = obj.get(keys2[1])
                        value2 = obj.get(keys2[2])
                        inst_id = obj.get('INST_ID')
                        delta = 10.0 * (float(value1) - float(
                            json_data_2_prev[idx].get(keys2[1])))
                        total = float(value2) - float(
                            json_data_2_prev[idx].get(keys2[2]))
                        value = round(delta / (total if total != 0 else 1), 1)
                        p = Performance()
                        p.name = name
                        p.database = database
                        p.created_at = date_current
                        p.inst_id = inst_id
                        p.value = value
                        p.save()

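                    # Data Guard lag comes back as an Oracle interval string
                    # (e.g. '+00 00:00:07') and is stored as seconds.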
                    if json_data_dg:
                        for x in json_data_dg:
                            p = Performance()
                            p.name = x.get('NAME')
                            p.inst_id = x.get('INST_ID')
                            p.value = convert_oracle_interval_to_secodns(
                                x.get('VALUE'))
                            p.database = database
                            p.created_at = date_current
                            p.save()
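
Most of the stats above are cumulative counters, so the function turns two consecutive snapshots into per-second rates. A minimal sketch of that conversion, assuming a 10-second sampling INTERVAL (the constant and helper name are illustrative, not from the original):

INTERVAL = 10  # assumed seconds between samples

def per_second_rate(current, previous, interval=INTERVAL):
    # Cumulative counter delta divided by the sampling interval.
    return round((float(current) - float(previous)) / interval, 1)

# e.g. two consecutive 'user calls' readings taken 10 seconds apart:
assert per_second_rate(150750, 150000) == 75.0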