def flush_visual_data():
    """Background loop: refresh the system-info timeline chart every 10 min.

    Renders a CPU/memory/disk pyecharts Timeline and dumps its embed HTML,
    host and JS dependency list to the ``temp_json_info`` file as JSON so
    another process (or view) can serve it without re-rendering.

    Runs forever; intended to be started in a daemon thread.
    """
    while True:
        timeline = Timeline(is_auto_play=True, timeline_bottom=0)
        cpudata = cpu(attr)
        memdata = mem(attr)
        diskdata = disk(attr)
        timeline.add(cpudata, 'CPU')
        timeline.add(memdata, '内存')
        timeline.add(diskdata, '磁盘')
        context = dict(
            visual_sysinfo=timeline.render_embed(),
            host=DEFAULT_HOST,
            script_list=timeline.get_js_dependencies(),
        )
        # BUG FIX: json.dumps returns str, but the file was opened in binary
        # mode ('wb'), which raises TypeError on Python 3. Open in text mode
        # with an explicit encoding and stream via json.dump instead.
        with open('temp_json_info', 'w', encoding='utf-8') as temp_file:
            json.dump(context, temp_file, ensure_ascii=False)
        time.sleep(600)
# Esempio n. 2
# 0
def home(request):
    """Render the CPU-anomaly overview page.

    Runs the stored anomaly-detection procedure, fetches the flagged CPU
    snapshot(s), the pivoted top-SQL outlier counts and the full DB_CPU
    series, derives the top-3 time-model "reasons" per anomaly, and
    renders everything into the anom_total template.
    """
    conn = database_method.initial_connect('dmuser', 'dmuser', 'dmtest')
    conn = conn.create_conn()
    cursor = conn.cursor()

    # First anomalous snapshot (SVM prediction == 1) plus a drill-down URL.
    cpu_outlier_result = """
        select "TIME",
    "SNAP_ID", 
    "DB_CPU", 
    db_id,
    '/ora_dual/load_profile_trend/?snapid='||SNAP_ID||'&dbid='||db_id
    from (
    SELECT 
    "TIME",
    "SNAP_ID", 
    "DB_CPU", 
    db_id,
    PREDICTION_PROBABILITY(dmuser.ANOM_SVM_1_6 USING *) ANOM_SVM_1_6_PROB,
    PREDICTION(dmuser.ANOM_SVM_1_6 USING *) ANOM_SVM_1_6_PRED
    FROM dmuser.stat_all_pivot_data) where ANOM_SVM_1_6_PRED=1 and rownum=1
        """

    # Counts of outlier SQL statements pivoted by top-SQL category.
    sql_outlier_result = """
                               select * from (
                                   select type
                                   from topsql_all_data_his_view
                                )
                                pivot 
                                (
                                   count(*)
                                   for type in ('CPU Time' as "CPU_TIME",'Elapse Time' as "ELAPSE_TIME",'Buffer Reads' as "BUFFER_READS",'Physical Reads' as "PHYSICAL_READS",'Executions' as "EXECUTIONS")
                                )
    """

    # Full DB_CPU series for the trend chart.
    cpu_result = """
            select db_cpu,time from dmuser.stat_all_pivot_data
            """

    # Top-3 time-model stats (excluding DB time / DB CPU) for one snapshot.
    # Uses a bind variable instead of the original per-iteration string
    # concatenation: SQL-injection-safe and hoisted out of the loop.
    reason_sql = """
               select stat_name from (
    select * from DBA_HIST_SYS_TIME_MODEL where snap_id = :snap_id and stat_name not in ('DB time','DB CPU')
    order by value desc) where rownum < 4
                      """

    try:
        # Run the anomaly-detection model.
        cursor.callproc('dmuser.cpu_outlier_apply_model')

        # Fetch the anomalous snapshot(s).
        cursor.execute(cpu_outlier_result)
        data_result = list(cursor.fetchall())

        # Pivoted outlier-SQL counts -> one dict per row for the template.
        cursor.execute(sql_outlier_result)
        sql_result = list(cursor.fetchall())
        outlier_sql = [{
            'CPU': row[0],
            'ELA': row[1],
            'BUFFER': row[2],
            'READ': row[3],
            'EXE': row[4],
        } for row in sql_result]

        # Derive the anomaly "reasons" for each flagged snapshot.
        reasons = []
        for anomaly in data_result:
            cursor.execute(reason_sql, snap_id=anomaly[1])
            reason_result = cursor.fetchall()
            # Build drill-down URLs: the stat name is upper-cased with
            # underscores for spaces. BUG FIX: the original stringified
            # the whole row tuple and stripped punctuation by hand, but
            # its final replace('''''', '') was replace('', '') -- a
            # no-op -- so stray quote characters leaked into the URL.
            # Taking the column value directly avoids all of that.
            url = [
                anomaly[4] + "&reason=" +
                str(row[0]).upper().replace(' ', '_')
                for row in reason_result
            ]
            reasons.append({
                "TIME": anomaly[0],
                "snap_id": anomaly[1],
                "DB_CPU": anomaly[2],
                "URL": url,
                "reason": reason_result,
            })

        # Whole DB_CPU series: rows are (db_cpu, time).
        cursor.execute(cpu_result)
        cpu_all_result = list(cursor.fetchall())
    finally:
        # BUG FIX: the original cursor.close() sat after the return
        # statement and was unreachable (cursor/connection leaked).
        cursor.close()
        conn.close()

    outlier = [row[0] for row in cpu_all_result]
    outlier_time = [row[1] for row in cpu_all_result]

    template = loader.get_template(
        './node_modules/gentelella/production/anom_total.html')
    # NOTE: no series is ever added to this timeline; its embed renders
    # empty, matching the original behavior (it is kept mainly for the
    # get_js_dependencies() script list).
    timeline = Timeline(is_auto_play=True, timeline_bottom=0)

    cpu_line = Line(title_pos='center')
    cpu_line.add("DB_CPU",
                 outlier_time,
                 outlier,
                 is_smooth=True,
                 mark_point=["max", "min"],
                 mark_line=["average"])

    context = dict(
        cpu_line=cpu_line.render_embed(),
        data_result=reasons,
        sql_result=outlier_sql,
        myechart=timeline.render_embed(),
        host=REMOTE_HOST,
        script_list=timeline.get_js_dependencies())
    return HttpResponse(template.render(context, request))
def bar_echart(request, *args, **kwargs):
    """Render the per-snapshot metric detail page.

    Reads the ``snapdate`` GET parameter, loads that date's hourly
    load-profile metrics and tablespace usage via the ORM, and renders a
    pie chart (space usage) plus a timeline of hourly bar charts.

    NOTE(review): if 'snapdate' is missing/empty the visible code falls
    through without an explicit return — confirm whether more code
    follows this chunk.
    """
    template = loader.get_template(
        './node_modules/gentelella/production/display_metric_detail.html')
    snap = request.GET.get('snapdate')
    #snap_date = datetime.strptime(snap, '%y/%m/%d').strftime('%Y-%m-%d')
    if snap:

        # Hourly load-profile metrics for the requested snapshot date.
        load_profile_per_hour = list(
            models.loadmetric_hour.objects.values(
                "time", "redo_second", "logical_second", "physical_second",
                "execs_second", "trans_second").filter(snap_date=snap).all())
        # Tablespace usage (name, percent) collected at that time.
        space_usage = list(
            models.spaceusage.objects.values(
                "tablespace_name", "percent").filter(collect_time=snap).all())
        print(space_usage)

        # load_profile_obj = apps.get_model('ora_dual', 'loadmetric_hour')
        # load_profile_field = load_profile_obj._meta.fields
        # title = []
        # for ind in range(len(load_profile_field)):
        #     title.append(load_profile_field[ind].name)
        # Column names from the first row's dict keys; assumes at least
        # one row exists for this date (IndexError otherwise).
        attr = []

        for key, value in load_profile_per_hour[0].items():
            attr.append(key)

        # Split the usage rows into parallel name/percent lists for the pie.
        val_usage = []
        val_name = []
        for idx in range(len(space_usage)):
            val_name.append(space_usage[idx]['tablespace_name'])
            val_usage.append(space_usage[idx]['percent'])

        # Donut chart of tablespace usage (title string means
        # "pie chart - space usage"; runtime string, kept as-is).
        usage_pie = Pie("饼图-空间使用率", title_pos='center')
        usage_pie.add(
            "",
            val_name,
            val_usage,
            radius=[40, 75],
            label_text_color=None,
            is_label_show=True,
            legend_orient="vertical",
            legend_pos="left",
        )
        # pie.render()
        timeline = Timeline(is_auto_play=True, timeline_bottom=0)

        # One bar chart per hourly row: val[0] is the time label and the
        # remaining values line up with the attr[1:] metric names.
        for idx in range(len(load_profile_per_hour)):
            val = []
            for key, value in load_profile_per_hour[idx].items():
                val.append(value)

            bar = Bar("数据库指标", val[0])
            bar.add("值/秒", attr[1:], val[1:])
            timeline.add(bar, val[0])

        context = dict(
            snap_date=snap,
            title=attr,
            usage_pie=usage_pie.render_embed(),
            space_usage=space_usage,
            metric_data=load_profile_per_hour,
            myechart=timeline.render_embed(),
            # host=DEFAULT_HOST was replaced by REMOTE_HOST below
            host=REMOTE_HOST,  # serve echarts assets from the remote host
            script_list=timeline.get_js_dependencies())
        return HttpResponse(template.render(context, request))