Example #1
def classic_week_pwd_async():
    try:
        classic_pwd().week_pwd()
        #sleep(20)
        Redis_base().set("classic_week_pwd", 2)
    except Exception:
        Redis_base().set("classic_week_pwd", 3)
Example #2
def handle_history_channel(start_date, end_date):
    try:
        info = UpdateChannelDataHaiEr()
        data = info.get_history_data(start_date, end_date)
        # Set the script execution status to completed = 2
        Redis_base().set("history_channel_status", 2)
        return data
    except Exception as e:
        # Set the script execution status to failed = 3
        Redis_base().set("history_channel_status", 3)
        return e.args
Example #3
def handle_visitor(start_date, end_date):
    try:
        # Visitor count
        visitor_manage = ReportManager()
        data = visitor_manage.get_historys(start_date, end_date)
        # Set the script execution status to completed = 2
        Redis_base().set("visitor_status", 2)
        return data
    except Exception as e:
        # Set the script execution status to failed = 3
        Redis_base().set("visitor_status", 3)
        return e.args
Example #4
def handle_consult(start_date, end_date):
    try:
        # Inquiry count
        consult_manager = InquiresFetcherManager()
        data = consult_manager.fetch_history(start_date, end_date)
        # Set the script execution status to completed = 2
        Redis_base().set("consult_status", 2)
        return {"status": 2}
    except Exception as e:
        # Set the script execution status to failed = 3
        Redis_base().set("consult_status", 3)
        return e.args
Example #5
def classic_week_pwd(request):
    # Check the script execution status in Redis: running = 1, completed = 2, failed = 3
    req = Redis_base().get("classic_week_pwd")
    if req == 1:
        return JsonResponse({}, safe=False)
    else:
        Redis_base().set("classic_week_pwd", 1)
        classic_week_pwd_async()
        request.body = json.dumps({"data": ""}).encode()
        request.method = "SCRIPT"
        OperateLog.create_log(request)
        return JsonResponse({}, safe=False)
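
Every snippet in this listing goes through a Redis_base helper that is not shown. Below is a minimal sketch of what such a wrapper might look like, assuming it wraps redis.Redis and JSON-encodes values so that the integer status flags (and the dict/list payloads stored in later examples) round-trip cleanly; the class name matches the snippets, but the connection settings and the encoding scheme are assumptions, not taken from the original code.

import json

import redis


class Redis_base:
    """Hypothetical thin wrapper around redis.Redis, as assumed by these examples."""

    def __init__(self, host="127.0.0.1", port=6379, db=0):
        self._client = redis.Redis(host=host, port=port, db=db)

    def get(self, key):
        # Return the decoded value, or None when the key is missing
        value = self._client.get(key)
        return json.loads(value) if value is not None else None

    def set(self, key, value):
        # JSON-encode so ints, dicts and lists survive the round trip
        return self._client.set(key, json.dumps(value))

The list helpers used further down (lpush, llen, lindex, exists, delete) would wrap the corresponding redis-py calls in the same way.
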
Example #6
def handle_grid_open_station(grid):
    try:
        info = ForGridSynchronous(grid)
        base_info = BaseStart(info.grid_name, info.site_ids)
        base_info.start()

        # Set the script execution status to completed = 2
        Redis_base().set("grid_open_station_status", 2)
        return []
    except Exception as e:
        # Set the script execution status to failed = 3
        Redis_base().set("grid_open_station_status", 3)
        return e.args
Example #7
def update_all_open_station(request):
    if Redis_base().get("all_open_station_status") == 1:
        return Response({"status": 1}, status=status.HTTP_200_OK)
    else:
        handle_update_all_open_station()

        request.body = json.dumps({"data": ""}).encode()
        request.method = "SCRIPT"
        OperateLog.create_log(request)

        Redis_base().set("all_open_station_status", 1)

        return Response(status=status.HTTP_200_OK)
Example #8
def update_siteid_open_station(request):
    siteid = request.GET.get('siteid', '')
    if not siteid:
        return Response(status=status.HTTP_400_BAD_REQUEST,
                        data={'error': '缺少节点参数'})

    if Redis_base().get("siteid_open_station_status") == 1:
        return Response(status=status.HTTP_200_OK)
    else:
        Redis_base().set("siteid_open_station_status", 1)
        handle_siteid_open_station(siteid)
        request.body = json.dumps({"siteid": siteid}).encode()
        request.method = "SCRIPT"
        OperateLog.create_log(request)
        return Response({}, status=status.HTTP_200_OK)
Example #9
def handle_update_all_open_station():
    try:
        for site_id in ForAllSynchronous().get_all_site_id():
            info = ForSiteSynchronous(site_id)
            try:
                base_info = BaseStart(info.grid_name, info.site_ids)
                base_info.start()
            except Exception:
                # Re-raise so the outer handler records the failure status
                raise

        # Set the script execution status to completed = 2
        Redis_base().set("all_open_station_status", 2)
        return []
    except Exception as e:
        # Set the script execution status to failed = 3
        Redis_base().set("all_open_station_status", 3)
        return e.args
Example #10
def test_history_channel(request):
    str_date_start = request.GET.get('start_date')
    str_date_end = request.GET.get('end_date')
    start_date = str_to_date(str_date_start)
    end_date = str_to_date(str_date_end)

    if Redis_base().get("history_channel_status") == 1:
        return Response({"status": 1}, status=status.HTTP_200_OK)
    else:
        handle_history_channel(start_date, end_date)

        request.body = json.dumps({
            "start_date": str_date_start,
            "end_date": str_date_end
        }).encode()
        request.method = "SCRIPT"
        OperateLog.create_log(request)

        # Store the script execution status in Redis: running = 1
        Redis_base().set("history_channel_status", 1)
    return Response({}, status=status.HTTP_200_OK)
Example #11
def get_consult(request):
    str_date_start = request.GET.get('start_date')
    str_date_end = request.GET.get('end_date')
    start_date = str_to_date(str_date_start)
    end_date = str_to_date(str_date_end)
    if not (start_date and end_date):
        return Response({'error': '日期上传错误'},
                        status=status.HTTP_400_BAD_REQUEST)

    if Redis_base().get("consult_status") == 1:
        return Response({"status": 1}, status=status.HTTP_200_OK)
    else:
        # Set the script execution status to running = 1
        Redis_base().set("consult_status", 1)
        handle_consult(start_date, end_date)
        request.body = json.dumps({
            "start_date": str_date_start,
            "end_date": str_date_end
        }).encode()
        request.method = "SCRIPT"
        OperateLog.create_log(request)
        return Response({}, status=status.HTTP_200_OK)
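
The str_to_date helper used by test_history_channel and get_consult is not part of these snippets. Since get_consult treats a falsy return value as an invalid date, a plausible minimal implementation could look like the sketch below; the 'YYYY-MM-DD' format is an assumption.

from datetime import datetime


def str_to_date(value):
    # Hypothetical helper: parse 'YYYY-MM-DD', returning None on bad or missing input
    try:
        return datetime.strptime(value, '%Y-%m-%d').date()
    except (TypeError, ValueError):
        return None
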
Example #12
    def parse_ms_loss(self):
        result_data, host_dict = self.get_network_ms()
        network = []
        packet = []
        url_dict = {}
        for item in result_data:
            url_value = item.get('url')
            host_value = item.get('host')
            ms = item.get('ms')
            loss = item.get('loss')
            if url_dict.get(url_value):
                url_dict[url_value][host_value] = [ms, loss]
            else:
                url_dict[url_value] = {host_value: [ms, loss]}

        for url, each in url_dict.items():
            inner_dict_ms = {'name': url}
            inner_dict_loss = {'name': url}
            for value, inner_each in each.items():
                inner_dict_ms[value] = inner_each[0]
                inner_dict_loss[value] = inner_each[1]

            network.append(inner_dict_ms)
            packet.append(inner_dict_loss)

        # Aggregate according to the configured rules
        packet = self.constriction_loss(packet, host_dict)
        network = self.constriction_ms(network, host_dict)

        packet = self.parse_dict(packet)
        network = self.parse_dict(network)
        host_dict = self.parse_dict(host_dict)

        Redis_base().set("network_yzq", network)
        Redis_base().set("packet_yzq", packet)
        Redis_base().set("title_yzq", host_dict)
        return network, packet, host_dict
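
To illustrate the pivot that parse_ms_loss performs before constriction_loss, constriction_ms and parse_dict are applied, here is a small hand-made input and the intermediate rows the two loops above would produce (the sample data is hypothetical):

result_data = [
    {'url': '/chat', 'host': 'bj-01', 'ms': 12, 'loss': 0.0},
    {'url': '/chat', 'host': 'sh-01', 'ms': 34, 'loss': 0.5},
]
# After the two loops:
# network == [{'name': '/chat', 'bj-01': 12, 'sh-01': 34}]
# packet  == [{'name': '/chat', 'bj-01': 0.0, 'sh-01': 0.5}]
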
Example #13
    def logic(self):
        starttime_ing = (int(
            time.mktime(
                time.strptime(time.strftime('%Y-%m-%d %H:%M:%S'),
                              '%Y-%m-%d %H:%M:%S'))) - 24 * 60 * 60) * 1000
        starttime = int(
            time.mktime(
                time.strptime(
                    time.strftime('%Y-%m-%d') + ' 00:00:00',
                    '%Y-%m-%d %H:%M:%S'))) * 1000
        endtime = int(
            time.mktime(
                time.strptime(
                    time.strftime('%Y-%m-%d') + ' 23:59:59',
                    '%Y-%m-%d %H:%M:%S'))) * 1000
        ret = self.get_grid_dbcon()
        _ret = []
        consult_num_total, consulting_num_total = 0, 0
        visitor_num_total = 0
        _letao_address_list = []

        for k in ret:
            consult_num, consulting = 0, 0
            db_address = k['db_address']
            db_username = k['db_username']
            db_pwd = k['db_pwd']
            db_port = k['db_port']
            if k['db_name'] == 'kf':
                try:
                    db_name = 'kf'
                    dbcon_kf = MysqldbHelper(db_address, db_username,
                                             decrypt(db_pwd), db_name,
                                             int(db_port))
                    if dbcon_kf is False:
                        continue
                    sql = f"SELECT count(*) as num from t2d_chatscene where starttime>={starttime} and endtime<={endtime}"
                    consult = dbcon_kf.select(sql)
                    consult_num_total = int(consult_num_total +
                                            consult[0]['num'])
                    consult_num = consult[0]['num']
                    sql = f"SELECT count(*) as num from t2d_chatscene where starttime>={starttime_ing} and endtime=0"
                    consulting = dbcon_kf.select(sql)
                    consulting_num_total = int(consulting_num_total +
                                               consulting[0]['num'])
                except Exception:
                    continue
            elif k['db_name'] == 'letaotrailcenter':
                try:
                    if db_address in _letao_address_list:
                        continue
                    db_name = 'letaotrailcenter'
                    dbcon_letao = MysqldbHelper(db_address, db_username,
                                                decrypt(db_pwd), db_name,
                                                int(db_port))
                    if dbcon_letao is False:
                        continue
                    date = time.strftime('%Y%m%d')
                    sql = f'select table_name from information_schema.tables where table_name LIKE "t2d_%_ip_hits_{date}"'
                    table_name = dbcon_letao.select(sql)
                    visitor_num = 0
                    for key in table_name:
                        sql = f"select count(*) as num from {key['table_name']}"
                        visitor = dbcon_letao.select(sql)
                        visitor_num = int(visitor_num + visitor[0]['num'])
                    visitor_num_total = visitor_num_total + visitor_num
                    _letao_address_list.append(db_address)
                except Exception:
                    continue
            else:
                continue

            if consult_num >= 25000:
                state = "灾难"
            elif consult_num >= 18000:
                state = "告警"
            else:
                state = "正常"
            strr = {
                "grid_name": k['grid_name'],
                "consult_num": consult_num,
                "threshold": 18000,
                "state": state
            }
            _ret.append(strr)
        # Store the in-progress consultation count in Redis
        consulting_str = f"{time.strftime('%H:%M:%S')}|{consulting_num_total}"
        consulting_key = "consulting" + time.strftime('%Y%m%d')
        yest_consulting_key = "consulting" + (
            datetime.date.today() -
            datetime.timedelta(days=1)).strftime('%Y%m%d')
        if Redis_base().exists(yest_consulting_key):
            Redis_base().delete(yest_consulting_key)
        Redis_base().lpush(consulting_key, consulting_str)
        consulting_json = []
        consulting_len = Redis_base().llen(consulting_key)
        for k in range(consulting_len):
            try:
                data = Redis_base().lindex(consulting_key, k)
                if data:
                    data_list = str(data).split('|')
                    data_dict = {data_list[0]: data_list[1]}
                    consulting_json.append(data_dict)
            except Exception:
                continue
        sorted_ret = sorted(_ret,
                            key=lambda item: item['consult_num'],
                            reverse=True)
        ret_str = {
            "consult": {
                "total": consult_num_total,
                "grid_num": sorted_ret
            },
            "visitor": {
                "total": visitor_num_total
            },
            "consulting": consulting_json
        }
        Redis_base().set("classic_gjf", ret_str)
        return ret_str
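
The three millisecond timestamps computed at the top of logic() via time.strftime/strptime/mktime can be written more directly with datetime. The sketch below yields the same values (local time, truncated to whole seconds); it is an equivalent formulation, not the code the class actually uses.

from datetime import datetime, timedelta

now = datetime.now()  # naive local time, like time.strftime above
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)

starttime_ing = (int(now.timestamp()) - 24 * 60 * 60) * 1000  # 24 hours ago, in ms
starttime = int(midnight.timestamp()) * 1000  # today 00:00:00, in ms
endtime = int((midnight + timedelta(hours=23, minutes=59, seconds=59)).timestamp()) * 1000  # today 23:59:59, in ms
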
Example #14
def xg_grid_monitor():
    checkout = CheckoutResult()
    zabbix = ZabbixApi()
    # Get the Zabbix API auth token
    auth = ZabbixApi().get_key()

    # Get all host groups
    major = zabbix.get_major_unit(auth)
    # Create a process pool
    pp_memory_available = Pool(8)

    li = []
    for m in major:
        group_id = m["groupid"]
        group_name = m["name"]
        if group_name[0:2] in MAIN_TJ:
            # All hosts in this host group
            mainframe = zabbix.get_mainframe(auth, group_id)
        else:
            continue
        if mainframe == []:
            continue
        # Iterate over all hosts
        for main in mainframe:
            hostid = main["hostid"]
            main_name = main["name"]

            memory_available = pp_memory_available.apply_async(
                zabbix.get_main_monitor,
                args=(auth, hostid, "vm.memory.size[available]", group_name,
                      main_name))
            # cpu_idle = pp_memory_available.apply_async(zabbix.get_main_monitor, args=(auth,hostid, "system.cpu.util[,idle]", group_name, main_name))
            cpu_iowait = pp_memory_available.apply_async(
                zabbix.get_main_monitor,
                args=(auth, hostid, "system.cpu.util[,iowait]", group_name,
                      main_name))
            cpu_load = pp_memory_available.apply_async(
                zabbix.get_main_monitor,
                args=(auth, hostid, "system.cpu.load[percpu,avg1]", group_name,
                      main_name))
            opt_free = pp_memory_available.apply_async(
                zabbix.get_main_monitor,
                args=(auth, hostid, "vfs.fs.size[/opt,free]", group_name,
                      main_name))
            fs_free = pp_memory_available.apply_async(
                zabbix.get_main_monitor,
                args=(auth, hostid, "vfs.fs.size[/,free]", group_name,
                      main_name))
            opt_total = pp_memory_available.apply_async(
                zabbix.get_main_monitor,
                args=(auth, hostid, "vfs.fs.size[/opt,total]", group_name,
                      main_name))
            fs_total = pp_memory_available.apply_async(
                zabbix.get_main_monitor,
                args=(auth, hostid, "vfs.fs.size[/,total]", group_name,
                      main_name))
            li.extend([
                memory_available, cpu_iowait, cpu_load, opt_free, fs_free,
                opt_total, fs_total
            ])

    # close() must be called before join(), otherwise an error is raised; after close() no new tasks can be added to the pool, and join() waits for all worker processes to finish
    pp_memory_available.close()
    pp_memory_available.join()

    pp_result = {}

    for item in li:
        row = item.get()
        # Structure of the data returned by item.get():
        # {'group_name': '北京轨迹集群', 'host_name': 'bj-ksy-g1-traildb_master-01', 'key': 'vfs.fs.size[/,total]', 'lastvalue': '19549736960'}
        group_name = row["group_name"]
        host_name = row["host_name"]
        key_ = row["key"]
        key = key_dict[key_]
        lastvalue = row["lastvalue"]

        if group_name not in pp_result:
            pp_result[group_name] = {host_name: {key: lastvalue}}
        elif host_name not in pp_result[group_name]:
            pp_result[group_name].update({host_name: {key: lastvalue}})
        else:
            pp_result[group_name][host_name].update({key: lastvalue})
    result = []
    look_list = []
    # Iterate over all host groups
    for p_k, p_v in pp_result.items():
        # List of abnormal-host entries for this host group
        group_list = []
        s_set = set()
        m_set = set()
        members_list = []
        for ma_k, ma_v in p_v.items():
            # Collects the abnormal statuses and messages for this host
            ma_list = []
            memory_available = ma_v["系统可用内存"]
            # cpu_idle = ma_v["cpu可用使用率"]
            cpu_iowait = ma_v["cpu等待"]
            cpu_load = ma_v["一分钟负载"]
            opt_free = ma_v["opt可用"]
            fs_free = ma_v["根可用"]
            opt_total = ma_v["opt总容量"]
            fs_total = ma_v["根总容量"]

            m_status, m_massage = checkout.memory(ma_k, memory_available)
            if m_status != 1:
                ma_list.append({"status": m_status, "massage": m_massage})
                s_set.add(m_status)
                m_set.add(m_massage)
            # i_status, i_massage = checkout.cpu_use(cpu_idle)
            # if i_status != 1:
            #     s_set.add(i_status)
            #     m_set.add(i_massage)
            w_status, w_massage = checkout.cpu_waite(cpu_iowait)
            if w_status != 1:
                ma_list.append({"status": w_status, "massage": w_massage})
                s_set.add(w_status)
                m_set.add(w_massage)

            l_status, l_massage = checkout.cpu_load(cpu_load)
            if l_status != 1:
                ma_list.append({"status": l_status, "massage": l_massage})
                s_set.add(l_status)
                m_set.add(l_massage)
            d_status, d_massage = checkout.disk(opt_free, fs_free, opt_total,
                                                fs_total)
            if d_status != 1:
                ma_list.append({"status": d_status, "massage": d_massage})
                s_set.add(d_status)
                m_set.add(d_massage)
            # All alarm info for this host
            ma_dict = {"ma_k": ma_k, "st_me": ma_list}
            if ma_list:
                group_list.append(ma_dict)

        m_list = list(m_set)

        if len(m_list) == 0:
            m_list = ["正常"]

        s_list = list(s_set)
        if len(s_list) == 0:
            statu = 1
        else:
            st = sorted(s_list)
            statu = st[-1]
        date = datetime.datetime.now().strftime('%m-%d %H:%M')

        if statu != 1:
            result.append({
                "group_name": p_k,
                "status": statu,
                "massage": m_list,
                "date": date,
                "member": group_list
            })
        else:
            continue
    sorted_x = sorted(result, key=operator.itemgetter('status'), reverse=True)
    Redis_base().set("zabbix_zxy", sorted_x)
    return '存储ok'
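
xg_grid_monitor also depends on a module-level key_dict that maps Zabbix item keys to the labels read back later (ma_v["系统可用内存"] and so on). It is not included in the snippet; judging from the items requested and the labels consumed, it presumably looks roughly like the following, though the exact pairing is an assumption:

key_dict = {
    'vm.memory.size[available]': '系统可用内存',
    'system.cpu.util[,iowait]': 'cpu等待',
    'system.cpu.load[percpu,avg1]': '一分钟负载',
    'vfs.fs.size[/opt,free]': 'opt可用',
    'vfs.fs.size[/,free]': '根可用',
    'vfs.fs.size[/opt,total]': 'opt总容量',
    'vfs.fs.size[/,total]': '根总容量',
}
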
Example #15
    def logic(self):
        # Ordered checks: the first column that is 0 determines the reported error
        checks = [
            ("pingTrailServer", "轨迹网络异常"),
            ("pingGetFlashServer", "FlashServer网络异常"),
            ("getServerAddr", "ServerAddr异常"),
            ("webTrail", "用户轨迹异常"),
            ("kfConnectT2d", "客服连接异常"),
            ("kfLoginT2d", "客服登录异常"),
            ("visitorRequestKf", "访客请求失败"),
            ("visitorConnectTchat", "访客连接失败"),
            ("kfConnectTchat", "客服会话失败"),
            ("uploadFile", "文件上传失败"),
            ("downloadFile", "文件下载失败"),
        ]
        try:
            table = "monitor_result_" + time.strftime('%Y_%m_%d')
            sql = "select * from " + table + " where time>=now()-interval 5 minute GROUP BY taskId ORDER BY time DESC"
            data = self.powerdog_db.select(sql)
            ret = []
            for k in data:
                record = {}
                for field, error in checks:
                    if k[field] == 0:
                        record = {
                            "taskId": k['taskId'],
                            "error": error,
                            "time": k['time']
                        }
                        break
                # An empty dict is kept for rows where every check passed,
                # matching the original behaviour
                ret.append(record)
        except Exception:
            ret = []
        Redis_base().set("power_dog0829", ret)
        return ret