Example #1
 def set(self):
     date_time = DateTime()
     now = date_time.get_now()
     HH = date_time.get_hour(now)
     if HH > SYSTEM["broadcast_time"]["FROM"] and HH < SYSTEM["broadcast_time"]["TO"]:
         self.create_snmp_at_broadcast_time()
     else:
         self.create_snmp_at_broadcast_timeout()
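The snippet above leans on a project-level SYSTEM settings dict and a DateTime helper that are defined elsewhere. A minimal sketch of what they might look like, inferred from how the snippet uses them (names and values here are assumptions, not the project's actual definitions):

# Hypothetical sketch of the settings and helper the snippet relies on;
# the real project defines SYSTEM and DateTime elsewhere.
import time

SYSTEM = {
    "broadcast_time": {"FROM": 6, "TO": 23},  # assumed broadcast window, in hours
}

class DateTime(object):
    def get_now(self):
        # Current time as a Unix timestamp (assumption, consistent with the
        # unix_timestamp() updates in the views.py example further down).
        return int(time.time())

    def get_hour(self, now):
        # Hour of day (0-23) for a Unix timestamp.
        return time.localtime(now).tm_hour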
Example #2
 def update_data(self, video_status, source_status):
     date_time = DateTime()
     opdate = date_time.get_now()
     child_thread_list = []
     profile = ProfileBLL()
     profile_data = {
         "video": video_status,
         "agent": self.agent,
         "ip": self.ip
     }
     self.logger.debug(
         "update profile data: video: {0} - agent: {1} - ip: {2}".format(
             video_status, self.agent, self.ip))
     child_thread = threading.Thread(target=profile.put,
                                     args=(
                                         self.id,
                                         profile_data,
                                     ))
     child_thread.start()
     child_thread_list.append(child_thread)
     human_readable_status = self.get_human_readable_status(source_status)
     message = """%s %s (ip:%s) %s in host: %s (%s)""" % (
         self.name, self.type, self.source, human_readable_status, self.ip,
         self.agent)
     log_data = {
         "host": self.protocol + "://" + self.source,
         "tag": "status",
         "msg": message
     }
     rslog = {
         "sev": "Critical",
         "jname": self.name,
         "type": self.type,
         "res": self.source,
         "desc": human_readable_status,
         "cat": "Communication",
         "host": self.agent,
         "opdate": opdate,
         "cldate": opdate
     }
     self.logger.critical(json.dumps(rslog))
     log = LogBLL()
     child_thread = threading.Thread(target=log.post, args=(log_data, ))
     child_thread.start()
     child_thread_list.append(child_thread)
     """Update local snmp IPTV"""
     local_snmp = LocalSnmp(profile=self.source + "-" + self.type,
                            name=self.name,
                            status=2)
     child_thread = threading.Thread(target=local_snmp.set)
     child_thread.start()
     child_thread_list.append(child_thread)
     """
     Wait for update database complete
     """
     for child_thread in child_thread_list:
         child_thread.join()
     return 0
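update_data fans each backend call (profile update, log post, local SNMP set) out to its own thread, collects the threads in child_thread_list, and joins them all before returning, so the method only finishes once every update has completed. A stripped-down sketch of that pattern with placeholder workers:

import threading

def _put_profile():  # placeholder for profile.put(id, profile_data)
    pass

def _post_log():  # placeholder for log.post(log_data)
    pass

child_thread_list = []
for target in (_put_profile, _post_log):
    t = threading.Thread(target=target)
    t.start()
    child_thread_list.append(t)

# Block until every worker has finished before returning to the caller.
for t in child_thread_list:
    t.join()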
Example #3
def get_history_auto_return_main(thomson_host, jobid):
    query = {
        "from": 0,
        "size": 1000,
        "sort": [{
            "@timestamp": {
                "order": "desc"
            }
        }],
        "_source": ["message"],
        "query": {
            "bool": {
                "must": [{
                    "match": {
                        "host.keyword": "thomson"
                    }
                }, {
                    "match": {
                        "message": "tool just"
                    }
                }, {
                    "match": {
                        "message": "the main source"
                    }
                }, {
                    "match": {
                        "message": "%s" % (thomson_host)
                    }
                }, {
                    "match": {
                        "message": "%d" % (jobid)
                    }
                }],
                "filter": {
                    "range": {
                        "@timestamp": {
                            "gte": "now-5m",
                            "lte": "now"
                        }
                    }
                }
            }
        }
    }
    d_time = DateTime()
    now = d_time.get_now_as_logtash_fortmat()
    yesterday = d_time.get_yesterday_as_logtash_fortmat()
    index = "logstash-%s,logstash-%s" % (now, yesterday)
    elast = Elasticsearch([{
        'host': host,
        'port': port
    }]).search(
        index=index,
        body=query,
    )
    return elast['hits']['hits']
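The function returns the raw hits list from a standard Elasticsearch search response, so each element is a hit dict whose _source contains only the requested message field. A small usage sketch (the host name and job id arguments are placeholders):

# Usage sketch: pull the log line out of each returned hit.
# "thomson-01" and 42 are placeholder arguments, not values from the project.
hits = get_history_auto_return_main("thomson-01", 42)
for hit in hits:
    print hit["_source"]["message"]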
Example #4
def callback(ch, method, properties, body):
    date_time = DateTime()
    print "------------->\n" + str(date_time.get_now_as_human_creadeble(
    )) + " recieved: " + body + "\n<-------------"
    logger.info("received " + body)
    if not body:
        logger.warning("received " + body + "empty!")
        return 1
    t = threading.Thread(target=return_main, args=(body, ))
    t.start()
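The (ch, method, properties, body) signature matches a pika message callback. A minimal sketch of how such a callback might be registered with a blocking consumer, assuming the pika >= 1.0 API; the queue name and connection parameters are assumptions, not taken from the project:

# Sketch only: wiring the callback to a RabbitMQ queue with pika (>= 1.0 API).
# The queue name "return_main" and the localhost connection are assumptions.
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
channel = connection.channel()
channel.queue_declare(queue="return_main")
channel.basic_consume(queue="return_main",
                      on_message_callback=callback,
                      auto_ack=True)
channel.start_consuming()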
Example #5
def get_the_last_active_backup_log_by_jobid(thomson_host, jobid):
    query = {
        "from": 0,
        "size": 1,
        "_source": ["message"],
        "sort": [{
            "@timestamp": {
                "order": "desc"
            }
        }],
        "query": {
            "bool": {
                "must": [{
                    "match": {
                        "message": "%s" % (thomson_host)
                    }
                }, {
                    "match": {
                        "message": "%d" % (jobid)
                    }
                }, {
                    "match": {
                        "message": "input:backup"
                    }
                }]
            }
        }
    }
    d_time = DateTime()
    now = d_time.get_now_as_logtash_fortmat()
    yesterday = d_time.get_yesterday_as_logtash_fortmat()
    index = "logstash-%s,logstash-%s" % (now, yesterday)
    elast = Elasticsearch([{
        'host': host,
        'port': port
    }]).search(
        index=index,
        body=query,
    )
    return elast['hits']['hits']
Example #6
    def check_source(self, source, last_status, id, agent, name, type):
        """
        Get status of profile, if stastus not change then update check equal 1.      
        Ffmpeg: Use Ffprobe to check stastus profile (source) and return flag 
        0 is down
        1 is up
        2 is video error
        3 is audio eror 
        """
        ffmpeg = Ffmpeg()
        check = ffmpeg.check_source(source)
        # print "%s : %s"%(check, last_status)
        self.logger.debug("Curent :%s <> Last: %s, %s %s %s"%(check, last_status, source, name, type))
        if check != last_status:
            date_time = DateTime()
            opdate = date_time.get_now()
            time.sleep(SYSTEM["BREAK_TIME"])
            self.logger.debug("Recheck : %s %s %s"%(source, name, type))
            recheck = ffmpeg.check_source(source)
            if recheck == check:
                status = {0: "DOWN       ", 1: "UP         ", 2: "VIDEO ERROR", 3: "AUDIO ERROR"} [check]
                """
                Update status and write log
                """
                child_thread_list = []
                profile = ProfileBLL()
                profile_data = {"status": check, "agent": agent, "ip": SYSTEM["HOST"]}
                child_thread = threading.Thread(target=profile.put, args=(id, profile_data,))
                child_thread.start()
                child_thread_list.append(child_thread)
                """Append log"""
                channel = """%s %s"""%(name, type)
                while len(channel) < 22:
                    channel += " "
                while len(source) < 27:
                    source += " "
                ip_config = SYSTEM["HOST"]
                while len(ip_config) < 16:
                    ip_config += " "
                message = """%s (ip:%s) %s in host: %s (%s)""" % (channel, source, status, ip_config, agent)
                cldate = date_time.get_now()
                rslog = {
                    "sev": "Critical",
                    "jname": name,
                    "type": type,
                    "res": source,
                    "desc": status,
                    "cat": "Communication",
                    "host": agent,
                    "opdate": opdate,
                    "cldate": cldate
                }
                self.logger.critical(json.dumps(rslog))
                log_data = {"host": source, "tag": "status", "msg": message}
                log = LogBLL()
                child_thread = threading.Thread(target=log.post, args=(log_data,))
                child_thread.start()
                child_thread_list.append(child_thread)
                """Update local snmp IPTV"""
                if "origin" or "4500" in agent:
                    self.logger.debug("%s is core probe"%(agent))
                    time.sleep(2)
                    snmp = Snmp()
                    child_thread = threading.Thread(target=snmp.set)
                    child_thread.start()
                    child_thread_list.append(child_thread)

                """
                Wait for update database complete
                """
                for child_thread in child_thread_list:
                    child_thread.join()
                return 1
        return 0
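A hedged sketch of a call site for check_source; the checker object and the profile row are invented for illustration, based only on the parameters the method takes:

# Hypothetical call site; "checker" stands for an instance of the class that
# defines check_source, and the row fields are inferred from its parameters.
def recheck_profile(checker, row):
    changed = checker.check_source(row["source"], row["last_status"], row["id"],
                                   row["agent"], row["name"], row["type"])
    if changed:
        # check_source returns 1 only when a re-check confirms a new status,
        # so the caller would refresh its cached last_status from the database.
        row["last_status"] = None
    return changed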
Example #7
File: views.py Project: ahcan/api.monitor
 def put(self, request, pk):
     data = request.body
     data = json.loads(data)
     self.logger.debug("message: %s" % (str(data)))
     #Status
     if ("status" in data):
         self.logger.debug("message: update status -->%s" % (str(data)))
         agent_name = data["agent"]
         profile_agent = self.get_object(pk)
         date_time = DateTime()
         if not profile_agent:
             self.logger.info("Not found profile_agent_id: %s" % (str(pk)))
             return HttpResponse(status=400)
         if profile_agent.status != data["status"]:
             profile_agent.status = data["status"]
             profile_agent.last_update = date_time.get_now()
             profile_agent.save()
             self.logger.debug("message: update status --> success")
             if "Origin" in agent_name or "4500" in agent_name or "ott" in agent_name:
                 self.logger.info("%s is core probe" % (agent_name))
                 if PUSH_ALARM_CORE:
                     self.logger.info("Push alarm to scc is active")
                     time.sleep(0.5)
                     snmp = Snmp(str(data["ip"]).replace(' ', ''))
                     alarm_status, msg = snmp.check_agent()
                     data = {
                         "ishost": False,
                         "queueServiceName": "Check_Agent_IPTV_Status",
                         "queueHost": agent_name,
                         "msg": str(msg),
                         "AlertStatus": alarm_status,
                         "agentId": int(pk)
                     }
                     self.logger.debug("alarm contain: %s" % (str(data)))
                     scc = Scc()
                     scc.post(data)
                 return HttpResponse(status=202)
             else:
                 self.logger.info("%s is not core probe" % (agent_name))
                 if PUSH_ALARM_PROBE:
                     self.logger.info("Push alarm to scc is active")
                     time.sleep(0.5)
                     snmp = Snmp(str(data["ip"]).replace(' ', ''))
                     alarm_status, msg = snmp.check_agent()
                     data = {
                         "ishost": False,
                         "queueServiceName": "Check_Agent_IPTV_Status",
                         "queueHost": agent_name,
                         "msg": str(msg),
                         "AlertStatus": alarm_status,
                         "agentId": int(pk)
                     }
                     self.logger.debug("alarm contain: %s" % (str(data)))
                     scc = Scc()
                     scc.post(data)
                 return HttpResponse(status=202)
         else:
             self.logger.warning("Value not change: %s" % (str(pk)))
             return HttpResponse(status=400)
             # querry = "update profile_agent set status=%s,last_update=unix_timestamp() where id=%s;"%(data['status'],pk)
             # RabbitMQQueue().push_query(querry)
             # return HttpResponse(status=202)
     #video
     elif ('video' in data):
         self.logger.debug("message: update video status -->%s" %
                           (str(data)))
         agent_name = data["agent"]
         profile_agent = self.get_object(pk)
         date_time = DateTime()
         if not profile_agent:
             self.logger.warning("message: profile id %s not exist" %
                                 (str(pk)))
             return HttpResponse(status=400)
         if profile_agent.video != data["video"]:
             profile_agent.video = data["video"]
             profile_agent.last_update = date_time.get_now()
             profile_agent.save()
             self.logger.debug("message: update video status --> success")
             if "Origin" in agent_name or "4500" in agent_name or "ott" in agent_name:
                 """
                 Push to SCC
                 """
                 self.logger.info("%s is core probe" % (agent_name))
                 if PUSH_ALARM_CORE:
                     self.logger.info("Push alarm to scc is active")
                     time.sleep(0.5)
                     snmp = Snmp(str(data["ip"]).replace(' ', ''))
                     alarm_status, msg = snmp.check_agent()
                     data = {
                         "ishost": False,
                         "queueServiceName": "Check_Agent_IPTV_Status",
                         "queueHost": agent_name,
                         "msg": str(msg),
                         "AlertStatus": alarm_status,
                         "agentId": int(pk)
                     }
                     self.logger.debug("alarm contain: %s" % (str(data)))
                     scc = Scc()
                     scc.post(data)
                 return HttpResponse(status=202)
             else:
                 self.logger.info("%s is not core probe" % (agent_name))
                 if PUSH_ALARM_PROBE:
                     self.logger.info("Push alarm to scc is active")
                     time.sleep(0.5)
                     snmp = Snmp(str(data["ip"]).replace(' ', ''))
                     alarm_status, msg = snmp.check_agent()
                     data = {
                         "ishost": False,
                         "queueServiceName": "Check_Agent_IPTV_Status",
                         "queueHost": agent_name,
                         "msg": str(msg),
                         "AlertStatus": alarm_status,
                         "agentId": int(pk)
                     }
                     self.logger.debug("alarm contain: %s" % (str(data)))
                     scc = Scc()
                     scc.post(data)
                 return HttpResponse(status=202)
         else:
             self.logger.warning("Value not change: %s" % (str(pk)))
             return HttpResponse(status=400)
             # querry="update profile_agent set video=%s,last_update=unix_timestamp() where id=%s;"%(data['video'],pk)
             # RabbitMQQueue().push_query(querry)
             # return HttpResponse(status=202)
     #dropframe
     elif ('dropframe' in data) and len(data) == 1:
         self.logger.debug("message: update dropframe -->%s" % (str(data)))
         querry = "update profile_agent set dropframe=%s,last_update=unix_timestamp() where id=%s;" % (
             data['dropframe'], pk)
         RabbitMQQueue().push_query(querry)
         self.logger.info(
             "message: update dropframe added to RabbitMQQueue: %s" %
             (querry))
         return HttpResponse(status=202)
     #discontinuity
     elif ('discontinuity' in data) and len(data) == 1:
         self.logger.debug("message: update discontinuity -->%s" %
                           (str(data)))
         querry = "update profile_agent set discontinuity=%s,last_update=unix_timestamp() where id=%s;" % (
             data['discontinuity'], pk)
         RabbitMQQueue().push_query(querry)
         self.logger.info(
             "message: update discontinuity added to RabbitMQQueue: %s" %
             (querry))
         return HttpResponse(status=202)
     #analyzer_status
     elif ('analyzer_status' in data) and len(data) == 1:
         self.logger.debug("message: update analyzer_status -->%s" %
                           (str(data)))
         querry = "update profile_agent set analyzer_status=%s,last_update=unix_timestamp() where id=%s;" % (
             data['analyzer_status'], pk)
         RabbitMQQueue().push_query(querry)
         self.logger.info(
             "message: update analyzer_status added to RabbitMQQueue: %s" %
             (querry))
         return HttpResponse(status=202)
     return HttpResponse(status=400)
Example #8
def return_main(body):
    if not is_json(body):
        logger.error("Recieve: %s not json fortmat --> break" % (str(body)))
        print "Not json %s" % (str(body))
        return 1
    data = json.loads(body)
    if data["status"] != 1:
        logger.warning(
            "Job(%s)single status is %s --> not ok --> not return main" %
            (str(data)))
    jid = int(data["jid"])
    target_host = data["host"]
    account = None
    for i in THOMSON_HOST:
        if THOMSON_HOST[i]["host"] == target_host:
            account = THOMSON_HOST[i]
            break
    if not account:
        logger.error("Host %s not found on setting list: %s" %
                     (target_host, str(THOMSON_HOST)))
        print "Host: %s not found!" % (target_host)
        return 1
    jd = JobDetail(account["host"], account["user"], account["passwd"], jid)
    is_auto = is_auto_return_main(data)
    is_not_overwork = is_not_overworked(data)
    is_running_backup = is_running_backup_on_thomson(data)
    if not is_running_backup:
        time.sleep(10)
        is_running_backup = is_running_backup_on_thomson(data)
    if not is_running_backup:
        time.sleep(10)
        is_running_backup = is_running_backup_on_thomson(data)
    if not (is_auto and is_running_backup and is_not_overwork):
        logger.warning(
            "Job(%s) is not auto --> check your config: is_auto(%s), is_not_overwork(%s), is_running_backup(%s)"
            % (str(data), str(is_auto), str(is_not_overwork),
               str(is_running_backup)))
        date_time = DateTime()
        print "%s Job: %d from thomson %s not auto" % (str(
            date_time.get_now_as_human_creadeble()), jid, target_host)
        return 0
    if not SYSTEM["auto"]["RETURN_MAIN"]:
        logger.warning("System auto return main not active check your config!")
        return 1
    job_status = get_job_status(jd)
    logger.info("Job(%s) status --> |%s|" % (str(data), job_status))
    if job_status.upper() == "OK":
        origin_source_backup, origin_udp_port = get_job_backup_info(jd)
        disable_backup = deactive_backup(jd)
        time.sleep(2)
        enable_backup = active_backup(jd)
        if enable_backup.upper() == "NOTOK":
            stop = stop_job(jd)
            #logger.warning("Job(%s) STOP --> %s"%(str(data), stop))
            time.sleep(1)
            enable_backup = active_backup(jd)
            logger.warning("Job(%s) enable backup --> %s" %
                           (str(data), enable_backup))
            time.sleep(1)
            start = start_job(jd)
            #logger.warning("Job(%s) START --> %s"%(str(data), start))
        logger.critical(
            "Tool just returned the main source by disabling and re-enabling the active backup: Job(%s)"
            % (str(data)))
        source_backup, udp_port = get_job_backup_info(jd)
        if origin_source_backup != source_backup:
            set_backup = set_backup_ip(jd, origin_source_backup)
            logger.warning(
                "Job(%s) thomson tool changed the backup ip from %s to %s --> %s"
                % (str(data), source_backup, origin_source_backup, set_backup))
        if origin_udp_port != udp_port:
            set_backup_port = set_backup_udp_port(jd, origin_udp_port)
            logger.warning(
                "Job(%s) thomson tool changed the backup udp port from %s to %s --> %s"
                % (str(data), str(udp_port), str(origin_udp_port),
                   set_backup_port))
    elif job_status.upper() == "MAJOR":
        stop = stop_job(jd)
        time.sleep(2)
        start = start_job(jd)
        logger.critical(
            "Tool just returned the main source by stopping and starting the job: Job(%s)"
            % (str(data)))
    return 0