def bulk_field_isinmaintenance(self, data):
    """
    Bulk-annotate alarm rows with an "isInMaintenance" flag.

    :param data: List of alarm dicts; all rows are assumed to share the same
                 "status" value ("A" = active, otherwise archived)
    :return: The same list, with "isInMaintenance" set on every row
    """
    if not data:
        return data
    if data[0]["status"] == "A":
        # Active alarms: a single set lookup against currently affected objects
        mtc = set(Maintenance.currently_affected())
        for x in data:
            x["isInMaintenance"] = x["managed_object"] in mtc
    else:
        # Archived alarms: collect every maintenance interval per managed object
        # in one aggregation pass, then test interval overlap per row
        mos = [x["managed_object"] for x in data]
        pipeline = [
            {"$match": {"affected_objects.object": {"$in": mos}}},
            {"$unwind": "$affected_objects"},
            {
                "$project": {
                    "_id": 0,
                    "managed_object": "$affected_objects.object",
                    "interval": ["$start", "$stop"],
                }
            },
            {"$group": {"_id": "$managed_object", "intervals": {"$push": "$interval"}}},
        ]
        # mtc: managed_object id -> list of (start, stop) pairs
        mtc = {
            x["_id"]: x["intervals"]
            for x in Maintenance._get_collection().aggregate(pipeline)
        }
        for x in data:
            if x["managed_object"] in mtc:
                left, right = list(zip(*mtc[x["managed_object"]]))
                # Overlap test via bisect: if the alarm interval
                # [timestamp, clear_timestamp] straddles any maintenance
                # boundary, the two bisect positions differ.
                # NOTE(review): assumes `left`/`right` are sorted — bisect
                # requires sorted input; confirm aggregation output order.
                x["isInMaintenance"] = bisect.bisect(
                    right, dateutil.parser.parse(x["timestamp"]).replace(tzinfo=None)
                ) != bisect.bisect(
                    left, dateutil.parser.parse(x["clear_timestamp"]).replace(tzinfo=None)
                )
            else:
                x["isInMaintenance"] = False
    return data
def start_maintenance(maintenance_id):
    """
    Escalate a starting maintenance to its external trouble-ticket system.

    Creates a TT on the escalated managed object's TT system and, when the
    system supports it, promotes a group TT covering all affected objects.

    :param maintenance_id: Maintenance document id
    """
    logger.info("[%s] Start maintenance", maintenance_id)
    m = Maintenance.get_by_id(maintenance_id)
    if not m:
        # Fix: the "%s" placeholder previously had no argument
        logger.info("[%s] Not found, skipping", maintenance_id)
        return
    if not m.escalate_managed_object:
        logger.info("[%s] No managed object to escalate", maintenance_id)
        return
    if m.escalation_tt:
        logger.info("[%s] Already escalated as TT %s", maintenance_id, m.escalation_tt)
        return
    # Get external TT system
    tts_id = m.escalate_managed_object.tt_system_id
    if not tts_id:
        logger.info(
            "[%s] No TT mapping for object %s(%s)",
            maintenance_id,
            m.escalate_managed_object.name,
            m.escalate_managed_object.address,
        )
        return
    tt_system = m.escalate_managed_object.tt_system
    if not tt_system:
        # NOTE(review): message says "TT system" but logs the managed object;
        # the object's tt_system is None here, so there is no name to log
        logger.info("[%s] Cannot find TT system '%s'", maintenance_id, m.escalate_managed_object)
        return
    tts = tt_system.get_system()
    try:
        logger.info("[%s] Creating TT", maintenance_id)
        tt_id = tts.create_tt(
            queue=1,
            obj=tts_id,
            reason=0,
            subject=m.subject,
            body=m.description or m.subject,
            login="******",
            timestamp=m.start,
        )
        logger.info("[%s] TT %s created", maintenance_id, tt_id)
        if tts.promote_group_tt:
            gtt = tts.create_group_tt(tt_id, m.start)
            d = Maintenance._get_collection().find_one(
                {"_id": m.id}, {"_id": 0, "affected_objects": 1}
            )
            if d:
                objects = [x["object"] for x in d["affected_objects"]]
                # Fix: loop variable renamed from `d`, which shadowed the
                # Mongo document fetched above
                for mo in ManagedObject.objects.filter(id__in=objects):
                    logger.info("[%s] Appending object %s to group TT %s", maintenance_id, mo, gtt)
                    tts.add_to_group_tt(gtt, mo.tt_system_id)
        metrics["maintenance_tt_create"] += 1
    except tts.TTError as e:
        logger.error("[%s] Failed to escalate: %s", maintenance_id, e)
        metrics["maintenance_tt_fail"] += 1
def close_maintenance(maintenance_id):
    """
    Close the external trouble ticket escalated for a maintenance.

    :param maintenance_id: Maintenance document id
    """
    logger.info("[%s] Close maintenance", maintenance_id)
    m = Maintenance.get_by_id(maintenance_id)
    if not m:
        logger.info("[%s] Not found, skipping", maintenance_id)
        return
    if not m.escalation_tt:
        logger.info("[%s] Not escalated, skipping", maintenance_id)
        return
    # escalation_tt is "<tt system name>:<tt id>"
    tts_name, tt_id = m.escalation_tt.split(":", 1)
    # Fix: get_by_name() may return None; calling .get_system() on it raised
    # AttributeError before the original "if not tts" guard could ever fire
    tt_system = TTSystem.get_by_name(tts_name)
    if not tt_system:
        logger.error("[%s] TT system '%s' is not found", maintenance_id, tts_name)
        return
    tts = tt_system.get_system()
    try:
        logger.info("[%s] Closing TT %s", maintenance_id, tt_id)
        tts.close_tt(tt_id, subject="Closed", body="Closed", login="******")
        metrics["maintenance_tt_close"] += 1
    except tts.TTError as e:
        logger.error("[%s] Failed to close TT %s: %s", maintenance_id, tt_id, e)
        metrics["maintenance_tt_close_fail"] += 1
def api_test(self, request, id):
    """
    Return the managed objects affected by maintenance *id*.

    :param request: HTTP request
    :param id: Maintenance ObjectId as string
    :return: JSON-style response {"total", "success", "data"}
    """
    r = []
    data = list(
        Maintenance._get_collection().aggregate([
            {"$match": {"_id": bson.ObjectId(id)}},
            {"$project": {"objects": "$affected_objects.object"}},
        ])
    )
    # Fix: guard against an unknown maintenance id — data[0] raised IndexError.
    # Also default missing "objects" to [] so id__in never receives None.
    if not data:
        return self.response({"total": 0, "success": True, "data": []}, status=self.OK)
    for mo in (
        ManagedObject.objects.filter(
            is_managed=True, id__in=data[0].get("objects") or []
        )
        .values("id", "name", "is_managed", "profile", "address", "description", "tags")
        .distinct()
    ):
        r += [{
            "id": mo["id"],
            "name": mo["name"],
            "is_managed": mo["is_managed"],
            "profile": Profile.get_by_id(mo["profile"]).name,
            "address": mo["address"],
            "description": mo["description"],
            "tags": mo["tags"],
        }]
    out = {"total": len(r), "success": True, "data": r}
    return self.response(out, status=self.OK)
def fix():
    """
    Re-save every Maintenance document, reporting failures per document.

    Useful as a data-migration pass: save() re-runs document-level hooks.
    A progress bar is shown, sized by an estimated document count.
    """
    total = Maintenance._get_collection().estimated_document_count()
    for doc in progressbar.progressbar(Maintenance.objects.filter(), max_value=total):
        try:
            doc.save()
        except Exception as e:
            print("[%s] %s" % (doc.id, e))
def get_maintenance(objects: List[int]) -> Set[int]:
    """
    Returns a set of objects currently in maintenance
    :param objects:
    :return:
    """
    wanted = set(objects)
    query = {"is_completed": False, "start": {"$lte": datetime.datetime.now()}}
    affected: Set[int] = set()
    # Walk every started, not-yet-completed maintenance and keep only the
    # requested ids that appear among its affected objects
    for doc in Maintenance._get_collection().find(query, {"_id": 0, "affected_objects": 1}):
        affected.update(wanted.intersection(x["object"] for x in doc["affected_objects"]))
    return affected
def bulk_field_isinmaintenance(self, data):
    """
    Bulk-annotate alarm rows with an "isInMaintenance" flag.

    Variant backed by the AffectedObjects collection: maintenance intervals
    are fetched per managed object (one query each).

    :param data: List of alarm dicts; all rows share the same "status"
    :return: The same list, with "isInMaintenance" set on every row
    """
    if not data:
        return data
    if data[0]["status"] == "A":
        # Active alarms: simple membership in the currently-affected set
        mtc = set(Maintenance.currently_affected())
        for x in data:
            x["isInMaintenance"] = x["managed_object"] in mtc
    else:
        # Archived alarms: build managed_object -> [(start, stop), ...]
        # NOTE(review): this issues one find() per managed object plus one
        # get_by_id per maintenance (N+1 pattern) — candidate for aggregation
        mos = set([x["managed_object"] for x in data])
        mtc = {}
        for mo in list(mos):
            interval = []
            for ao in AffectedObjects._get_collection().find(
                    {"affected_objects.object": {"$eq": mo}},
                    {"_id": 0, "maintenance": 1}):
                m = Maintenance.get_by_id(ao["maintenance"])
                interval += [(m.start, m.stop)]
            if interval:
                mtc[mo] = interval
        for x in data:
            if x["managed_object"] in mtc:
                left, right = list(zip(*mtc[x["managed_object"]]))
                # Interval-overlap test via bisect positions of the alarm's
                # open/clear timestamps against maintenance starts/stops.
                # NOTE(review): bisect assumes sorted input — confirm the
                # intervals arrive ordered.
                x["isInMaintenance"] = bisect.bisect(
                    right, dateutil.parser.parse(x["timestamp"]).replace(
                        tzinfo=None)) != bisect.bisect(
                            left,
                            dateutil.parser.parse(
                                x["clear_timestamp"]).replace(tzinfo=None))
            else:
                x["isInMaintenance"] = False
    return data
def get_maintenance(objects):
    """
    Returns a set of objects currently in maintenance
    :param objects: Iterable of managed object ids to check
    :return: Subset of *objects* affected by a started, non-completed maintenance
    """
    now = datetime.datetime.now()
    so = set(objects)
    r = set()
    for m in Maintenance._get_collection().find(
            {"is_completed": False, "start": {"$lte": now}},
            {"_id": 0, "affected_objects": 1}):
        # Fix: generator variable renamed from `r`, which shadowed the
        # accumulator set (harmless in py3 generator scope, but confusing)
        mo = set(ao["object"] for ao in m["affected_objects"])
        r |= so & mo
    return r
def escalate(alarm_id, escalation_id, escalation_delay, *args, **kwargs):
    """
    Run the escalation chain for an alarm: create trouble tickets, promote
    group TTs, notify consequence tickets, and send notifications.

    :param alarm_id: Alarm id to escalate
    :param escalation_id: AlarmEscalation id describing the chain
    :param escalation_delay: Only chain items with this delay are processed
    """
    def log(message, *args):
        # Log both to the service logger and into the alarm's own log
        msg = message % args
        logger.info("[%s] %s", alarm_id, msg)
        alarm.log_message(msg, to_save=True)

    def summary_to_list(summary, model):
        # Convert summary items to [{"profile", "summary", "order"}, ...],
        # skipping profiles flagged show_in_summary=False, sorted by
        # (display_order, -summary)
        r = []
        for k in summary:
            p = model.get_by_id(k.profile)
            if not p or getattr(p, "show_in_summary", True) is False:
                continue
            r += [{
                "profile": p.name,
                "summary": k.summary,
                "order": (getattr(p, "display_order", 100), -k.summary),
            }]
        return sorted(r, key=operator.itemgetter("order"))

    logger.info("[%s] Performing escalations", alarm_id)
    alarm = get_alarm(alarm_id)
    if alarm is None:
        logger.info("[%s] Missing alarm, skipping", alarm_id)
        metrics["escalation_missed_alarm"] += 1
        return
    if alarm.status == "C":
        logger.info("[%s] Alarm is closed, skipping", alarm_id)
        metrics["escalation_already_closed"] += 1
        return
    if alarm.root:
        log("[%s] Alarm is not root cause, skipping", alarm_id)
        metrics["escalation_alarm_is_not_root"] += 1
        return
    #
    escalation = AlarmEscalation.get_by_id(escalation_id)
    if not escalation:
        log("Escalation %s is not found, skipping", escalation_id)
        metrics["escalation_not_found"] += 1
        return
    if alarm.managed_object.tt_system:
        sample = alarm.managed_object.tt_system.telemetry_sample
    else:
        sample = PARENT_SAMPLE
    with Span(client="escalator", sample=sample) as ctx:
        alarm.set_escalation_context()
        # Evaluate escalation chain
        mo = alarm.managed_object
        for a in escalation.escalations:
            if a.delay != escalation_delay:
                continue  # Try other type
            # Check administrative domain
            if a.administrative_domain and a.administrative_domain.id not in alarm.adm_path:
                continue
            # Check severity
            if a.min_severity and alarm.severity < a.min_severity:
                continue
            # Check selector
            if a.selector and not SelectorCache.is_in_selector(mo, a.selector):
                continue
            # Check time pattern
            if a.time_pattern and not a.time_pattern.match(alarm.timestamp):
                continue
            # Render escalation message
            if not a.template:
                log("No escalation template, skipping")
                continue
            # Check global limits
            # @todo: Move into escalator service
            # @todo: Process per-ttsystem limits
            ets = datetime.datetime.now() - datetime.timedelta(
                seconds=config.escalator.ets)
            ae = ActiveAlarm._get_collection().count_documents(
                {"escalation_ts": {"$gte": ets}})
            ae += ArchivedAlarm._get_collection().count_documents(
                {"escalation_ts": {"$gte": ets}})
            if ae >= config.escalator.tt_escalation_limit:
                logger.error(
                    "Escalation limit exceeded (%s/%s). Skipping",
                    ae,
                    config.escalator.tt_escalation_limit,
                )
                metrics["escalation_throttled"] += 1
                alarm.set_escalation_error(
                    "Escalation limit exceeded (%s/%s). Skipping"
                    % (ae, config.escalator.tt_escalation_limit))
                return
            # Check whether consequences has escalations
            cons_escalated = sorted(alarm.iter_escalated(),
                                    key=operator.attrgetter("timestamp"))
            affected_objects = sorted(alarm.iter_affected(),
                                      key=operator.attrgetter("name"))
            #
            segment = alarm.managed_object.segment
            if segment.is_redundant:
                uplinks = alarm.managed_object.data.uplinks
                lost_redundancy = len(uplinks) > 1
                affected_subscribers = summary_to_list(
                    segment.total_subscribers, SubscriberProfile)
                affected_services = summary_to_list(segment.total_services,
                                                    ServiceProfile)
            else:
                lost_redundancy = False
                affected_subscribers = []
                affected_services = []
            # Template rendering context
            ctx = {
                "alarm": alarm,
                "affected_objects": affected_objects,
                "cons_escalated": cons_escalated,
                "total_objects": summary_to_list(alarm.total_objects,
                                                 ManagedObjectProfile),
                "total_subscribers": summary_to_list(alarm.total_subscribers,
                                                     SubscriberProfile),
                "total_services": summary_to_list(alarm.total_services,
                                                  ServiceProfile),
                "tt": None,
                "lost_redundancy": lost_redundancy,
                "affected_subscribers": affected_subscribers,
                "affected_services": affected_services,
            }
            # Escalate to TT
            if a.create_tt and mo.can_escalate():
                tt_id = None
                if alarm.escalation_tt:
                    log("Already escalated with TT #%s", alarm.escalation_tt)
                else:
                    pre_reason = escalation.get_pre_reason(mo.tt_system)
                    active_maintenance = Maintenance.get_object_maintenance(mo)
                    if active_maintenance:
                        # Object under maintenance: log every window, do not escalate
                        for m in active_maintenance:
                            log(
                                "Object is under maintenance: %s (%s-%s)",
                                m.subject,
                                m.start,
                                m.stop,
                            )
                        metrics["escalation_stop_on_maintenance"] += 1
                    elif pre_reason is not None:
                        subject = a.template.render_subject(**ctx)
                        body = a.template.render_body(**ctx)
                        logger.debug(
                            "[%s] Escalation message:\nSubject: %s\n%s",
                            alarm_id, subject, body)
                        log("Creating TT in system %s", mo.tt_system.name)
                        tts = mo.tt_system.get_system()
                        try:
                            try:
                                tt_id = tts.create_tt(
                                    queue=mo.tt_queue,
                                    obj=mo.tt_system_id,
                                    reason=pre_reason,
                                    subject=subject,
                                    body=body,
                                    login="******",
                                    timestamp=alarm.timestamp,
                                )
                            except TemporaryTTError as e:
                                # Transient failure: reschedule the whole job
                                metrics["escalation_tt_retry"] += 1
                                log(
                                    "Temporary error detected. Retry after %ss",
                                    RETRY_TIMEOUT)
                                mo.tt_system.register_failure()
                                Job.retry_after(get_next_retry(), str(e))
                            ctx["tt"] = "%s:%s" % (mo.tt_system.name, tt_id)
                            alarm.escalate(
                                ctx["tt"],
                                close_tt=a.close_tt,
                                wait_tt=ctx["tt"] if a.wait_tt else None,
                            )
                            if tts.promote_group_tt and a.promote_group_tt:
                                # Create group TT
                                log("Promoting to group tt")
                                gtt = tts.create_group_tt(
                                    tt_id, alarm.timestamp)
                                # Append affected objects
                                for ao in alarm.iter_affected():
                                    if ao.can_escalate(True):
                                        if ao.tt_system == mo.tt_system:
                                            log(
                                                "Appending object %s to group tt %s",
                                                ao.name, gtt)
                                            try:
                                                tts.add_to_group_tt(
                                                    gtt, ao.tt_system_id)
                                            except TTError as e:
                                                alarm.set_escalation_error(
                                                    "[%s] %s" % (mo.tt_system.name, e))
                                        else:
                                            log(
                                                "Cannot append object %s to group tt %s: Belongs to other TT system",
                                                ao.name,
                                                gtt,
                                            )
                                    else:
                                        log(
                                            "Cannot append object %s to group tt %s: Escalations are disabled",
                                            ao.name,
                                            gtt,
                                        )
                            metrics["escalation_tt_create"] += 1
                        except TTError as e:
                            log("Failed to create TT: %s", e)
                            metrics["escalation_tt_fail"] += 1
                            alarm.log_message("Failed to escalate: %s" % e,
                                              to_save=True)
                            alarm.set_escalation_error(
                                "[%s] %s" % (mo.tt_system.name, e))
                    else:
                        log("Cannot find pre reason")
                        metrics["escalation_tt_fail"] += 1
                if tt_id and cons_escalated:
                    # Notify consequences: comment each already-escalated
                    # consequence ticket that this TT covers it
                    for ca in cons_escalated:
                        c_tt_name, c_tt_id = ca.escalation_tt.split(":")
                        cts = TTSystem.get_by_name(c_tt_name)
                        if cts:
                            tts = cts.get_system()
                            try:
                                log("Appending comment to TT %s", tt_id)
                                tts.add_comment(c_tt_id,
                                                body="Covered by TT %s" % tt_id,
                                                login="******")
                                metrics["escalation_tt_comment"] += 1
                            except NotImplementedError:
                                log(
                                    "Cannot add comment to %s: Feature not implemented",
                                    ca.escalation_tt,
                                )
                                metrics["escalation_tt_comment_fail"] += 1
                            except TTError as e:
                                log("Failed to add comment to %s: %s",
                                    ca.escalation_tt, e)
                                metrics["escalation_tt_comment_fail"] += 1
                        else:
                            log(
                                "Failed to add comment to %s: Invalid TT system",
                                ca.escalation_tt)
                            metrics["escalation_tt_comment_fail"] += 1
            # Send notification
            if a.notification_group and mo.can_notify():
                subject = a.template.render_subject(**ctx)
                body = a.template.render_body(**ctx)
                logger.debug("[%s] Notification message:\nSubject: %s\n%s",
                             alarm_id, subject, body)
                log("Sending notification to group %s",
                    a.notification_group.name)
                a.notification_group.notify(subject, body)
                alarm.set_clear_notification(a.notification_group,
                                             a.clear_template)
                metrics["escalation_notify"] += 1
            #
            if a.stop_processing:
                logger.debug("Stopping processing")
                break
        # Re-fetch: the alarm may have been closed while we were escalating
        nalarm = get_alarm(alarm_id)
        if nalarm and nalarm.status == "C":
            nalarm.log_message(
                "Alarm has been closed during escalation. Try to deescalate")
            logger.info(
                "[%s] Alarm has been closed during escalation. Try to deescalate",
                alarm.id)
            metrics["escalation_closed_while_escalated"] += 1
            # NOTE(review): tt_id is only bound inside the create_tt branch of
            # the loop above — it may be unbound here if no rule matched; confirm
            if tt_id and not nalarm.escalation_tt:
                nalarm.escalation_ts = datetime.datetime.now()
                nalarm.escalation_tt = "%s:%s" % (mo.tt_system.name, tt_id)
                nalarm.save()
            if not nalarm.escalation_close_ts and not nalarm.escalation_close_error:
                notify_close(
                    alarm_id=alarm_id,
                    tt_id=nalarm.escalation_tt,
                    subject="Closing",
                    body="Closing",
                    notification_group_id=alarm.clear_notification_group.id
                    if alarm.clear_notification_group else None,
                    close_tt=alarm.close_tt,
                )
        # NOTE(review): `nalarm == "A"` compares the alarm object to a string
        # and is always False — presumably meant `nalarm.status == "A"`; confirm
        elif nalarm == "A" and not nalarm.escalation_tt and tt_id:
            logger.error("[%s] Alarm without escalation TT: %s", alarm.id, tt_id)
        logger.info("[%s] Escalations loop end", alarm_id)
def get_ajax_data(self, **kwargs):
    """
    Build the container-map payload: managed objects grouped by container,
    each with an aggregate status (error/warning/good/maintenance) derived
    from alarm severity and maintenance state, plus a per-profile service
    summary.

    :return: {"objects": [...container markers...], "summary": glyph summary}
    """
    def update_dict(d, s):
        # Subtract counts in s from d (creating missing keys with s's value)
        for k in s:
            if k in d:
                d[k] -= s[k]
            else:
                d[k] = s[k]

    object_id = self.handler.get_argument("object_id")
    # zoom = int(self.handler.get_argument("z"))
    # west = float(self.handler.get_argument("w"))
    # east = float(self.handler.get_argument("e"))
    # north = float(self.handler.get_argument("n"))
    # south = float(self.handler.get_argument("s"))
    # ms = int(self.handler.get_argument("maintenance"))
    # active_layers = [l for l in self.get_pop_layers() if l.min_zoom <= zoom <= l.max_zoom]
    if self.current_user.is_superuser:
        moss = ManagedObject.objects.filter(is_managed=True)
    else:
        # Restrict to the user's administrative domains
        moss = ManagedObject.objects.filter(
            is_managed=True,
            administrative_domain__in=self.get_user_domains())
    objects = []
    objects_status = {
        "error": [],
        "warning": [],
        "good": [],
        "maintenance": []
    }
    sss = {"error": {}, "warning": {}, "good": {}, "maintenance": {}}
    # s_def = {
    #     "service": {},
    #     "subscriber": {},
    #     "interface": {}
    # }
    services = defaultdict(list)
    try:
        object_root = Object.objects.filter(id=object_id).first()
    except ValidationError:
        object_root = None
    if object_root:
        # Limit to containers under the requested root
        con = [str(c) for c in self.get_containers_by_root(object_root.id)]
        moss = moss.filter(container__in=con).order_by("container")
    else:
        moss = moss.exclude(container=None).order_by("container")
        con = list(moss.values_list("container", flat=True))
    mo_ids = list(moss.values_list("id", flat=True))
    # Getting Alarms severity dict MO: Severity @todo List alarms
    if not object_root:
        alarms = self.get_alarms_info(None, alarms_all=True)
    else:
        alarms = self.get_alarms_info(mo_ids)
    # Get maintenance
    maintenance = Maintenance.currently_affected()
    # Getting services
    if not object_root:
        services_map = self.get_objects_summary_met(mo_ids, info_all=True)
    else:
        services_map = self.get_objects_summary_met(mo_ids)
    # Getting containers name and coordinates
    containers = {
        str(o["_id"]): (o["name"], o["data"])
        for o in Object.objects.filter(
            data__geopoint__exists=True,
            id__in=con,
            read_preference=ReadPreference.SECONDARY_PREFERRED,
        ).fields(id=1, name=1, data__geopoint__x=1, data__geopoint__y=1).as_pymongo()
    }
    # Main Loop. Get ManagedObject group by container
    for container, mol in itertools.groupby(
            moss.values_list("id", "name", "container").order_by("container"),
            key=lambda o: o[2]):
        name, data = containers.get(container, ("", {"geopoint": {}}))
        x = data["geopoint"].get("x")
        y = data["geopoint"].get("y")
        ss = {
            "objects": [],
            "total": 0,
            "error": 0,
            "warning": 0,
            "good": 0,
            "maintenance": 0
        }
        for mo_id, mo_name, container in mol:
            # Status by alarm severity
            # s_service = s_services.get(mo_id, s_def)
            status = "good"
            # NOTE(review): alarms.get(mo_id) may be None when the object has
            # no alarms — comparing None with < raises TypeError on py3; confirm
            # get_alarms_info always returns an entry per object
            if mo_id in maintenance:
                status = "maintenance"
            elif 100 < alarms.get(mo_id) <= 2000:
                status = "warning"
            elif alarms.get(mo_id) > 2000:
                status = "error"
            objects_status[status] += [mo_id]
            # update_dict(sss[status], s_service["service"])
            ss[status] += 1
            ss["total"] += 1
            services_ss = [
                "%s-%s" % (sm, status)
                for sm in services_map.get(mo_id, [self.fake_service])
            ]
            ss["objects"] += [{
                "id": mo_id,
                "name": mo_name,
                "status": status,
                "services": services_ss
            }]
        # Containers without coordinates are counted but not drawn
        if not x or not y:
            continue
        objects += [{
            "name": name,
            "id": str(container),
            "x": x if x > -168 else x + 360,  # For Chukotskiy AO
            "y": y,
            "objects": [],
            "total": 0,
            "error": 0,
            "warning": 0,
            "good": 0,
            "maintenance": 0,
        }]
        objects[-1].update(ss)
    # Build the per-status service summaries
    profiles = set()
    for r in ["error", "warning", "good", "maintenance"]:
        if not objects_status[r]:
            continue
        if not object_root and r == "good":
            m_services, m_subscribers = ServiceSummary.get_direct_summary(
                objects_status[r], summary_all=True)
        else:
            m_services, m_subscribers = ServiceSummary.get_direct_summary(
                objects_status[r])
        # update_dict(s_services["service"], m["serivce"])
        # if not object_root and r == "good":
        #     for s in s_services["service"]:
        #         if s in m["service"]:
        #             s_services["service"][s] -= m["service"][s]
        #     m = s_services
        profiles |= set(m_services)
        sss[r] = m_services
    # Emit per-profile counts in fixed status order
    for r in sorted(sss, key=lambda k: ("error", "warning", "good",
                                        "maintenance").index(k)):
        # for p in sss[r]:
        for p in profiles:
            services[p] += [(r, sss[r].get(p, None))]
    return {
        "objects": objects,
        "summary": self.f_glyph_summary({
            "service": services
            # "subscriber": subscribers
        }),
    }
def cleaned_query(self, q):
    """
    Normalize an ExtJS grid query dict into ORM/ODM filter kwargs.

    Strips paging/format parameters, cleans field values, and translates
    UI-level filters (maintenance, administrative_domain, segment,
    managedobjectselector, cleared_after, wait_tt, collapse) into their
    backing query fields.

    :param q: Raw request parameter dict (not mutated; a copy is processed)
    :return: Cleaned filter dict
    """
    q = q.copy()
    status = q["status"] if "status" in q else "A"
    for p in self.ignored_params:
        if p in q:
            del q[p]
    # Drop paging / formatting parameters
    for p in (self.limit_param, self.page_param, self.start_param,
              self.format_param, self.sort_param, self.query_param,
              self.only_param):
        if p in q:
            del q[p]
    # Normalize parameters
    for p in q:
        qp = p.split("__")[0]
        if qp in self.clean_fields:
            q[p] = self.clean_fields[qp].clean(q[p])
    # Exclude maintenance
    if "maintenance" not in q:
        q["maintenance"] = "hide"
    if q["maintenance"] == "hide":
        q["managed_object__nin"] = Maintenance.currently_affected()
    elif q["maintenance"] == "only":
        q["managed_object__in"] = Maintenance.currently_affected()
    del q["maintenance"]
    if "administrative_domain" in q:
        q["adm_path"] = int(q["administrative_domain"])
        q.pop("administrative_domain")
    if "segment" in q:
        q["segment_path"] = bson.ObjectId(q["segment"])
        q.pop("segment")
    if "managedobjectselector" in q:
        # Intersect with any explicit managed_object__in filter
        s = SelectorCache.objects.filter(
            selector=q["managedobjectselector"]).values_list("object")
        if "managed_object__in" in q:
            q["managed_object__in"] = list(
                set(q["managed_object__in"]).intersection(s))
        else:
            q["managed_object__in"] = s
        q.pop("managedobjectselector")
    if "cleared_after" in q:
        q["clear_timestamp__gte"] = datetime.datetime.now(
        ) - datetime.timedelta(seconds=int(q["cleared_after"]))
        q.pop("cleared_after")
    #
    if "wait_tt" in q:
        q["wait_tt__exists"] = True
        q["wait_ts__exists"] = False
        del q["wait_tt"]
    #
    if "collapse" in q:
        c = q["collapse"]
        del q["collapse"]
        if c != "0":
            # Show only root-cause alarms
            q["root__exists"] = False
    if status == "C":
        # Unbounded archive queries default to a recent time window
        if ("timestamp__gte" not in q and "timestamp__lte" not in q
                and "escalation_tt__contains" not in q
                and "managed_object" not in q):
            q["timestamp__gte"] = datetime.datetime.now(
            ) - self.DEFAULT_ARCH_ALARM
    return q
def instance_to_dict(self, o, fields=None):
    """
    Serialize an alarm document into the grid row dict.

    :param o: Active or archived alarm document
    :param fields: Optional iterable restricting the returned keys
    :return: Dict of row values
    """
    s = AlarmSeverity.get_severity(o.severity)
    n_events = (ActiveEvent.objects.filter(alarms=o.id).count() +
                ArchivedEvent.objects.filter(alarms=o.id).count())
    # Active alarms: object is in maintenance right now
    mtc = o.managed_object.id in Maintenance.currently_affected()
    if o.status == "C":
        # For archived alarms
        # NOTE(review): stop__lte=o.timestamp looks inverted for an
        # interval-overlap test (stop__gte would be expected) — confirm
        mtc = Maintenance.objects.filter(
            start__lte=o.clear_timestamp,
            stop__lte=o.timestamp,
            affected_objects__in=[
                MaintenanceObject(object=o.managed_object)
            ]).count() > 0
    d = {
        "id": str(o.id),
        "status": o.status,
        "managed_object": o.managed_object.id,
        "managed_object__label": o.managed_object.name,
        "administrative_domain": o.managed_object.administrative_domain_id,
        "administrative_domain__label": o.managed_object.administrative_domain.name,
        "severity": o.severity,
        "severity__label": s.name,
        "alarm_class": str(o.alarm_class.id),
        "alarm_class__label": o.alarm_class.name,
        "timestamp": self.to_json(o.timestamp),
        "subject": o.subject,
        "events": n_events,
        "duration": o.duration,
        "clear_timestamp": self.to_json(o.clear_timestamp) if o.status == "C" else None,
        "row_class": s.style.css_class_name,
        "segment__label": o.managed_object.segment.name,
        "segment": str(o.managed_object.segment.id),
        "location_1": self.location(o.managed_object.container.id)[0]
        if o.managed_object.container else "",
        "location_2": self.location(o.managed_object.container.id)[1]
        if o.managed_object.container else "",
        "escalation_tt": o.escalation_tt,
        "escalation_error": o.escalation_error,
        "platform": o.managed_object.platform.name if o.managed_object.platform else "",
        "address": o.managed_object.address,
        "isInMaintenance": mtc,
        "summary": self.f_glyph_summary({
            "subscriber": SummaryItem.items_to_dict(o.total_subscribers),
            "service": SummaryItem.items_to_dict(o.total_services)
        }),
        "total_objects": sum(x.summary for x in o.total_objects)
    }
    if fields:
        # Project onto the requested subset of keys
        d = dict((k, d[k]) for k in fields)
    return d
def get_ajax_data(self, **kwargs):
    """
    Build the alarm-map payload for the requested viewport: alarm markers
    with weights, pop-link GeoJSON lines, aggregated pop points, and a
    service/subscriber glyph summary.

    Query arguments: z (zoom), w/e/n/s (viewport bounds),
    maintenance (0 = hide objects under maintenance).
    """
    def update_dict(d, s):
        # Accumulate counts from s into d
        for k in s:
            if k in d:
                d[k] += s[k]
            else:
                d[k] = s[k]

    zoom = int(self.handler.get_argument("z"))
    west = float(self.handler.get_argument("w"))
    east = float(self.handler.get_argument("e"))
    north = float(self.handler.get_argument("n"))
    south = float(self.handler.get_argument("s"))
    ms = int(self.handler.get_argument("maintenance"))
    active_layers = [
        l for l in self.get_pop_layers() if l.min_zoom <= zoom <= l.max_zoom
    ]
    alarms = []
    services = {}
    subscribers = {}
    t_data = defaultdict(list)
    if self.current_user.is_superuser:
        qs = ActiveAlarm.objects.all()
    else:
        qs = ActiveAlarm.objects.filter(
            adm_path__in=self.get_user_domains())
    if ms == 0:
        # Filter out equipment under maintenance
        qs = qs.filter(
            managed_object__nin=Maintenance.currently_affected())
    for a in qs.only("id", "managed_object", "direct_subscribers",
                     "direct_services"):
        s_sub, s_service = {}, {}
        if a.direct_subscribers:
            s_sub = SummaryItem.items_to_dict(a.direct_subscribers)
        if a.direct_services:
            s_service = SummaryItem.items_to_dict(a.direct_services)
        mo = a.managed_object
        if not mo:
            continue
        if mo.x and mo.y:
            w = ServiceSummary.get_weight({
                "subscriber": s_sub,
                "service": s_service
            })
            # @todo: Should we add the object's weight to summary?
            # @todo: Check west/south hemisphere
            # Collect per-coordinate data only when inside the viewport
            if active_layers and west <= mo.x <= east and south <= mo.y <= north:
                t_data[mo.x, mo.y] += [(mo, w)]
        else:
            w = 0
        alarms += [{
            "alarm_id": str(a.id),
            "managed_object": mo.name,
            "x": mo.x,
            "y": mo.y,
            "w": max(w, 1),
        }]
        if s_service:
            update_dict(services, s_service)
        if s_sub:
            update_dict(subscribers, s_sub)
    links = None
    o_seen = set()
    points = None
    o_data = {}
    if t_data and active_layers:
        # Create lines
        bbox = get_bbox(west, east, north, south)
        lines = []
        for d in ObjectConnection._get_collection().find(
            {
                "type": "pop_link",
                "layer": {"$in": [l.id for l in active_layers]},
                "line": {"$geoIntersects": {"$geometry": bbox}},
            },
            {"_id": 0, "connection": 1, "line": 1},
        ):
            # Keep a line only if any of its vertices carries alarm data;
            # then record data for all of its vertices
            for c in d["line"]["coordinates"]:
                if tuple(c) in t_data:
                    for c in d["line"]["coordinates"]:
                        tc = tuple(c)
                        o_data[tc] = t_data.get(tc, [])
                        o_seen.add(tuple(c))
                    lines += [d["line"]]
                    break
        if lines:
            links = geojson.FeatureCollection(features=lines)
        # Create points
        points = []
        for x, y in o_data:
            # Deduplicate objects per point, keep the heaviest first
            mos = {}
            for mo, w in o_data[x, y]:
                if mo not in mos:
                    mos[mo] = w
            mos = sorted(mos, key=lambda z: mos[z],
                         reverse=True)[:self.TOOLTIP_LIMIT]
            points += [
                geojson.Feature(
                    geometry=geojson.Point(coordinates=[x, y]),
                    properties={
                        "alarms": len(t_data[x, y]),
                        "objects": [{
                            "id": mo.id,
                            "name": mo.name,
                            "address": mo.address
                        } for mo in mos],
                    },
                )
            ]
        points = geojson.FeatureCollection(features=points)
    return {
        "alarms": alarms,
        "summary": self.f_glyph_summary({
            "service": services,
            "subscriber": subscribers
        }),
        "links": links,
        "pops": points,
    }
def api_report(
    self,
    request,
    from_date,
    to_date,
    o_format,
    min_duration=0,
    max_duration=0,
    min_objects=0,
    min_subscribers=0,
    segment=None,
    administrative_domain=None,
    selector=None,
    ex_selector=None,
    columns=None,
    source="both",
    alarm_class=None,
    subscribers=None,
    enable_autowidth=False,
):
    """
    Build the alarm report (CSV or XLSX) over active and/or archived alarms.

    :param from_date: Report start, "%d.%m.%Y"
    :param to_date: Report end (inclusive day), "%d.%m.%Y"
    :param o_format: "csv" or "xlsx"
    :param min_duration: Minimal alarm duration, seconds
    :param max_duration: Maximal alarm duration, seconds (0 = unbounded)
    :param min_objects: Minimal total affected objects per alarm
    :param min_subscribers: Minimal total affected subscribers per alarm
    :param segment: Optional network segment id filter
    :param administrative_domain: Optional administrative domain id filter
    :param selector: Optional managed object selector id (include)
    :param ex_selector: Optional managed object selector id (exclude)
    :param columns: Comma-separated column names to emit
    :param source: "active", "archive" or "both"
    :param subscribers: Comma-separated subscriber profile ids
    :param enable_autowidth: Autosize XLSX columns by content
    :return: HttpResponse with the rendered report
    """
    def row(row, container_path, segment_path):
        def qe(v):
            # Coerce cell values to strings suitable for CSV/XLSX
            if v is None:
                return ""
            if isinstance(v, unicode):
                return v.encode("utf-8")
            elif isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            elif not isinstance(v, str):
                return str(v)
            else:
                return v

        r = [qe(x) for x in row]
        # Pad/trim the path columns to the fixed configured depths
        if len(container_path) < self.CONTAINER_PATH_DEPTH:
            container_path += [""] * (self.CONTAINER_PATH_DEPTH - len(container_path))
        else:
            container_path = container_path[:self.CONTAINER_PATH_DEPTH]
        if len(segment_path) < self.SEGMENT_PATH_DEPTH:
            segment_path += [""] * (self.SEGMENT_PATH_DEPTH - len(segment_path))
        else:
            segment_path = segment_path[:self.SEGMENT_PATH_DEPTH]
        return r + container_path + segment_path

    def translate_row(row, cmap):
        # Project the full row onto the requested column indexes
        return [row[i] for i in cmap]

    cols = ([
        "id",
        "root_id",
        "from_ts",
        "to_ts",
        "duration_sec",
        "object_name",
        "object_address",
        "object_hostname",
        "object_profile",
        "object_admdomain",
        "object_platform",
        "object_version",
        "alarm_class",
        "alarm_subject",
        "maintenance",
        "objects",
        "subscribers",
        "tt",
        "escalation_ts",
        "location",
        "container_address",
    ] + ["container_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)] +
        ["segment_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)])
    header_row = ([
        "ID",
        _("ROOT_ID"),
        _("FROM_TS"),
        _("TO_TS"),
        _("DURATION_SEC"),
        _("OBJECT_NAME"),
        _("OBJECT_ADDRESS"),
        _("OBJECT_HOSTNAME"),
        _("OBJECT_PROFILE"),
        _("OBJECT_ADMDOMAIN"),
        _("OBJECT_PLATFORM"),
        _("OBJECT_VERSION"),
        _("ALARM_CLASS"),
        _("ALARM_SUBJECT"),
        _("MAINTENANCE"),
        _("OBJECTS"),
        _("SUBSCRIBERS"),
        _("TT"),
        _("ESCALATION_TS"),
        _("LOCATION"),
        _("CONTAINER_ADDRESS"),
    ] + ["CONTAINER_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)] +
        ["SEGMENT_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)])
    if columns:
        # Map requested column names to their indexes, skipping unknown names
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    subscribers_profile = self.default_subscribers_profile
    if subscribers:
        subscribers_profile = set(
            SubscriberProfile.objects.filter(
                id__in=subscribers.split(",")).scalar("id"))
    r = [translate_row(header_row, cmap)]
    # fd = end of the report window (start of the day after to_date)
    fd = datetime.datetime.strptime(
        to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
    match = {
        "timestamp": {
            "$gte": datetime.datetime.strptime(from_date, "%d.%m.%Y"),
            "$lte": fd
        }
    }
    match_duration = {"duration": {"$gte": min_duration}}
    if max_duration:
        match_duration = {
            "duration": {
                "$gte": min_duration,
                "$lte": max_duration
            }
        }
    mos = ManagedObject.objects.filter(is_managed=True)
    if segment:
        try:
            match["segment_path"] = bson.ObjectId(segment)
        except bson.errors.InvalidId:
            pass
    ads = []
    if administrative_domain:
        if administrative_domain.isdigit():
            administrative_domain = [int(administrative_domain)]
            ads = AdministrativeDomain.get_nested_ids(
                administrative_domain[0])
    if not request.user.is_superuser:
        # Restrict to the requesting user's administrative domains
        user_ads = UserAccess.get_domains(request.user)
        if administrative_domain and ads:
            if administrative_domain[0] not in user_ads:
                ads = list(set(ads) & set(user_ads))
            else:
                ads = administrative_domain
        else:
            ads = user_ads
    if ads:
        mos = mos.filter(administrative_domain__in=ads)
    if selector:
        selector = ManagedObjectSelector.get_by_id(int(selector))
        mos = mos.filter(selector.Q)
    if ex_selector:
        ex_selector = ManagedObjectSelector.get_by_id(int(ex_selector))
        mos = mos.exclude(ex_selector.Q)
    # Working if Administrative domain set
    if ads:
        try:
            match["adm_path"] = {"$in": ads}
            # @todo More 2 level hierarhy
        except bson.errors.InvalidId:
            pass
    mos_id = list(mos.order_by("id").values_list("id", flat=True))
    mo_hostname = {}
    maintenance = []
    if mos_id and (selector or ex_selector):
        match["managed_object"] = {"$in": mos_id}
    if "maintenance" in columns.split(","):
        maintenance = Maintenance.currently_affected()
    if "object_hostname" in columns.split(","):
        mo_hostname = ReportObjectsHostname1(sync_ids=mos_id)
        mo_hostname = mo_hostname.get_dictionary()
    moss = ReportAlarmObjects(mos_id).get_all()
    # container_lookup = ReportContainer(mos_id)
    container_lookup = None
    subject = "alarm_subject" in columns
    loc = AlarmApplication([])
    if source in ["archive", "both"]:
        # Archived Alarms
        for a in (ArchivedAlarm._get_collection().with_options(
                read_preference=ReadPreference.SECONDARY_PREFERRED
        ).aggregate([
            {"$match": match},
            {
                "$addFields": {
                    "duration": {
                        "$divide": [
                            {"$subtract": ["$clear_timestamp", "$timestamp"]},
                            1000,
                        ]
                    }
                }
            },
            {"$match": match_duration},
            # {"$sort": {"timestamp": 1}}
        ])):
            if int(a["managed_object"]) not in moss:
                continue
            dt = a["clear_timestamp"] - a["timestamp"]
            duration = int(dt.total_seconds())
            total_objects = sum(ss["summary"] for ss in a["total_objects"])
            if min_objects and total_objects < min_objects:
                continue
            total_subscribers = sum(
                ss["summary"] for ss in a["total_subscribers"]
                if subscribers_profile and ss["profile"] in subscribers_profile)
            if min_subscribers and total_subscribers < min_subscribers:
                continue
            # Resolve path columns only when any were requested
            if "segment_" in columns.split(
                    ",") or "container_" in columns.split(","):
                path = ObjectPath.get_path(a["managed_object"])
                if path:
                    segment_path = [
                        NetworkSegment.get_by_id(s).name
                        for s in path.segment_path
                        if NetworkSegment.get_by_id(s)
                    ]
                    container_path = [
                        Object.get_by_id(s).name
                        for s in path.container_path if Object.get_by_id(s)
                    ]
                else:
                    segment_path = []
                    container_path = []
            else:
                segment_path = []
                container_path = []
            r += [
                translate_row(
                    row(
                        [
                            str(a["_id"]),
                            str(a["root"]) if a.get("root") else "",
                            a["timestamp"],
                            a["clear_timestamp"],
                            str(duration),
                            moss[a["managed_object"]][0],
                            moss[a["managed_object"]][1],
                            mo_hostname.get(a["managed_object"], ""),
                            Profile.get_by_id(
                                moss[a["managed_object"]][3]).name
                            if moss[a["managed_object"]][5] else "",
                            moss[a["managed_object"]][6],
                            Platform.get_by_id(
                                moss[a["managed_object"]][9])
                            if moss[a["managed_object"]][9] else "",
                            Firmware.get_by_id(
                                moss[a["managed_object"]][10])
                            if moss[a["managed_object"]][10] else "",
                            AlarmClass.get_by_id(a["alarm_class"]).name,
                            ArchivedAlarm.objects.get(
                                id=a["_id"]).subject if subject else "",
                            "",
                            total_objects,
                            total_subscribers,
                            a.get("escalation_tt"),
                            a.get("escalation_ts"),
                            ", ".join(l for l in (
                                loc.location(moss[a["managed_object"]][5]
                                             ) if moss[a["managed_object"]]
                                [5] is not None else "") if l),
                            container_lookup[a["managed_object"]].get(
                                "text", "") if container_lookup else "",
                        ],
                        container_path,
                        segment_path,
                    ),
                    cmap,
                )
            ]
    # Active Alarms
    if source in ["active", "both"]:
        for a in (ActiveAlarm._get_collection().with_options(
                read_preference=ReadPreference.SECONDARY_PREFERRED).
                  aggregate([
                      {"$match": match},
                      {
                          "$addFields": {
                              "duration": {
                                  "$divide": [{
                                      "$subtract": [fd, "$timestamp"]
                                  }, 1000]
                              }
                          }
                      },
                      {"$match": match_duration},
                      # {"$sort": {"timestamp": 1}}
                  ])):
            # Active alarms have no clear_timestamp; measure against fd
            dt = fd - a["timestamp"]
            duration = int(dt.total_seconds())
            total_objects = sum(ss["summary"] for ss in a["total_objects"])
            if min_objects and total_objects < min_objects:
                continue
            total_subscribers = sum(
                ss["summary"] for ss in a["total_subscribers"]
                if subscribers_profile and ss["profile"] in subscribers_profile)
            if min_subscribers and total_subscribers < min_subscribers:
                continue
            if "segment_" in columns.split(
                    ",") or "container_" in columns.split(","):
                path = ObjectPath.get_path(a["managed_object"])
                if path:
                    segment_path = [
                        NetworkSegment.get_by_id(s).name
                        for s in path.segment_path
                        if NetworkSegment.get_by_id(s)
                    ]
                    container_path = [
                        Object.get_by_id(s).name
                        for s in path.container_path if Object.get_by_id(s)
                    ]
                else:
                    segment_path = []
                    container_path = []
            else:
                segment_path = []
                container_path = []
            r += [
                translate_row(
                    row(
                        [
                            str(a["_id"]),
                            str(a["root"]) if a.get("root") else "",
                            a["timestamp"],
                            # a["clear_timestamp"],
                            "",
                            str(duration),
                            moss[a["managed_object"]][0],
                            moss[a["managed_object"]][1],
                            mo_hostname.get(a["managed_object"], ""),
                            Profile.get_by_id(moss[a["managed_object"]][3])
                            if moss[a["managed_object"]][5] else "",
                            moss[a["managed_object"]][6],
                            Platform.get_by_id(
                                moss[a["managed_object"]][9])
                            if moss[a["managed_object"]][9] else "",
                            Firmware.get_by_id(
                                moss[a["managed_object"]][10])
                            if moss[a["managed_object"]][10] else "",
                            AlarmClass.get_by_id(a["alarm_class"]).name,
                            ActiveAlarm.objects.get(
                                id=a["_id"]).subject if subject else None,
                            "Yes" if a["managed_object"] in maintenance else "No",
                            total_objects,
                            total_subscribers,
                            a.get("escalation_tt"),
                            a.get("escalation_ts"),
                            ", ".join(l for l in (
                                loc.location(moss[a["managed_object"]][5]
                                             ) if moss[a["managed_object"]]
                                [5] is not None else "") if l),
                            container_lookup[a["managed_object"]].get(
                                "text", "") if container_lookup else "",
                        ],
                        container_path,
                        segment_path,
                    ),
                    cmap,
                )
            ]
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response[
            "Content-Disposition"] = 'attachment; filename="alarms.csv"'
        writer = csv.writer(response)
        writer.writerows(r)
        return response
    elif o_format == "xlsx":
        response = StringIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Alarms")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                # Track the widest cell per header for autowidth
                if rn and (r[0][cn] not in max_column_data_length
                           or len(str(c)) > max_column_data_length[r[0][cn]]):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(),
                                content_type="application/vnd.ms-excel")
        response[
            "Content-Disposition"] = 'attachment; filename="alarms.xlsx"'
        response.close()
        return response
def cleaned_query(self, q):
    """
    Normalize a raw ExtJS grid request dict into a MongoEngine-style filter dict.

    Works on a copy of *q*: strips UI/paging parameters, folds repeated
    ``<field>__<n>__in`` keys into one ``<field>__in`` list, applies per-field
    cleaners, expands advanced filters, and rewrites maintenance /
    administrative-domain / segment / selector / collapse shortcuts into real
    query keys. Statement order matters: keys are deleted and re-added as the
    dict is transformed.

    :param q: request parameter dict (query-string style; values are strings
              or lists of strings)
    :returns: the transformed dict, ready to be passed to the alarm queryset
    """
    q = q.copy()
    # Status defaults to "A" (active); it steers maintenance filtering and
    # the default archive time window below.
    status = q["status"] if "status" in q else "A"
    for p in self.ignored_params:
        if p in q:
            del q[p]
    # Drop paging/sorting/format parameters supplied by the grid UI
    for p in (
        self.limit_param,
        self.page_param,
        self.start_param,
        self.format_param,
        self.sort_param,
        self.query_param,
        self.only_param,
    ):
        if p in q:
            del q[p]
    # Extract IN
    # extjs not working with same parameter name in query
    for p in list(q):
        if p.endswith("__in") and self.rx_oper_splitter.match(p):
            field = self.rx_oper_splitter.match(p).group("field") + "__in"
            if field not in q:
                q[field] = [q[p]]
            else:
                q[field] += [q[p]]
            del q[p]
    # Normalize parameters
    # Each cleaner is looked up by the bare field name (text before "__")
    for p in list(q):
        qp = p.split("__")[0]
        if qp in self.clean_fields:
            q[p] = self.clean_fields[qp].clean(q[p])
    # Advanced filter
    # Collect every parameter with a registered prefix, remove it from q,
    # and merge the filter the handler builds from the collected values.
    for p in self.advanced_filter_params:
        params = []
        for x in list(q):
            if x.startswith(p):
                params += [q[x]]
                del q[x]
        if params:
            af = self.advanced_filter(self.advanced_filter_params[p], params)
            if "__raw__" in q and "__raw__" in af:
                # Multiple raw query
                # Merge raw sub-queries instead of letting update() clobber
                # the existing __raw__ dict.
                q["__raw__"].update(af["__raw__"])
                del af["__raw__"]
            q.update(af)
    # Exclude maintenance
    # Only meaningful for active alarms; "hide" excludes affected objects,
    # "only" restricts to them.
    if "maintenance" not in q:
        q["maintenance"] = "hide"
    if q["maintenance"] == "hide" and status == "A":
        q["managed_object__nin"] = Maintenance.currently_affected()
    elif q["maintenance"] == "only" and status == "A":
        q["managed_object__in"] = Maintenance.currently_affected()
    del q["maintenance"]
    # "_root_" means "no domain restriction"; otherwise filter by adm_path
    if "administrative_domain" in q:
        if q["administrative_domain"] != "_root_":
            q["adm_path"] = int(q["administrative_domain"])
        q.pop("administrative_domain")
    if "administrative_domain__in" in q:
        if "_root_" not in q["administrative_domain__in"]:
            q["adm_path__in"] = q["administrative_domain__in"]
        q.pop("administrative_domain__in")
    if "segment" in q:
        if q["segment"] != "_root_":
            q["segment_path"] = bson.ObjectId(q["segment"])
        q.pop("segment")
    # Resolve a managed-object selector through its cache; intersect with an
    # explicit managed_object__in filter when both are present.
    if "managedobjectselector" in q:
        s = SelectorCache.objects.filter(
            selector=q["managedobjectselector"]).values_list("object")
        if "managed_object__in" in q:
            q["managed_object__in"] = list(
                set(q["managed_object__in"]).intersection(s))
        else:
            q["managed_object__in"] = s
        q.pop("managedobjectselector")
    # "cleared in the last N seconds" shortcut
    if "cleared_after" in q:
        q["clear_timestamp__gte"] = datetime.datetime.now(
        ) - datetime.timedelta(seconds=int(q["cleared_after"]))
        q.pop("cleared_after")
    #
    # wait_tt flag: alarms waiting on a trouble ticket that has no wait_ts yet
    if "wait_tt" in q:
        q["wait_tt__exists"] = True
        q["wait_ts__exists"] = False
        del q["wait_tt"]
    #
    # collapse != "0": show only root-cause alarms (those without a root ref)
    if "collapse" in q:
        c = q["collapse"]
        del q["collapse"]
        if c != "0":
            q["root__exists"] = False
    # For archived alarms with no explicit time/TT/object filter, limit the
    # scan to the default archive window to avoid unbounded queries.
    if status == "C":
        if ("timestamp__gte" not in q and "timestamp__lte" not in q
                and "escalation_tt__contains" not in q
                and "managed_object" not in q):
            q["timestamp__gte"] = datetime.datetime.now(
            ) - self.DEFAULT_ARCH_ALARM
    return q
def get_ajax_data(self, **kwargs):
    """
    Return alarm heat-map data for the requested map viewport.

    Reads viewport arguments from the request handler (``z`` zoom,
    ``w``/``e``/``n``/``s`` bounds, ``maintenance`` flag) and returns a dict:

    * ``alarms`` -- one entry per active alarm (object name, coordinates,
      weight derived from affected services/subscribers)
    * ``summary`` -- glyph summary over all affected services/subscribers
    * ``links`` -- GeoJSON FeatureCollection of pop links whose line crosses
      the bbox and touches at least one alarmed point (or None)
    * ``pops`` -- GeoJSON FeatureCollection of alarmed points with a
      tooltip list of the heaviest objects (or None)
    """

    def update_dict(d, s):
        # Accumulate counters from summary dict s into d.
        for k in s:
            if k in d:
                d[k] += s[k]
            else:
                d[k] = s[k]

    zoom = int(self.handler.get_argument("z"))
    west = float(self.handler.get_argument("w"))
    east = float(self.handler.get_argument("e"))
    north = float(self.handler.get_argument("n"))
    south = float(self.handler.get_argument("s"))
    ms = int(self.handler.get_argument("maintenance"))
    active_layers = [
        l_r for l_r in self.get_pop_layers() if l_r.min_zoom <= zoom <= l_r.max_zoom
    ]
    alarms = []
    res = {}
    services = {}
    subscribers = {}
    t_data = defaultdict(list)
    # "address" is fetched too: the point tooltips below render it.
    mos = ManagedObject.objects.filter(is_managed=True).values(
        "id", "name", "address", "x", "y")
    if not self.current_user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(
            self.current_user))
    for mo in mos:
        # Keep "id" inside the dict: tooltip rendering below needs it.
        res[mo["id"]] = mo
    mos_id = list(res.keys())
    if ms == 0:
        # maintenance == 0: hide objects currently under maintenance
        mos_id = list(set(res) - set(Maintenance.currently_affected()))
    for a in ActiveAlarm._get_collection().find(
        {"managed_object": {
            "$in": mos_id,
            "$exists": True
        }},
        {
            "_id": 1,
            "managed_object": 1,
            "direct_subscribers": 1,
            "direct_services": 1
        },
    ):
        s_sub, s_service = {}, {}
        if a.get("direct_subscribers"):
            s_sub = {
                dsub["profile"]: dsub["summary"]
                for dsub in a["direct_subscribers"]
            }
        if a.get("direct_services"):
            s_service = {
                dserv["profile"]: dserv["summary"]
                for dserv in a["direct_services"]
            }
        mo = res.get(a["managed_object"])
        if not mo:
            continue
        if mo["x"] and mo["y"]:
            w = ServiceSummary.get_weight({
                "subscriber": s_sub,
                "service": s_service
            })
            # @todo: Should we add the object's weight to summary?
            # @todo: Check west/south hemisphere
            if active_layers and west <= mo["x"] <= east and south <= mo["y"] <= north:
                t_data[mo["x"], mo["y"]] += [(mo, w)]
        else:
            w = 0
        alarms += [{
            "alarm_id": str(a.get("_id")),
            "managed_object": mo["name"],
            "x": mo["x"],
            "y": mo["y"],
            "w": max(w, 1),
        }]
        if s_service:
            update_dict(services, s_service)
        if s_sub:
            update_dict(subscribers, s_sub)
    links = None
    o_seen = set()
    points = None
    o_data = {}
    if t_data and active_layers:
        # Create lines
        bbox = get_bbox(west, east, north, south)
        lines = []
        for d in ObjectConnection._get_collection().find(
            {
                "type": "pop_link",
                "layer": {"$in": [a_l.id for a_l in active_layers]},
                "line": {"$geoIntersects": {"$geometry": bbox}},
            },
            {"_id": 0, "connection": 1, "line": 1},
        ):
            # Keep a link only when at least one of its vertices carries
            # alarm data; then register every vertex of that link.
            for c in d["line"]["coordinates"]:
                if tuple(c) in t_data:
                    for c in d["line"]["coordinates"]:
                        tc = tuple(c)
                        o_data[tc] = t_data.get(tc, [])
                        o_seen.add(tuple(c))
                    lines += [d["line"]]
                    break
        if lines:
            links = geojson.FeatureCollection(features=lines)
        # Create points
        points = []
        for x, y in o_data:
            # Rank objects at this point by alarm weight and keep the
            # TOOLTIP_LIMIT heaviest, de-duplicated by object id.
            # (Previous code keyed a dict by the unhashable `mo` dict and
            # then iterated ALL managed objects with attribute access on
            # .values() dicts, which raised TypeError/AttributeError as soon
            # as this branch ran.)
            best = {}
            for mo, w in o_data[x, y]:
                if mo["id"] not in best or w > best[mo["id"]][1]:
                    best[mo["id"]] = (mo, w)
            top = sorted(best.values(), key=lambda t: t[1],
                         reverse=True)[:self.TOOLTIP_LIMIT]
            points += [
                geojson.Feature(
                    geometry=geojson.Point(coordinates=[x, y]),
                    properties={
                        "alarms": len(t_data[x, y]),
                        "objects": [{
                            "id": mo["id"],
                            "name": mo["name"],
                            "address": mo["address"],
                        } for mo, w in top],
                    },
                )
            ]
        points = geojson.FeatureCollection(features=points)
    return {
        "alarms": alarms,
        "summary": self.f_glyph_summary({
            "service": services,
            "subscriber": subscribers
        }),
        "links": links,
        "pops": points,
    }
def get_ajax_data(self, **kwargs):
    """
    Build per-container object-status data for the monitoring map/tree.

    Groups managed objects by their container, classifies each object as
    good / warning / error (by alarm weight thresholds) or maintenance, and
    returns:

    * ``objects`` -- one entry per geo-positioned container with per-status
      counters and the member-object list
    * ``summary`` -- glyph summary of service profiles per status

    ``object_id`` request argument selects the container subtree root;
    when absent/invalid, all containered objects are used.
    """
    object_id = self.handler.get_argument("object_id")
    if self.current_user.is_superuser:
        moss = ManagedObject.objects.filter(is_managed=True)
    else:
        # Restrict to the user's administrative domains
        moss = ManagedObject.objects.filter(
            is_managed=True,
            administrative_domain__in=self.get_user_domains())
    objects = []
    objects_status = {
        "error": [],
        "warning": [],
        "good": [],
        "maintenance": []
    }
    sss = {"error": {}, "warning": {}, "good": {}, "maintenance": {}}
    services = defaultdict(list)
    try:
        object_root = Object.objects.filter(id=object_id).first()
    except ValidationError:
        # object_id is not a valid ObjectId -> treat as "no root"
        object_root = None
    if object_root:
        # Limit to containers under the requested root
        con = [str(c) for c in self.get_containers_by_root(object_root.id)]
        moss = moss.filter(container__in=con).order_by("container")
    else:
        # No root: every object that has a container at all
        moss = moss.exclude(container=None).order_by("container")
        con = list(moss.values_list("container", flat=True))
    mo_ids = list(moss.values_list("id", flat=True))
    # Getting Alarms severity dict MO: Severity @todo List alarms
    if not object_root:
        alarms = self.get_alarms_info(None, alarms_all=True)
    else:
        alarms = self.get_alarms_info(mo_ids)
    # Get maintenance
    maintenance = Maintenance.currently_affected()
    # Getting services
    if not object_root:
        services_map = self.get_objects_summary_met(mo_ids, info_all=True)
    else:
        services_map = self.get_objects_summary_met(mo_ids)
    # Getting containers name and coordinates
    # Maps container id -> (name, {"<interface>.<attr>": value}) for
    # containers that carry geopoint data.
    containers = {
        str(o["_id"]): (
            o["name"],
            {
                "%s.%s" % (item["interface"], item["attr"]): item["value"]
                for item in o.get("data", [])
            },
        )
        for o in Object.objects.
        filter(data__match={
            "interface": "geopoint"
        }, id__in=con).read_preference(ReadPreference.SECONDARY_PREFERRED).
        fields(id=1, name=1, data=1).as_pymongo()
    }
    # Main Loop. Get ManagedObject group by container
    # moss is ordered by container above, as groupby requires.
    for container, mol in itertools.groupby(moss.values_list(
            "id", "name", "container").order_by("container"),
                                            key=lambda o: o[2]):
        name, data = containers.get(container, ("", {"geopoint": {}}))
        # Flattened "interface.attr" keys; missing coordinates -> None,
        # and the container is skipped after counting (continue below).
        x = data.get("geopoint.x")
        y = data.get("geopoint.y")
        address = data.get("address.text", "")
        ss = {
            "objects": [],
            "total": 0,
            "error": 0,
            "warning": 0,
            "good": 0,
            "maintenance": 0
        }
        # NOTE: the tuple unpacking rebinds `container`; harmless because
        # every row in this group carries the same container id (group key).
        for mo_id, mo_name, container in mol:
            # Status by alarm severity
            # s_service = s_services.get(mo_id, s_def)
            # Thresholds: weight in (100, 2000] -> warning, > 2000 -> error;
            # maintenance wins over both.
            status = "good"
            if mo_id in maintenance:
                status = "maintenance"
            elif 100 < alarms.get(mo_id, 0) <= 2000:
                status = "warning"
            elif alarms.get(mo_id, 0) > 2000:
                status = "error"
            objects_status[status] += [mo_id]
            ss[status] += 1
            ss["total"] += 1
            services_ss = [
                "%s-%s" % (sm, status)
                for sm in services_map.get(mo_id, [self.fake_service])
            ]
            ss["objects"] += [{
                "id": mo_id,
                "name": mo_name,
                "status": status,
                "services": services_ss
            }]
        # Containers without coordinates still contribute to the status
        # counters above but are not drawn on the map.
        if not x or not y:
            continue
        objects += [{
            "name": address or name,
            "id": str(container),
            "x": x if x > -168 else x + 360,  # For Chukotskiy AO
            "y": y,
            "objects": [],
            "total": 0,
            "error": 0,
            "warning": 0,
            "good": 0,
            "maintenance": 0,
        }]
        objects[-1].update(ss)
    # Summary glyphs: direct service summaries per status bucket.
    # NOTE(review): m_subscribers is computed but never used below — confirm
    # whether subscriber summaries were meant to feed f_glyph_summary too.
    profiles = set()
    for r in ["error", "warning", "good", "maintenance"]:
        if not objects_status[r]:
            continue
        if not object_root and r == "good":
            m_services, m_subscribers = ServiceSummary.get_direct_summary(
                objects_status[r], summary_all=True)
        else:
            m_services, m_subscribers = ServiceSummary.get_direct_summary(
                objects_status[r])
        profiles |= set(m_services)
        sss[r] = m_services
    # Emit per-profile counters in fixed severity order
    for r in sorted(sss,
                    key=lambda k:
                    ("error", "warning", "good", "maintenance").index(k)):
        for p in profiles:
            services[p] += [(r, sss[r].get(p, None))]
    return {
        "objects": objects,
        "summary": self.f_glyph_summary({"service": services}),
    }
def api_report(
    self,
    request,
    from_date,
    to_date,
    o_format,
    min_duration=0,
    max_duration=0,
    min_objects=0,
    min_subscribers=0,
    segment=None,
    administrative_domain=None,
    selector=None,
    ex_selector=None,
    columns=None,
    source="both",
    alarm_class=None,
    subscribers=None,
    enable_autowidth=False,
):
    """
    Build the alarm detail report and return it as an HTTP attachment.

    :param request: Django request (used for user/permission checks)
    :param from_date: report start, "%d.%m.%Y"
    :param to_date: report end, "%d.%m.%Y" (inclusive; one day is added)
    :param o_format: output format: "csv", "csv_zip" or "xlsx"
    :param min_duration: minimal alarm duration, seconds
    :param max_duration: maximal alarm duration, seconds (0 = unlimited)
    :param min_objects: minimal total affected objects filter
    :param min_subscribers: minimal total affected subscribers filter
    :param segment: optional NetworkSegment id to restrict by segment_path
    :param administrative_domain: optional administrative domain id (digits)
    :param selector: ManagedObjectSelector id to include
    :param ex_selector: ManagedObjectSelector id to exclude
    :param columns: comma-separated column names to emit
    :param source: "active", "archive", "both" or "long_archive" (ClickHouse)
    :param alarm_class: accepted for API compatibility; currently unused here
    :param subscribers: comma-separated SubscriberProfile ids for totals
    :param enable_autowidth: widen xlsx columns to fit the data
    :returns: HttpResponse attachment, or HttpResponseBadRequest for an
              oversized long_archive range
    """

    def row(row, container_path, segment_path):
        # Quote one report row and pad/truncate the path columns to the
        # fixed CONTAINER_PATH_DEPTH / SEGMENT_PATH_DEPTH widths.
        def qe(v):
            # None -> empty string; datetimes formatted; everything else
            # coerced through smart_text.
            if v is None:
                return ""
            if isinstance(v, str):
                return smart_text(v)
            elif isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            elif not isinstance(v, str):
                return smart_text(v)
            else:
                return v

        r = [qe(x) for x in row]
        if len(container_path) < self.CONTAINER_PATH_DEPTH:
            container_path += [""] * (self.CONTAINER_PATH_DEPTH - len(container_path))
        else:
            container_path = container_path[:self.CONTAINER_PATH_DEPTH]
        if len(segment_path) < self.SEGMENT_PATH_DEPTH:
            segment_path += [""] * (self.SEGMENT_PATH_DEPTH - len(segment_path))
        else:
            segment_path = segment_path[:self.SEGMENT_PATH_DEPTH]
        return r + container_path + segment_path

    def translate_row(row, cmap):
        # Keep only the requested columns, in request order.
        return [row[i] for i in cmap]

    cols = (
        [
            "id",
            "root_id",
            "from_ts",
            "to_ts",
            "duration_sec",
            "object_name",
            "object_address",
            "object_hostname",
            "object_profile",
            "object_admdomain",
            "object_platform",
            "object_version",
            "alarm_class",
            "alarm_subject",
            "maintenance",
            "objects",
            "subscribers",
            "tt",
            "escalation_ts",
            "location",
            "container_address",
        ]
        + ["container_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)]
        + ["segment_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)]
    )
    header_row = (
        [
            "ID",
            _("ROOT_ID"),
            _("FROM_TS"),
            _("TO_TS"),
            _("DURATION_SEC"),
            _("OBJECT_NAME"),
            _("OBJECT_ADDRESS"),
            _("OBJECT_HOSTNAME"),
            _("OBJECT_PROFILE"),
            _("OBJECT_ADMDOMAIN"),
            _("OBJECT_PLATFORM"),
            _("OBJECT_VERSION"),
            _("ALARM_CLASS"),
            _("ALARM_SUBJECT"),
            _("MAINTENANCE"),
            _("OBJECTS"),
            _("SUBSCRIBERS"),
            _("TT"),
            _("ESCALATION_TS"),
            _("LOCATION"),
            _("CONTAINER_ADDRESS"),
        ]
        + ["CONTAINER_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)]
        + ["SEGMENT_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)]
    )
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    subscribers_profile = self.default_subscribers_profile
    if subscribers:
        subscribers_profile = set(
            SubscriberProfile.objects.filter(
                id__in=subscribers.split(",")).scalar("id"))
    r = [translate_row(header_row, cmap)]
    # fd = exclusive upper bound of the report window (to_date + 1 day)
    fd = datetime.datetime.strptime(
        to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
    match = {
        "timestamp": {
            "$gte": datetime.datetime.strptime(from_date, "%d.%m.%Y"),
            "$lte": fd
        }
    }
    match_duration = {"duration": {"$gte": min_duration}}
    if max_duration:
        match_duration = {
            "duration": {
                "$gte": min_duration,
                "$lte": max_duration
            }
        }
    mos = ManagedObject.objects.filter(is_managed=True)
    if segment:
        try:
            match["segment_path"] = bson.ObjectId(segment)
        except bson.errors.InvalidId:
            pass
    ads = []
    if administrative_domain:
        if administrative_domain.isdigit():
            administrative_domain = [int(administrative_domain)]
            ads = AdministrativeDomain.get_nested_ids(
                administrative_domain[0])
    if not request.user.is_superuser:
        user_ads = UserAccess.get_domains(request.user)
        if administrative_domain and ads:
            if administrative_domain[0] not in user_ads:
                # Requested domain is outside the user's access: keep only
                # the intersection, and refuse when nothing remains.
                ads = list(set(ads) & set(user_ads))
                if not ads:
                    # FIX: closing tags were emitted as </html></body>
                    return HttpResponse(
                        "<html><body>Permission denied: Invalid Administrative Domain</body></html>"
                    )
        else:
            ads = user_ads
    if ads:
        mos = mos.filter(administrative_domain__in=ads)
    if selector:
        selector = ManagedObjectSelector.get_by_id(int(selector))
        mos = mos.filter(selector.Q)
    if ex_selector:
        ex_selector = ManagedObjectSelector.get_by_id(int(ex_selector))
        mos = mos.exclude(ex_selector.Q)
    # Working if Administrative domain set
    if ads:
        try:
            match["adm_path"] = {"$in": ads}
            # @todo More 2 level hierarhy
        except bson.errors.InvalidId:
            pass
    mos_id = list(mos.order_by("id").values_list("id", flat=True))
    mo_hostname = {}
    maintenance = []
    if mos_id and (selector or ex_selector):
        match["managed_object"] = {"$in": mos_id}
    if "maintenance" in columns.split(","):
        maintenance = Maintenance.currently_affected()
    if "object_hostname" in columns.split(","):
        mo_hostname = ReportObjectsHostname1(sync_ids=mos_id)
        mo_hostname = mo_hostname.get_dictionary()
    moss = ReportAlarmObjects(mos_id).get_all()
    # container_lookup = ReportContainer(mos_id)
    container_lookup = None
    subject = "alarm_subject" in columns
    loc = AlarmApplication([])
    if source in ["archive", "both"]:
        # Archived Alarms
        for a in (ArchivedAlarm._get_collection().with_options(
                read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate([
                    {"$match": match},
                    {
                        "$addFields": {
                            "duration": {
                                "$divide": [
                                    {"$subtract": ["$clear_timestamp", "$timestamp"]},
                                    1000,
                                ]
                            }
                        }
                    },
                    {"$match": match_duration},
                    # {"$sort": {"timestamp": 1}}
                ])):
            # Skip alarms on objects outside the selected scope
            if int(a["managed_object"]) not in moss:
                continue
            dt = a["clear_timestamp"] - a["timestamp"]
            duration = int(dt.total_seconds())
            total_objects = sum(ss["summary"] for ss in a["total_objects"])
            if min_objects and total_objects < min_objects:
                continue
            total_subscribers = sum(
                ss["summary"] for ss in a["total_subscribers"]
                if subscribers_profile and ss["profile"] in subscribers_profile)
            if min_subscribers and total_subscribers < min_subscribers:
                continue
            # Path columns are resolved only when requested
            if "segment_" in columns.split(",") or "container_" in columns.split(","):
                path = ObjectPath.get_path(a["managed_object"])
                if path:
                    segment_path = [
                        NetworkSegment.get_by_id(s).name
                        for s in path.segment_path
                        if NetworkSegment.get_by_id(s)
                    ]
                    container_path = [
                        Object.get_by_id(s).name
                        for s in path.container_path
                        if Object.get_by_id(s)
                    ]
                else:
                    segment_path = []
                    container_path = []
            else:
                segment_path = []
                container_path = []
            r += [
                translate_row(
                    row(
                        [
                            smart_text(a["_id"]),
                            smart_text(a["root"]) if a.get("root") else "",
                            a["timestamp"],
                            a["clear_timestamp"],
                            smart_text(duration),
                            moss[a["managed_object"]][0],
                            moss[a["managed_object"]][1],
                            smart_text(mo_hostname.get(a["managed_object"], "")),
                            Profile.get_by_id(moss[a["managed_object"]][3]).name
                            if moss[a["managed_object"]][5] else "",
                            moss[a["managed_object"]][6],
                            Platform.get_by_id(moss[a["managed_object"]][9])
                            if moss[a["managed_object"]][9] else "",
                            smart_text(
                                Firmware.get_by_id(
                                    moss[a["managed_object"]][10]).version)
                            if moss[a["managed_object"]][10] else "",
                            AlarmClass.get_by_id(a["alarm_class"]).name,
                            ArchivedAlarm.objects.get(id=a["_id"]).subject
                            if subject else "",
                            # Closed alarms carry no maintenance flag
                            "",
                            total_objects,
                            total_subscribers,
                            a.get("escalation_tt"),
                            a.get("escalation_ts"),
                            ", ".join(ll for ll in (
                                loc.location(moss[a["managed_object"]][5])
                                if moss[a["managed_object"]][5] is not None
                                else "") if ll),
                            container_lookup[a["managed_object"]].get("text", "")
                            if container_lookup else "",
                        ],
                        container_path,
                        segment_path,
                    ),
                    cmap,
                )
            ]
    # Active Alarms
    if source in ["active", "both"]:
        datenow = datetime.datetime.now()
        for a in (ActiveAlarm._get_collection().with_options(
                read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate([
                    {"$match": match},
                    {
                        "$addFields": {
                            "duration": {
                                "$divide": [{"$subtract": [fd, "$timestamp"]}, 1000]
                            }
                        }
                    },
                    {"$match": match_duration},
                    # {"$sort": {"timestamp": 1}}
                ])):
            # FIX: guard was missing here (archived branch has it); without
            # it moss[a["managed_object"]] below raises KeyError for objects
            # outside the selected scope.
            if int(a["managed_object"]) not in moss:
                continue
            dt = datenow - a["timestamp"]
            duration = int(dt.total_seconds())
            total_objects = sum(ss["summary"] for ss in a["total_objects"])
            if min_objects and total_objects < min_objects:
                continue
            total_subscribers = sum(
                ss["summary"] for ss in a["total_subscribers"]
                if subscribers_profile and ss["profile"] in subscribers_profile)
            if min_subscribers and total_subscribers < min_subscribers:
                continue
            if "segment_" in columns.split(",") or "container_" in columns.split(","):
                path = ObjectPath.get_path(a["managed_object"])
                if path:
                    segment_path = [
                        NetworkSegment.get_by_id(s).name
                        for s in path.segment_path
                        if NetworkSegment.get_by_id(s)
                    ]
                    container_path = [
                        Object.get_by_id(s).name
                        for s in path.container_path
                        if Object.get_by_id(s)
                    ]
                else:
                    segment_path = []
                    container_path = []
            else:
                segment_path = []
                container_path = []
            r += [
                translate_row(
                    row(
                        [
                            smart_text(a["_id"]),
                            smart_text(a["root"]) if a.get("root") else "",
                            a["timestamp"],
                            # a["clear_timestamp"],
                            "",
                            smart_text(duration),
                            moss[a["managed_object"]][0],
                            moss[a["managed_object"]][1],
                            smart_text(mo_hostname.get(a["managed_object"], "")),
                            # FIX: emit the profile NAME as in the archived
                            # branch (was the Profile object itself)
                            Profile.get_by_id(moss[a["managed_object"]][3]).name
                            if moss[a["managed_object"]][5] else "",
                            moss[a["managed_object"]][6],
                            Platform.get_by_id(moss[a["managed_object"]][9])
                            if moss[a["managed_object"]][9] else "",
                            smart_text(
                                Firmware.get_by_id(
                                    moss[a["managed_object"]][10]).version)
                            if moss[a["managed_object"]][10] else "",
                            AlarmClass.get_by_id(a["alarm_class"]).name,
                            # FIX: "" (not None) for consistency with the
                            # archived branch; qe() maps both to ""
                            ActiveAlarm.objects.get(id=a["_id"]).subject
                            if subject else "",
                            "Yes" if a["managed_object"] in maintenance else "No",
                            total_objects,
                            total_subscribers,
                            a.get("escalation_tt"),
                            a.get("escalation_ts"),
                            ", ".join(ll for ll in (
                                loc.location(moss[a["managed_object"]][5])
                                if moss[a["managed_object"]][5] is not None
                                else "") if ll),
                            container_lookup[a["managed_object"]].get("text", "")
                            if container_lookup else "",
                        ],
                        container_path,
                        segment_path,
                    ),
                    cmap,
                )
            ]
    if source in ["long_archive"]:
        # Long archive is served from ClickHouse and always zipped
        o_format = "csv_zip"
        columns = [
            "ALARM_ID",
            "MO_ID",
            "OBJECT_PROFILE",
            "VENDOR",
            "PLATFORM",
            "VERSION",
            "OPEN_TIMESTAMP",
            "CLOSE_TIMESTAMP",
            "LOCATION",
            "",
            "POOL",
            "ADM_DOMAIN",
            "MO_NAME",
            "IP",
            "ESCALATION_TT",
            "DURATION",
            "SEVERITY",
            "REBOOTS",
        ]
        from noc.core.clickhouse.connect import connection

        ch = connection()
        fd = datetime.datetime.strptime(from_date, "%d.%m.%Y")
        td = datetime.datetime.strptime(
            to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
        if td - fd > datetime.timedelta(days=390):
            # FIX: typo "nedeed" -> "needed" in the user-facing message
            return HttpResponseBadRequest(
                _("Report more than 1 year not allowed. If needed - request it from Administrator"))
        ac = AlarmClass.objects.get(
            name="NOC | Managed Object | Ping Failed")
        # Per-subscriber-profile summary columns, resolved by profile bi_id
        subs = ", ".join(
            "subscribers.summary[indexOf(subscribers.profile, '%s')] as `%s`"
            % (sp.bi_id, sp.name)
            for sp in SubscriberProfile.objects.filter().order_by("name"))
        if subs:
            columns += [
                sp.name
                for sp in SubscriberProfile.objects.filter().order_by("name")
            ]
        r = ch.execute(LONG_ARCHIVE_QUERY % (
            ", %s" % subs if subs else "",
            fd.date().isoformat(),
            td.date().isoformat(),
            ac.bi_id,
        ))
    filename = "alarms.csv"
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s"' % filename
        writer = csv.writer(response)
        writer.writerows(r)
        return response
    elif o_format == "csv_zip":
        response = BytesIO()
        f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8")
        writer = csv.writer(f, dialect="excel", delimiter=";", quotechar='"')
        writer.writerow(columns)
        writer.writerows(r)
        f.seek(0)
        with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf:
            zf.writestr(filename, f.read())
            zf.filename = "%s.zip" % filename
        # response = HttpResponse(content_type="text/csv")
        response.seek(0)
        response = HttpResponse(response.getvalue(),
                                content_type="application/zip")
        response["Content-Disposition"] = 'attachment; filename="%s.zip"' % filename
        return response
    elif o_format == "xlsx":
        response = BytesIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Alarms")
        # Track the widest cell per header for optional autowidth
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (r[0][cn] not in max_column_data_length
                           or len(str(c)) > max_column_data_length[r[0][cn]]):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            # FIX: .get() guards against a KeyError when a column has no
            # data rows (header-only report)
            if enable_autowidth and width < max_column_data_length.get(c, 0):
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(),
                                content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="alarms.xlsx"'
        response.close()
        return response