class RebootsExtractor(BaseExtractor):
    name = "reboots"
    extract_delay = config.bi.extract_delay_reboots
    clean_delay = config.bi.clean_delay_reboots

    def __init__(self, prefix, start, stop):
        super(RebootsExtractor, self).__init__(prefix, start, stop)
        self.reboot_stream = Stream(Reboots, prefix)

    def extract(self):
        nr = 0
        for d in (
            Reboot._get_collection()
            .find({"ts": {"$gt": self.start, "$lte": self.stop}}, no_cursor_timeout=True)
            .sort("ts")
        ):
            mo = ManagedObject.get_by_id(d["object"])
            if not mo:
                continue
            self.reboot_stream.push(
                ts=d["ts"],
                managed_object=mo,
                pool=mo.pool,
                ip=mo.address,
                profile=mo.profile,
                object_profile=mo.object_profile,
                vendor=mo.vendor,
                platform=mo.platform,
                version=mo.version,
                administrative_domain=mo.administrative_domain,
                segment=mo.segment,
                container=mo.container,
                x=mo.x,
                y=mo.y,
            )
            nr += 1
            self.last_ts = d["ts"]
        self.reboot_stream.finish()
        return nr

    def clean(self, force=False):
        if force:
            print("Clean Reboots collection before %s" % self.clean_ts)
            Reboot._get_collection().remove({"ts": {"$lte": self.clean_ts}})

    @classmethod
    def get_start(cls):
        d = Reboot._get_collection().find_one({}, {"_id": 0, "ts": 1}, sort=[("ts", 1)])
        if not d:
            return None
        return d.get("ts")
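# Illustrative only: a minimal driver for RebootsExtractor, not part of the
# extractor itself. The prefix path is an assumption, and extract_delay is
# assumed to be expressed in seconds; the real window bookkeeping and
# scheduling live in the BI ETL service.
def run_reboots_once(prefix="/var/lib/noc/bi"):
    start = RebootsExtractor.get_start()  # earliest Reboot.ts, or None if empty
    if start is None:
        return 0  # nothing to extract yet
    # Hold back the most recent records, as governed by extract_delay
    stop = datetime.datetime.now() - datetime.timedelta(seconds=RebootsExtractor.extract_delay)
    extractor = RebootsExtractor(prefix, start, stop)
    return extractor.extract()  # number of rows pushed into the Reboots stream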
class AlarmsExtractor(ArchivingExtractor):
    name = "alarms"
    extract_delay = config.bi.extract_delay_alarms
    clean_delay = config.bi.clean_delay_alarms
    reboot_interval = datetime.timedelta(seconds=config.bi.reboot_interval)
    enable_archive = config.bi.enable_alarms_archive
    archive_batch_limit = config.bi.alarms_archive_batch_limit
    archive_collection_template = config.bi.alarms_archive_policy

    def __init__(self, prefix, start, stop, use_archive=False):
        self.use_archive = use_archive
        super().__init__(prefix, start, stop)
        self.alarm_stream = Stream(Alarms, prefix)

    def iter_data(self):
        if self.use_archive:
            coll = [
                self._archive_db.get_collection(coll_name)
                for coll_name in self.find_archived_collections(self.start, self.stop)
            ]
        else:
            coll = [ArchivedAlarm._get_collection()]
        for c in coll:
            for d in c.find(
                {"clear_timestamp": {"$gt": self.start, "$lte": self.stop}},
                no_cursor_timeout=True,
            ).sort("clear_timestamp"):
                yield d

    def extract(self, *args, **options):
        nr = 0
        # Get reboots
        r = Reboot._get_collection().aggregate(
            [
                {
                    "$match": {
                        "ts": {"$gt": self.start - self.reboot_interval, "$lte": self.stop}
                    }
                },
                {"$sort": {"ts": 1}},
                {"$group": {"_id": "$object", "reboots": {"$push": "$ts"}}},
            ]
        )
        # object -> [ts1, .., tsN]
        reboots = {d["_id"]: d["reboots"] for d in r}
        #
        for d in self.iter_data():
            mo = ManagedObject.get_by_id(d["managed_object"])
            if not mo:
                continue
            # Process reboot data
            o_reboots = reboots.get(d["managed_object"], [])
            n_reboots = hits_in_range(
                o_reboots, d["timestamp"] - self.reboot_interval, d["clear_timestamp"]
            )
            #
            self.alarm_stream.push(
                ts=d["timestamp"],
                close_ts=d["clear_timestamp"],
                duration=max(0, int((d["clear_timestamp"] - d["timestamp"]).total_seconds())),
                alarm_id=str(d["_id"]),
                root=str(d.get("root") or ""),
                rca_type=d.get("rca_type") or 0,
                alarm_class=AlarmClass.get_by_id(d["alarm_class"]),
                severity=d["severity"],
                reopens=d.get("reopens") or 0,
                direct_services=sum(ss["summary"] for ss in d.get("direct_services", [])),
                direct_subscribers=sum(ss["summary"] for ss in d.get("direct_subscribers", [])),
                total_objects=sum(ss["summary"] for ss in d.get("total_objects", [])),
                total_services=sum(ss["summary"] for ss in d.get("total_services", [])),
                total_subscribers=sum(ss["summary"] for ss in d.get("total_subscribers", [])),
                escalation_ts=d.get("escalation_ts"),
                escalation_tt=d.get("escalation_tt"),
                managed_object=mo,
                pool=mo.pool,
                ip=mo.address,
                profile=mo.profile,
                object_profile=mo.object_profile,
                vendor=mo.vendor,
                platform=mo.platform,
                version=mo.version,
                administrative_domain=mo.administrative_domain,
                segment=mo.segment,
                container=mo.container,
                x=mo.x,
                y=mo.y,
                reboots=n_reboots,
                services=[
                    {
                        "profile": ServiceProfile.get_by_id(ss["profile"]).bi_id,
                        "summary": ss["summary"],
                    }
                    for ss in d.get("direct_services", [])
                ],
                subscribers=[
                    {
                        "profile": SubscriberProfile.get_by_id(ss["profile"]).bi_id,
                        "summary": ss["summary"],
                    }
                    for ss in d.get("direct_subscribers", [])
                ],
                # location=mo.container.get_address_text()
                ack_user=d.get("ack_user", ""),
                ack_ts=d.get("ack_ts"),
            )
            nr += 1
            self.last_ts = d["clear_timestamp"]
        self.alarm_stream.finish()
        return nr

    def clean(self, force=False):
        # Archive
        super().clean()
        # Clean
        if force:
            print("Clean ArchivedAlarm collection before %s" % self.clean_ts)
            ArchivedAlarm._get_collection().remove(
                {"clear_timestamp": {"$lte": self.clean_ts}}
            )

    @classmethod
    def get_start(cls):
        d = ArchivedAlarm._get_collection().find_one(
            {}, {"_id": 0, "timestamp": 1}, sort=[("timestamp", 1)]
        )
        if not d:
            return None
        return d.get("timestamp")

    def iter_archived_items(self):
        for d in ArchivedAlarm._get_collection().find(
            {"clear_timestamp": {"$lte": self.clean_ts}}, no_cursor_timeout=True
        ):
            yield d
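# extract() above counts per-alarm reboots with hits_in_range(), which is
# imported from the NOC core. A minimal sketch of the behavior assumed here:
# count how many timestamps of a sorted list fall into [start, stop]. The
# $sort stage in the aggregation guarantees the per-object lists are sorted
# ascending. This sketch describes an assumption about the helper's contract,
# not its actual implementation.
from bisect import bisect_left, bisect_right


def hits_in_range_sketch(timestamps, start, stop):
    """Count items of an ascending-sorted list within the closed interval [start, stop]."""
    return bisect_right(timestamps, stop) - bisect_left(timestamps, start)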
class ManagedObjectsExtractor(BaseExtractor):
    name = "managedobjects"
    is_snapshot = True

    # Caps to field mapping
    CAPS_MAP = {
        "Network | STP": "has_stp",
        "Network | CDP": "has_cdp",
        "Network | LLDP": "has_lldp",
        "SNMP": "has_snmp",
        "SNMP | v1": "has_snmp_v1",
        "SNMP | v2c": "has_snmp_v2c",
    }

    def __init__(self, prefix, start, stop):
        super(ManagedObjectsExtractor, self).__init__(prefix, start, stop)
        self.mo_stream = Stream(ManagedObjectBI, prefix)

    def extract(self, *args, **options):
        nr = 0
        ts = datetime.datetime.now()
        # External data
        stats_start = self.start - datetime.timedelta(days=1)  # configuration ?
        x_data = [
            self.get_interfaces(),
            self.get_links(),
            self.get_caps(),
            self.get_n_subs_n_serv(),
            self.get_reboots(stats_start, self.stop),
            self.get_availability(stats_start, self.stop),
            self.get_object_metrics(stats_start, self.stop),
        ]
        sn = self.get_mo_sn()
        # Extract managed objects
        for mo in ManagedObject.objects.all().iterator():
            did = DiscoveryID.objects.filter(object=mo).first()
            uptime = Uptime.objects.filter(object=mo.id, stop=None).first()
            serials = sn.get(mo.id, [])
            inventory = mo.get_inventory()
            if inventory:
                serials += inventory[0].get_object_serials(chassis_only=False)
            location = ""
            if mo.container:
                location = mo.container.get_address_text()
            r = {
                "ts": ts,
                "managed_object": mo,
                "profile": mo.profile,
                "administrative_domain": mo.administrative_domain,
                "segment": mo.segment,
                "container": mo.container,
                "level": mo.object_profile.level,
                "x": mo.x,
                "y": mo.y,
                "pool": mo.pool,
                "object_profile": mo.object_profile,
                "vendor": mo.vendor,
                "platform": mo.platform,
                "hw_version": mo.get_attr("HW version", default=None),
                "version": mo.version,
                "bootprom_version": mo.get_attr("Boot PROM", default=None),
                "name": ch_escape(mo.name),
                "hostname": ch_escape(did.hostname or "") if did else "",
                "ip": mo.address,
                "is_managed": mo.is_managed,
                "location": ch_escape(location) if location else "",
                "uptime": uptime.last_value if uptime else 0.0,
                "availability": 100.0,
                "tags": [str(t) for t in mo.tags if "{" not in t] if mo.tags else [],  # { - bug
                "serials": list(set(serials)),
                # subscribers
                # services
            }
            # Apply external data
            for data in x_data:
                d = data.get(mo.id)
                if d:
                    r.update(d)
            # Submit
            self.mo_stream.push(**r)
            nr += 1
        self.mo_stream.finish()
        return nr

    def get_links(self):
        """
        Build discovery method summary
        :return:
        """
        t = defaultdict(int)  # object -> count
        r = defaultdict(int)  # object_id, method -> count
        neighbors = defaultdict(set)  # object_id -> {objects}
        for d in Link._get_collection().find():
            method = d.get("discovery_method")
            linked = d.get("linked_objects", [])
            for o in linked:
                r[o, method] += 1
                t[o] += 1
                neighbors[o].update(linked)
        return dict(
            (
                o,
                {
                    "n_neighbors": len(neighbors[o]),
                    "n_links": t[o],
                    "nri_links": r[o, "nri"],
                    "mac_links": r[o, "mac"],
                    "stp_links": r[o, "stp"],
                    "lldp_links": r[o, "lldp"],
                    "cdp_links": r[o, "cdp"],
                },
            )
            for o in t
        )

    def get_interfaces(self):
        """
        Build interface counts
        :return:
        """
        r = Interface._get_collection().aggregate(
            [
                {"$match": {"type": "physical"}},
                {"$group": {"_id": "$managed_object", "total": {"$sum": 1}}},
            ]
        )
        return dict((d["_id"], {"n_interfaces": d["total"]}) for d in r)

    def get_caps(self):
        # name -> id map
        caps = dict(
            (self.CAPS_MAP[d["name"]], d["_id"])
            for d in Capability._get_collection().find(
                {"name": {"$in": list(self.CAPS_MAP)}}, {"_id": 1, "name": 1}
            )
        )
        # object -> caps
        add_expr = dict((c, {"$in": [caps[c], "$caps.capability"]}) for c in caps)
        project_expr = dict((c, 1) for c in caps)
        project_expr["_id"] = 1
        return dict(
            (d["_id"], dict((x, d[x]) for x in d if x != "_id"))
            for d in ObjectCapabilities._get_collection().aggregate(
                [{"$addFields": add_expr}, {"$project": project_expr}]
            )
        )

    @staticmethod
    def get_mo_sn():
        """
        Extract serial number from attributes
        :return:
        """
        r = {
            mo_id: [serial]
            for mo_id, serial in ManagedObjectAttribute.objects.filter(
                key="Serial Number"
            ).values_list("managed_object", "value")
        }
        return r

    @staticmethod
    def get_reboots(start_date=None, stop_date=None):
        match = {"ts": {"$gte": start_date, "$lte": stop_date}}
        pipeline = [
            {"$match": match},
            {"$group": {"_id": "$object", "count": {"$sum": 1}}},
            {"$sort": {"count": -1}},
        ]
        data = (
            Reboot._get_collection()
            .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
            .aggregate(pipeline)
        )
        # data = data["result"]
        return {rb["_id"]: {"n_reboots": rb["count"]} for rb in data}

    @staticmethod
    def get_availability(start_date, stop_date, skip_zero_avail=False):
        # now = datetime.datetime.now()
        b = start_date
        d = stop_date
        outages = defaultdict(list)
        td = (d - b).total_seconds()
        # q = Q(start__gte=b) | Q(stop__gte=b) | Q(stop__exists=False)
        q = (Q(start__gte=b) | Q(stop__gte=b) | Q(stop__exists=False)) & Q(start__lt=d)
        for o in Outage.objects.filter(q):
            start = max(o.start, b)
            stop = o.stop if (o.stop and o.stop < d) else d
            if (stop - start).total_seconds() == td and skip_zero_avail:
                continue
            outages[o.object] += [(stop - start).total_seconds()]
        # Normalize to percents
        return {
            o: {
                "availability": (td - sum(outages[o])) * 100.0 / td,
                "total_unavailability": int(sum(outages[o])),
                "n_outages": len(outages[o]),
            }
            for o in outages
        }

    @staticmethod
    def get_n_subs_n_serv():
        r = defaultdict(dict)
        service_pipeline = [
            {"$unwind": "$service"},
            {"$group": {"_id": "$managed_object", "service_sum": {"$sum": "$service.summary"}}},
        ]
        for doc in ServiceSummary._get_collection().aggregate(service_pipeline):
            r[doc["_id"]]["n_services"] = doc["service_sum"]
        subscriber_pipeline = [
            {"$unwind": "$subscriber"},
            {
                "$group": {
                    "_id": "$managed_object",
                    "subscriber_sum": {"$sum": "$subscriber.summary"},
                }
            },
        ]
        for doc in ServiceSummary._get_collection().aggregate(subscriber_pipeline):
            r[doc["_id"]]["n_subscribers"] = doc["subscriber_sum"]
        return r

    @staticmethod
    def get_object_metrics(start, stop):
        """
        :param start:
        :type start: datetime.datetime
        :param stop:
        :type stop: datetime.datetime
        :return:
        """
        r = {}
        bi_map = {
            str(bi_id): mo_id
            for mo_id, bi_id in ManagedObject.objects.values_list("id", "bi_id")
        }
        ch = connection()
        for row in ch.execute(
            "SELECT managed_object, sum(stp_topology_changes_delta) "
            "FROM routing WHERE ts > '%s' and ts < '%s' GROUP BY managed_object"
            % (
                start.replace(microsecond=0).isoformat(sep=" "),
                stop.replace(microsecond=0).isoformat(sep=" "),
            )
        ):
            # delta
            r[bi_map[row[0]]] = {"n_stp_topo_changes": row[1]}
        del bi_map
        return r
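# Worked example of the availability arithmetic in get_availability() above:
# a 24-hour window is 86400 seconds; a single 36-minute outage is 2160 seconds,
# so availability = (86400 - 2160) * 100 / 86400 = 97.5%. Numbers are
# illustrative only.
def _availability_example():
    td = 24 * 3600    # window length, seconds
    outage = 36 * 60  # one outage, seconds
    assert (td - outage) * 100.0 / td == 97.5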
class ManagedObjectsExtractor(BaseExtractor):
    name = "managedobjects"
    is_snapshot = True

    # Caps to field mapping
    CAPS_MAP = {
        "Network | STP": "has_stp",
        "Network | CDP": "has_cdp",
        "Network | LLDP": "has_lldp",
        "SNMP": "has_snmp",
        "SNMP | v1": "has_snmp_v1",
        "SNMP | v2c": "has_snmp_v2c",
    }

    def __init__(self, prefix, start, stop):
        super(ManagedObjectsExtractor, self).__init__(prefix, start, stop)
        self.mo_stream = Stream(ManagedObjectBI, prefix)

    def extract(self):
        nr = 0
        ts = datetime.datetime.now()
        # External data
        x_data = [
            self.get_interfaces(),
            self.get_links(),
            self.get_caps(),
        ]
        # Extract managed objects
        for mo in ManagedObject.objects.all():
            did = DiscoveryID.objects.filter(object=mo).first()
            uptime = Uptime.objects.filter(object=mo.id, stop=None).first()
            r = {
                "ts": ts,
                "managed_object": mo,
                "profile": mo.profile,
                "administrative_domain": mo.administrative_domain,
                "segment": mo.segment,
                "container": mo.container,
                "level": mo.object_profile.level,
                "x": mo.x,
                "y": mo.y,
                "pool": mo.pool,
                # "object_profile": mo.object_profile,
                "vendor": mo.vendor,
                "platform": mo.platform,
                "version": mo.version,
                "name": ch_escape(mo.name),
                "hostname": did.hostname if did else "",
                "ip": mo.address,
                "is_managed": mo.is_managed,
                "location": mo.container.get_address_text() if mo.container else "",
                "uptime": uptime.last_value if uptime else 0.0,
                # subscribers
                # services
            }
            # Apply external data
            for data in x_data:
                d = data.get(mo.id)
                if d:
                    r.update(d)
            # Submit
            self.mo_stream.push(**r)
            nr += 1
        self.mo_stream.finish()
        return nr

    def get_links(self):
        """
        Build discovery method summary
        :return:
        """
        t = defaultdict(int)  # object -> count
        r = defaultdict(int)  # object_id, method -> count
        neighbors = defaultdict(set)  # object_id -> {objects}
        for d in Link._get_collection().find():
            method = d.get("discovery_method")
            linked = d.get("linked_objects", [])
            for o in linked:
                r[o, method] += 1
                t[o] += 1
                neighbors[o].update(linked)
        return dict(
            (
                o,
                {
                    "n_neighbors": len(neighbors[o]),
                    "n_links": t[o],
                    "nri_links": r[o, "nri"],
                    "mac_links": r[o, "mac"],
                    "stp_links": r[o, "stp"],
                    "lldp_links": r[o, "lldp"],
                    "cdp_links": r[o, "cdp"],
                },
            )
            for o in t
        )

    def get_interfaces(self):
        """
        Build interface counts
        :return:
        """
        r = Interface._get_collection().aggregate(
            [
                {"$match": {"type": "physical"}},
                {"$group": {"_id": "$managed_object", "total": {"$sum": 1}}},
            ]
        )
        return dict((d["_id"], {"n_interfaces": d["total"]}) for d in r)

    def get_caps(self):
        # name -> id map
        caps = dict(
            (self.CAPS_MAP[d["name"]], d["_id"])
            for d in Capability._get_collection().find(
                {"name": {"$in": list(self.CAPS_MAP)}}, {"_id": 1, "name": 1}
            )
        )
        # object -> caps
        add_expr = dict((c, {"$in": [caps[c], "$caps.capability"]}) for c in caps)
        project_expr = dict((c, 1) for c in caps)
        project_expr["_id"] = 1
        return dict(
            (d["_id"], dict((x, d[x]) for x in d if x != "_id"))
            for d in ObjectCapabilities._get_collection().aggregate(
                [{"$addFields": add_expr}, {"$project": project_expr}]
            )
        )
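# Plain-Python equivalent of the $addFields/$in stage used by get_caps(), to
# show the document shape it assumes: each ObjectCapabilities document carries
# a "caps" list of {"capability": <id>, ...} items, and the pipeline turns the
# presence of the selected capability ids into boolean columns (has_stp,
# has_snmp, ...). Illustrative sketch only; not used by the extractor.
def caps_row_sketch(doc, caps):
    """doc: one ObjectCapabilities document; caps: {"has_stp": <capability id>, ...}."""
    present = {c["capability"] for c in doc.get("caps", [])}
    return {field: cap_id in present for field, cap_id in caps.items()}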