def _apply_interfaces(mo, r):
    """Attach interface, subinterface and link data for *mo* to record *r*."""
    # Interface id -> (object id, name)
    ifcache = {}
    # Interfaces ordered naturally by name
    interfaces = sorted(
        Interface._get_collection().find({"managed_object": mo.id}),
        key=lambda x: split_alnum(x["name"]),
    )
    ifcache.update((i["_id"], (i["managed_object"], i["name"])) for i in interfaces)
    # Subinterfaces grouped by parent interface
    subs = defaultdict(list)
    for s in SubInterface._get_collection().find({"managed_object": mo.id}):
        subs[s["interface"]].append(s)
    # Links grouped by interface id
    links = defaultdict(list)
    for l in Link._get_collection().find({"linked_objects": mo.id}):
        for li in l.get("interfaces", []):
            links[li].append(l)
    # Resolve remote linked interfaces into the cache
    if links:
        cursor = Interface._get_collection().find(
            {"_id": {"$in": list(links)}}, {"_id": 1, "managed_object": 1, "name": 1}
        )
        for i in cursor:
            ifcache[i["_id"]] = (i["managed_object"], i["name"])
    # Build the resulting interface list
    r["interfaces"] = [
        ManagedObjectDataStream._get_interface(i, subs[i["_id"]], links[i["_id"]], ifcache)
        for i in interfaces
    ]
def bulk_field_link_count(self, data):
    """
    Annotate each row in *data* with a ``link_count`` field.
    :param data: List of dicts, each carrying an ``id`` key
    :return: The same list, with ``link_count`` set on every row
    """
    mo_ids = [row["id"] for row in data]
    if not mo_ids:
        return data
    # Count links per managed object in a single aggregation pass
    pipeline = [
        {"$match": {"linked_objects": {"$in": mo_ids}}},
        {"$unwind": "$linked_objects"},
        {"$group": {"_id": "$linked_objects", "total": {"$sum": 1}}},
    ]
    links_count = {row["_id"]: row["total"] for row in Link._get_collection().aggregate(pipeline)}
    # Apply counts, defaulting to zero for unlinked objects
    for row in data:
        row["link_count"] = links_count.get(row["id"]) or 0
    return data
def get_links(self):
    """
    Build per-object discovery method summary.
    :return: dict of object id -> link statistics
    """
    total = defaultdict(int)  # object -> total link count
    per_method = defaultdict(int)  # (object_id, method) -> count
    neighbors = defaultdict(set)  # object_id -> {objects}
    for doc in Link._get_collection().find():
        method = doc.get("discovery_method")
        linked = doc.get("linked_objects", [])
        for obj in linked:
            per_method[obj, method] += 1
            total[obj] += 1
            neighbors[obj].update(linked)
    return {
        obj: {
            "n_neighbors": len(neighbors[obj]),
            "n_links": total[obj],
            "nri_links": per_method[obj, "nri"],
            "mac_links": per_method[obj, "mac"],
            "stp_links": per_method[obj, "stp"],
            "lldp_links": per_method[obj, "lldp"],
            "cdp_links": per_method[obj, "cdp"],
        }
        for obj in total
    }
def get_links(self):
    """
    Build per-object discovery method summary.
    :return: dict of object id -> link statistics
    """
    total = defaultdict(int)  # object -> total link count
    per_method = defaultdict(int)  # (object_id, method) -> count
    neighbors = defaultdict(set)  # object_id -> {objects}
    cursor = Link._get_collection().find(
        {}, {"_id": 0, "discovery_method": 1, "linked_objects": 1}
    )
    for doc in cursor:
        method = doc.get("discovery_method")
        linked = doc.get("linked_objects", [])
        for obj in linked:
            per_method[obj, method] += 1
            total[obj] += 1
            neighbors[obj].update(linked)

    def link_data(mo):
        # Links not attributed to any LD_MAP method are reported as "other"
        remaining = total[mo]
        ld = {
            "n_neighbors": len(neighbors[mo]) - 1,  # neighbor set includes the object itself
            "n_links": remaining,
        }
        for lm, field in self.LD_MAP.items():
            n = per_method.get((mo, lm), 0)
            ld[field] = n
            remaining -= n
        ld["other_links"] = remaining
        return ld

    return {obj: link_data(obj) for obj in total}
def get_linked_pops(self):
    """Return the set of PoPs reachable via links from this PoP's objects."""
    linked = set()
    self.pops = set(self.get_pop_objects())
    # Interfaces belonging to this PoP's managed objects
    # NOTE(review): assumes values_list("id") yields scalar ids (mongoengine
    # single-field behavior) — confirm against the Interface model
    self_interfaces = set(
        Interface.objects.filter(managed_object__in=self.pops).values_list("id")
    )
    # All interfaces appearing on links together with ours
    r_ifaces = set()
    cursor = Link._get_collection().find(
        {"interfaces": {"$in": list(self_interfaces)}}, {"_id": 0, "interfaces": 1}
    )
    for ld in cursor:
        r_ifaces.update(ld.get("interfaces", []))
    # Keep only the remote ends
    r_ifaces -= self_interfaces
    # Managed objects owning the remote interfaces
    r_mos = {
        i["managed_object"]
        for i in Interface._get_collection().find(
            {"_id": {"$in": list(r_ifaces)}}, {"_id": 0, "managed_object": 1}
        )
    }
    # Map the remote managed objects back to their PoPs
    for o in Object.objects.filter(
        data__match={
            "interface": "management",
            "attr": "managed_object",
            "value__in": list(r_mos),
        }
    ):
        pop = o.get_pop()
        if pop:
            linked.add(pop)
    return linked
def iter_ids_batch():
    """
    Iterate over Link ids in batches of BATCH_SIZE.

    Yields lists of ids ordered by _id, advancing a ``$gt`` cursor
    between batches so every id is produced exactly once.
    """
    # Fixes vs. previous version: removed leftover debug print; the old
    # code referenced the comprehension variable ``d`` after the loop,
    # which raises NameError on Python 3 (and crashes on an empty batch),
    # and could yield a trailing empty list.
    match = {}
    while True:
        cursor = (
            Link._get_collection()
            .find(match, {"_id": 1}, no_cursor_timeout=True)
            .sort("_id")
            .limit(BATCH_SIZE)
        )
        ids = [doc["_id"] for doc in cursor]
        if not ids:
            break  # collection exhausted
        yield ids
        if len(ids) < BATCH_SIZE:
            break  # short batch -> no more documents follow
        match = {"_id": {"$gt": ids[-1]}}
def consume_objects(self, src: NetworkSegment, dst: NetworkSegment) -> None:
    """
    Move all objects from src to dst
    :param src: Segment to drain
    :param dst: Segment receiving the objects
    :return:
    """
    self.logger.info("%s consumes objects from %s", dst.name, src.name)
    objects: List[ManagedObject] = self.get_objects(src)
    if not objects:
        self.logger.info("Nothing to consume. Giving up.")
        return
    self.logger.info("Moving %d objects from %s to %s", len(objects), src.name, dst.name)
    dst_pwr = self.get_power(dst)
    gained = 0
    # Reassign every object to the destination segment
    for obj in objects:
        self.logger.info("Moving %s from %s to %s", obj.name, src.name, dst.name)
        obj.segment = dst
        obj.save()
        obj._reset_caches()
        gained += obj.object_profile.level
    self.logger.info(
        "%s power is increased from %d to %d (+%d)", dst.name, dst_pwr, dst_pwr + gained, gained
    )
    # Adjust power caches
    self.set_power(src, 0)
    self.set_power(dst, dst_pwr + gained)
    # Update link segment information
    Link._get_collection().update_many(
        {"linked_segments": src.id}, {"$pull": {"linked_segments": src.id}}
    )
    # Eliminate source segment when possible
    self.destroy_segment(src)
    # Force topology rebuild if moved to persistent segment
    if dst.profile.is_persistent:
        self.refresh_topology(dst)
def iter_ids_batch():
    """
    Iterate over Link documents in batches of BATCH_SIZE.

    Fetches ids ordered by _id, yields the corresponding Link objects
    one by one, and advances a ``$gt`` cursor between batches.
    """
    # Fix: removed leftover debug print and commented-out dead code.
    match = {}
    while True:
        cursor = (
            Link._get_collection()
            .find(match, {"_id": 1}, no_cursor_timeout=True)
            .sort("_id")
            .limit(BATCH_SIZE)
        )
        ids = [doc["_id"] for doc in cursor]
        if not ids:
            break  # collection exhausted
        for link in Link.objects.filter(id__in=ids).timeout(False):
            yield link
        match = {"_id": {"$gt": ids[-1]}}
def _apply_interfaces(mo: ManagedObject, r):
    """Attach interface, subinterface, link and service data for *mo* to *r*."""
    # Interface id -> (object id, name)
    ifcache = {}
    # Interfaces ordered naturally by name
    interfaces = sorted(
        Interface._get_collection().find({"managed_object": mo.id}),
        key=lambda x: alnum_key(x["name"]),
    )
    ifcache.update((i["_id"], (i["managed_object"], i["name"])) for i in interfaces)
    # Subinterfaces grouped by parent interface
    subs = defaultdict(list)
    for s in SubInterface._get_collection().find({"managed_object": mo.id}):
        subs[s["interface"]].append(s)
    # Links grouped by interface id
    links = defaultdict(list)
    for link in Link._get_collection().find({"linked_objects": mo.id}):
        for li in link.get("interfaces", []):
            links[li].append(link)
    # Resolve remote linked interfaces into the cache
    if links:
        cursor = Interface._get_collection().find(
            {"_id": {"$in": list(links)}}, {"_id": 1, "managed_object": 1, "name": 1}
        )
        for i in cursor:
            ifcache[i["_id"]] = (i["managed_object"], i["name"])
    # Resolve services referenced by interfaces
    svc_ids = [i["service"] for i in interfaces if i.get("service")]
    services = {svc.id: svc for svc in Service.objects.filter(id__in=svc_ids)} if svc_ids else {}
    # Build the resulting interface list
    r["interfaces"] = [
        ManagedObjectDataStream._get_interface(
            i, subs[i["_id"]], links[i["_id"]], ifcache, set(mo.data.uplinks), services
        )
        for i in interfaces
    ]
def get_data(self, **kwargs):
    """Build the report dataset: object, interface, link and discovery job summaries."""
    data = []
    # Managed objects summary
    data += [SectionRow("Managed Objects")]
    rows = []
    j_box = 0
    j_box_sec = 0.0
    j_periodic = 0
    j_periodic_sec = 0.0
    for p in ManagedObjectProfile.objects.all():
        o_count = ManagedObject.objects.filter(object_profile=p).count()
        rows.append([p.name, o_count])
        # Accumulate discovery job totals and per-second rates
        if p.enable_box_discovery:
            j_box += o_count
            j_box_sec += float(o_count) / p.box_discovery_interval
        if p.enable_periodic_discovery:
            j_periodic += o_count
            j_periodic_sec += float(o_count) / p.periodic_discovery_interval
    data += sorted(rows, key=lambda x: -x[1])
    # Interface summary
    rows = []
    data += [SectionRow("Interfaces")]
    unclassified = Interface.objects.count()
    for p in InterfaceProfile.objects.all():
        n = Interface.objects.filter(profile=p).count()
        rows.append([p.name, n])
        unclassified -= n
    # Interfaces not matching any profile
    data += [["-", unclassified]]
    data += sorted(rows, key=lambda x: -x[1])
    # Links summary, grouped by discovery method
    data += [SectionRow("Links")]
    agg = Link._get_collection().aggregate(
        [
            {"$group": {"_id": "$discovery_method", "count": {"$sum": 1}}},
            {"$sort": {"count": -1}},
        ]
    )
    rows = [(x["_id"], x["count"]) for x in agg]
    data += sorted(rows, key=lambda x: -x[1])
    # Discovery jobs
    data += [SectionRow("Discovery jobs summary")]
    data += [["Box", j_box], ["Periodic", j_periodic]]
    data += [SectionRow("Jobs per second")]
    data += [["Box", j_box_sec], ["Periodic", j_periodic_sec]]
    return self.from_dataset(
        title=self.title,
        columns=[
            "",
            TableColumn(
                "Count", align="right", format="integer", total="sum", total_label="Total"
            ),
        ],
        data=data,
    )
def get_data(self, request, pool, obj_profile=None, **kwargs):
    """
    Build the problem report for managed objects in *pool*.

    Flags objects with a failed profile check, no interfaces, no links,
    or no uplinks.
    :param request: HTTP request (used for access control)
    :param pool: Pool to report on
    :param obj_profile: Optional managed object profile filter
    :return: Report dataset
    """
    problems = {}  # id -> problem
    # Build the queryset incrementally instead of duplicating it four times
    qs = ManagedObject.objects.filter(is_managed=True, pool=pool)
    if obj_profile:
        qs = qs.filter(object_profile=obj_profile)
    if not request.user.is_superuser:
        qs = qs.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    mos = dict((mo.id, mo) for mo in qs)
    mos_set = set(mos)
    # Get all managed objects with generic profile
    for mo in mos:
        if mos[mo].profile.name == GENERIC_PROFILE:
            problems[mo] = _("Profile check failed")
    # Get all managed objects without interfaces
    if_mo = dict(
        (x["_id"], x.get("managed_object"))
        for x in Interface._get_collection().find({}, {"_id": 1, "managed_object": 1})
    )
    # .values() works on both Python 2 and 3 (unlike the old .itervalues())
    for mo in mos_set - set(problems) - set(if_mo.values()):
        problems[mo] = _("No interfaces")
    # Get all managed objects without links
    linked_mos = set()
    for d in Link._get_collection().find({}):
        for i in d["interfaces"]:
            linked_mos.add(if_mo.get(i))
    for mo in mos_set - set(problems) - linked_mos:
        problems[mo] = _("No links")
    # Get all managed objects without uplinks
    uplinks = {}
    for d in ObjectData._get_collection().find():
        nu = len(d.get("uplinks", []))
        if nu:
            uplinks[d["_id"]] = nu
    for mo in mos_set - set(problems) - set(uplinks):
        problems[mo] = _("No uplinks")
    # Assemble report rows
    data = []
    for mo_id in problems:
        if mo_id not in mos:
            continue
        mo = mos[mo_id]
        data += [
            [
                mo.name,
                mo.address,
                mo.profile.name,
                mo.platform.name if mo.platform else "",
                mo.segment.name if mo.segment else "",
                problems[mo_id],
            ]
        ]
    data = sorted(data)
    return self.from_dataset(
        title=self.title,
        columns=["Name", "Address", "Profile", "Platform", "Segment", "Problem"],
        data=data,
        enumerate=True,
    )
def get_data(self, request, pool=None, obj_profile=None, **kwargs):
    """Build the problem report for managed objects in the given pool."""
    problems = {}  # id -> problem
    qs = ManagedObject.objects.filter(is_managed=True, pool=pool)
    if not request.user.is_superuser:
        qs = qs.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if obj_profile:
        # Restrict to the requested object profile
        qs = qs.filter(object_profile=obj_profile)
    mos = {
        row[0]: (row[1], row[2], Profile.get_by_id(row[3]), row[4], row[5])
        for row in qs.values_list("id", "name", "address", "profile", "platform", "segment")
    }
    mos_set = set(mos)
    # Managed objects with generic profile
    for mo in mos:
        if mos[mo][2] == GENERIC_PROFILE:
            problems[mo] = _("Profile check failed")
    # Managed objects without interfaces
    if_mo = {
        x["_id"]: x.get("managed_object")
        for x in Interface._get_collection().find({}, {"_id": 1, "managed_object": 1})
    }
    for mo in mos_set - set(problems) - set(six.itervalues(if_mo)):
        problems[mo] = _("No interfaces")
    # Managed objects without links
    linked_mos = set()
    for d in Link._get_collection().find({}):
        for i in d["interfaces"]:
            linked_mos.add(if_mo.get(i))
    for mo in mos_set - set(problems) - linked_mos:
        problems[mo] = _("No links")
    # Managed objects without uplinks
    uplinks = {}
    for d in ObjectData._get_collection().find():
        n_uplinks = len(d.get("uplinks", []))
        if n_uplinks:
            uplinks[d["_id"]] = n_uplinks
    for mo in mos_set - set(problems) - set(uplinks):
        problems[mo] = _("No uplinks")
    # Assemble report rows
    data = []
    for mo_id in problems:
        if mo_id not in mos:
            continue
        name, address, profile, platform, segment = mos[mo_id]
        data.append(
            [
                name,
                address,
                profile.name,
                Platform.get_by_id(platform).name if platform else "",
                NetworkSegment.get_by_id(segment).name if segment else "",
                problems[mo_id],
            ]
        )
    data = sorted(data)
    return self.from_dataset(
        title=self.title,
        columns=["Name", "Address", "Profile", "Platform", "Segment", "Problem"],
        data=data,
        enumerate=True,
    )