def get_links(self):
    """
    Build discovery method summary.

    :return: object id -> dict of neighbor/link counters keyed per
             discovery method (fields taken from self.LD_MAP)
    """
    total_links = defaultdict(int)  # object id -> number of links
    method_count = defaultdict(int)  # (object id, method) -> number of links
    peer_map = defaultdict(set)  # object id -> linked objects (incl. self)
    cursor = Link._get_collection().find(
        {}, {"_id": 0, "discovery_method": 1, "linked_objects": 1}
    )
    for doc in cursor:
        method = doc.get("discovery_method")
        members = doc.get("linked_objects", [])
        for obj in members:
            method_count[obj, method] += 1
            total_links[obj] += 1
            peer_map[obj].update(members)

    def summarize(obj):
        # Subtract per-method counts from the total to get the remainder
        remainder = total_links[obj]
        summary = {
            "n_neighbors": len(peer_map[obj]) - 1,  # exclude the object itself
            "n_links": total_links[obj],
        }
        for method, field in self.LD_MAP.items():
            count = method_count.get((obj, method), 0)
            summary[field] = count
            remainder -= count
        summary["other_links"] = remainder
        return summary

    return {obj: summarize(obj) for obj in total_links}
def get_linked_pops(self):
    """
    Return the set of PoPs reachable from this PoP via links.
    """
    self.pops = set(self.get_pop_objects())
    # Interfaces belonging to our own PoP objects
    own_ifaces = set(
        Interface.objects.filter(managed_object__in=self.pops).values_list("id")
    )
    # Gather all interfaces appearing in links that touch our interfaces
    remote_ifaces = set()
    for link_doc in Link._get_collection().find(
        {"interfaces": {"$in": list(own_ifaces)}},
        {"_id": 0, "interfaces": 1},
    ):
        remote_ifaces.update(link_doc.get("interfaces", []))
    remote_ifaces -= own_ifaces
    # Resolve remote interfaces to their managed objects
    remote_mos = {
        doc["managed_object"]
        for doc in Interface._get_collection().find(
            {"_id": {"$in": list(remote_ifaces)}},
            {"_id": 0, "managed_object": 1},
        )
    }
    # Map remote managed objects back to their PoPs
    linked = set()
    for obj in Object.objects.filter(
        data__match={
            "interface": "management",
            "attr": "managed_object",
            "value__in": list(remote_mos),
        }
    ):
        pop = obj.get_pop()
        if pop:
            linked.add(pop)
    return linked
def get_links(self):
    """
    Build discovery method summary.

    :return: object id -> dict of link counters per discovery method
    """
    link_total = defaultdict(int)  # object -> count
    per_method = defaultdict(int)  # (object_id, method) -> count
    peers = defaultdict(set)  # object_id -> {objects}
    for doc in Link._get_collection().find():
        method = doc.get("discovery_method")
        members = doc.get("linked_objects", [])
        for obj in members:
            per_method[obj, method] += 1
            link_total[obj] += 1
            peers[obj].update(members)
    return {
        obj: {
            "n_neighbors": len(peers[obj]),
            "n_links": link_total[obj],
            "nri_links": per_method[obj, "nri"],
            "mac_links": per_method[obj, "mac"],
            "stp_links": per_method[obj, "stp"],
            "lldp_links": per_method[obj, "lldp"],
            "cdp_links": per_method[obj, "cdp"],
        }
        for obj in link_total
    }
def _apply_interfaces(mo, r):
    """
    Fill r["interfaces"] with interface records of managed object mo.
    """
    # Interface id -> (object id, name)
    ifcache = {}
    # Object's interfaces, in natural name order
    interfaces = sorted(
        Interface._get_collection().find({"managed_object": mo.id}),
        key=lambda doc: split_alnum(doc["name"]),
    )
    for doc in interfaces:
        ifcache[doc["_id"]] = (doc["managed_object"], doc["name"])
    # Subinterfaces grouped by parent interface id
    subs = defaultdict(list)
    for sub in SubInterface._get_collection().find({"managed_object": mo.id}):
        subs[sub["interface"]].append(sub)
    # Links grouped by interface id
    links = defaultdict(list)
    for link in Link._get_collection().find({"linked_objects": mo.id}):
        for iface_id in link.get("interfaces", []):
            links[iface_id].append(link)
    # Ensure linked (possibly remote) interfaces are present in the cache
    if links:
        for doc in Interface._get_collection().find(
            {"_id": {"$in": list(links)}}, {"_id": 1, "managed_object": 1, "name": 1}
        ):
            ifcache[doc["_id"]] = (doc["managed_object"], doc["name"])
    # Populate the result
    r["interfaces"] = [
        ManagedObjectDataStream._get_interface(doc, subs[doc["_id"]], links[doc["_id"]], ifcache)
        for doc in interfaces
    ]
def api_links(self, request, id):
    """
    Return the links of a managed object as a list of dicts,
    pairing local interfaces with their remote counterparts.
    """
    o = self.get_object_or_404(ManagedObject, id=id)
    if not o.has_access(request.user):
        return self.response_forbidden("Access denied")
    result = []
    for link in Link.object_links(o):
        # Split link interfaces into the local and remote sides
        local_side = [i for i in link.interfaces if i.managed_object.id == o.id]
        remote_side = [i for i in link.interfaces if i.managed_object.id != o.id]
        for li, ri in zip(local_side, remote_side):
            remote_mo = ri.managed_object
            result.append({
                "link_id": str(link.id),
                "local_interface": str(li.id),
                "local_interface__label": li.name,
                "remote_object": remote_mo.id,
                "remote_object__label": remote_mo.name,
                "remote_platform": remote_mo.platform.name if remote_mo.platform else "",
                "remote_interface": str(ri.id),
                "remote_interface__label": ri.name,
                "discovery_method": link.discovery_method,
                "local_description": li.description,
                "remote_description": ri.description,
                "first_discovered": link.first_discovered.isoformat() if link.first_discovered else None,
                "last_seen": link.last_seen.isoformat() if link.last_seen else None
            })
    return result
def bulk_field_link_count(self, data):
    """
    Apply link_count fields.

    :param data: list of dicts carrying an "id" key
    :return: the same list with "link_count" set on every item
    """
    mo_ids = [row["id"] for row in data]
    if not mo_ids:
        return data
    # Count links per managed object
    pipeline = [
        {"$match": {"linked_objects": {"$in": mo_ids}}},
        {"$unwind": "$linked_objects"},
        {"$group": {"_id": "$linked_objects", "total": {"$sum": 1}}},
    ]
    links_count = {
        doc["_id"]: doc["total"]
        for doc in Link._get_collection().aggregate(pipeline)
    }
    # Apply link counts
    for row in data:
        row["link_count"] = links_count.get(row["id"]) or 0
    return data
def api_links(self, request, id):
    """
    Return committed and pending links of a managed object.
    """
    o = self.get_object_or_404(ManagedObject, id=id)
    if not o.has_access(request.user):
        return self.response_forbidden("Access denied")
    result = []
    # Committed links
    for link in Link.object_links(o):
        local_side = []
        remote_side = []
        for iface in link.interfaces:
            if iface.managed_object.id == o.id:
                local_side.append(iface)
            else:
                remote_side.append(iface)
        for li, ri in zip(local_side, remote_side):
            result.append({
                "id": str(link.id),
                "local_interface": str(li.id),
                "local_interface__label": li.name,
                "remote_object": ri.managed_object.id,
                "remote_object__label": ri.managed_object.name,
                "remote_interface": str(ri.id),
                "remote_interface__label": ri.name,
                "discovery_method": link.discovery_method,
                "commited": True,
                "local_description": li.description,
                "remote_description": ri.description
            })
    # Pending links (not yet committed)
    q = MQ(local_object=o.id) | MQ(remote_object=o.id)
    for link in PendingLinkCheck.objects.filter(q):
        # Normalize so that the local side belongs to o
        if link.local_object.id == o.id:
            ro = link.remote_object
            lin = link.local_interface
            rin = link.remote_interface
        else:
            ro = link.local_object
            lin = link.remote_interface
            rin = link.local_interface
        li = Interface.objects.filter(managed_object=o.id, name=lin).first()
        if not li:
            continue
        ri = Interface.objects.filter(managed_object=ro.id, name=rin).first()
        if not ri:
            continue
        result.append({
            "id": str(link.id),
            "local_interface": str(li.id),
            "local_interface__label": li.name,
            "remote_object": ro.id,
            "remote_object__label": ro.name,
            "remote_interface": str(ri.id),
            "remote_interface__label": ri.name,
            "discovery_method": link.method,
            "commited": False,
            "local_description": li.description,
            "remote_description": ri.description
        })
    return result
def iter_interfaces_meta(self):
    """
    Yield ConfDB meta tuples for every interface of self.object.

    Emitted paths (all starting with "interfaces", <ifname>, "meta"):
    profile id/name, ifindex, mac, hints, and per-link object/interface
    information. Only interfaces present both in the inventory and in
    the ConfDB query result are processed.
    """
    # Get all interfaces
    ifaces = {
        iface.name: iface
        for iface in Interface.objects.filter(managed_object=self.object.id)
    }
    # Ids of our own interfaces, used to split link sides below
    own_ifaces = set(ifaces[iface].id for iface in ifaces)
    # Get all links
    links = {}  # interface -> object -> [remote_interface, ...]
    for link in Link.object_links(self.object):
        local_interfaces = set()
        remote_interfaces = {}  # object -> [interfaces]
        for i in link.interfaces:
            if i.id in own_ifaces:
                local_interfaces.add(i.name)
            else:
                if i.managed_object not in remote_interfaces:
                    remote_interfaces[i.managed_object] = []
                remote_interfaces[i.managed_object] += [i.name]
        # Every local interface of the link shares the same remote map
        for li in local_interfaces:
            links[li] = remote_interfaces
    # Yield meta for all interfaces
    for ctx in self.confdb.query("Match('interfaces', ifname)"):
        iface = ifaces.get(ctx["ifname"])
        if not iface:
            # ConfDB interface not present in inventory -- skip
            continue
        # interfaces X meta profile
        if iface.profile:
            yield "interfaces", iface.name, "meta", "profile", "id", str(
                iface.profile.id)
            yield "interfaces", iface.name, "meta", "profile", "name", iface.profile.name
        if iface.ifindex is not None:
            yield "interfaces", iface.name, "meta", "ifindex", int(
                iface.ifindex)
        if iface.mac:
            yield "interfaces", iface.name, "meta", "mac", str(iface.mac)
        # interfaces X meta hints
        if iface.hints:
            for hint in iface.hints:
                yield "interfaces", iface.name, "meta", "hints", hint
        # interfaces X meta link
        if iface.name in links:
            # Enumerate remote objects in a stable (string-sorted) order
            for n, ro in enumerate(sorted(links[iface.name], key=str)):
                n = str(n)
                yield "interfaces", iface.name, "meta", "link", n, "object", "id", str(
                    ro.id)
                yield "interfaces", iface.name, "meta", "link", n, "object", "name", ro.name
                yield "interfaces", iface.name, "meta", "link", n, "object", "profile", "id", str(
                    ro.object_profile.id)
                yield "interfaces", iface.name, "meta", "link", n, "object", "profile", "name", ro.object_profile.name
                yield "interfaces", iface.name, "meta", "link", n, "object", "profile", "level", ro.object_profile.level
                for ri in sorted(links[iface.name][ro]):
                    yield "interfaces", iface.name, "meta", "link", n, "interface", ri
def iter_ids_batch():
    """
    Yield lists of Link ids in batches of BATCH_SIZE, paginating by _id.

    Fixes the original logic which referenced the comprehension's loop
    variable after the loop: with an empty collection that raised
    NameError, and the terminating iteration yielded a spurious empty
    batch. Also drops the leftover debug print and commented-out loop.

    :return: yields lists of Link _id values
    """
    match = {}
    while True:
        cursor = (
            Link._get_collection()
            .find(match, {"_id": 1}, no_cursor_timeout=True)
            .sort("_id")
            .limit(BATCH_SIZE)
        )
        ids = [row["_id"] for row in cursor]
        if not ids:
            break  # Collection exhausted
        yield ids
        # Continue after the last id seen in this batch
        match = {"_id": {"$gt": ids[-1]}}
def consume_objects(self, src: NetworkSegment, dst: NetworkSegment) -> None:
    """
    Move all objects from src to dst.

    :param src: Source segment, emptied and destroyed when possible
    :param dst: Destination segment
    :return:
    """
    self.logger.info("%s consumes objects from %s", dst.name, src.name)
    objects: List[ManagedObject] = self.get_objects(src)
    if not objects:
        self.logger.info("Nothing to consume. Giving up.")
        return
    self.logger.info("Moving %d objects from %s to %s", len(objects), src.name, dst.name)
    dst_pwr = self.get_power(dst)
    power_gain = 0
    for mo in objects:
        self.logger.info("Moving %s from %s to %s", mo.name, src.name, dst.name)
        mo.segment = dst
        mo.save()
        mo._reset_caches()
        power_gain += mo.object_profile.level
    self.logger.info(
        "%s power is increased from %d to %d (+%d)",
        dst.name,
        dst_pwr,
        dst_pwr + power_gain,
        power_gain,
    )
    # Adjust power caches
    self.set_power(src, 0)
    self.set_power(dst, dst_pwr + power_gain)
    # Update link segment information
    Link._get_collection().update_many(
        {"linked_segments": src.id}, {"$pull": {"linked_segments": src.id}}
    )
    # Eliminate source segment when possible
    self.destroy_segment(src)
    # Force topology rebuild if moved to persistent segment
    if dst.profile.is_persistent:
        self.refresh_topology(dst)
def iter_ids_batch():
    """
    Iterate over all Link documents in batches of BATCH_SIZE,
    paginating by _id so server-side cursors stay short-lived.

    Removes the leftover debug print and the commented-out dead
    termination check from the original.

    :return: yields Link documents
    """
    match = {}
    while True:
        cursor = (
            Link._get_collection()
            .find(match, {"_id": 1}, no_cursor_timeout=True)
            .sort("_id")
            .limit(BATCH_SIZE)
        )
        ids = [row["_id"] for row in cursor]
        if not ids:
            break  # Collection exhausted
        for link in Link.objects.filter(id__in=ids).timeout(False):
            yield link
        # Continue after the last id seen in this batch
        match = {"_id": {"$gt": ids[-1]}}
def load_existing_links(self, object):
    """
    Register already-known point-to-point links of *object*
    in self.submited so they are not re-submitted.
    """
    for link in Link.object_links(object):
        if not link.is_ptp:
            continue
        i1, i2 = link.interfaces
        if link.is_loop:
            # Loop to self: register both directions
            self.submited.add((i1.name, object, i2.name))
            self.submited.add((i2.name, object, i1.name))
        elif i1.managed_object == object:
            # p2p link, local side is i1
            self.submited.add((i1.name, i2.managed_object, i2.name))
        else:
            # p2p link, local side is i2
            self.submited.add((i2.name, i1.managed_object, i1.name))
def handle_split_floating(self, profile, ids, *args, **options):
    """
    Split each given segment into per-object floating ("bubble")
    segments with the given profile, then schedule biosegmentation
    trials for every link of the moved objects.

    :param profile: NetworkSegmentProfile name; must not be persistent
    :param ids: iterable of segment ids to split
    """
    connect()
    p = NetworkSegmentProfile.objects.filter(name=profile).first()
    if not p:
        self.die("Profile not found")
    if p.is_persistent:
        self.die("Segment profile cannot be persistent")
    for seg_id in ids:
        seg = NetworkSegment.get_by_id(seg_id)
        if not seg:
            self.print("@@@ %s - not found. Skipping" % seg_id)
            continue
        self.print("@@@ Splitting %s (%s)" % (seg.name, seg_id))
        objects = list(
            ManagedObject.objects.filter(is_managed=True, segment=seg_id))
        # Move every object into its own freshly created floating segment
        for mo in objects:
            new_segment = NetworkSegment(
                name=mo.administrative_domain.get_bioseg_floating_name(mo)
                or "Bubble for %s" % mo.name,
                profile=p,
                parent=mo.administrative_domain.
                get_bioseg_floating_parent_segment(),
            )
            new_segment.save()
            self.print(" Moving '%s' to segment '%s'" %
                       (mo.name, new_segment.name))
            mo.segment = new_segment
            mo.save()
        # Establish trials
        self.print("@@@ Scheduling trials")
        for mo in objects:
            for link in Link.object_links(mo):
                for ro in link.managed_objects:
                    if ro == mo:
                        # Skip the object's own side of the link
                        continue
                    self.print(" '%s' challenging '%s' over %s -- %s" %
                               (mo.segment.name, ro.segment.name, mo.name,
                                ro.name))
                    BioSegTrial.schedule_trial(mo.segment, ro.segment, mo, ro,
                                               reason="link")
def api_discovery(self, request, id):
    """
    Return discovery job status for a managed object.

    The "ping" pseudo-discovery always comes first, followed by one
    entry per configured discovery job class.

    Fix: the original computed a per-method link_count (issuing an
    extra Link query per call) but never used it in the result;
    the dead computation is removed.

    :param request: HTTP request
    :param id: ManagedObject id
    :return: list of dicts describing discovery job states
    """
    from noc.core.scheduler.job import Job

    o = self.get_object_or_404(ManagedObject, id=id)
    if not o.has_access(request.user):
        return self.response_forbidden("Access denied")
    r = [{
        "name": "ping",
        "enable_profile": o.object_profile.enable_ping,
        "status": o.get_status(),
        "last_run": None,
        "last_status": None,
        "next_run": None,
        "jcls": None,
    }]
    for name, jcls in self.DISCOVERY_JOBS:
        # Scheduler state for this discovery job, if any
        job = Job.get_job_data(
            "discovery", jcls=jcls, key=o.id, pool=o.pool.name) or {}
        d = {
            "name": name,
            "enable_profile": getattr(o.object_profile,
                                      "enable_%s_discovery" % name),
            "status": job.get(Job.ATTR_STATUS),
            "last_run": self.to_json(job.get(Job.ATTR_LAST)),
            "last_status": job.get(Job.ATTR_LAST_STATUS),
            "next_run": self.to_json(job.get(Job.ATTR_TS)),
            "jcls": jcls,
        }
        r += [d]
    return r
def handle_reactivate_floating(self, ids, profile=None, allow_persistent=False, *args, **options):
    """
    Reactivate floating segments: schedule biosegmentation trials for
    every link of every object in the selected segments.

    Fixes: ``objects.fillter`` was a typo (AttributeError at runtime),
    and ``nsp.filter(name=profile)`` discarded its result -- querysets
    are immutable, so the filtered queryset must be reassigned.

    :param ids: explicit segment ids; when empty, segments are selected
                by non-persistent profile
    :param profile: optional NetworkSegmentProfile name filter
    :param allow_persistent: accepted for interface compatibility
    """
    connect()
    nsp = NetworkSegmentProfile.objects.filter(is_persistent=False)
    if profile:
        nsp = nsp.filter(name=profile)
    if ids:
        ns = NetworkSegment.objects.filter(id__in=ids)
    elif nsp.count() > 0:
        ns = NetworkSegment.objects.filter(profile__in=nsp)
    else:
        self.die("Setting segment filter condition")
    if profile:
        p = NetworkSegmentProfile.objects.get(name=profile)
        ns = ns.filter(profile=p)
    for seg_id in ns.scalar("id"):
        seg = NetworkSegment.get_by_id(seg_id)
        if not seg:
            self.print("@@@ %s - not found. Skipping" % seg_id)
            continue
        self.print("@@@ Reactivating %s (%s)" % (seg.name, seg_id))
        objects = list(
            ManagedObject.objects.filter(is_managed=True, segment=seg_id))
        # Establish trials
        for mo in objects:
            for link in Link.object_links(mo):
                for ro in link.managed_objects:
                    if ro == mo:
                        # Skip the object's own side of the link
                        continue
                    self.print(" '%s' challenging '%s' over %s -- %s" %
                               (mo.segment.name, ro.segment.name, mo.name,
                                ro.name))
                    BioSegTrial.schedule_trial(
                        mo.segment,
                        ro.segment,
                        mo,
                        ro,
                        reason="link",
                    )
def _apply_interfaces(mo: ManagedObject, r):
    """
    Fill r["interfaces"] with interface records of managed object mo,
    including subinterfaces, links, uplink info and services.
    """
    # Interface id -> (object id, name)
    ifcache = {}
    # Object's interfaces, in natural name order
    interfaces = sorted(
        Interface._get_collection().find({"managed_object": mo.id}),
        key=lambda doc: alnum_key(doc["name"]),
    )
    for doc in interfaces:
        ifcache[doc["_id"]] = (doc["managed_object"], doc["name"])
    # Subinterfaces grouped by parent interface id
    subs = defaultdict(list)
    for sub in SubInterface._get_collection().find({"managed_object": mo.id}):
        subs[sub["interface"]].append(sub)
    # Links grouped by interface id
    links = defaultdict(list)
    for link in Link._get_collection().find({"linked_objects": mo.id}):
        for iface_id in link.get("interfaces", []):
            links[iface_id].append(link)
    # Ensure linked (possibly remote) interfaces are present in the cache
    if links:
        for doc in Interface._get_collection().find(
            {"_id": {"$in": list(links)}}, {"_id": 1, "managed_object": 1, "name": 1}
        ):
            ifcache[doc["_id"]] = (doc["managed_object"], doc["name"])
    # Resolve services referenced by the interfaces
    svc_ids = [doc["service"] for doc in interfaces if doc.get("service")]
    if svc_ids:
        services = {svc.id: svc for svc in Service.objects.filter(id__in=svc_ids)}
    else:
        services = {}
    uplinks = set(mo.data.uplinks)
    # Populate the result
    r["interfaces"] = [
        ManagedObjectDataStream._get_interface(
            doc, subs[doc["_id"]], links[doc["_id"]], ifcache, uplinks, services
        )
        for doc in interfaces
    ]
def api_discovery(self, request, id):
    """
    Return discovery job status for a managed object, including the
    number of links produced by each discovery method.

    :param request: HTTP request
    :param id: ManagedObject id
    :return: list of dicts, "ping" pseudo-discovery first, then one
             entry per active discovery method
    """
    o = self.get_object_or_404(ManagedObject, id=id)
    if not o.has_access(request.user):
        return self.response_forbidden("Access denied")
    # Count links per discovery method; for combined methods like
    # "a+b" only the first component is credited
    link_count = defaultdict(int)
    for link in Link.object_links(o):
        m = link.discovery_method or ""
        if "+" in m:
            m = m.split("+")[0]
        link_count[m] += 1
    r = [{
        "name": "ping",
        "enable_profile": o.object_profile.enable_ping,
        "status": o.get_status(),
        "last_run": None,
        "last_status": None,
        "next_run": None,
        "link_count": None
    }]
    for name in get_active_discovery_methods():
        # Scheduler state for this discovery job, if any
        job = get_job("inv.discovery", name, o.id) or {}
        if name.endswith("_discovery"):
            # Strip the "_discovery" suffix to match link_count keys
            lcmethod = name[:-10]
        else:
            lcmethod = None
        d = {
            "name": name,
            "enable_profile": getattr(o.object_profile, "enable_%s" % name),
            "status": job.get("s"),
            "last_run": self.to_json(job.get("last")),
            "last_status": job.get("ls"),
            "next_run": self.to_json(job.get("ts")),
            "link_count": link_count.get(lcmethod, "")
        }
        r += [d]
    return r
def get_data(self, **kwargs):
    """
    Build the summary dataset: managed objects per profile, interfaces
    per profile, links per discovery method, and discovery job load.
    """
    data = []
    # Managed objects summary
    data.append(SectionRow("Managed Objects"))
    rows = []
    j_box = 0
    j_box_sec = 0.0
    j_periodic = 0
    j_periodic_sec = 0.0
    for profile in ManagedObjectProfile.objects.all():
        o_count = ManagedObject.objects.filter(object_profile=profile).count()
        rows.append([profile.name, o_count])
        if profile.enable_box_discovery:
            j_box += o_count
            j_box_sec += float(o_count) / profile.box_discovery_interval
        if profile.enable_periodic_discovery:
            j_periodic += o_count
            j_periodic_sec += float(o_count) / profile.periodic_discovery_interval
    data.extend(sorted(rows, key=lambda row: -row[1]))
    # Interface summary
    data.append(SectionRow("Interfaces"))
    rows = []
    unclassified = Interface.objects.count()
    for profile in InterfaceProfile.objects.all():
        n = Interface.objects.filter(profile=profile).count()
        rows.append([profile.name, n])
        unclassified -= n
    # Interfaces not covered by any profile
    data.append(["-", unclassified])
    data.extend(sorted(rows, key=lambda row: -row[1]))
    # Links summary, grouped by discovery method
    data.append(SectionRow("Links"))
    agg = Link._get_collection().aggregate([
        {"$group": {"_id": "$discovery_method", "count": {"$sum": 1}}},
        {"$sort": {"count": -1}},
    ])
    rows = [(x["_id"], x["count"]) for x in agg]
    data.extend(sorted(rows, key=lambda row: -row[1]))
    # Discovery jobs
    data.append(SectionRow("Discovery jobs summary"))
    data.append(["Box", j_box])
    data.append(["Periodic", j_periodic])
    data.append(SectionRow("Jobs per second"))
    data.append(["Box", j_box_sec])
    data.append(["Periodic", j_periodic_sec])
    return self.from_dataset(
        title=self.title,
        columns=[
            "",
            TableColumn("Count", align="right", format="integer",
                        total="sum", total_label="Total"),
        ],
        data=data,
    )
def get_data(self, request, pool, obj_profile=None, **kwargs):
    """
    Build the "objects with problems" report dataset: managed objects
    with failed profile check, no interfaces, no links or no uplinks.

    Fix: the original built the full mos dict and then rebuilt it from
    scratch for non-superusers (two DB round-trips, four duplicated
    branches); a single incrementally-filtered queryset is used now.

    :param request: HTTP request (used for access scoping)
    :param pool: Pool to report on
    :param obj_profile: optional object profile filter
    :return: report dataset
    """
    problems = {}  # id -> problem
    # Build one queryset, applying filters incrementally
    qs = ManagedObject.objects.filter(is_managed=True, pool=pool)
    if obj_profile:
        qs = qs.filter(object_profile=obj_profile)
    if not request.user.is_superuser:
        qs = qs.filter(
            administrative_domain__in=UserAccess.get_domains(request.user))
    mos = dict((mo.id, mo) for mo in qs)
    mos_set = set(mos)
    # Get all managed objects with generic profile
    for mo in mos:
        if mos[mo].profile.name == GENERIC_PROFILE:
            problems[mo] = _("Profile check failed")
    # Get all managed objects without interfaces
    if_mo = dict(
        (x["_id"], x.get("managed_object"))
        for x in Interface._get_collection().find(
            {}, {"_id": 1, "managed_object": 1}
        )
    )
    for mo in (mos_set - set(problems) - set(if_mo.itervalues())):
        problems[mo] = _("No interfaces")
    # Get all managed objects without links
    linked_mos = set()
    for d in Link._get_collection().find({}):
        for i in d["interfaces"]:
            linked_mos.add(if_mo.get(i))
    for mo in (mos_set - set(problems) - linked_mos):
        problems[mo] = _("No links")
    # Get all managed objects without uplinks
    uplinks = {}
    for d in ObjectData._get_collection().find():
        nu = len(d.get("uplinks", []))
        if nu:
            uplinks[d["_id"]] = nu
    for mo in (mos_set - set(problems) - set(uplinks)):
        problems[mo] = _("No uplinks")
    #
    data = []
    for mo_id in problems:
        if mo_id not in mos:
            continue
        mo = mos[mo_id]
        data += [[
            mo.name,
            mo.address,
            mo.profile.name,
            mo.platform.name if mo.platform else "",
            mo.segment.name if mo.segment else "",
            problems[mo_id]
        ]]
    data = sorted(data)
    return self.from_dataset(
        title=self.title,
        columns=[
            "Name", "Address", "Profile", "Platform", "Segment", "Problem"
        ],
        data=data,
        enumerate=True
    )
def get_data(self, request, pool=None, obj_profile=None, **kwargs):
    """
    Build the "objects with problems" report dataset: managed objects
    with failed profile check, no interfaces, no links or no uplinks.

    Fix: the original compared ``mos[mo][2]`` (a Profile instance) to
    GENERIC_PROFILE (a profile *name* string), which never matched, so
    "Profile check failed" was never reported. Compare by ``.name``.

    :param request: HTTP request (used for access scoping)
    :param pool: Pool to report on
    :param obj_profile: optional object profile filter
    :return: report dataset
    """
    problems = {}  # id -> problem
    mos = ManagedObject.objects.filter(is_managed=True, pool=pool)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if obj_profile:
        # Get all managed objects
        mos = mos.filter(object_profile=obj_profile)
    # id -> (name, address, Profile, platform id, segment id)
    mos = {
        mo[0]: (mo[1], mo[2], Profile.get_by_id(mo[3]), mo[4], mo[5])
        for mo in mos.values_list("id", "name", "address", "profile", "platform", "segment")
    }
    mos_set = set(mos)
    # Get all managed objects with generic profile
    for mo in mos:
        if mos[mo][2].name == GENERIC_PROFILE:
            problems[mo] = _("Profile check failed")
    # Get all managed objects without interfaces
    if_mo = dict(
        (x["_id"], x.get("managed_object"))
        for x in Interface._get_collection().find({}, {"_id": 1, "managed_object": 1})
    )
    for mo in mos_set - set(problems) - set(six.itervalues(if_mo)):
        problems[mo] = _("No interfaces")
    # Get all managed objects without links
    linked_mos = set()
    for d in Link._get_collection().find({}):
        for i in d["interfaces"]:
            linked_mos.add(if_mo.get(i))
    for mo in mos_set - set(problems) - linked_mos:
        problems[mo] = _("No links")
    # Get all managed objects without uplinks
    uplinks = {}
    for d in ObjectData._get_collection().find():
        nu = len(d.get("uplinks", []))
        if nu:
            uplinks[d["_id"]] = nu
    for mo in mos_set - set(problems) - set(uplinks):
        problems[mo] = _("No uplinks")
    #
    data = []
    for mo_id in problems:
        if mo_id not in mos:
            continue
        name, address, profile, platform, segment = mos[mo_id]
        data += [
            [
                name,
                address,
                profile.name,
                Platform.get_by_id(platform).name if platform else "",
                NetworkSegment.get_by_id(segment).name if segment else "",
                problems[mo_id],
            ]
        ]
    data = sorted(data)
    return self.from_dataset(
        title=self.title,
        columns=["Name", "Address", "Profile", "Platform", "Segment", "Problem"],
        data=data,
        enumerate=True,
    )
def get_data(self):
    """
    Build the object card context for self.object: state/uptime,
    container path, MACs, hostname, links, interfaces with metrics,
    sensors, resource groups, alarms, maintenance and inventory.

    :return: dict consumed by the card template, or None when
             self.object is not set
    """
    # Unit suffixes for display_time, largest first
    intervals = (
        ("y", 31557617),  # 60 * 60 * 24 * 7 * 52
        ("w", 604800),  # 60 * 60 * 24 * 7
        ("d", 86400),  # 60 * 60 * 24
        ("h", 3600),  # 60 * 60
        ("m", 60),
        ("s", 1),
    )

    def display_time(seconds):
        # Render a duration as e.g. "1y, 2w, 3d"; note that the
        # smallest computed component is dropped by result[:-1]
        result = []
        for name, count in intervals:
            value = seconds // count
            if value:
                seconds -= value * count
                if value == 1:
                    name = name.rstrip("s")
                result.append("{}{}".format(value, name))
        return ", ".join(result[:-1])

    def sortdict(dct):
        # Return an OrderedDict with keys in sorted order
        kys = sorted(dct.keys())
        res = OrderedDict()
        for x in kys:
            for k, v in dct.items():
                if k == x:
                    res[k] = v
        return res

    def get_container_path(self):
        # Get container path (list of {"id", "name"} from root down)
        if not self.object:
            return None
        cp = []
        if self.object.container:
            c = self.object.container.id
            while c:
                try:
                    o = Object.objects.get(id=c)
                    # @todo: Address data
                    if o.container:
                        cp.insert(0, {"id": o.id, "name": o.name})
                    c = o.container.id if o.container else None
                except DoesNotExist:
                    metrics["error", ("type", "no_such_object")] += 1
                    break
        return cp

    if not self.object:
        return None
    # @todo: Stage
    # @todo: Service range
    # @todo: Open TT
    now = datetime.datetime.now()
    # Get object status and uptime
    alarms = list(ActiveAlarm.objects.filter(managed_object=self.object.id))
    current_start = None
    duration = None
    if self.object.is_managed:
        if self.object.get_status():
            if alarms:
                current_state = "alarm"
            else:
                current_state = "up"
            uptime = Uptime.objects.filter(object=self.object.id,
                                           stop=None).first()
            if uptime:
                current_start = uptime.start
        else:
            current_state = "down"
            outage = Outage.objects.filter(object=self.object.id,
                                           stop=None).first()
            if outage is not None:
                current_start = outage.start
    else:
        current_state = "unmanaged"
    if current_start:
        duration = now - current_start
    cp = get_container_path(self)
    # MAC addresses: collapse ranges to "first - last"
    macs = []
    o_macs = DiscoveryID.macs_for_object(self.object)
    if o_macs:
        for f, l in o_macs:
            if f == l:
                macs += [f]
            else:
                macs += ["%s - %s" % (f, l)]
    # Hostname
    hostname = ""
    did = DiscoveryID.objects.filter(object=self.object.id).first()
    if did and did.hostname:
        hostname = did.hostname
    # Links
    uplinks = set(self.object.data.uplinks)
    if len(uplinks) > 1:
        if self.object.segment.lost_redundancy:
            redundancy = "L"
        else:
            redundancy = "R"
    else:
        redundancy = "N"
    links = []
    for _link in Link.object_links(self.object):
        local_interfaces = []
        remote_interfaces = []
        remote_objects = set()
        for iface in _link.interfaces:
            if iface.managed_object.id == self.object.id:
                local_interfaces += [iface]
            else:
                remote_interfaces += [iface]
                remote_objects.add(iface.managed_object)
        # Only point-to-(single-)object links are shown on the card
        if len(remote_objects) == 1:
            ro = remote_objects.pop()
            if ro.id in uplinks:
                role = "uplink"
            else:
                role = "downlink"
            links += [
                {
                    "id": _link.id,
                    "role": role,
                    "local_interface": sorted(
                        local_interfaces, key=lambda x: alnum_key(x.name)
                    ),
                    "remote_object": ro,
                    "remote_interface": sorted(
                        remote_interfaces, key=lambda x: alnum_key(x.name)
                    ),
                    "remote_status": "up" if ro.get_status() else "down",
                }
            ]
    # Uplinks first, then natural order of first local interface name
    links = sorted(
        links,
        key=lambda x: (x["role"] != "uplink", alnum_key(x["local_interface"][0].name)),
    )
    # Build global services summary
    service_summary = ServiceSummary.get_object_summary(self.object)
    # Interfaces
    interfaces = []
    mo = ManagedObject.objects.filter(id=self.object.id)
    mo = mo[0]
    ifaces_metrics, last_ts = get_interface_metrics(mo)
    ifaces_metrics = ifaces_metrics[mo]
    objects_metrics, last_time = get_objects_metrics(mo)
    objects_metrics = objects_metrics.get(mo)
    # Sensors
    sensors_metrics = None
    s_metrics = None
    sensors = {}
    s_meta = []
    STATUS = {0: "OK", 1: "Alarm"}
    meric_map = {}
    if mo.get_caps().get("Sensor | Controller"):
        # Build field_name -> metric name map for Environment metrics
        for mc in MetricType.objects.filter(scope=MetricScope.objects.get(name="Environment")):
            if meric_map:
                meric_map["map"].update({mc.field_name: mc.name})
            else:
                meric_map = {"table_name": mc.scope.table_name, "map": {mc.field_name: mc.name}}
        sensors_metrics, last_ts = get_interface_metrics(mo, meric_map)
        sensors_metrics = sensors_metrics[mo]
    # metric type name -> threshold profile
    m_tp = {}
    if mo.object_profile.metrics:
        for mt in mo.object_profile.metrics:
            if mt.get("threshold_profile"):
                threshold_profile = ThresholdProfile.get_by_id(mt.get("threshold_profile"))
                m_tp[MetricType.get_by_id(mt.get("metric_type")).name] = threshold_profile
    data = {}
    meta = []
    metric_type_name = dict(MetricType.objects.filter().scalar("name", "measure"))
    metric_type_field = dict(MetricType.objects.filter().scalar("field_name", "measure"))
    if objects_metrics:
        for path, mres in objects_metrics.items():
            t_v = False
            for key in mres:
                m_path = path if any(path.split("|")) else key
                m_path = " | ".join(kk.strip() for kk in m_path.split("|"))
                if m_tp.get(key):
                    t_v = self.get_threshold_config(m_tp.get(key), int(mres[key]))
                val = {
                    "name": m_path,
                    # SysUptime is rendered as a duration, not a measure
                    "type": "" if m_path == "Object | SysUptime" else metric_type_name[key],
                    "value": display_time(int(mres[key]))
                    if m_path == "Object | SysUptime"
                    else mres[key],
                    "threshold": t_v,
                }
                if data.get(key):
                    data[key] += [val]
                else:
                    data[key] = [val]
        data = sortdict(data)
        for k, d in data.items():
            collapsed = False
            if len(d) == 1:
                collapsed = True
            for dd in d:
                isdanger = False
                if dd["threshold"]:
                    isdanger = True
                    collapsed = True
            meta.append({"name": k, "value": d, "collapsed": collapsed, "isdanger": isdanger})
    for i in Interface.objects.filter(managed_object=self.object.id, type="physical"):
        load_in = "-"
        load_out = "-"
        errors_in = "-"
        errors_out = "-"
        iface_metrics = ifaces_metrics.get(str(i.name))
        if iface_metrics:
            for key, value in iface_metrics.items():
                metric_type = metric_type_name.get(key) or metric_type_field.get(key)
                if key == "Interface | Load | In":
                    load_in = (
                        "%s%s" % (self.humanize_speed(value, metric_type), metric_type)
                        if value
                        else "-"
                    )
                if key == "Interface | Load | Out":
                    load_out = (
                        "%s%s" % (self.humanize_speed(value, metric_type), metric_type)
                        if value
                        else "-"
                    )
                if key == "Interface | Errors | In":
                    errors_in = value if value else "-"
                if key == "Interface | Errors | Out":
                    errors_out = value if value else "-"
        interfaces += [
            {
                "id": i.id,
                "name": i.name,
                "admin_status": i.admin_status,
                "oper_status": i.oper_status,
                "mac": i.mac or "",
                "full_duplex": i.full_duplex,
                "load_in": load_in,
                "load_out": load_out,
                "errors_in": errors_in,
                "errors_out": errors_out,
                "speed": max([i.in_speed or 0, i.out_speed or 0]) / 1000,
                "untagged_vlan": None,
                "tagged_vlan": None,
                "profile": i.profile,
                "service": i.service,
                "service_summary": service_summary.get("interface").get(i.id, {}),
                "description": i.description,
            }
        ]
        if sensors_metrics:
            s_metrics = sensors_metrics.get(str(i.name))
        if s_metrics:
            # Only metrics declared in the interface profile are shown
            sens_metrics = []
            for i_metrics in i.profile.metrics:
                sens_metrics.append(i_metrics.metric_type.name)
            for key, value in s_metrics.items():
                if key not in sens_metrics:
                    continue
                val = {
                    "name": key,
                    "type": metric_type_name[key],
                    # Unitless (" ") metrics are mapped to OK/Alarm status
                    "value": STATUS.get(value) if metric_type_name[key] == " " else value,
                    "threshold": None,
                }
                if sensors.get(i.name):
                    sensors[i.name] += [val]
                else:
                    sensors[i.name] = [val]
        si = list(i.subinterface_set.filter(enabled_afi="BRIDGE"))
        if len(si) == 1:
            si = si[0]
            interfaces[-1]["untagged_vlan"] = si.untagged_vlan
            # NOTE(review): written as "tagged_vlans" while the record is
            # initialized with "tagged_vlan" -- confirm the template key
            interfaces[-1]["tagged_vlans"] = list_to_ranges(si.tagged_vlans).replace(",", ", ")
    if sensors:
        sensors = sortdict(sensors)
        for k, d in sensors.items():
            for dd in d:
                isdanger = False
                if dd["threshold"]:
                    isdanger = True
            s_meta.append({"name": k, "value": d, "isdanger": isdanger})
    interfaces = sorted(interfaces, key=lambda x: alnum_key(x["name"]))
    # Resource groups
    # Service groups (i.e. server)
    static_services = set(self.object.static_service_groups)
    service_groups = []
    for rg_id in self.object.effective_service_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        service_groups += [
            {
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_services,
            }
        ]
    # Client groups (i.e. client)
    static_clients = set(self.object.static_client_groups)
    client_groups = []
    for rg_id in self.object.effective_client_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        client_groups += [
            {
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_clients,
            }
        ]
    # @todo: Administrative domain path
    # Alarms
    alarm_list = []
    for a in alarms:
        alarm_list += [
            {
                "id": a.id,
                "root_id": self.get_root(alarms),
                "timestamp": a.timestamp,
                "duration": now - a.timestamp,
                "subject": a.subject,
                "managed_object": a.managed_object,
                "service_summary": {
                    "service": SummaryItem.items_to_dict(a.total_services),
                    "subscriber": SummaryItem.items_to_dict(a.total_subscribers),
                },
                "alarm_class": a.alarm_class,
            }
        ]
    alarm_list = sorted(alarm_list, key=operator.itemgetter("timestamp"))
    # Maintenance: active or starting within the next hour
    maintenance = []
    for m in Maintenance.objects.filter(
        affected_objects__object=self.object.id,
        is_completed=False,
        start__lte=now + datetime.timedelta(hours=1),
    ):
        maintenance += [
            {
                "maintenance": m,
                "id": m.id,
                "subject": m.subject,
                "start": m.start,
                "stop": m.stop,
                "in_progress": m.start <= now,
            }
        ]
    # Get Inventory
    inv = []
    for p in self.object.get_inventory():
        c = self.get_nested_inventory(p)
        c["name"] = p.name or self.object.name
        inv += [c]
    # Build result
    if self.object.platform is not None:
        platform = self.object.platform.name
    else:
        platform = "Unknown"
    if self.object.version is not None:
        version = self.object.version.version
    else:
        version = ""
    r = {
        "id": self.object.id,
        "object": self.object,
        "name": self.object.name,
        "address": self.object.address,
        "platform": platform,
        "version": version,
        "description": self.object.description,
        "object_profile": self.object.object_profile.id,
        "object_profile_name": self.object.object_profile.name,
        "hostname": hostname,
        "macs": ", ".join(sorted(macs)),
        "segment": self.object.segment,
        "firmware_status": FirmwarePolicy.get_status(self.object.platform, self.object.version),
        "firmware_recommended": FirmwarePolicy.get_recommended_version(self.object.platform),
        "service_summary": service_summary,
        "container_path": cp,
        "current_state": current_state,
        # Start of uptime/downtime
        "current_start": current_start,
        # Current uptime/downtime
        "current_duration": duration,
        "service_groups": service_groups,
        "client_groups": client_groups,
        "tt": [],
        "links": links,
        "alarms": alarm_list,
        "interfaces": interfaces,
        "metrics": meta,
        "sensors": s_meta,
        "maintenance": maintenance,
        "redundancy": redundancy,
        "inventory": self.flatten_inventory(inv),
        "serial_number": self.object.get_attr("Serial Number"),
        "attributes": list(
            ManagedObjectAttribute.objects.filter(managed_object=self.object.id)
        ),
        "confdb": None,
    }
    # ConfDB may be unavailable or unparseable -- leave None in that case
    try:
        r["confdb"] = self.object.get_confdb()
    except (SyntaxError, ValueError):
        pass
    return r
def get_data(self):
    """
    Assemble the full data dictionary for the managed-object card.

    Collects, in order: object status/uptime, container path, MAC ranges,
    links (with uplink/downlink role), service summary, per-interface
    metrics, resource groups, active alarms, upcoming maintenance and
    inventory, then packs everything into a single result dict.

    :return: dict of card data, or None when ``self.object`` is not set
    """
    def get_container_path(self):
        # Build the container path (outermost first) by walking
        # ``container`` references upwards until the chain ends.
        if not self.object:
            return None
        cp = []
        if self.object.container:
            c = self.object.container.id
            while c:
                try:
                    o = Object.objects.get(id=c)
                    # @todo: Address data
                    if o.container:
                        cp.insert(0, {"id": o.id, "name": o.name})
                    c = o.container.id if o.container else None
                except DoesNotExist:
                    # NOTE(review): ``metrics`` is a module-level counter
                    # not visible in this chunk — presumably a Counter-like
                    # error metric; verify at file top.
                    metrics["error", ("type", "no_such_object")] += 1
                    break
        return cp

    if not self.object:
        return None
    # @todo: Stage
    # @todo: Service range
    # @todo: Open TT
    now = datetime.datetime.now()
    # Get object status and uptime.
    alarms = list(
        ActiveAlarm.objects.filter(managed_object=self.object.id))
    current_start = None
    duration = None
    if self.object.is_managed:
        if self.object.get_status():
            # Object is up; "alarm" wins over "up" when active alarms exist.
            if alarms:
                current_state = "alarm"
            else:
                current_state = "up"
            # Open uptime interval (stop=None) marks when this state began.
            uptime = Uptime.objects.filter(object=self.object.id,
                                           stop=None).first()
            if uptime:
                current_start = uptime.start
        else:
            current_state = "down"
            # Open outage interval gives the start of the downtime.
            outage = Outage.objects.filter(object=self.object.id,
                                           stop=None).first()
            if outage is not None:
                current_start = outage.start
    else:
        current_state = "unmanaged"
    if current_start:
        duration = now - current_start
    cp = get_container_path(self)
    # MAC addresses: collapse each (first, last) range to a single value
    # when the range has one element, else render "first - last".
    macs = []
    o_macs = DiscoveryID.macs_for_object(self.object)
    if o_macs:
        for f, l in o_macs:
            if f == l:
                macs += [f]
            else:
                macs += ["%s - %s" % (f, l)]
    # Links.
    uplinks = set(self.object.data.uplinks)
    if len(uplinks) > 1:
        # Multiple uplinks => redundant topology; "L" when the segment
        # reports lost redundancy, "R" when redundancy is intact.
        if self.object.segment.lost_redundancy:
            redundancy = "L"
        else:
            redundancy = "R"
    else:
        redundancy = "N"
    links = []
    for l in Link.object_links(self.object):
        local_interfaces = []
        remote_interfaces = []
        remote_objects = set()
        # Split link interfaces into our side and the remote side.
        for i in l.interfaces:
            if i.managed_object.id == self.object.id:
                local_interfaces += [i]
            else:
                remote_interfaces += [i]
                remote_objects.add(i.managed_object)
        # Only point-to-point links (exactly one remote object) are shown.
        if len(remote_objects) == 1:
            ro = remote_objects.pop()
            if ro.id in uplinks:
                role = "uplink"
            else:
                role = "downlink"
            links += [{
                "id": l.id,
                "role": role,
                "local_interface": sorted(
                    local_interfaces,
                    key=lambda x: split_alnum(x.name)),
                "remote_object": ro,
                "remote_interface": sorted(
                    remote_interfaces,
                    key=lambda x: split_alnum(x.name)),
                "remote_status": "up" if ro.get_status() else "down",
            }]
    # Uplinks first, then natural sort by first local interface name.
    links = sorted(
        links,
        key=lambda x: (x["role"] != "uplink",
                       split_alnum(x["local_interface"][0].name)),
    )
    # Build global services summary.
    service_summary = ServiceSummary.get_object_summary(self.object)
    # Interfaces.
    interfaces = []
    # Only these metric names are rendered per interface.
    metrics_map = [
        "Interface | Load | In",
        "Interface | Load | Out",
        "Interface | Errors | In",
        "Interface | Errors | Out",
    ]
    mo = ManagedObject.objects.filter(id=self.object.id)
    ifaces_metrics, last_ts = get_interface_metrics(mo[0])
    # NOTE(review): direct indexing here will raise KeyError when the
    # object has no interface metrics, while the object-metrics lookup
    # below uses .get() — confirm the asymmetry is intended.
    ifaces_metrics = ifaces_metrics[mo[0]]
    objects_metrics, last_time = get_objects_metrics(mo[0])
    objects_metrics = objects_metrics.get(mo[0])
    meta = {}
    # Measure units keyed both by metric name and by field name, so a
    # metric can be resolved through either identifier below.
    metric_type_name = dict(MetricType.objects.filter().scalar(
        "name", "measure"))
    metric_type_field = dict(MetricType.objects.filter().scalar(
        "field_name", "measure"))
    if objects_metrics:
        for path, mres in six.iteritems(objects_metrics):
            for key in mres:
                # Prefix metric name with its path when the path is
                # non-empty.
                metric_name = "%s | %s" % (key, path) if any(
                    path.split("|")) else key
                meta[metric_name] = {
                    "type": metric_type_name[key],
                    "value": mres[key]
                }
    for i in Interface.objects.filter(managed_object=self.object.id,
                                      type="physical"):
        # "-" is the placeholder for a missing/zero metric value.
        load_in = "-"
        load_out = "-"
        errors_in = "-"
        errors_out = "-"
        iface_metrics = ifaces_metrics.get(str(i.name))
        if iface_metrics:
            for key, value in six.iteritems(iface_metrics):
                if key not in metrics_map:
                    continue
                metric_type = metric_type_name.get(
                    key) or metric_type_field.get(key)
                if key == "Interface | Load | In":
                    load_in = ("%s%s" % (self.humanize_speed(
                        value, metric_type), metric_type)
                               if value else "-")
                if key == "Interface | Load | Out":
                    load_out = ("%s%s" % (self.humanize_speed(
                        value, metric_type), metric_type)
                                if value else "-")
                if key == "Interface | Errors | In":
                    errors_in = value if value else "-"
                if key == "Interface | Errors | Out":
                    errors_out = value if value else "-"
        interfaces += [{
            "id": i.id,
            "name": i.name,
            "admin_status": i.admin_status,
            "oper_status": i.oper_status,
            "mac": i.mac or "",
            "full_duplex": i.full_duplex,
            "load_in": load_in,
            "load_out": load_out,
            "errors_in": errors_in,
            "errors_out": errors_out,
            # Max of in/out speed, converted from kbit/s units to Mbit/s
            # presumably — TODO confirm units against Interface model.
            "speed": max([i.in_speed or 0, i.out_speed or 0]) / 1000,
            "untagged_vlan": None,
            # NOTE(review): initialized as "tagged_vlan" but populated
            # below as "tagged_vlans" — the singular key is never filled;
            # confirm which key the template reads.
            "tagged_vlan": None,
            "profile": i.profile,
            "service": i.service,
            "service_summary": service_summary.get("interface").get(
                i.id, {}),
            "description": i.description,
        }]
        # Attach VLAN info when the interface has exactly one BRIDGE
        # subinterface.
        si = list(i.subinterface_set.filter(enabled_afi="BRIDGE"))
        if len(si) == 1:
            si = si[0]
            interfaces[-1]["untagged_vlan"] = si.untagged_vlan
            interfaces[-1]["tagged_vlans"] = list_to_ranges(
                si.tagged_vlans).replace(",", ", ")
    interfaces = sorted(interfaces, key=lambda x: split_alnum(x["name"]))
    # Resource groups
    # Service groups (i.e. server)
    static_services = set(self.object.static_service_groups)
    service_groups = []
    for rg_id in self.object.effective_service_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        service_groups += [{
            "id": rg_id,
            "name": rg.name,
            "technology": rg.technology,
            # Static membership vs. dynamically computed membership.
            "is_static": rg_id in static_services,
        }]
    # Client groups (i.e. client)
    static_clients = set(self.object.static_client_groups)
    client_groups = []
    for rg_id in self.object.effective_client_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        client_groups += [{
            "id": rg_id,
            "name": rg.name,
            "technology": rg.technology,
            "is_static": rg_id in static_clients,
        }]
    # @todo: Administrative domain path
    # Alarms
    alarm_list = []
    for a in alarms:
        alarm_list += [{
            "id": a.id,
            # NOTE(review): get_root receives the whole ``alarms`` list,
            # not the current alarm ``a`` — every entry gets the same
            # root_id; confirm against get_root's signature.
            "root_id": self.get_root(alarms),
            "timestamp": a.timestamp,
            "duration": now - a.timestamp,
            "subject": a.subject,
            "managed_object": a.managed_object,
            "service_summary": {
                "service": SummaryItem.items_to_dict(a.total_services),
                "subscriber": SummaryItem.items_to_dict(
                    a.total_subscribers),
            },
            "alarm_class": a.alarm_class,
        }]
    alarm_list = sorted(alarm_list, key=operator.itemgetter("timestamp"))
    # Maintenance: incomplete entries starting within the next hour.
    maintenance = []
    for m in Maintenance.objects.filter(
            affected_objects__object=self.object.id,
            is_completed=False,
            start__lte=now + datetime.timedelta(hours=1),
    ):
        maintenance += [{
            "maintenance": m,
            "id": m.id,
            "subject": m.subject,
            "start": m.start,
            "stop": m.stop,
            "in_progress": m.start <= now,
        }]
    # Get Inventory
    inv = []
    for p in self.object.get_inventory():
        c = self.get_nested_inventory(p)
        c["name"] = p.name or self.object.name
        inv += [c]
    # Build result
    if self.object.platform is not None:
        platform = self.object.platform.name
    else:
        platform = "Unknown"
    if self.object.version is not None:
        version = self.object.version.version
    else:
        version = ""
    r = {
        "id": self.object.id,
        "object": self.object,
        "name": self.object.name,
        "address": self.object.address,
        "platform": platform,
        "version": version,
        "description": self.object.description,
        "object_profile": self.object.object_profile.id,
        "object_profile_name": self.object.object_profile.name,
        "macs": ", ".join(sorted(macs)),
        "segment": self.object.segment,
        "firmware_status": FirmwarePolicy.get_status(
            self.object.platform, self.object.version),
        "firmware_recommended": FirmwarePolicy.get_recommended_version(
            self.object.platform),
        "service_summary": service_summary,
        "container_path": cp,
        "current_state": current_state,
        # Start of uptime/downtime
        "current_start": current_start,
        # Current uptime/downtime
        "current_duration": duration,
        "service_groups": service_groups,
        "client_groups": client_groups,
        "tt": [],
        "links": links,
        "alarms": alarm_list,
        "interfaces": interfaces,
        "metrics": meta,
        "maintenance": maintenance,
        "redundancy": redundancy,
        "inventory": self.flatten_inventory(inv),
        "serial_number": self.object.get_attr("Serial Number"),
        "attributes": list(
            ManagedObjectAttribute.objects.filter(
                managed_object=self.object.id)),
        # NOTE(review): a sibling variant of this method wraps
        # get_confdb() in try/except (SyntaxError, ValueError); this one
        # lets those propagate — confirm which behavior is wanted.
        "confdb": self.object.get_confdb(),
    }
    return r
def field_link_count(self, o):
    """
    Report the number of links attached to managed object *o*.

    Thin delegation to :meth:`Link.object_links_count`.

    :param o: managed object to count links for
    :return: link count as reported by the Link collection
    """
    n_links = Link.object_links_count(o)
    return n_links