def _apply_interfaces(mo, r):
    """
    Fill r["interfaces"] with serialized interface records for managed object *mo*.

    Collects the object's interfaces, their subinterfaces and links, builds an
    interface-id lookup cache (including remote link endpoints) and delegates
    per-interface serialization to ManagedObjectDataStream._get_interface.

    :param mo: Managed object (must have an ``id`` attribute)
    :param r: Result dict to mutate in place
    """
    # id -> (object id, name)
    ifcache = {}
    # Get interfaces, naturally sorted by name (split_alnum: Gi0/2 < Gi0/10)
    interfaces = sorted(
        Interface._get_collection().find({"managed_object": mo.id}),
        key=lambda x: split_alnum(x["name"]),
    )
    # Populate cache
    for i in interfaces:
        ifcache[i["_id"]] = (i["managed_object"], i["name"])
    # Get subs: interface id -> list of subinterface documents
    subs = defaultdict(list)
    for s in SubInterface._get_collection().find({"managed_object": mo.id}):
        subs[s["interface"]] += [s]
    # Get links: interface id -> list of link documents
    links = defaultdict(list)
    for l in Link._get_collection().find({"linked_objects": mo.id}):
        for li in l.get("interfaces", []):
            links[li] += [l]
    # Populate cache with linked interfaces (remote endpoints may belong to
    # other managed objects and are not in *interfaces*)
    if links:
        for i in Interface._get_collection().find(
            {"_id": {"$in": list(links)}}, {"_id": 1, "managed_object": 1, "name": 1}
        ):
            ifcache[i["_id"]] = (i["managed_object"], i["name"])
    # Populate
    r["interfaces"] = [
        ManagedObjectDataStream._get_interface(i, subs[i["_id"]], links[i["_id"]], ifcache)
        for i in interfaces
    ]
def unbind_interface(self):
    """
    Detach this service from all interfaces bound to it.

    Clears the ``service`` field on every Interface document referring to
    this service, then schedules a managed object refresh.
    """
    from noc.inv.models.interface import Interface

    # Collection.update() is deprecated and removed in modern PyMongo; with
    # its default multi=False it would also unbind only a single interface.
    # update_many clears the binding on every matching interface.
    Interface._get_collection().update_many(
        {"service": self.id}, {"$unset": {"service": ""}}
    )
    self._refresh_managed_object()
def submit_interface(
    self,
    name,
    type,
    default_name=None,
    mac=None,
    description=None,
    aggregated_interface=None,
    enabled_protocols=None,
    ifindex=None,
    hints=None,
):
    """
    Create or update an interface record from discovery data.

    If an interface with *name* already exists, apply only the changed
    fields (via update_if_changed) and log the diff; otherwise create and
    save a new Interface. The interface is registered in the per-session
    cache and its MAC (if any) added to self.interface_macs.

    :param name: Interface name
    :param type: Interface type (e.g. "physical")
    :param default_name: Vendor default name, if renamed
    :param mac: Interface MAC address
    :param description: Interface description
    :param aggregated_interface: Parent LAG interface, if member
    :param enabled_protocols: List of enabled protocols (defaults to [])
    :param ifindex: SNMP ifIndex
    :param hints: Additional discovery hints (defaults to [])
    :return: The up-to-date Interface instance
    """
    enabled_protocols = enabled_protocols or []
    iface = self.get_interface_by_name(name)
    if iface:
        # Empty values for these fields are not treated as changes
        ignore_empty = ["ifindex"]
        if self.is_confdb_source:
            # ConfDB sources may not report MACs; do not wipe existing ones
            ignore_empty = ["ifindex", "mac"]
        # Interface exists
        changes = self.update_if_changed(
            iface,
            {
                "default_name": default_name,
                "type": type,
                "mac": mac,
                "description": description,
                "aggregated_interface": aggregated_interface,
                "enabled_protocols": enabled_protocols,
                "ifindex": ifindex,
                "hints": hints or [],
            },
            ignore_empty=ignore_empty,
        )
        self.log_changes("Interface '%s' has been changed" % name, changes)
    else:
        # Create interface
        self.logger.info("Creating interface '%s'", name)
        iface = Interface(
            managed_object=self.object.id,
            name=name,
            type=type,
            mac=mac,
            description=description,
            aggregated_interface=aggregated_interface,
            enabled_protocols=enabled_protocols,
            ifindex=ifindex,
        )
        iface.save()
    self.set_interface(name, iface)
    if mac:
        self.interface_macs.add(mac)
    return iface
def extract(self):
    """
    Yield one BI row per physical interface.

    Row layout: (managed object BI id, name, description, profile name,
    speed in bit/s, is_uni flag).
    """
    # ManagedObject id -> BI id
    bi_map = dict(ManagedObject.objects.filter().values_list("id", "bi_id"))
    # InterfaceProfile id -> (name, is_uni as int)
    profile_map = {}
    for p_id, p_name, p_is_uni in InterfaceProfile.objects.filter().values_list(
        "id", "name", "is_uni"
    ):
        profile_map[p_id] = (p_name, int(p_is_uni or 0))
    coll = Interface._get_collection().with_options(
        read_preference=ReadPreference.SECONDARY_PREFERRED
    )
    projection = {
        "_id": 0,
        "managed_object": 1,
        "name": 1,
        "description": 1,
        "profile": 1,
        "in_speed": 1,
    }
    for doc in coll.find({"type": "physical"}, projection).sort("managed_object"):
        prof_name, is_uni = profile_map[doc["profile"]]
        yield (
            bi_map[doc["managed_object"]],
            doc["name"],
            doc.get("description", ""),
            prof_name,
            # Stored speed is in kbit/s -> convert to bit/s; some vendors
            # report -1 for a down interface, hence abs()
            abs(doc.get("in_speed", 0)) * 1000,
            is_uni,
        )
def set_nri_aliases(self, mo):
    """
    Fill interface alias cache with nri names

    :param mo: Managed object to process
    :return:
    """
    if mo in self.seen_neighbors:
        # Already processed
        return
    n_found = 0
    query = {"managed_object": mo.id, "nri_name": {"$exists": True}}
    fields = {"_id": 0, "name": 1, "nri_name": 1}
    for doc in Interface._get_collection().find(query, fields):
        self.set_interface_alias(mo, doc["name"], doc["nri_name"])
        n_found += 1
    self.seen_neighbors.add(mo)
    if not n_found:
        self.logger.info(
            "[%s] Object has no nri interface name. Topology may be incomplete", mo.name
        )
def get_linked_pops(self):
    """
    Return the set of PoP objects linked to this PoP.

    Walks links attached to this PoP's interfaces, collects the managed
    objects on the far side and resolves them back to PoPs via inventory
    Object data references.
    """
    linked = set()
    self.pops = set(self.get_pop_objects())
    # Ids of all interfaces belonging to this PoP's managed objects
    self_interfaces = set(
        Interface.objects.filter(managed_object__in=self.pops).values_list("id")
    )
    r_ifaces = set()
    # Links touching our interfaces; gather every interface on those links
    for ld in Link._get_collection().find(
        {"interfaces": {"$in": list(self_interfaces)}}, {"_id": 0, "interfaces": 1}
    ):
        r_ifaces |= set(ld.get("interfaces", []))
    # Keep only the remote (far-side) interfaces
    r_ifaces -= self_interfaces
    # Managed objects owning the remote interfaces
    r_mos = set(
        i["managed_object"]
        for i in Interface._get_collection().find(
            {"_id": {"$in": list(r_ifaces)}}, {"_id": 0, "managed_object": 1}
        )
    )
    # Inventory objects whose management data references those managed objects
    for o in Object.objects.filter(
        data__match={
            "interface": "management",
            "attr": "managed_object",
            "value__in": list(r_mos),
        }
    ):
        pop = o.get_pop()
        if pop:
            linked.add(pop)
    return linked
def _get_loopback_addresses(cls, mo_id):
    """
    Return IPv4 addresses (without prefix length) configured on the
    loopback interfaces of the given managed object.

    :param mo_id: Managed object id
    :return: List of address strings; empty list when no loopbacks exist
    """
    from noc.inv.models.interface import Interface
    from noc.inv.models.subinterface import SubInterface

    # Collect ids of all loopback interfaces
    if_ids = [
        doc["_id"]
        for doc in Interface._get_collection().find(
            {"managed_object": int(mo_id), "type": "loopback"}, {"_id": 1}
        )
    ]
    if not if_ids:
        return []
    # Gather IPv4 addresses from their subinterfaces
    addresses = []
    query = {
        "managed_object": int(mo_id),
        "interface": {"$in": if_ids},
        "ipv4_addresses": {"$exists": True},
    }
    for doc in SubInterface._get_collection().find(query, {"_id": 0, "ipv4_addresses": 1}):
        for addr in doc.get("ipv4_addresses", []):
            # Strip the prefix length: "10.0.0.1/32" -> "10.0.0.1"
            addresses += [str(addr).split("/")[0]]
    return addresses
def load(mo_ids, zero, def_profile, interface_profile):
    """
    Aggregate administratively-up physical interfaces with their
    subinterfaces joined in.

    :param mo_ids: Managed object ids to select
    :param zero: When truthy, restrict to operationally up interfaces
    :param def_profile: When truthy (and no explicit interface_profile),
        exclude interfaces whose profile name contains "default"
    :param interface_profile: Optional interface profile id to filter by
    :return: Aggregation cursor; each document carries a "subs" array
    """
    q = {
        "managed_object": {"$in": mo_ids},
        "type": {"$in": ["physical"]},
        "admin_status": True,
    }
    if interface_profile:
        q["profile"] = {"$in": [ObjectId(str(interface_profile))]}
    if zero:
        # Only operationally up interfaces
        q["oper_status"] = True
    if def_profile and interface_profile is None:
        # Skip interfaces left on any "default" profile
        default_ids = [
            pr.id for pr in InterfaceProfile.objects.filter(name__contains="default")
        ]
        q["profile"] = {"$nin": default_ids}
    pipeline = [
        {"$match": q},
        {
            "$lookup": {
                "from": "noc.subinterfaces",
                "localField": "_id",
                "foreignField": "interface",
                "as": "subs",
            }
        },
    ]
    coll = Interface._get_collection().with_options(
        read_preference=ReadPreference.SECONDARY_PREFERRED
    )
    return coll.aggregate(pipeline)
def bulk_field_interface_count(self, data):
    """
    Apply interface_count fields

    :param data: List of dicts, each with an "id" key (managed object id)
    :return: The same list, with "interface_count" set on every item
    """
    ids = [row["id"] for row in data]
    if not ids:
        return data
    # Count physical interfaces per managed object
    pipeline = [
        {"$match": {"managed_object": {"$in": ids}, "type": "physical"}},
        {"$group": {"_id": "$managed_object", "total": {"$sum": 1}}},
    ]
    counts = {
        rec["_id"]: rec["total"]
        for rec in Interface._get_collection().aggregate(pipeline)
    }
    # Objects without interfaces get 0
    for row in data:
        row["interface_count"] = counts.get(row["id"]) or 0
    return data
def extract(self):
    """
    Yield one BI row per interface that carries a description.

    Row layout: (managed object BI id, name, description, profile name,
    in_speed, is_uni flag).

    NOTE(review): unlike the physical-interface extractor, in_speed is
    passed through raw here (no abs(), no kbit/s -> bit/s conversion) —
    confirm whether that asymmetry is intentional.
    """
    # ManagedObject id -> BI id
    mos_id = dict(ManagedObject.objects.filter().values_list("id", "bi_id"))
    # InterfaceProfile id -> (name, is_uni as int)
    iface_prof = {
        i[0]: (i[1], int(i[2] or 0))
        for i in InterfaceProfile.objects.filter().values_list("id", "name", "is_uni")
    }
    # Read from secondary when available to offload the primary
    ifs = Interface._get_collection().with_options(
        read_preference=ReadPreference.SECONDARY_PREFERRED
    )
    for iface in ifs.find(
        {"description": {"$exists": True}},
        {
            "_id": 0,
            "managed_object": 1,
            "name": 1,
            "description": 1,
            "profile": 1,
            "in_speed": 1,
        },
    ).sort("managed_object"):
        yield (
            mos_id[iface["managed_object"]],
            iface["name"],
            iface.get("description", ""),
            iface_prof[iface["profile"]][0],
            iface.get("in_speed", 0),
            iface_prof[iface["profile"]][1],
        )
def handler(self):
    """
    NRI portmapper discovery check.

    Maps each physical interface's local name to its remote-system (NRI)
    name via the configured portmapper and bulk-updates the stored
    nri_name where it changed. Skips objects without NRI integration,
    without discovered interfaces, or without a portmapper.
    """
    self.logger.info("NRI Portmapper")
    if not self.object.remote_system:
        self.logger.info("Created directly. No NRI integration. Skipping check")
        return
    nri = self.object.remote_system.name
    # Check object has interfaces
    if not self.has_capability("DB | Interfaces"):
        self.logger.info("No interfaces discovered. " "Skipping interface status check")
        return
    # Get portmapper instance
    portmapper = portmapper_loader.get_loader(self.object.remote_system.name)(self.object)
    if not portmapper:
        self.logger.info("[%s] No portmapper for NRI. Skipping checks", nri)
        return
    # Process interfaces, collecting updates for a single bulk write
    bulk = []
    icol = Interface._get_collection()
    for d in icol.find(
        {"managed_object": self.object.id, "type": "physical"},
        {"_id": 1, "name": 1, "nri_name": 1},
    ):
        nri_name = portmapper.to_remote(d["name"])
        self.logger.debug("[%s] Port mapping %s <-> %s", nri, d["name"], nri_name)
        if not nri_name:
            self.logger.info("[%s] Cannot map port name '%s'", nri, d["name"])
        elif d.get("nri_name") != nri_name:
            # Only write when the stored mapping differs
            self.logger.info("[%s] Mapping '%s' to '%s'", nri, nri_name, d["name"])
            bulk += [UpdateOne({"_id": d["_id"]}, {"$set": {"nri_name": nri_name}})]
    if bulk:
        self.logger.info("Sending %d updates", len(bulk))
        icol.bulk_write(bulk)
def refine_ifindexes(self):
    """
    Fill in missing ifindex values for the object's interfaces.

    Finds interfaces whose stored ifindex is None, asks the device (via
    the get_ifindexes script) for the current mapping and saves the
    resolved values one by one so model signals fire.
    """
    missed_ifindexes = [
        x["name"]
        for x in Interface._get_collection().find(
            {"managed_object": self.object.id, "ifindex": None}, {"name": 1}
        )
    ]
    if not missed_ifindexes:
        return
    self.logger.info("Missed ifindexes for: %s", ", ".join(missed_ifindexes))
    r = self.object.scripts.get_ifindexes()
    if not r:
        return
    updates = {n: r[n] for n in missed_ifindexes if n in r}
    if not updates:
        return
    # .iteritems() was Python 2 only -- .items() works on Python 3
    for n, i in updates.items():
        iface = Interface.objects.filter(managed_object=self.object.id, name=n).first()
        if iface:
            # Fixed "infindex" typo in the log message
            self.logger.info("Set ifindex for %s: %s", n, i)
            iface.ifindex = i
            iface.save()  # Signals will be sent
def get_interface_metrics(self):
    """
    Populate metrics list with interface metrics

    Builds one metric request per (physical interface, enabled metric),
    honoring box/periodic enablement flags, and optionally one per
    subinterface when the interface profile allows subinterface metrics.
    Each request is registered in self.id_metrics keyed by a fresh id.

    :return: List of metric request dicts
    """
    # Lazily resolved subinterface map; only fetched when some profile
    # enables subinterface metrics
    subs = None
    metrics = []
    for i in (
        Interface._get_collection()
        .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
        .find(
            {"managed_object": self.object.id, "type": "physical"},
            {"_id": 1, "name": 1, "ifindex": 1, "profile": 1},
        )
    ):
        ipr = self.get_interface_profile_metrics(i["profile"])
        self.logger.debug("Interface %s. ipr=%s", i["name"], ipr)
        if not ipr:
            continue  # No metrics configured
        i_profile = InterfaceProfile.get_by_id(i["profile"])
        if i_profile.allow_subinterface_metrics and subs is None:
            # Resolve subinterfaces once, on first need
            subs = self.get_subinterfaces()
        ifindex = i.get("ifindex")
        for metric in ipr:
            # Skip metrics disabled for the current discovery kind
            if (self.is_box and not ipr[metric].enable_box) or (
                self.is_periodic and not ipr[metric].enable_periodic
            ):
                continue
            m_id = next(self.id_count)
            # path: ["", "", "", interface name]
            m = {"id": m_id, "metric": metric, "path": ["", "", "", i["name"]]}
            if ifindex is not None:
                m["ifindex"] = ifindex
            metrics += [m]
            self.id_metrics[m_id] = ipr[metric]
            if i_profile.allow_subinterface_metrics:
                # One additional request per subinterface
                # (assumes subs maps interface _id -> subinterface list --
                # see get_subinterfaces)
                for si in subs[i["_id"]]:
                    m_id = next(self.id_count)
                    m = {
                        "id": m_id,
                        "metric": metric,
                        "path": ["", "", "", i["name"], si["name"]],
                    }
                    if si["ifindex"] is not None:
                        m["ifindex"] = si["ifindex"]
                    metrics += [m]
                    self.id_metrics[m_id] = ipr[metric]
    if not metrics:
        self.logger.info("Interface metrics are not configured. Skipping")
    return metrics
def interface_tags_to_id(cls, object_name, interface_name):
    """
    Resolve (managed object name, interface name) to the interface id.

    :param object_name: Managed object name
    :param interface_name: Interface name
    :return: Interface document id, or None when not found
    """
    mo_id = cls.managedobject_name_to_id(object_name)
    doc = Interface._get_collection().find_one(
        {"managed_object": mo_id, "name": interface_name}
    )
    return doc["_id"] if doc else None
def handle_add(self, *args, **options):
    """
    Add link

    :param args: Exactly two interface names to link
    :param options:
    :return:
    :raises CommandError: On bad usage, unknown interface or link failure
    """
    if len(args) != 2:
        raise CommandError("Usage: ./noc link --add <iface1> <iface2>")
    # Resolve both interfaces, failing fast on the first unknown one
    resolved = []
    for name in args:
        iface = Interface.get_interface(name)
        if not iface:
            raise CommandError("Invalid interface: %s" % name)
        resolved.append(iface)
    left, right = resolved
    try:
        left.link_ptp(right)
    except ValueError as why:
        raise CommandError(str(why))
def api_metrics(self, request, metrics):
    """
    Return current interface statuses and load metrics for the requested
    (object, interface) pairs.

    :param request: HTTP request
    :param metrics: List of dicts with "id", "metric" and "tags"
        (tags must contain "object" and "interface")
    :return: Dict: requested id -> {admin_status, oper_status,
        "Interface | Load | In"/"Out" values}
    """
    # NOTE(review): helper q() is defined but never used below
    def q(s):
        if isinstance(s, str):
            s = s.encode("utf-8")
        return s

    def qt(t):
        # Canonical "k=v|k=v" form of a tag dict, sorted by key
        return "|".join(["%s=%s" % (v, t[v]) for v in sorted(t)])

    # Build query
    tag_id = {}  # (object, interface) -> id
    if_ids = {}  # interface document id -> port id
    mlst = []  # (metric, object, interface)
    for m in metrics:
        if "object" in m["tags"] and "interface" in m["tags"]:
            if not m["tags"]["object"]:
                continue
            try:
                if_ids[
                    self.interface_tags_to_id(m["tags"]["object"], m["tags"]["interface"])
                ] = m["id"]
                object = ManagedObject.objects.get(name=m["tags"]["object"])
                tag_id[object, m["tags"]["interface"]] = m["id"]
                mlst += [(m["metric"], object, m["tags"]["interface"])]
            except KeyError:
                # Unresolvable object/interface -- skip this entry
                pass
    # @todo: Get last values from cache
    if not mlst:
        return {}
    r = {}
    # Apply interface statuses
    for d in Interface._get_collection().find(
        {"_id": {"$in": list(if_ids)}}, {"_id": 1, "admin_status": 1, "oper_status": 1}
    ):
        r[if_ids[d["_id"]]] = {
            "admin_status": d.get("admin_status", True),
            "oper_status": d.get("oper_status", True),
        }
    metric_map, last_ts = get_interface_metrics([m[1] for m in mlst])
    # Apply metrics
    for rq_mo, rq_iface in tag_id:
        pid = tag_id.get((rq_mo, rq_iface))
        if not pid:
            continue
        if pid not in r:
            r[pid] = {}
        if rq_mo not in metric_map:
            continue
        if rq_iface not in metric_map[rq_mo]:
            continue
        r[pid]["Interface | Load | In"] = metric_map[rq_mo][rq_iface]["Interface | Load | In"]
        r[pid]["Interface | Load | Out"] = metric_map[rq_mo][rq_iface]["Interface | Load | Out"]
    return r
def handle_remove(self, *args, **options):
    """
    Remove link

    :param args: Interface names whose links should be removed
    :param options:
    :return:
    """
    # Unknown interface names are silently skipped
    for name in args:
        iface = Interface.get_interface(name)
        if iface:
            iface.unlink()
def get_interfaces(self):
    """
    Build interface counts

    :return: managed object id -> {"n_interfaces": physical interface count}
    """
    pipeline = [
        {"$match": {"type": "physical"}},
        {"$group": {"_id": "$managed_object", "total": {"$sum": 1}}},
    ]
    return {
        row["_id"]: {"n_interfaces": row["total"]}
        for row in Interface._get_collection().aggregate(pipeline)
    }
def refresh_interface_profiles(sp_id, ip_id):
    """
    Re-apply interface profile *ip_id* to all interfaces bound to services
    whose service profile is *sp_id*.

    :param sp_id: Service profile id
    :param ip_id: Interface profile id to set
    """
    from .service import Service
    from noc.inv.models.interface import Interface
    from pymongo import UpdateMany

    svc = [
        x["_id"]
        for x in Service._get_collection().find({"profile": sp_id}, {"_id": 1})
    ]
    if not svc:
        return
    collection = Interface._get_collection()
    # BUG FIX: the previous code matched {"_id": {"$in": svc}} -- comparing
    # interface _ids against *service* ids, which can never match -- and used
    # UpdateOne, which updates at most one document. Interfaces reference
    # services through their "service" field, and every bound interface must
    # be updated.
    collection.bulk_write(
        [UpdateMany({"service": {"$in": svc}}, {"$set": {"profile": ip_id}})],
        ordered=False,
    )
def maybe_create_interface(self, mo: ManagedObject, name: str) -> Optional[Interface]:
    """
    Auto-create remote interface, if possible

    :param mo: Managed object owning the interface
    :param name: Interface name to create
    :return: Newly created interface, or None when auto-creation is not applicable
    """
    if self.object.object_profile.ifdesc_symmetric:
        # Meaningless for symmetric ifdesc
        return None
    creation_blocked = (
        mo.object_profile.enable_box_discovery_interface
        or not mo.object_profile.enable_interface_autocreation
    )
    if creation_blocked:
        # Auto-creation is disabled
        return None
    # Create interface
    self.logger.info("Auto-creating interface %s:%s", mo.name, name)
    iface = Interface(managed_object=mo, type="physical", name=name)
    iface.save()
    # Keep the per-object interface cache coherent
    if mo.id in self.if_cache:
        self.if_cache[mo.id][iface.name] = iface
    return iface
def _apply_interfaces(mo: ManagedObject, r):
    """
    Fill r["interfaces"] with serialized interface records for managed object *mo*.

    Collects interfaces, subinterfaces, links and bound services, builds an
    interface-id lookup cache (including remote link endpoints) and delegates
    per-interface serialization to ManagedObjectDataStream._get_interface.

    :param mo: Managed object
    :param r: Result dict to mutate in place
    """
    # id -> (object id, name)
    ifcache = {}
    # Get interfaces, naturally sorted by name (alnum_key: Gi0/2 < Gi0/10)
    interfaces = sorted(
        Interface._get_collection().find({"managed_object": mo.id}),
        key=lambda x: alnum_key(x["name"]),
    )
    # Populate cache
    for i in interfaces:
        ifcache[i["_id"]] = (i["managed_object"], i["name"])
    # Get subs: interface id -> list of subinterface documents
    subs = defaultdict(list)
    for s in SubInterface._get_collection().find({"managed_object": mo.id}):
        subs[s["interface"]] += [s]
    # Get links: interface id -> list of link documents
    links = defaultdict(list)
    for link in Link._get_collection().find({"linked_objects": mo.id}):
        for li in link.get("interfaces", []):
            links[li] += [link]
    # Populate cache with linked interfaces (remote endpoints may belong to
    # other managed objects and are not in *interfaces*)
    if links:
        for i in Interface._get_collection().find(
            {"_id": {"$in": list(links)}}, {"_id": 1, "managed_object": 1, "name": 1}
        ):
            ifcache[i["_id"]] = (i["managed_object"], i["name"])
    # Get services bound to interfaces
    svc_ids = [i["service"] for i in interfaces if i.get("service")]
    if svc_ids:
        services = {svc.id: svc for svc in Service.objects.filter(id__in=svc_ids)}
    else:
        services = {}
    # Populate
    r["interfaces"] = [
        ManagedObjectDataStream._get_interface(
            i, subs[i["_id"]], links[i["_id"]], ifcache, set(mo.data.uplinks), services
        )
        for i in interfaces
    ]
def handle_show(self, *args, **options):
    """
    Show links. Given interface names, show the link of each named
    interface; with no arguments, show every link.
    """
    show_method = options.get("show_method")
    if not args:
        # Show all links
        for link in Link.objects.all():
            self.show_link(link, show_method)
        return
    # Show single link per named interface
    for name in args:
        iface = Interface.get_interface(name)
        if not iface:
            continue
        link = Link.objects.filter(interfaces=iface.id).first()
        if link:
            self.show_link(link, show_method)
def submit_interface(
    self,
    name,
    type,
    mac=None,
    description=None,
    aggregated_interface=None,
    enabled_protocols=None,
    ifindex=None,
):
    """
    Create or update an interface record from discovery data
    (legacy variant; queries the interface directly by managed object and name).

    :param name: Interface name
    :param type: Interface type (e.g. "physical")
    :param mac: Interface MAC address
    :param description: Interface description
    :param aggregated_interface: Parent LAG interface, if member
    :param enabled_protocols: List of enabled protocols (defaults to [])
    :param ifindex: SNMP ifIndex
    :return: The up-to-date Interface instance
    """
    enabled_protocols = enabled_protocols or []
    iface = Interface.objects.filter(managed_object=self.object.id, name=name).first()
    if iface:
        # Interface exists; apply only changed fields.
        # Empty ifindex is not treated as a change.
        changes = self.update_if_changed(
            iface,
            {
                "type": type,
                "mac": mac,
                "description": description,
                "aggregated_interface": aggregated_interface,
                "enabled_protocols": enabled_protocols,
                "ifindex": ifindex,
            },
            ignore_empty=["ifindex"],
        )
        self.log_changes("Interface '%s' has been changed" % name, changes)
    else:
        # Create interface
        self.info("Creating interface '%s'" % name)
        iface = Interface(
            managed_object=self.object.id,
            name=name,
            type=type,
            mac=mac,
            description=description,
            aggregated_interface=aggregated_interface,
            enabled_protocols=enabled_protocols,
            ifindex=ifindex,
        )
        iface.save()
    return iface
def handle_portmap(self, portmap_objects=None):
    """
    Print an NRI port mapping report for the selected managed objects.

    For every object matched by the selector expressions, convert each
    physical interface name to its remote (NRI) name and back, compare
    with the stored nri_name and print a status table.

    :param portmap_objects: Iterable of managed object selector expressions.
        Default changed from a mutable [] literal to None (shared mutable
        default pitfall); behavior is unchanged.
    """
    for po in portmap_objects or []:
        for o in ManagedObjectSelector.get_objects_from_expression(po):
            if not o.remote_system:
                self.stdout.write("%s (%s, %s) NRI: N/A\n" % (o.name, o.address, o.platform))
                continue
            portmapper = loader.get_loader(o.remote_system.name)(o)
            nri = o.remote_system.name
            self.stdout.write("%s (%s, %s) NRI: %s\n" % (o.name, o.address, o.platform, nri))
            r = []
            for i in Interface._get_collection().find(
                {"managed_object": o.id, "type": "physical"},
                {"_id": 1, "name": 1, "nri_name": 1},
            ):
                # Local -> remote; PORT_ERROR marks a failed conversion
                rn = portmapper.to_remote(i["name"]) or self.PORT_ERROR
                if rn == self.PORT_ERROR:
                    ln = self.PORT_ERROR
                else:
                    # Round-trip back to local for verification
                    ln = portmapper.to_local(rn) or self.PORT_ERROR
                if i.get("nri_name") == rn and ln != self.PORT_ERROR:
                    status = "OK"
                elif not i.get("nri_name") and ln != self.PORT_ERROR:
                    status = "Not in database"
                elif rn == self.PORT_ERROR:
                    status = "Failed to convert to remote name"
                else:
                    self.print(ln, rn, i.get("nri_name"))
                    status = "Failed to convert to local name"
                r += [(i["name"], rn, i.get("nri_name", "--"), status)]
            r = [("Local", "Remote", "Interface NRI", "Status")] + list(
                sorted(r, key=lambda x: alnum_key(x[0]))
            )
            self.stdout.write("%s\n" % format_table([0, 0, 0, 0], r, sep=" | ", hsep="-+-"))
def resolve_remote_interface(self, iface: Interface) -> Optional[Interface]:
    """
    Try to resolve the remote side of *iface* from its description.

    Attempts, in order: interface profile handler, interface profile
    patterns, object profile handler, object profile patterns. Only
    physical interfaces with a non-blank description are considered.

    :param iface: Interface whose description to resolve
    :return: Remote interface, or None when resolution fails
    """
    direction = "local" if iface.managed_object.id == self.object.id else "remote"
    # Guard: the description is the resolution source
    if not iface.description or not iface.description.strip():
        self.logger.info("%s interface %s has no description. Ignoring", direction, iface.name)
        return None
    # Guard: only physical interfaces can be linked
    if iface.type != "physical":
        self.logger.info(
            "%s interface %s has invalid type %s. Ignoring", direction, iface.name, iface.type
        )
        return None
    if_prof = iface.get_profile()
    obj_prof = self.object.object_profile
    # Resolution attempts, most specific configuration first
    attempts = (
        (self.resolve_via_handler, if_prof.ifdesc_handler),
        (self.resolve_via_patterns, if_prof.ifdesc_patterns),
        (self.resolve_via_handler, obj_prof.ifdesc_handler),
        (self.resolve_via_patterns, obj_prof.ifdesc_patterns),
    )
    for resolve, cfg in attempts:
        if not cfg:
            continue
        ri = resolve(cfg, iface)
        if ri:
            return ri
    # Not found
    return None
def _get_interface(object, name):
    # NOTE(review): this call is broken -- PyMongo's update() requires an
    # update document as its second argument, so this raises TypeError at
    # runtime. Nothing is $set/$unset, the *name* parameter is unused, and
    # the function name suggests a lookup rather than an update. The
    # intended operation cannot be determined from here; needs the author's
    # attention before any fix.
    Interface._get_collection().update({"managed_object": object.id})
def get_ajax_data(self, **kwargs):
    """
    Return per-interface metric values and statuses for the dashboard.

    Reads the encoded (id, managed object bi_id, interface name) triples
    from the "key" request argument, fetches the latest 30 minutes of
    interface metrics from ClickHouse, fills missing metrics with "-",
    then merges interface admin/oper status, speed and duplex from the
    Interface collection and current object statuses.

    :return: {"metrics": [...], "statuses": [...]}
    """
    # Parse query params
    query = self.decode_query(
        self.handler.get_argument("key")
    )  # type: List[Tuple[int, int, str]]
    # Get metrics for the last 30 minutes
    from_ts = datetime.datetime.now() - datetime.timedelta(seconds=1800)
    from_ts = from_ts.replace(microsecond=0)
    # argMax(value, ts) picks the most recent sample per group
    interface_sql = """
    SELECT
        managed_object,
        path[4] AS iface,
        argMax(load_in, ts) AS load_in,
        argMax(load_out, ts) AS load_out,
        argMax(packets_in, ts) AS packets_in,
        argMax(packets_out, ts) AS packets_out,
        argMax(errors_in, ts) AS errors_in,
        argMax(errors_out, ts) AS errors_out
    FROM interface
    WHERE
        date >= toDate('%s')
        AND ts >= toDateTime('%s')
        AND (%s)
    GROUP BY managed_object, iface
    """ % (
        from_ts.date().isoformat(),
        from_ts.isoformat(sep=" "),
        " OR ".join(
            "(managed_object=%d AND path[4]='%s')" % (q[1], q[2].replace("'", "''"))
            for q in query
        ),
    )
    # Get data
    metrics = []  # type: List[Tuple[int, str, str, str, str, str]]
    ch = ch_connection()
    try:
        for (
            mo,
            iface,
            load_in,
            load_out,
            packets_in,
            packets_out,
            errors_in,
            errors_out,
        ) in ch.execute(post=interface_sql):
            if_hash = str(bi_hash(iface))
            metrics += [
                # (mo, if_hash, "speed", self.humanize_metric(speed)),
                (mo, if_hash, "load_in", self.humanize_metric(load_in)),
                (mo, if_hash, "load_out", self.humanize_metric(load_out)),
                (mo, if_hash, "packets_in", self.humanize_metric(packets_in)),
                (mo, if_hash, "packets_out", self.humanize_metric(packets_out)),
                (mo, if_hash, "errors_in", self.humanize_metric(errors_in)),
                (mo, if_hash, "errors_out", self.humanize_metric(errors_out)),
            ]
    except ClickhouseError:
        # Best-effort: fall through and emit "-" defaults below
        pass
    # Set defaults for pairs ClickHouse returned nothing for
    m_index = set()  # type: Set[Tuple[int, str]]
    for mo_bi_id, iface, _, _ in metrics:
        m_index.add((int(mo_bi_id), iface))
    interface_metrics = {
        "speed",
        "load_in",
        "load_out",
        "packets_in",
        "packets_out",
        "errors_in",
        "errors_out",
    }
    for _, mo_bi_id, iface in query:
        if (int(mo_bi_id), str(bi_hash(iface))) not in m_index:
            for metric in interface_metrics:
                metrics += [(str(mo_bi_id), str(bi_hash(iface)), metric, "-")]
    # managed object id -> bi id
    mo_map = {q[0]: q[1] for q in query}  # type: Dict[int, int]
    # Get interface statuses
    for doc in Interface._get_collection().find(
        {"$or": [{"managed_object": q[0], "name": q[2]} for q in query]},
        {
            "_id": 0,
            "managed_object": 1,
            "name": 1,
            "admin_status": 1,
            "oper_status": 1,
            "in_speed": 1,
            "out_speed": 1,
            "full_duplex": 1,
        },
    ):
        mo = str(mo_map[doc["managed_object"]])
        if_hash = str(bi_hash(doc["name"]))
        # status: 0 = admin down/unknown, 1 = admin up/oper down, 2 = up/up
        status = 0
        if "admin_status" in doc and doc["admin_status"]:
            status = 2 if doc["oper_status"] else 1
        duplex = "-"
        if "full_duplex" in doc:
            duplex = "Full" if doc["full_duplex"] else "Half"
        speed = "-"
        if "in_speed" in doc:
            # Stored kbit/s -> bit/s for display
            speed = self.humanize_metric(doc["in_speed"] * 1000)
        metrics += [
            (mo, if_hash, "speed", speed),
            (mo, if_hash, "duplex", duplex),
            (mo, if_hash, "status", status),
        ]
    # Get current object statuses
    obj_statuses = ObjectStatus.get_statuses(list(mo_map))
    statuses = {str(mo_map[mo_id]): obj_statuses.get(mo_id, True) for mo_id in obj_statuses}
    return {"metrics": metrics, "statuses": list(statuses.items())}
def get_data(self, request, pool, obj_profile=None, **kwargs):
    """
    Build report rows listing managed objects with discovery problems.

    Flags, in priority order: unclassified (generic) profile, no
    interfaces, no links, no uplinks. Each object receives at most one
    problem label.

    :param request: HTTP request (access domains taken from its user)
    :param pool: Pool to restrict the report to
    :param obj_profile: Optional object profile filter
    :return: Rendered dataset
    """
    problems = {}  # mo.id -> problem text
    # Select managed objects of the pool; non-superusers are restricted to
    # their administrative domains. Chained filters replace the previous
    # four duplicated querysets with identical semantics.
    qs = ManagedObject.objects.filter(is_managed=True, pool=pool)
    if obj_profile:
        qs = qs.filter(object_profile=obj_profile)
    if not request.user.is_superuser:
        qs = qs.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    mos = {mo.id: mo for mo in qs}
    mos_set = set(mos)
    # Objects still on the generic (unclassified) profile
    for mo_id in mos:
        if mos[mo_id].profile.name == GENERIC_PROFILE:
            problems[mo_id] = _("Profile check failed")
    # interface id -> managed object id
    if_mo = {
        x["_id"]: x.get("managed_object")
        for x in Interface._get_collection().find({}, {"_id": 1, "managed_object": 1})
    }
    # Objects without interfaces
    # (.itervalues() was Python 2 only; .values() works on Python 3)
    for mo_id in mos_set - set(problems) - set(if_mo.values()):
        problems[mo_id] = _("No interfaces")
    # Objects without links
    linked_mos = set()
    for d in Link._get_collection().find({}):
        for i in d["interfaces"]:
            linked_mos.add(if_mo.get(i))
    for mo_id in mos_set - set(problems) - linked_mos:
        problems[mo_id] = _("No links")
    # Objects without uplinks
    uplinks = {}
    for d in ObjectData._get_collection().find():
        nu = len(d.get("uplinks", []))
        if nu:
            uplinks[d["_id"]] = nu
    for mo_id in mos_set - set(problems) - set(uplinks):
        problems[mo_id] = _("No uplinks")
    # Render rows
    data = []
    for mo_id in problems:
        if mo_id not in mos:
            continue
        mo = mos[mo_id]
        data += [
            [
                mo.name,
                mo.address,
                mo.profile.name,
                mo.platform.name if mo.platform else "",
                mo.segment.name if mo.segment else "",
                problems[mo_id],
            ]
        ]
    data = sorted(data)
    return self.from_dataset(
        title=self.title,
        columns=["Name", "Address", "Profile", "Platform", "Segment", "Problem"],
        data=data,
        enumerate=True,
    )
def handler(self):
    """
    Interface status discovery check.

    Fetches current status attributes (admin status, duplex, speeds,
    bandwidth) for the object's physical interfaces via the
    get_interface_status_ex script, applies the differences as a bulk
    write and updates oper status through set_oper_status.
    """
    def get_interface(name):
        """Resolve a script-reported name to a known interface, trying
        profile-generated alternative names as fallback.

        NOTE(review): the fallback reads ``i["interface"]`` from the
        enclosing loop variable, not *name* -- works only because the
        closure is called inside the result loop; confirm this is intended.
        """
        if_name = interfaces.get(name)
        if if_name:
            return if_name
        for iname in self.object.get_profile().get_interface_names(i["interface"]):
            if_name = interfaces.get(iname)
            if if_name:
                return if_name
        return None

    has_interfaces = "DB | Interfaces" in self.object.get_caps()
    if not has_interfaces:
        self.logger.info("No interfaces discovered. " "Skipping interface status check")
        return
    self.logger.info("Checking interface statuses")
    # name -> Interface, restricted to profiles with status discovery enabled
    interfaces = dict(
        (i.name, i)
        for i in Interface.objects.filter(
            managed_object=self.object.id,
            type="physical",
            profile__in=self.get_profiles(None),
            read_preference=ReadPreference.SECONDARY_PREFERRED,
        )
    )
    if not interfaces:
        self.logger.info("No interfaces with status discovery enabled. Skipping")
        return
    # Pass known ifindexes as hints to speed up the script; None when empty
    hints = [
        {"interface": key, "ifindex": v.ifindex}
        for key, v in six.iteritems(interfaces)
        if getattr(v, "ifindex", None) is not None
    ] or None
    result = self.object.scripts.get_interface_status_ex(interfaces=hints)
    collection = Interface._get_collection()
    bulk = []
    for i in result:
        iface = get_interface(i["interface"])
        if not iface:
            continue
        kwargs = {
            "admin_status": i.get("admin_status"),
            "full_duplex": i.get("full_duplex"),
            "in_speed": i.get("in_speed"),
            "out_speed": i.get("out_speed"),
            "bandwidth": i.get("bandwidth"),
        }
        # Missing values are ignored, changes accumulated into *bulk*
        changes = self.update_if_changed(
            iface, kwargs, ignore_empty=list(six.iterkeys(kwargs)), bulk=bulk
        )
        self.log_changes("Interface %s status has been changed" % i["interface"], changes)
        ostatus = i.get("oper_status")
        if iface.oper_status != ostatus and ostatus is not None:
            self.logger.info("[%s] set oper status to %s", i["interface"], ostatus)
            iface.set_oper_status(ostatus)
    if bulk:
        self.logger.info("Committing changes to database")
        try:
            collection.bulk_write(bulk, ordered=False)
            # 1 bulk operations complete in 0ms: inserted=0, updated=1, removed=0
            self.logger.info("Database has been synced")
        except BulkWriteError as e:
            self.logger.error("Bulk write error: '%s'", e.details)
def handler(self):
    """
    NRI service mapper discovery check.

    Matches services (by nri_port) against interfaces (by nri_name),
    binding/unbinding the interface "service" field and applying the
    service profile's interface profile. All writes are accumulated into
    one bulk write, then service summaries are refreshed.
    """
    self.logger.info("NRI Service Mapper")
    if not self.object.remote_system:
        self.logger.info("Created directly. No NRI integration. Skipping check")
        return
    if not self.object.remote_system.enable_service:
        self.logger.info("NRI does not provide link information. Skipping check")
        return
    # Check object has interfaces
    if not self.has_capability("DB | Interfaces"):
        self.logger.info("No interfaces discovered. Skipping interface status check")
        return
    # Get services related to Managed object
    scol = Service._get_collection()
    slist = [
        s
        for s in scol.find(
            {"managed_object": self.object.id, "nri_port": {"$exists": True}},
            {"_id": 1, "nri_port": 1, "profile": 1},
        )
    ]
    # nri_port -> service_id
    smap = {s["nri_port"]: s["_id"] for s in slist}
    # service id -> service profile
    prof_map = {s["_id"]: ServiceProfile.get_by_id(s["profile"]) for s in slist}
    icol = Interface._get_collection()
    nmap = {}  # nri_name -> interface document
    bulk = []
    for i in icol.find({"managed_object": self.object.id, "nri_name": {"$exists": True}}):
        if not i.get("nri_name"):
            continue
        if i["nri_name"] in smap:
            # A service claims this interface
            svc = smap[i["nri_name"]]
            p = prof_map.get(svc)
            if svc != i.get("service"):
                # Bind service (and profile, when configured)
                self.logger.info("Binding service %s to interface %s", svc, i["name"])
                op = {"service": svc}
                if p and p.interface_profile:
                    op["profile"] = p.interface_profile.id
                bulk += [UpdateOne({"_id": i["_id"]}, {"$set": op})]
            elif p and p.interface_profile and p.interface_profile.id != i["profile"]:
                # Service already bound; only the profile drifted
                self.logger.info(
                    "Replace profile to %s on intertace %s", p.interface_profile, i["name"]
                )
                bulk += [
                    UpdateOne({"_id": i["_id"]}, {"$set": {"profile": p.interface_profile.id}})
                ]
            # Mark service as handled
            del smap[i["nri_name"]]
        elif i.get("service"):
            # No service claims it any more -- unbind and reset profile
            self.logger.info("Removing service %s from interface %s", i["service"], i["name"])
            op = {"$unset": {"service": ""}}
            if i["service"] in prof_map:
                op["$set"] = {"profile": InterfaceProfile.get_default_profile().id}
            bulk += [UpdateOne({"_id": i["_id"]}, op)]
        nmap[i["nri_name"]] = i
    # Report hanging interfaces: services left in smap were not bound above
    for n in smap:
        svc = smap[n]
        if n not in nmap:
            self.logger.info("Cannot bind service %s. Cannot find NRI interface %s", svc, n)
            continue
        i = nmap[n]
        self.logger.info("Binding service %s to interface %s", svc, i["name"])
        op = {"service": svc}
        p = prof_map.get(svc)
        if p:
            op["profile"] = p.interface_profile.id
        bulk += [UpdateOne({"_id": i["_id"]}, {"$set": op})]
    if bulk:
        self.logger.info("Sending %d updates", len(bulk))
        icol.bulk_write(bulk)
    # Recompute summaries and notify change tracking
    ServiceSummary.refresh_object(self.object.id)
    change_tracker.register([("managedobject", self.object.id)])